xref: /openbmc/linux/drivers/gpu/ipu-v3/ipu-common.c (revision 7051924f771722c6dd235e693742cda6488ac700)
1 /*
2  * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
3  * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License as published by the
7  * Free Software Foundation; either version 2 of the License, or (at your
8  * option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13  * for more details.
14  */
15 #include <linux/module.h>
16 #include <linux/export.h>
17 #include <linux/types.h>
18 #include <linux/reset.h>
19 #include <linux/platform_device.h>
20 #include <linux/err.h>
21 #include <linux/spinlock.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/clk.h>
26 #include <linux/list.h>
27 #include <linux/irq.h>
28 #include <linux/irqchip/chained_irq.h>
29 #include <linux/irqdomain.h>
30 #include <linux/of_device.h>
31 
32 #include <drm/drm_fourcc.h>
33 
34 #include <video/imx-ipu-v3.h>
35 #include "ipu-prv.h"
36 
37 static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
38 {
39 	return readl(ipu->cm_reg + offset);
40 }
41 
42 static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
43 {
44 	writel(value, ipu->cm_reg + offset);
45 }
46 
47 void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
48 {
49 	u32 val;
50 
51 	val = ipu_cm_read(ipu, IPU_SRM_PRI2);
52 	val |= 0x8;
53 	ipu_cm_write(ipu, val, IPU_SRM_PRI2);
54 }
55 EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update);
56 
57 enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
58 {
59 	switch (drm_fourcc) {
60 	case DRM_FORMAT_RGB565:
61 	case DRM_FORMAT_BGR565:
62 	case DRM_FORMAT_RGB888:
63 	case DRM_FORMAT_BGR888:
64 	case DRM_FORMAT_XRGB8888:
65 	case DRM_FORMAT_XBGR8888:
66 	case DRM_FORMAT_RGBX8888:
67 	case DRM_FORMAT_BGRX8888:
68 	case DRM_FORMAT_ARGB8888:
69 	case DRM_FORMAT_ABGR8888:
70 	case DRM_FORMAT_RGBA8888:
71 	case DRM_FORMAT_BGRA8888:
72 		return IPUV3_COLORSPACE_RGB;
73 	case DRM_FORMAT_YUYV:
74 	case DRM_FORMAT_UYVY:
75 	case DRM_FORMAT_YUV420:
76 	case DRM_FORMAT_YVU420:
77 		return IPUV3_COLORSPACE_YUV;
78 	default:
79 		return IPUV3_COLORSPACE_UNKNOWN;
80 	}
81 }
82 EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);
83 
84 enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
85 {
86 	switch (pixelformat) {
87 	case V4L2_PIX_FMT_YUV420:
88 	case V4L2_PIX_FMT_YVU420:
89 	case V4L2_PIX_FMT_UYVY:
90 	case V4L2_PIX_FMT_YUYV:
91 		return IPUV3_COLORSPACE_YUV;
92 	case V4L2_PIX_FMT_RGB32:
93 	case V4L2_PIX_FMT_BGR32:
94 	case V4L2_PIX_FMT_RGB24:
95 	case V4L2_PIX_FMT_BGR24:
96 	case V4L2_PIX_FMT_RGB565:
97 		return IPUV3_COLORSPACE_RGB;
98 	default:
99 		return IPUV3_COLORSPACE_UNKNOWN;
100 	}
101 }
102 EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
103 
/*
 * Claim IDMAC channel @num for exclusive use.
 *
 * Returns the channel on success, ERR_PTR(-ENODEV) if @num is out of
 * range (only channels 0..63 exist) or ERR_PTR(-EBUSY) if the channel
 * is already claimed.  Release with ipu_idmac_put().
 */
struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
{
	struct ipuv3_channel *channel;

	dev_dbg(ipu->dev, "%s %d\n", __func__, num);

	if (num > 63)
		return ERR_PTR(-ENODEV);

	mutex_lock(&ipu->channel_lock);

	channel = &ipu->channel[num];

	if (channel->busy) {
		channel = ERR_PTR(-EBUSY);
		goto out;
	}

	channel->busy = true;
	channel->num = num;

out:
	mutex_unlock(&ipu->channel_lock);

	return channel;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get);
131 
132 void ipu_idmac_put(struct ipuv3_channel *channel)
133 {
134 	struct ipu_soc *ipu = channel->ipu;
135 
136 	dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);
137 
138 	mutex_lock(&ipu->channel_lock);
139 
140 	channel->busy = false;
141 
142 	mutex_unlock(&ipu->channel_lock);
143 }
144 EXPORT_SYMBOL_GPL(ipu_idmac_put);
145 
/*
 * Bit for IDMAC channel @ch within its 32-bit control register (the 64
 * channels are split across pairs of registers, 32 channels each).
 * The argument is fully parenthesized so that expressions with lower
 * precedence than '&' (e.g. "a ? b : c") expand correctly.
 */
#define idma_mask(ch)			(1 << ((ch) & 0x1f))
147 
148 void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
149 		bool doublebuffer)
150 {
151 	struct ipu_soc *ipu = channel->ipu;
152 	unsigned long flags;
153 	u32 reg;
154 
155 	spin_lock_irqsave(&ipu->lock, flags);
156 
157 	reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
158 	if (doublebuffer)
159 		reg |= idma_mask(channel->num);
160 	else
161 		reg &= ~idma_mask(channel->num);
162 	ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));
163 
164 	spin_unlock_irqrestore(&ipu->lock, flags);
165 }
166 EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
167 
/*
 * Enable the IPU submodules selected by @mask in IPU_CONF.
 *
 * When a display interface is enabled, its counter release bit in
 * IPU_DISP_GEN is set first, before the module enable bits.
 * Always returns 0.
 */
int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
{
	unsigned long lock_flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, lock_flags);

	val = ipu_cm_read(ipu, IPU_DISP_GEN);

	if (mask & IPU_CONF_DI0_EN)
		val |= IPU_DI0_COUNTER_RELEASE;
	if (mask & IPU_CONF_DI1_EN)
		val |= IPU_DI1_COUNTER_RELEASE;

	ipu_cm_write(ipu, val, IPU_DISP_GEN);

	/* Set the module enable bits last. */
	val = ipu_cm_read(ipu, IPU_CONF);
	val |= mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, lock_flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_enable);
193 
/*
 * Disable the IPU submodules selected by @mask in IPU_CONF.
 *
 * Mirror image of ipu_module_enable(): the module enable bits are
 * cleared first, then any affected display interface counter release
 * bits in IPU_DISP_GEN.  Always returns 0.
 */
int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
{
	unsigned long lock_flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, lock_flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	val &= ~mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	val = ipu_cm_read(ipu, IPU_DISP_GEN);

	if (mask & IPU_CONF_DI0_EN)
		val &= ~IPU_DI0_COUNTER_RELEASE;
	if (mask & IPU_CONF_DI1_EN)
		val &= ~IPU_DI1_COUNTER_RELEASE;

	ipu_cm_write(ipu, val, IPU_DISP_GEN);

	spin_unlock_irqrestore(&ipu->lock, lock_flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_disable);
219 
220 int ipu_csi_enable(struct ipu_soc *ipu, int csi)
221 {
222 	return ipu_module_enable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
223 }
224 EXPORT_SYMBOL_GPL(ipu_csi_enable);
225 
226 int ipu_csi_disable(struct ipu_soc *ipu, int csi)
227 {
228 	return ipu_module_disable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
229 }
230 EXPORT_SYMBOL_GPL(ipu_csi_disable);
231 
232 int ipu_smfc_enable(struct ipu_soc *ipu)
233 {
234 	return ipu_module_enable(ipu, IPU_CONF_SMFC_EN);
235 }
236 EXPORT_SYMBOL_GPL(ipu_smfc_enable);
237 
238 int ipu_smfc_disable(struct ipu_soc *ipu)
239 {
240 	return ipu_module_disable(ipu, IPU_CONF_SMFC_EN);
241 }
242 EXPORT_SYMBOL_GPL(ipu_smfc_disable);
243 
244 int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
245 {
246 	struct ipu_soc *ipu = channel->ipu;
247 	unsigned int chno = channel->num;
248 
249 	return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
250 }
251 EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
252 
253 void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
254 {
255 	struct ipu_soc *ipu = channel->ipu;
256 	unsigned int chno = channel->num;
257 	unsigned long flags;
258 
259 	spin_lock_irqsave(&ipu->lock, flags);
260 
261 	/* Mark buffer as ready. */
262 	if (buf_num == 0)
263 		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
264 	else
265 		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
266 
267 	spin_unlock_irqrestore(&ipu->lock, flags);
268 }
269 EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
270 
271 int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
272 {
273 	struct ipu_soc *ipu = channel->ipu;
274 	u32 val;
275 	unsigned long flags;
276 
277 	spin_lock_irqsave(&ipu->lock, flags);
278 
279 	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
280 	val |= idma_mask(channel->num);
281 	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
282 
283 	spin_unlock_irqrestore(&ipu->lock, flags);
284 
285 	return 0;
286 }
287 EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
288 
289 bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
290 {
291 	return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
292 }
293 EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
294 
295 int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
296 {
297 	struct ipu_soc *ipu = channel->ipu;
298 	unsigned long timeout;
299 
300 	timeout = jiffies + msecs_to_jiffies(ms);
301 	while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
302 			idma_mask(channel->num)) {
303 		if (time_after(jiffies, timeout))
304 			return -ETIMEDOUT;
305 		cpu_relax();
306 	}
307 
308 	return 0;
309 }
310 EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
311 
312 int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms)
313 {
314 	unsigned long timeout;
315 
316 	timeout = jiffies + msecs_to_jiffies(ms);
317 	ipu_cm_write(ipu, BIT(irq % 32), IPU_INT_STAT(irq / 32));
318 	while (!(ipu_cm_read(ipu, IPU_INT_STAT(irq / 32) & BIT(irq % 32)))) {
319 		if (time_after(jiffies, timeout))
320 			return -ETIMEDOUT;
321 		cpu_relax();
322 	}
323 
324 	return 0;
325 }
326 EXPORT_SYMBOL_GPL(ipu_wait_interrupt);
327 
/*
 * Disable an IDMAC channel and clear its buffer-ready and
 * double-buffer state.  The IPU_GPR writes bracket the ready-bit
 * clears: they switch the BUFx_RDY registers into write-one-to-clear
 * mode and back (per the inline comments below).  Always returns 0.
 */
int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	/* Disable DMA channel(s) */
	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
	val &= ~idma_mask(channel->num);
	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

	/* Set channel buffers NOT to be ready */
	ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */

	if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
			idma_mask(channel->num)) {
		ipu_cm_write(ipu, idma_mask(channel->num),
			     IPU_CHA_BUF0_RDY(channel->num));
	}

	if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
			idma_mask(channel->num)) {
		ipu_cm_write(ipu, idma_mask(channel->num),
			     IPU_CHA_BUF1_RDY(channel->num));
	}

	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

	/* Reset the double buffer */
	val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
	val &= ~idma_mask(channel->num);
	ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
368 
369 static int ipu_memory_reset(struct ipu_soc *ipu)
370 {
371 	unsigned long timeout;
372 
373 	ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
374 
375 	timeout = jiffies + msecs_to_jiffies(1000);
376 	while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
377 		if (time_after(jiffies, timeout))
378 			return -ETIME;
379 		cpu_relax();
380 	}
381 
382 	return 0;
383 }
384 
/*
 * Per-SoC IPU description: offsets of the register/memory blocks
 * relative to the IPU base address, plus the IPU revision.
 */
struct ipu_devtype {
	const char *name;
	unsigned long cm_ofs;		/* common module registers */
	unsigned long cpmem_ofs;	/* channel parameter memory */
	unsigned long srm_ofs;
	unsigned long tpm_ofs;
	unsigned long disp0_ofs;	/* display interface 0 */
	unsigned long disp1_ofs;	/* display interface 1 */
	unsigned long dc_tmpl_ofs;	/* DC template memory */
	unsigned long vdi_ofs;
	enum ipuv3_type type;
};
397 
/* i.MX51: IPUv3EX block layout. */
static struct ipu_devtype ipu_type_imx51 = {
	.name = "IPUv3EX",
	.cm_ofs = 0x1e000000,
	.cpmem_ofs = 0x1f000000,
	.srm_ofs = 0x1f040000,
	.tpm_ofs = 0x1f060000,
	.disp0_ofs = 0x1e040000,
	.disp1_ofs = 0x1e048000,
	.dc_tmpl_ofs = 0x1f080000,
	.vdi_ofs = 0x1e068000,
	.type = IPUV3EX,
};
410 
/* i.MX53: IPUv3M block layout. */
static struct ipu_devtype ipu_type_imx53 = {
	.name = "IPUv3M",
	.cm_ofs = 0x06000000,
	.cpmem_ofs = 0x07000000,
	.srm_ofs = 0x07040000,
	.tpm_ofs = 0x07060000,
	.disp0_ofs = 0x06040000,
	.disp1_ofs = 0x06048000,
	.dc_tmpl_ofs = 0x07080000,
	.vdi_ofs = 0x06068000,
	.type = IPUV3M,
};
423 
/* i.MX6Q/D: IPUv3H block layout. */
static struct ipu_devtype ipu_type_imx6q = {
	.name = "IPUv3H",
	.cm_ofs = 0x00200000,
	.cpmem_ofs = 0x00300000,
	.srm_ofs = 0x00340000,
	.tpm_ofs = 0x00360000,
	.disp0_ofs = 0x00240000,
	.disp1_ofs = 0x00248000,
	.dc_tmpl_ofs = 0x00380000,
	.vdi_ofs = 0x00268000,
	.type = IPUV3H,
};
436 
/* Device-tree match table; .data points at the per-SoC description. */
static const struct of_device_id imx_ipu_dt_ids[] = {
	{ .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
	{ .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
	{ .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);
444 
445 static int ipu_submodules_init(struct ipu_soc *ipu,
446 		struct platform_device *pdev, unsigned long ipu_base,
447 		struct clk *ipu_clk)
448 {
449 	char *unit;
450 	int ret;
451 	struct device *dev = &pdev->dev;
452 	const struct ipu_devtype *devtype = ipu->devtype;
453 
454 	ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
455 	if (ret) {
456 		unit = "cpmem";
457 		goto err_cpmem;
458 	}
459 
460 	ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
461 			IPU_CONF_DI0_EN, ipu_clk);
462 	if (ret) {
463 		unit = "di0";
464 		goto err_di_0;
465 	}
466 
467 	ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
468 			IPU_CONF_DI1_EN, ipu_clk);
469 	if (ret) {
470 		unit = "di1";
471 		goto err_di_1;
472 	}
473 
474 	ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
475 			IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
476 	if (ret) {
477 		unit = "dc_template";
478 		goto err_dc;
479 	}
480 
481 	ret = ipu_dmfc_init(ipu, dev, ipu_base +
482 			devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
483 	if (ret) {
484 		unit = "dmfc";
485 		goto err_dmfc;
486 	}
487 
488 	ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
489 	if (ret) {
490 		unit = "dp";
491 		goto err_dp;
492 	}
493 
494 	ret = ipu_smfc_init(ipu, dev, ipu_base +
495 			devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
496 	if (ret) {
497 		unit = "smfc";
498 		goto err_smfc;
499 	}
500 
501 	return 0;
502 
503 err_smfc:
504 	ipu_dp_exit(ipu);
505 err_dp:
506 	ipu_dmfc_exit(ipu);
507 err_dmfc:
508 	ipu_dc_exit(ipu);
509 err_dc:
510 	ipu_di_exit(ipu, 1);
511 err_di_1:
512 	ipu_di_exit(ipu, 0);
513 err_di_0:
514 	ipu_cpmem_exit(ipu);
515 err_cpmem:
516 	dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
517 	return ret;
518 }
519 
520 static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
521 {
522 	unsigned long status;
523 	int i, bit, irq;
524 
525 	for (i = 0; i < num_regs; i++) {
526 
527 		status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
528 		status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
529 
530 		for_each_set_bit(bit, &status, 32) {
531 			irq = irq_linear_revmap(ipu->domain,
532 						regs[i] * 32 + bit);
533 			if (irq)
534 				generic_handle_irq(irq);
535 		}
536 	}
537 }
538 
/* Chained handler for the IPU "sync" upstream interrupt line. */
static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	static const int sync_regs[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14 };
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	ipu_irq_handle(ipu, sync_regs, ARRAY_SIZE(sync_regs));
	chained_irq_exit(chip, desc);
}
551 
/* Chained handler for the IPU "error" upstream interrupt line. */
static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	static const int err_regs[] = { 4, 5, 8, 9 };
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	ipu_irq_handle(ipu, err_regs, ARRAY_SIZE(err_regs));
	chained_irq_exit(chip, desc);
}
564 
565 int ipu_map_irq(struct ipu_soc *ipu, int irq)
566 {
567 	int virq;
568 
569 	virq = irq_linear_revmap(ipu->domain, irq);
570 	if (!virq)
571 		virq = irq_create_mapping(ipu->domain, irq);
572 
573 	return virq;
574 }
575 EXPORT_SYMBOL_GPL(ipu_map_irq);
576 
577 int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
578 		enum ipu_channel_irq irq_type)
579 {
580 	return ipu_map_irq(ipu, irq_type + channel->num);
581 }
582 EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
583 
/*
 * Tear down all submodules in the exact reverse order of
 * ipu_submodules_init().
 */
static void ipu_submodules_exit(struct ipu_soc *ipu)
{
	ipu_smfc_exit(ipu);
	ipu_dp_exit(ipu);
	ipu_dmfc_exit(ipu);
	ipu_dc_exit(ipu);
	ipu_di_exit(ipu, 1);
	ipu_di_exit(ipu, 0);
	ipu_cpmem_exit(ipu);
}
594 
/* device_for_each_child() callback: unregister one child platform device. */
static int platform_remove_devices_fn(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}
603 
/* Unregister every child platform device previously registered under @pdev. */
static void platform_device_unregister_children(struct platform_device *pdev)
{
	device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
}
608 
/* Template describing one client platform device to register. */
struct ipu_platform_reg {
	struct ipu_client_platformdata pdata;	/* copied to the child device */
	const char *name;			/* platform device name */
	int reg_offset;	/* CM-relative MMIO offset; 0 = no memory resource */
};
614 
/* Client devices registered on top of each IPU (see ipu_add_client_devices). */
static const struct ipu_platform_reg client_reg[] = {
	/* CRTC on DI0, using the DP sync background/foreground flows */
	{
		.pdata = {
			.di = 0,
			.dc = 5,
			.dp = IPU_DP_FLOW_SYNC_BG,
			.dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
			.dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
		},
		.name = "imx-ipuv3-crtc",
	}, {
		/* CRTC on DI1, DC-only (no DP flow, single DMA channel) */
		.pdata = {
			.di = 1,
			.dc = 1,
			.dp = -EINVAL,
			.dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
			.dma[1] = -EINVAL,
		},
		.name = "imx-ipuv3-crtc",
	}, {
		/* Camera on CSI0; gets the CSI0 register block as resource */
		.pdata = {
			.csi = 0,
			.dma[0] = IPUV3_CHANNEL_CSI0,
			.dma[1] = -EINVAL,
		},
		.reg_offset = IPU_CM_CSI0_REG_OFS,
		.name = "imx-ipuv3-camera",
	}, {
		/* Camera on CSI1; gets the CSI1 register block as resource */
		.pdata = {
			.csi = 1,
			.dma[0] = IPUV3_CHANNEL_CSI1,
			.dma[1] = -EINVAL,
		},
		.reg_offset = IPU_CM_CSI1_REG_OFS,
		.name = "imx-ipuv3-camera",
	},
};
652 
/* Serializes allocation of globally unique client platform-device ids. */
static DEFINE_MUTEX(ipu_client_id_mutex);
static int ipu_client_id;
655 
656 static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
657 {
658 	struct device *dev = ipu->dev;
659 	unsigned i;
660 	int id, ret;
661 
662 	mutex_lock(&ipu_client_id_mutex);
663 	id = ipu_client_id;
664 	ipu_client_id += ARRAY_SIZE(client_reg);
665 	mutex_unlock(&ipu_client_id_mutex);
666 
667 	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
668 		const struct ipu_platform_reg *reg = &client_reg[i];
669 		struct platform_device *pdev;
670 		struct resource res;
671 
672 		if (reg->reg_offset) {
673 			memset(&res, 0, sizeof(res));
674 			res.flags = IORESOURCE_MEM;
675 			res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset;
676 			res.end = res.start + PAGE_SIZE - 1;
677 			pdev = platform_device_register_resndata(dev, reg->name,
678 				id++, &res, 1, &reg->pdata, sizeof(reg->pdata));
679 		} else {
680 			pdev = platform_device_register_data(dev, reg->name,
681 				id++, &reg->pdata, sizeof(reg->pdata));
682 		}
683 
684 		if (IS_ERR(pdev))
685 			goto err_register;
686 	}
687 
688 	return 0;
689 
690 err_register:
691 	platform_device_unregister_children(to_platform_device(dev));
692 
693 	return ret;
694 }
695 
696 
/*
 * Set up the IPU irq domain: a linear domain of IPU_NUM_IRQS hardware
 * interrupts, served by one generic irq chip per 32 irqs.  Ack is done
 * by writing the status register (installed as ct->regs.ack),
 * mask/unmask via the control register.  Finally the two upstream
 * lines (sync and err) are chained to the demultiplexing handlers.
 *
 * Returns 0 on success or a negative errno; on failure the domain is
 * removed again.
 */
static int ipu_irq_init(struct ipu_soc *ipu)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	/* Per-chip bitmaps of hardware irqs with no consumer (gc->unused). */
	unsigned long unused[IPU_NUM_IRQS / 32] = {
		0x400100d0, 0xffe000fd,
		0x400100d0, 0xffe000fd,
		0x400100d0, 0xffe000fd,
		0x4077ffff, 0xffe7e1fd,
		0x23fffffe, 0x8880fff0,
		0xf98fe7d0, 0xfff81fff,
		0x400100d0, 0xffe000fd,
		0x00000000,
	};
	int ret, i;

	ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
					    &irq_generic_chip_ops, ipu);
	if (!ipu->domain) {
		dev_err(ipu->dev, "failed to add irq domain\n");
		return -ENODEV;
	}

	ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
					     handle_level_irq, 0,
					     IRQF_VALID, 0);
	if (ret < 0) {
		dev_err(ipu->dev, "failed to alloc generic irq chips\n");
		irq_domain_remove(ipu->domain);
		return ret;
	}

	/* One generic chip per 32-irq bank; wire its ack/mask registers. */
	for (i = 0; i < IPU_NUM_IRQS; i += 32) {
		gc = irq_get_domain_generic_chip(ipu->domain, i);
		gc->reg_base = ipu->cm_reg;
		gc->unused = unused[i / 32];
		ct = gc->chip_types;
		ct->chip.irq_ack = irq_gc_ack_set_bit;
		ct->chip.irq_mask = irq_gc_mask_clr_bit;
		ct->chip.irq_unmask = irq_gc_mask_set_bit;
		ct->regs.ack = IPU_INT_STAT(i / 32);
		ct->regs.mask = IPU_INT_CTRL(i / 32);
	}

	irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
	irq_set_handler_data(ipu->irq_sync, ipu);
	irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler);
	irq_set_handler_data(ipu->irq_err, ipu);

	return 0;
}
748 
749 static void ipu_irq_exit(struct ipu_soc *ipu)
750 {
751 	int i, irq;
752 
753 	irq_set_chained_handler(ipu->irq_err, NULL);
754 	irq_set_handler_data(ipu->irq_err, NULL);
755 	irq_set_chained_handler(ipu->irq_sync, NULL);
756 	irq_set_handler_data(ipu->irq_sync, NULL);
757 
758 	/* TODO: remove irq_domain_generic_chips */
759 
760 	for (i = 0; i < IPU_NUM_IRQS; i++) {
761 		irq = irq_linear_revmap(ipu->domain, i);
762 		if (irq)
763 			irq_dispose_mapping(irq);
764 	}
765 
766 	irq_domain_remove(ipu->domain);
767 }
768 
769 static int ipu_probe(struct platform_device *pdev)
770 {
771 	const struct of_device_id *of_id =
772 			of_match_device(imx_ipu_dt_ids, &pdev->dev);
773 	struct ipu_soc *ipu;
774 	struct resource *res;
775 	unsigned long ipu_base;
776 	int i, ret, irq_sync, irq_err;
777 	const struct ipu_devtype *devtype;
778 
779 	devtype = of_id->data;
780 
781 	irq_sync = platform_get_irq(pdev, 0);
782 	irq_err = platform_get_irq(pdev, 1);
783 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
784 
785 	dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
786 			irq_sync, irq_err);
787 
788 	if (!res || irq_sync < 0 || irq_err < 0)
789 		return -ENODEV;
790 
791 	ipu_base = res->start;
792 
793 	ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
794 	if (!ipu)
795 		return -ENODEV;
796 
797 	for (i = 0; i < 64; i++)
798 		ipu->channel[i].ipu = ipu;
799 	ipu->devtype = devtype;
800 	ipu->ipu_type = devtype->type;
801 
802 	spin_lock_init(&ipu->lock);
803 	mutex_init(&ipu->channel_lock);
804 
805 	dev_dbg(&pdev->dev, "cm_reg:   0x%08lx\n",
806 			ipu_base + devtype->cm_ofs);
807 	dev_dbg(&pdev->dev, "idmac:    0x%08lx\n",
808 			ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
809 	dev_dbg(&pdev->dev, "cpmem:    0x%08lx\n",
810 			ipu_base + devtype->cpmem_ofs);
811 	dev_dbg(&pdev->dev, "disp0:    0x%08lx\n",
812 			ipu_base + devtype->disp0_ofs);
813 	dev_dbg(&pdev->dev, "disp1:    0x%08lx\n",
814 			ipu_base + devtype->disp1_ofs);
815 	dev_dbg(&pdev->dev, "srm:      0x%08lx\n",
816 			ipu_base + devtype->srm_ofs);
817 	dev_dbg(&pdev->dev, "tpm:      0x%08lx\n",
818 			ipu_base + devtype->tpm_ofs);
819 	dev_dbg(&pdev->dev, "dc:       0x%08lx\n",
820 			ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
821 	dev_dbg(&pdev->dev, "ic:       0x%08lx\n",
822 			ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
823 	dev_dbg(&pdev->dev, "dmfc:     0x%08lx\n",
824 			ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
825 	dev_dbg(&pdev->dev, "vdi:      0x%08lx\n",
826 			ipu_base + devtype->vdi_ofs);
827 
828 	ipu->cm_reg = devm_ioremap(&pdev->dev,
829 			ipu_base + devtype->cm_ofs, PAGE_SIZE);
830 	ipu->idmac_reg = devm_ioremap(&pdev->dev,
831 			ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
832 			PAGE_SIZE);
833 
834 	if (!ipu->cm_reg || !ipu->idmac_reg)
835 		return -ENOMEM;
836 
837 	ipu->clk = devm_clk_get(&pdev->dev, "bus");
838 	if (IS_ERR(ipu->clk)) {
839 		ret = PTR_ERR(ipu->clk);
840 		dev_err(&pdev->dev, "clk_get failed with %d", ret);
841 		return ret;
842 	}
843 
844 	platform_set_drvdata(pdev, ipu);
845 
846 	ret = clk_prepare_enable(ipu->clk);
847 	if (ret) {
848 		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
849 		return ret;
850 	}
851 
852 	ipu->dev = &pdev->dev;
853 	ipu->irq_sync = irq_sync;
854 	ipu->irq_err = irq_err;
855 
856 	ret = ipu_irq_init(ipu);
857 	if (ret)
858 		goto out_failed_irq;
859 
860 	ret = device_reset(&pdev->dev);
861 	if (ret) {
862 		dev_err(&pdev->dev, "failed to reset: %d\n", ret);
863 		goto out_failed_reset;
864 	}
865 	ret = ipu_memory_reset(ipu);
866 	if (ret)
867 		goto out_failed_reset;
868 
869 	/* Set MCU_T to divide MCU access window into 2 */
870 	ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
871 			IPU_DISP_GEN);
872 
873 	ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
874 	if (ret)
875 		goto failed_submodules_init;
876 
877 	ret = ipu_add_client_devices(ipu, ipu_base);
878 	if (ret) {
879 		dev_err(&pdev->dev, "adding client devices failed with %d\n",
880 				ret);
881 		goto failed_add_clients;
882 	}
883 
884 	dev_info(&pdev->dev, "%s probed\n", devtype->name);
885 
886 	return 0;
887 
888 failed_add_clients:
889 	ipu_submodules_exit(ipu);
890 failed_submodules_init:
891 out_failed_reset:
892 	ipu_irq_exit(ipu);
893 out_failed_irq:
894 	clk_disable_unprepare(ipu->clk);
895 	return ret;
896 }
897 
898 static int ipu_remove(struct platform_device *pdev)
899 {
900 	struct ipu_soc *ipu = platform_get_drvdata(pdev);
901 
902 	platform_device_unregister_children(pdev);
903 	ipu_submodules_exit(ipu);
904 	ipu_irq_exit(ipu);
905 
906 	clk_disable_unprepare(ipu->clk);
907 
908 	return 0;
909 }
910 
/* Platform driver binding probe/remove to the DT match table above. */
static struct platform_driver imx_ipu_driver = {
	.driver = {
		.name = "imx-ipuv3",
		.of_match_table = imx_ipu_dt_ids,
	},
	.probe = ipu_probe,
	.remove = ipu_remove,
};
919 
/* Standard platform-driver registration and module metadata. */
module_platform_driver(imx_ipu_driver);

MODULE_ALIAS("platform:imx-ipuv3");
MODULE_DESCRIPTION("i.MX IPU v3 driver");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_LICENSE("GPL");
926