/* xref: /openbmc/linux/drivers/gpu/ipu-v3/ipu-common.c (revision 275876e2) */
/*
 * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */
#include <linux/module.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_device.h>

#include <drm/drm_fourcc.h>

#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"

static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
{
	return readl(ipu->cm_reg + offset);
}

static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
{
	writel(value, ipu->cm_reg + offset);
}

static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
{
	return readl(ipu->idmac_reg + offset);
}

static inline void ipu_idmac_write(struct ipu_soc *ipu, u32 value,
		unsigned offset)
{
	writel(value, ipu->idmac_reg + offset);
}

void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
{
	u32 val;

	val = ipu_cm_read(ipu, IPU_SRM_PRI2);
	val |= 0x8;
	ipu_cm_write(ipu, val, IPU_SRM_PRI2);
}
EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update);

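/*
 * Each IDMAC channel has one entry in the channel parameter memory (CPMEM);
 * ipu_get_cpmem() simply indexes that array by channel number. Illustrative
 * sketch of a typical caller (editor's example, not part of this driver),
 * using only helpers defined in this file:
 *
 *	struct ipu_ch_param __iomem *p = ipu_get_cpmem(channel);
 *
 *	ipu_ch_param_write_field(p, IPU_FIELD_NPB, 15);
 */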
struct ipu_ch_param __iomem *ipu_get_cpmem(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;

	return ipu->cpmem_base + channel->num;
}
EXPORT_SYMBOL_GPL(ipu_get_cpmem);

void ipu_cpmem_set_high_priority(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	struct ipu_ch_param __iomem *p = ipu_get_cpmem(channel);
	u32 val;

	if (ipu->ipu_type == IPUV3EX)
		ipu_ch_param_write_field(p, IPU_FIELD_ID, 1);

	val = ipu_idmac_read(ipu, IDMAC_CHA_PRI(channel->num));
	val |= 1 << (channel->num % 32);
	ipu_idmac_write(ipu, val, IDMAC_CHA_PRI(channel->num));
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);

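/*
 * The two field accessors below take a packed descriptor ("wbs"): bits [7:0]
 * hold the field width in bits and bits [31:8] hold the absolute bit
 * position, counted as word * 160 + bit, where each CPMEM word is 160 bits
 * wide (five 32-bit data words). Worked example, derived from the code: a
 * 13-bit field at bit 29 of word 1 would be encoded as
 * ((1 * 160 + 29) << 8) | 13, giving word = 1, bit = 29, size = 13, i = 0,
 * ofs = 29, with the upper bits spilling over into data[1].
 */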
void ipu_ch_param_write_field(struct ipu_ch_param __iomem *base, u32 wbs, u32 v)
{
	u32 bit = (wbs >> 8) % 160;
	u32 size = wbs & 0xff;
	u32 word = (wbs >> 8) / 160;
	u32 i = bit / 32;
	u32 ofs = bit % 32;
	u32 mask = (1 << size) - 1;
	u32 val;

	pr_debug("%s %d %d %d\n", __func__, word, bit, size);

	val = readl(&base->word[word].data[i]);
	val &= ~(mask << ofs);
	val |= v << ofs;
	writel(val, &base->word[word].data[i]);

	if ((bit + size - 1) / 32 > i) {
		val = readl(&base->word[word].data[i + 1]);
		val &= ~(mask >> (ofs ? (32 - ofs) : 0));
		val |= v >> (ofs ? (32 - ofs) : 0);
		writel(val, &base->word[word].data[i + 1]);
	}
}
EXPORT_SYMBOL_GPL(ipu_ch_param_write_field);

u32 ipu_ch_param_read_field(struct ipu_ch_param __iomem *base, u32 wbs)
{
	u32 bit = (wbs >> 8) % 160;
	u32 size = wbs & 0xff;
	u32 word = (wbs >> 8) / 160;
	u32 i = bit / 32;
	u32 ofs = bit % 32;
	u32 mask = (1 << size) - 1;
	u32 val = 0;

	pr_debug("%s %d %d %d\n", __func__, word, bit, size);

	val = (readl(&base->word[word].data[i]) >> ofs) & mask;

	if ((bit + size - 1) / 32 > i) {
		u32 tmp;
		tmp = readl(&base->word[word].data[i + 1]);
		tmp &= mask >> (ofs ? (32 - ofs) : 0);
		val |= tmp << (ofs ? (32 - ofs) : 0);
	}

	return val;
}
EXPORT_SYMBOL_GPL(ipu_ch_param_read_field);

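/*
 * The ipu_rgb component offsets are little-endian bit offsets from the LSB,
 * while the CPMEM OFSn fields apparently expect each component's offset from
 * the most significant end, hence the bits_per_pixel - length - offset
 * conversion below. Example with def_rgb_16 (RGB565), defined further down:
 * red (offset 11, length 5) gives ro = 16 - 5 - 11 = 0, green gives go = 5,
 * blue gives bo = 11.
 */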
int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem *p,
		const struct ipu_rgb *rgb)
{
	int bpp = 0, npb = 0, ro, go, bo, to;

	ro = rgb->bits_per_pixel - rgb->red.length - rgb->red.offset;
	go = rgb->bits_per_pixel - rgb->green.length - rgb->green.offset;
	bo = rgb->bits_per_pixel - rgb->blue.length - rgb->blue.offset;
	to = rgb->bits_per_pixel - rgb->transp.length - rgb->transp.offset;

	ipu_ch_param_write_field(p, IPU_FIELD_WID0, rgb->red.length - 1);
	ipu_ch_param_write_field(p, IPU_FIELD_OFS0, ro);
	ipu_ch_param_write_field(p, IPU_FIELD_WID1, rgb->green.length - 1);
	ipu_ch_param_write_field(p, IPU_FIELD_OFS1, go);
	ipu_ch_param_write_field(p, IPU_FIELD_WID2, rgb->blue.length - 1);
	ipu_ch_param_write_field(p, IPU_FIELD_OFS2, bo);

	if (rgb->transp.length) {
		ipu_ch_param_write_field(p, IPU_FIELD_WID3,
				rgb->transp.length - 1);
		ipu_ch_param_write_field(p, IPU_FIELD_OFS3, to);
	} else {
		ipu_ch_param_write_field(p, IPU_FIELD_WID3, 7);
		ipu_ch_param_write_field(p, IPU_FIELD_OFS3,
				rgb->bits_per_pixel);
	}

	switch (rgb->bits_per_pixel) {
	case 32:
		bpp = 0;
		npb = 15;
		break;
	case 24:
		bpp = 1;
		npb = 19;
		break;
	case 16:
		bpp = 3;
		npb = 31;
		break;
	case 8:
		bpp = 5;
		npb = 63;
		break;
	default:
		return -EINVAL;
	}
	ipu_ch_param_write_field(p, IPU_FIELD_BPP, bpp);
	ipu_ch_param_write_field(p, IPU_FIELD_NPB, npb);
	ipu_ch_param_write_field(p, IPU_FIELD_PFS, 7); /* rgb mode */

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_rgb);

int ipu_cpmem_set_format_passthrough(struct ipu_ch_param __iomem *p,
		int width)
{
	int bpp = 0, npb = 0;

	switch (width) {
	case 32:
		bpp = 0;
		npb = 15;
		break;
	case 24:
		bpp = 1;
		npb = 19;
		break;
	case 16:
		bpp = 3;
		npb = 31;
		break;
	case 8:
		bpp = 5;
		npb = 63;
		break;
	default:
		return -EINVAL;
	}

	ipu_ch_param_write_field(p, IPU_FIELD_BPP, bpp);
	ipu_ch_param_write_field(p, IPU_FIELD_NPB, npb);
	ipu_ch_param_write_field(p, IPU_FIELD_PFS, 6); /* raw mode */

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough);

void ipu_cpmem_set_yuv_interleaved(struct ipu_ch_param __iomem *p,
				   u32 pixel_format)
{
	switch (pixel_format) {
	case V4L2_PIX_FMT_UYVY:
		ipu_ch_param_write_field(p, IPU_FIELD_BPP, 3);    /* bits/pixel */
		ipu_ch_param_write_field(p, IPU_FIELD_PFS, 0xA);  /* pix format */
		ipu_ch_param_write_field(p, IPU_FIELD_NPB, 31);   /* burst size */
		break;
	case V4L2_PIX_FMT_YUYV:
		ipu_ch_param_write_field(p, IPU_FIELD_BPP, 3);    /* bits/pixel */
		ipu_ch_param_write_field(p, IPU_FIELD_PFS, 0x8);  /* pix format */
		ipu_ch_param_write_field(p, IPU_FIELD_NPB, 31);   /* burst size */
		break;
	}
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);

void ipu_cpmem_set_yuv_planar_full(struct ipu_ch_param __iomem *p,
		u32 pixel_format, int stride, int u_offset, int v_offset)
{
	switch (pixel_format) {
	case V4L2_PIX_FMT_YUV420:
		ipu_ch_param_write_field(p, IPU_FIELD_SLUV, (stride / 2) - 1);
		ipu_ch_param_write_field(p, IPU_FIELD_UBO, u_offset / 8);
		ipu_ch_param_write_field(p, IPU_FIELD_VBO, v_offset / 8);
		break;
	case V4L2_PIX_FMT_YVU420:
		ipu_ch_param_write_field(p, IPU_FIELD_SLUV, (stride / 2) - 1);
		ipu_ch_param_write_field(p, IPU_FIELD_UBO, v_offset / 8);
		ipu_ch_param_write_field(p, IPU_FIELD_VBO, u_offset / 8);
		break;
	}
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);

void ipu_cpmem_set_yuv_planar(struct ipu_ch_param __iomem *p, u32 pixel_format,
		int stride, int height)
{
	int u_offset, v_offset;
	int uv_stride = 0;

	switch (pixel_format) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
		uv_stride = stride / 2;
		u_offset = stride * height;
		v_offset = u_offset + (uv_stride * height / 2);
		ipu_cpmem_set_yuv_planar_full(p, pixel_format, stride,
				u_offset, v_offset);
		break;
	}
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);

static const struct ipu_rgb def_rgb_32 = {
	.red	= { .offset = 16, .length = 8, },
	.green	= { .offset =  8, .length = 8, },
	.blue	= { .offset =  0, .length = 8, },
	.transp = { .offset = 24, .length = 8, },
	.bits_per_pixel = 32,
};

static const struct ipu_rgb def_bgr_32 = {
	.red	= { .offset =  0, .length = 8, },
	.green	= { .offset =  8, .length = 8, },
	.blue	= { .offset = 16, .length = 8, },
	.transp = { .offset = 24, .length = 8, },
	.bits_per_pixel = 32,
};

static const struct ipu_rgb def_rgb_24 = {
	.red	= { .offset = 16, .length = 8, },
	.green	= { .offset =  8, .length = 8, },
	.blue	= { .offset =  0, .length = 8, },
	.transp = { .offset =  0, .length = 0, },
	.bits_per_pixel = 24,
};

static const struct ipu_rgb def_bgr_24 = {
	.red	= { .offset =  0, .length = 8, },
	.green	= { .offset =  8, .length = 8, },
	.blue	= { .offset = 16, .length = 8, },
	.transp = { .offset =  0, .length = 0, },
	.bits_per_pixel = 24,
};

static const struct ipu_rgb def_rgb_16 = {
	.red	= { .offset = 11, .length = 5, },
	.green	= { .offset =  5, .length = 6, },
	.blue	= { .offset =  0, .length = 5, },
	.transp = { .offset =  0, .length = 0, },
	.bits_per_pixel = 16,
};

static const struct ipu_rgb def_bgr_16 = {
	.red	= { .offset =  0, .length = 5, },
	.green	= { .offset =  5, .length = 6, },
	.blue	= { .offset = 11, .length = 5, },
	.transp = { .offset =  0, .length = 0, },
	.bits_per_pixel = 16,
};

#define Y_OFFSET(pix, x, y)	((x) + pix->width * (y))
#define U_OFFSET(pix, x, y)	((pix->width * pix->height) + \
					(pix->width * (y) / 4) + (x) / 2)
#define V_OFFSET(pix, x, y)	((pix->width * pix->height) + \
					(pix->width * pix->height / 4) + \
					(pix->width * (y) / 4) + (x) / 2)

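/*
 * The macros above compute plane offsets for a contiguous YUV420 buffer: the
 * Y plane is followed by a quarter-size U plane and then a quarter-size V
 * plane. Worked example for a hypothetical 320x240 frame at (x, y) = (0, 0):
 * Y_OFFSET = 0, U_OFFSET = 320 * 240 = 76800,
 * V_OFFSET = 76800 + 320 * 240 / 4 = 96000.
 */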
int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
		/* pix format */
		ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 2);
		/* burst size */
		ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 63);
		break;
	case DRM_FORMAT_UYVY:
		/* bits/pixel */
		ipu_ch_param_write_field(cpmem, IPU_FIELD_BPP, 3);
		/* pix format */
		ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 0xA);
		/* burst size */
		ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 31);
		break;
	case DRM_FORMAT_YUYV:
		/* bits/pixel */
		ipu_ch_param_write_field(cpmem, IPU_FIELD_BPP, 3);
		/* pix format */
		ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 0x8);
		/* burst size */
		ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 31);
		break;
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XBGR8888:
		ipu_cpmem_set_format_rgb(cpmem, &def_bgr_32);
		break;
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		ipu_cpmem_set_format_rgb(cpmem, &def_rgb_32);
		break;
	case DRM_FORMAT_BGR888:
		ipu_cpmem_set_format_rgb(cpmem, &def_bgr_24);
		break;
	case DRM_FORMAT_RGB888:
		ipu_cpmem_set_format_rgb(cpmem, &def_rgb_24);
		break;
	case DRM_FORMAT_RGB565:
		ipu_cpmem_set_format_rgb(cpmem, &def_rgb_16);
		break;
	case DRM_FORMAT_BGR565:
		ipu_cpmem_set_format_rgb(cpmem, &def_bgr_16);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);

/*
 * The V4L2 spec defines packed RGB formats in memory byte order, which from
 * the point of view of the IPU corresponds to little-endian words with the
 * first component in the least significant bits.
 * The DRM pixel formats and the IPU internal representation are ordered the
 * other way around, with the first named component in the most significant
 * bits. Further, the V4L2 formats are not well defined:
 *     http://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html
 * We choose the interpretation which matches GStreamer behavior.
 */
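/*
 * Example of the mapping below: V4L2_PIX_FMT_RGB24 stores the bytes R, G, B
 * in memory; read as a little-endian word that puts R in the least
 * significant byte, which DRM (naming components from the MSB down) calls
 * DRM_FORMAT_BGR888.
 */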
static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_RGB565:
		/*
		 * Here we choose the 'corrected' interpretation of RGBP, a
		 * little-endian 16-bit word with the red component at the most
		 * significant bits:
		 * g[2:0]b[4:0] r[4:0]g[5:3] <=> [15:0] R:G:B
		 */
		return DRM_FORMAT_RGB565;
	case V4L2_PIX_FMT_BGR24:
		/* B G R <=> [23:0] R:G:B */
		return DRM_FORMAT_RGB888;
	case V4L2_PIX_FMT_RGB24:
		/* R G B <=> [23:0] B:G:R */
		return DRM_FORMAT_BGR888;
	case V4L2_PIX_FMT_BGR32:
		/* B G R A <=> [31:0] A:R:G:B */
		return DRM_FORMAT_XRGB8888;
	case V4L2_PIX_FMT_RGB32:
		/* R G B A <=> [31:0] A:B:G:R */
		return DRM_FORMAT_XBGR8888;
	case V4L2_PIX_FMT_UYVY:
		return DRM_FORMAT_UYVY;
	case V4L2_PIX_FMT_YUYV:
		return DRM_FORMAT_YUYV;
	case V4L2_PIX_FMT_YUV420:
		return DRM_FORMAT_YUV420;
	case V4L2_PIX_FMT_YVU420:
		return DRM_FORMAT_YVU420;
	}

	return -EINVAL;
}

enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_BGRA8888:
		return IPUV3_COLORSPACE_RGB;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
		return IPUV3_COLORSPACE_YUV;
	default:
		return IPUV3_COLORSPACE_UNKNOWN;
	}
}
EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);

int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem,
		struct ipu_image *image)
{
	struct v4l2_pix_format *pix = &image->pix;
	int y_offset, u_offset, v_offset;

	pr_debug("%s: resolution: %dx%d stride: %d\n",
			__func__, pix->width, pix->height,
			pix->bytesperline);

	ipu_cpmem_set_resolution(cpmem, image->rect.width,
			image->rect.height);
	ipu_cpmem_set_stride(cpmem, pix->bytesperline);

	ipu_cpmem_set_fmt(cpmem, v4l2_pix_fmt_to_drm_fourcc(pix->pixelformat));

	switch (pix->pixelformat) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
		y_offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
		u_offset = U_OFFSET(pix, image->rect.left,
				image->rect.top) - y_offset;
		v_offset = V_OFFSET(pix, image->rect.left,
				image->rect.top) - y_offset;

		ipu_cpmem_set_yuv_planar_full(cpmem, pix->pixelformat,
				pix->bytesperline, u_offset, v_offset);
		ipu_cpmem_set_buffer(cpmem, 0, image->phys + y_offset);
		break;
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_YUYV:
		ipu_cpmem_set_buffer(cpmem, 0, image->phys +
				image->rect.left * 2 +
				image->rect.top * image->pix.bytesperline);
		break;
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_BGR32:
		ipu_cpmem_set_buffer(cpmem, 0, image->phys +
				image->rect.left * 4 +
				image->rect.top * image->pix.bytesperline);
		break;
	case V4L2_PIX_FMT_RGB565:
		ipu_cpmem_set_buffer(cpmem, 0, image->phys +
				image->rect.left * 2 +
				image->rect.top * image->pix.bytesperline);
		break;
	case V4L2_PIX_FMT_RGB24:
	case V4L2_PIX_FMT_BGR24:
		ipu_cpmem_set_buffer(cpmem, 0, image->phys +
				image->rect.left * 3 +
				image->rect.top * image->pix.bytesperline);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_image);

enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_YUYV:
		return IPUV3_COLORSPACE_YUV;
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_BGR32:
	case V4L2_PIX_FMT_RGB24:
	case V4L2_PIX_FMT_BGR24:
	case V4L2_PIX_FMT_RGB565:
		return IPUV3_COLORSPACE_RGB;
	default:
		return IPUV3_COLORSPACE_UNKNOWN;
	}
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);

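/*
 * Channels are reserved with ipu_idmac_get() and released with
 * ipu_idmac_put(). Illustrative sketch of a caller (editor's example, not
 * part of this driver); IPUV3_CHANNEL_MEM_BG_SYNC is one of the channel
 * numbers used in the client platform data below:
 *
 *	struct ipuv3_channel *ch;
 *
 *	ch = ipu_idmac_get(ipu, IPUV3_CHANNEL_MEM_BG_SYNC);
 *	if (IS_ERR(ch))
 *		return PTR_ERR(ch);
 *	...
 *	ipu_idmac_put(ch);
 */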
struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
{
	struct ipuv3_channel *channel;

	dev_dbg(ipu->dev, "%s %d\n", __func__, num);

	if (num > 63)
		return ERR_PTR(-ENODEV);

	mutex_lock(&ipu->channel_lock);

	channel = &ipu->channel[num];

	if (channel->busy) {
		channel = ERR_PTR(-EBUSY);
		goto out;
	}

	channel->busy = true;
	channel->num = num;

out:
	mutex_unlock(&ipu->channel_lock);

	return channel;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get);

void ipu_idmac_put(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;

	dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);

	mutex_lock(&ipu->channel_lock);

	channel->busy = false;

	mutex_unlock(&ipu->channel_lock);
}
EXPORT_SYMBOL_GPL(ipu_idmac_put);

591 
592 void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
593 		bool doublebuffer)
594 {
595 	struct ipu_soc *ipu = channel->ipu;
596 	unsigned long flags;
597 	u32 reg;
598 
599 	spin_lock_irqsave(&ipu->lock, flags);
600 
601 	reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
602 	if (doublebuffer)
603 		reg |= idma_mask(channel->num);
604 	else
605 		reg &= ~idma_mask(channel->num);
606 	ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));
607 
608 	spin_unlock_irqrestore(&ipu->lock, flags);
609 }
610 EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
611 
612 int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
613 {
614 	unsigned long lock_flags;
615 	u32 val;
616 
617 	spin_lock_irqsave(&ipu->lock, lock_flags);
618 
619 	val = ipu_cm_read(ipu, IPU_DISP_GEN);
620 
621 	if (mask & IPU_CONF_DI0_EN)
622 		val |= IPU_DI0_COUNTER_RELEASE;
623 	if (mask & IPU_CONF_DI1_EN)
624 		val |= IPU_DI1_COUNTER_RELEASE;
625 
626 	ipu_cm_write(ipu, val, IPU_DISP_GEN);
627 
628 	val = ipu_cm_read(ipu, IPU_CONF);
629 	val |= mask;
630 	ipu_cm_write(ipu, val, IPU_CONF);
631 
632 	spin_unlock_irqrestore(&ipu->lock, lock_flags);
633 
634 	return 0;
635 }
636 EXPORT_SYMBOL_GPL(ipu_module_enable);
637 
638 int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
639 {
640 	unsigned long lock_flags;
641 	u32 val;
642 
643 	spin_lock_irqsave(&ipu->lock, lock_flags);
644 
645 	val = ipu_cm_read(ipu, IPU_CONF);
646 	val &= ~mask;
647 	ipu_cm_write(ipu, val, IPU_CONF);
648 
649 	val = ipu_cm_read(ipu, IPU_DISP_GEN);
650 
651 	if (mask & IPU_CONF_DI0_EN)
652 		val &= ~IPU_DI0_COUNTER_RELEASE;
653 	if (mask & IPU_CONF_DI1_EN)
654 		val &= ~IPU_DI1_COUNTER_RELEASE;
655 
656 	ipu_cm_write(ipu, val, IPU_DISP_GEN);
657 
658 	spin_unlock_irqrestore(&ipu->lock, lock_flags);
659 
660 	return 0;
661 }
662 EXPORT_SYMBOL_GPL(ipu_module_disable);
663 
664 int ipu_csi_enable(struct ipu_soc *ipu, int csi)
665 {
666 	return ipu_module_enable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
667 }
668 EXPORT_SYMBOL_GPL(ipu_csi_enable);
669 
670 int ipu_csi_disable(struct ipu_soc *ipu, int csi)
671 {
672 	return ipu_module_disable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
673 }
674 EXPORT_SYMBOL_GPL(ipu_csi_disable);
675 
676 int ipu_smfc_enable(struct ipu_soc *ipu)
677 {
678 	return ipu_module_enable(ipu, IPU_CONF_SMFC_EN);
679 }
680 EXPORT_SYMBOL_GPL(ipu_smfc_enable);
681 
682 int ipu_smfc_disable(struct ipu_soc *ipu)
683 {
684 	return ipu_module_disable(ipu, IPU_CONF_SMFC_EN);
685 }
686 EXPORT_SYMBOL_GPL(ipu_smfc_disable);
687 
688 int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
689 {
690 	struct ipu_soc *ipu = channel->ipu;
691 	unsigned int chno = channel->num;
692 
693 	return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
694 }
695 EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
696 
697 void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
698 {
699 	struct ipu_soc *ipu = channel->ipu;
700 	unsigned int chno = channel->num;
701 	unsigned long flags;
702 
703 	spin_lock_irqsave(&ipu->lock, flags);
704 
705 	/* Mark buffer as ready. */
706 	if (buf_num == 0)
707 		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
708 	else
709 		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
710 
711 	spin_unlock_irqrestore(&ipu->lock, flags);
712 }
713 EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
714 
715 int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
716 {
717 	struct ipu_soc *ipu = channel->ipu;
718 	u32 val;
719 	unsigned long flags;
720 
721 	spin_lock_irqsave(&ipu->lock, flags);
722 
723 	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
724 	val |= idma_mask(channel->num);
725 	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
726 
727 	spin_unlock_irqrestore(&ipu->lock, flags);
728 
729 	return 0;
730 }
731 EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
732 
733 bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
734 {
735 	return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
736 }
737 EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
738 
739 int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
740 {
741 	struct ipu_soc *ipu = channel->ipu;
742 	unsigned long timeout;
743 
744 	timeout = jiffies + msecs_to_jiffies(ms);
745 	while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
746 			idma_mask(channel->num)) {
747 		if (time_after(jiffies, timeout))
748 			return -ETIMEDOUT;
749 		cpu_relax();
750 	}
751 
752 	return 0;
753 }
754 EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
755 
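/*
 * ipu_wait_interrupt() takes the raw IPU interrupt number (not a Linux
 * virq): it first clears any stale status bit for "irq" and then busy-waits,
 * with cpu_relax(), until the hardware sets the bit again or "ms"
 * milliseconds have elapsed.
 */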
int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms)
{
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(ms);
	ipu_cm_write(ipu, BIT(irq % 32), IPU_INT_STAT(irq / 32));
	while (!(ipu_cm_read(ipu, IPU_INT_STAT(irq / 32)) & BIT(irq % 32))) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_wait_interrupt);

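/*
 * Teardown sequence: stop the IDMAC channel, clear any pending BUF0/BUF1
 * ready bits (the surrounding IPU_GPR writes appear to gate the ready-bit
 * logic so that the clear takes effect), then take the channel out of
 * double buffer mode.
 */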
int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	/* Disable DMA channel(s) */
	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
	val &= ~idma_mask(channel->num);
	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

	/* Set channel buffers NOT to be ready */
	ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */

	if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
			idma_mask(channel->num)) {
		ipu_cm_write(ipu, idma_mask(channel->num),
			     IPU_CHA_BUF0_RDY(channel->num));
	}

	if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
			idma_mask(channel->num)) {
		ipu_cm_write(ipu, idma_mask(channel->num),
			     IPU_CHA_BUF1_RDY(channel->num));
	}

	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

	/* Reset the double buffer */
	val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
	val &= ~idma_mask(channel->num);
	ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);

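/*
 * Kick the IPU internal memory reset: bit 31 of IPU_MEM_RST looks like a
 * start/busy flag (it is what the loop polls), and the low 0x7FFFFF bits
 * presumably select the submodule memories to reset. Give the hardware up
 * to one second to finish.
 */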
static int ipu_memory_reset(struct ipu_soc *ipu)
{
	unsigned long timeout;

	ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);

	timeout = jiffies + msecs_to_jiffies(1000);
	while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
		if (time_after(jiffies, timeout))
			return -ETIME;
		cpu_relax();
	}

	return 0;
}

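/*
 * Per-SoC register layout. All *_ofs values are byte offsets from the IPU's
 * physical base address (the MEM resource in ipu_probe()); they are added to
 * ipu_base before the individual sub-blocks are ioremapped or passed to the
 * submodule init functions.
 */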
struct ipu_devtype {
	const char *name;
	unsigned long cm_ofs;
	unsigned long cpmem_ofs;
	unsigned long srm_ofs;
	unsigned long tpm_ofs;
	unsigned long disp0_ofs;
	unsigned long disp1_ofs;
	unsigned long dc_tmpl_ofs;
	unsigned long vdi_ofs;
	enum ipuv3_type type;
};

static struct ipu_devtype ipu_type_imx51 = {
	.name = "IPUv3EX",
	.cm_ofs = 0x1e000000,
	.cpmem_ofs = 0x1f000000,
	.srm_ofs = 0x1f040000,
	.tpm_ofs = 0x1f060000,
	.disp0_ofs = 0x1e040000,
	.disp1_ofs = 0x1e048000,
	.dc_tmpl_ofs = 0x1f080000,
	.vdi_ofs = 0x1e068000,
	.type = IPUV3EX,
};

static struct ipu_devtype ipu_type_imx53 = {
	.name = "IPUv3M",
	.cm_ofs = 0x06000000,
	.cpmem_ofs = 0x07000000,
	.srm_ofs = 0x07040000,
	.tpm_ofs = 0x07060000,
	.disp0_ofs = 0x06040000,
	.disp1_ofs = 0x06048000,
	.dc_tmpl_ofs = 0x07080000,
	.vdi_ofs = 0x06068000,
	.type = IPUV3M,
};

static struct ipu_devtype ipu_type_imx6q = {
	.name = "IPUv3H",
	.cm_ofs = 0x00200000,
	.cpmem_ofs = 0x00300000,
	.srm_ofs = 0x00340000,
	.tpm_ofs = 0x00360000,
	.disp0_ofs = 0x00240000,
	.disp1_ofs = 0x00248000,
	.dc_tmpl_ofs = 0x00380000,
	.vdi_ofs = 0x00268000,
	.type = IPUV3H,
};

static const struct of_device_id imx_ipu_dt_ids[] = {
	{ .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
	{ .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
	{ .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);

static int ipu_submodules_init(struct ipu_soc *ipu,
		struct platform_device *pdev, unsigned long ipu_base,
		struct clk *ipu_clk)
{
	char *unit;
	int ret;
	struct device *dev = &pdev->dev;
	const struct ipu_devtype *devtype = ipu->devtype;

	ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
			IPU_CONF_DI0_EN, ipu_clk);
	if (ret) {
		unit = "di0";
		goto err_di_0;
	}

	ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
			IPU_CONF_DI1_EN, ipu_clk);
	if (ret) {
		unit = "di1";
		goto err_di_1;
	}

	ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
			IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
	if (ret) {
		unit = "dc_template";
		goto err_dc;
	}

	ret = ipu_dmfc_init(ipu, dev, ipu_base +
			devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
	if (ret) {
		unit = "dmfc";
		goto err_dmfc;
	}

	ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
	if (ret) {
		unit = "dp";
		goto err_dp;
	}

	ret = ipu_smfc_init(ipu, dev, ipu_base +
			devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
	if (ret) {
		unit = "smfc";
		goto err_smfc;
	}

	return 0;

err_smfc:
	ipu_dp_exit(ipu);
err_dp:
	ipu_dmfc_exit(ipu);
err_dmfc:
	ipu_dc_exit(ipu);
err_dc:
	ipu_di_exit(ipu, 1);
err_di_1:
	ipu_di_exit(ipu, 0);
err_di_0:
	dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
	return ret;
}

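/*
 * Demultiplexer for the two chained parent interrupts: for each given 32-bit
 * status register, read the pending bits, mask them with the matching
 * control (enable) register and hand every remaining set bit to the virq
 * mapped in the IPU's linear irq domain.
 */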
static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
{
	unsigned long status;
	int i, bit, irq;

	for (i = 0; i < num_regs; i++) {

		status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
		status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));

		for_each_set_bit(bit, &status, 32) {
			irq = irq_linear_revmap(ipu->domain,
						regs[i] * 32 + bit);
			if (irq)
				generic_handle_irq(irq);
		}
	}
}

static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);

	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

	chained_irq_exit(chip, desc);
}

static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	const int int_reg[] = { 4, 5, 8, 9};
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);

	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

	chained_irq_exit(chip, desc);
}

int ipu_map_irq(struct ipu_soc *ipu, int irq)
{
	int virq;

	virq = irq_linear_revmap(ipu->domain, irq);
	if (!virq)
		virq = irq_create_mapping(ipu->domain, irq);

	return virq;
}
EXPORT_SYMBOL_GPL(ipu_map_irq);

int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
		enum ipu_channel_irq irq_type)
{
	return ipu_map_irq(ipu, irq_type + channel->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);

static void ipu_submodules_exit(struct ipu_soc *ipu)
{
	ipu_smfc_exit(ipu);
	ipu_dp_exit(ipu);
	ipu_dmfc_exit(ipu);
	ipu_dc_exit(ipu);
	ipu_di_exit(ipu, 1);
	ipu_di_exit(ipu, 0);
}

static int platform_remove_devices_fn(struct device *dev, void *unused)
{
	struct platform_device *pdev = to_platform_device(dev);

	platform_device_unregister(pdev);

	return 0;
}

static void platform_device_unregister_children(struct platform_device *pdev)
{
	device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
}

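/*
 * Static description of the child platform devices registered by
 * ipu_add_client_devices(): two imx-ipuv3-crtc devices (the display paths)
 * and two imx-ipuv3-camera devices (CSI0/CSI1). A non-zero reg_offset adds a
 * page-sized MEM resource at that offset within the CM register space to the
 * child device.
 */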
struct ipu_platform_reg {
	struct ipu_client_platformdata pdata;
	const char *name;
	int reg_offset;
};

static const struct ipu_platform_reg client_reg[] = {
	{
		.pdata = {
			.di = 0,
			.dc = 5,
			.dp = IPU_DP_FLOW_SYNC_BG,
			.dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
			.dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
		},
		.name = "imx-ipuv3-crtc",
	}, {
		.pdata = {
			.di = 1,
			.dc = 1,
			.dp = -EINVAL,
			.dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
			.dma[1] = -EINVAL,
		},
		.name = "imx-ipuv3-crtc",
	}, {
		.pdata = {
			.csi = 0,
			.dma[0] = IPUV3_CHANNEL_CSI0,
			.dma[1] = -EINVAL,
		},
		.reg_offset = IPU_CM_CSI0_REG_OFS,
		.name = "imx-ipuv3-camera",
	}, {
		.pdata = {
			.csi = 1,
			.dma[0] = IPUV3_CHANNEL_CSI1,
			.dma[1] = -EINVAL,
		},
		.reg_offset = IPU_CM_CSI1_REG_OFS,
		.name = "imx-ipuv3-camera",
	},
};

static DEFINE_MUTEX(ipu_client_id_mutex);
static int ipu_client_id;

static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
{
	struct device *dev = ipu->dev;
	unsigned i;
	int id, ret;

	mutex_lock(&ipu_client_id_mutex);
	id = ipu_client_id;
	ipu_client_id += ARRAY_SIZE(client_reg);
	mutex_unlock(&ipu_client_id_mutex);

	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
		const struct ipu_platform_reg *reg = &client_reg[i];
		struct platform_device *pdev;
		struct resource res;

		if (reg->reg_offset) {
			memset(&res, 0, sizeof(res));
			res.flags = IORESOURCE_MEM;
			res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset;
			res.end = res.start + PAGE_SIZE - 1;
			pdev = platform_device_register_resndata(dev, reg->name,
				id++, &res, 1, &reg->pdata, sizeof(reg->pdata));
		} else {
			pdev = platform_device_register_data(dev, reg->name,
				id++, &reg->pdata, sizeof(reg->pdata));
		}

		if (IS_ERR(pdev)) {
			ret = PTR_ERR(pdev);
			goto err_register;
		}
	}

	return 0;

err_register:
	platform_device_unregister_children(to_platform_device(dev));

	return ret;
}

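/*
 * IRQ setup: a linear irq domain with IPU_NUM_IRQS interrupts and one
 * generic irq chip per 32-bit IPU_INT_STAT/IPU_INT_CTRL register pair. The
 * "unused" table below marks the (presumably reserved) bits of each register
 * so the generic chip skips them. The two SoC-level interrupts, sync and
 * err, are installed as chained handlers feeding ipu_irq_handle().
 */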
static int ipu_irq_init(struct ipu_soc *ipu)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	unsigned long unused[IPU_NUM_IRQS / 32] = {
		0x400100d0, 0xffe000fd,
		0x400100d0, 0xffe000fd,
		0x400100d0, 0xffe000fd,
		0x4077ffff, 0xffe7e1fd,
		0x23fffffe, 0x8880fff0,
		0xf98fe7d0, 0xfff81fff,
		0x400100d0, 0xffe000fd,
		0x00000000,
	};
	int ret, i;

	ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
					    &irq_generic_chip_ops, ipu);
	if (!ipu->domain) {
		dev_err(ipu->dev, "failed to add irq domain\n");
		return -ENODEV;
	}

	ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
					     handle_level_irq, 0,
					     IRQF_VALID, 0);
	if (ret < 0) {
		dev_err(ipu->dev, "failed to alloc generic irq chips\n");
		irq_domain_remove(ipu->domain);
		return ret;
	}

	for (i = 0; i < IPU_NUM_IRQS; i += 32) {
		gc = irq_get_domain_generic_chip(ipu->domain, i);
		gc->reg_base = ipu->cm_reg;
		gc->unused = unused[i / 32];
		ct = gc->chip_types;
		ct->chip.irq_ack = irq_gc_ack_set_bit;
		ct->chip.irq_mask = irq_gc_mask_clr_bit;
		ct->chip.irq_unmask = irq_gc_mask_set_bit;
		ct->regs.ack = IPU_INT_STAT(i / 32);
		ct->regs.mask = IPU_INT_CTRL(i / 32);
	}

	irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
	irq_set_handler_data(ipu->irq_sync, ipu);
	irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler);
	irq_set_handler_data(ipu->irq_err, ipu);

	return 0;
}

static void ipu_irq_exit(struct ipu_soc *ipu)
{
	int i, irq;

	irq_set_chained_handler(ipu->irq_err, NULL);
	irq_set_handler_data(ipu->irq_err, NULL);
	irq_set_chained_handler(ipu->irq_sync, NULL);
	irq_set_handler_data(ipu->irq_sync, NULL);

	/* TODO: remove irq_domain_generic_chips */

	for (i = 0; i < IPU_NUM_IRQS; i++) {
		irq = irq_linear_revmap(ipu->domain, i);
		if (irq)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(ipu->domain);
}

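/*
 * Probe order: look up the devtype from the compatible string, grab the MEM
 * resource and the two interrupts (sync and err), ioremap the CM, IDMAC and
 * CPMEM windows, enable the bus clock, set up the irq domain, reset the IPU
 * (device reset plus internal memory reset), initialize the submodules and
 * finally register the client platform devices.
 */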
static int ipu_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(imx_ipu_dt_ids, &pdev->dev);
	struct ipu_soc *ipu;
	struct resource *res;
	unsigned long ipu_base;
	int i, ret, irq_sync, irq_err;
	const struct ipu_devtype *devtype;

	devtype = of_id->data;

	irq_sync = platform_get_irq(pdev, 0);
	irq_err = platform_get_irq(pdev, 1);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
			irq_sync, irq_err);

	if (!res || irq_sync < 0 || irq_err < 0)
		return -ENODEV;

	ipu_base = res->start;

	ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
	if (!ipu)
		return -ENOMEM;

	for (i = 0; i < 64; i++)
		ipu->channel[i].ipu = ipu;
	ipu->devtype = devtype;
	ipu->ipu_type = devtype->type;

	spin_lock_init(&ipu->lock);
	mutex_init(&ipu->channel_lock);

	dev_dbg(&pdev->dev, "cm_reg:   0x%08lx\n",
			ipu_base + devtype->cm_ofs);
	dev_dbg(&pdev->dev, "idmac:    0x%08lx\n",
			ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
	dev_dbg(&pdev->dev, "cpmem:    0x%08lx\n",
			ipu_base + devtype->cpmem_ofs);
	dev_dbg(&pdev->dev, "disp0:    0x%08lx\n",
			ipu_base + devtype->disp0_ofs);
	dev_dbg(&pdev->dev, "disp1:    0x%08lx\n",
			ipu_base + devtype->disp1_ofs);
	dev_dbg(&pdev->dev, "srm:      0x%08lx\n",
			ipu_base + devtype->srm_ofs);
	dev_dbg(&pdev->dev, "tpm:      0x%08lx\n",
			ipu_base + devtype->tpm_ofs);
	dev_dbg(&pdev->dev, "dc:       0x%08lx\n",
			ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
	dev_dbg(&pdev->dev, "ic:       0x%08lx\n",
			ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
	dev_dbg(&pdev->dev, "dmfc:     0x%08lx\n",
			ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
	dev_dbg(&pdev->dev, "vdi:      0x%08lx\n",
			ipu_base + devtype->vdi_ofs);

	ipu->cm_reg = devm_ioremap(&pdev->dev,
			ipu_base + devtype->cm_ofs, PAGE_SIZE);
	ipu->idmac_reg = devm_ioremap(&pdev->dev,
			ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
			PAGE_SIZE);
	ipu->cpmem_base = devm_ioremap(&pdev->dev,
			ipu_base + devtype->cpmem_ofs, PAGE_SIZE);

	if (!ipu->cm_reg || !ipu->idmac_reg || !ipu->cpmem_base)
		return -ENOMEM;

	ipu->clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(ipu->clk)) {
		ret = PTR_ERR(ipu->clk);
		dev_err(&pdev->dev, "clk_get failed with %d", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ipu);

	ret = clk_prepare_enable(ipu->clk);
	if (ret) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	ipu->dev = &pdev->dev;
	ipu->irq_sync = irq_sync;
	ipu->irq_err = irq_err;

	ret = ipu_irq_init(ipu);
	if (ret)
		goto out_failed_irq;

	ret = device_reset(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to reset: %d\n", ret);
		goto out_failed_reset;
	}
	ret = ipu_memory_reset(ipu);
	if (ret)
		goto out_failed_reset;

	/* Set MCU_T to divide MCU access window into 2 */
	ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
			IPU_DISP_GEN);

	ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
	if (ret)
		goto failed_submodules_init;

	ret = ipu_add_client_devices(ipu, ipu_base);
	if (ret) {
		dev_err(&pdev->dev, "adding client devices failed with %d\n",
				ret);
		goto failed_add_clients;
	}

	dev_info(&pdev->dev, "%s probed\n", devtype->name);

	return 0;

failed_add_clients:
	ipu_submodules_exit(ipu);
failed_submodules_init:
out_failed_reset:
	ipu_irq_exit(ipu);
out_failed_irq:
	clk_disable_unprepare(ipu->clk);
	return ret;
}

static int ipu_remove(struct platform_device *pdev)
{
	struct ipu_soc *ipu = platform_get_drvdata(pdev);

	platform_device_unregister_children(pdev);
	ipu_submodules_exit(ipu);
	ipu_irq_exit(ipu);

	clk_disable_unprepare(ipu->clk);

	return 0;
}

static struct platform_driver imx_ipu_driver = {
	.driver = {
		.name = "imx-ipuv3",
		.of_match_table = imx_ipu_dt_ids,
	},
	.probe = ipu_probe,
	.remove = ipu_remove,
};

module_platform_driver(imx_ipu_driver);

MODULE_ALIAS("platform:imx-ipuv3");
MODULE_DESCRIPTION("i.MX IPU v3 driver");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_LICENSE("GPL");