xref: /openbmc/linux/drivers/media/platform/ti/cal/cal.c (revision 5e0266f0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * TI Camera Access Layer (CAL) - Driver
4  *
5  * Copyright (c) 2015-2020 Texas Instruments Inc.
6  *
7  * Authors:
8  *	Benoit Parrot <bparrot@ti.com>
9  *	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
10  */
11 
12 #include <linux/clk.h>
13 #include <linux/interrupt.h>
14 #include <linux/mfd/syscon.h>
15 #include <linux/module.h>
16 #include <linux/of_device.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/regmap.h>
20 #include <linux/slab.h>
21 #include <linux/videodev2.h>
22 
23 #include <media/media-device.h>
24 #include <media/v4l2-async.h>
25 #include <media/v4l2-common.h>
26 #include <media/v4l2-device.h>
27 #include <media/videobuf2-core.h>
28 #include <media/videobuf2-dma-contig.h>
29 
30 #include "cal.h"
31 #include "cal_regs.h"
32 
33 MODULE_DESCRIPTION("TI CAL driver");
34 MODULE_AUTHOR("Benoit Parrot, <bparrot@ti.com>");
35 MODULE_LICENSE("GPL v2");
36 MODULE_VERSION("0.1.0");
37 
38 int cal_video_nr = -1;
39 module_param_named(video_nr, cal_video_nr, uint, 0644);
40 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
41 
42 unsigned int cal_debug;
43 module_param_named(debug, cal_debug, uint, 0644);
44 MODULE_PARM_DESC(debug, "activates debug info");
45 
/*
 * Select whether the Media Controller API is enabled by default: follow the
 * CONFIG_VIDEO_TI_CAL_MC Kconfig choice.
 */
#ifdef CONFIG_VIDEO_TI_CAL_MC
#define CAL_MC_API_DEFAULT 1
#else
#define CAL_MC_API_DEFAULT 0
#endif

/* Read-only at runtime (0444): the API mode can only be set at load time. */
bool cal_mc_api = CAL_MC_API_DEFAULT;
module_param_named(mc_api, cal_mc_api, bool, 0444);
MODULE_PARM_DESC(mc_api, "activates the MC API");
55 
56 /* ------------------------------------------------------------------
57  *	Format Handling
58  * ------------------------------------------------------------------
59  */
60 
/*
 * All pixel formats supported by the driver. Each entry maps a V4L2 fourcc
 * to its media bus code and bits-per-pixel. Looked up by
 * cal_format_by_fourcc() and cal_format_by_code() below.
 */
const struct cal_format_info cal_formats[] = {
	{
		.fourcc		= V4L2_PIX_FMT_YUYV,
		.code		= MEDIA_BUS_FMT_YUYV8_2X8,
		.bpp		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_UYVY,
		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
		.bpp		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_YVYU,
		.code		= MEDIA_BUS_FMT_YVYU8_2X8,
		.bpp		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_VYUY,
		.code		= MEDIA_BUS_FMT_VYUY8_2X8,
		.bpp		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
		.code		= MEDIA_BUS_FMT_RGB565_2X8_LE,
		.bpp		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
		.code		= MEDIA_BUS_FMT_RGB565_2X8_BE,
		.bpp		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
		.code		= MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
		.bpp		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
		.code		= MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
		.bpp		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_RGB24, /* rgb */
		.code		= MEDIA_BUS_FMT_RGB888_2X12_LE,
		.bpp		= 24,
	}, {
		.fourcc		= V4L2_PIX_FMT_BGR24, /* bgr */
		.code		= MEDIA_BUS_FMT_RGB888_2X12_BE,
		.bpp		= 24,
	}, {
		.fourcc		= V4L2_PIX_FMT_RGB32, /* argb */
		.code		= MEDIA_BUS_FMT_ARGB8888_1X32,
		.bpp		= 32,
	}, {
		.fourcc		= V4L2_PIX_FMT_SBGGR8,
		.code		= MEDIA_BUS_FMT_SBGGR8_1X8,
		.bpp		= 8,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGBRG8,
		.code		= MEDIA_BUS_FMT_SGBRG8_1X8,
		.bpp		= 8,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGRBG8,
		.code		= MEDIA_BUS_FMT_SGRBG8_1X8,
		.bpp		= 8,
	}, {
		.fourcc		= V4L2_PIX_FMT_SRGGB8,
		.code		= MEDIA_BUS_FMT_SRGGB8_1X8,
		.bpp		= 8,
	}, {
		.fourcc		= V4L2_PIX_FMT_SBGGR10,
		.code		= MEDIA_BUS_FMT_SBGGR10_1X10,
		.bpp		= 10,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGBRG10,
		.code		= MEDIA_BUS_FMT_SGBRG10_1X10,
		.bpp		= 10,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGRBG10,
		.code		= MEDIA_BUS_FMT_SGRBG10_1X10,
		.bpp		= 10,
	}, {
		.fourcc		= V4L2_PIX_FMT_SRGGB10,
		.code		= MEDIA_BUS_FMT_SRGGB10_1X10,
		.bpp		= 10,
	}, {
		.fourcc		= V4L2_PIX_FMT_SBGGR12,
		.code		= MEDIA_BUS_FMT_SBGGR12_1X12,
		.bpp		= 12,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGBRG12,
		.code		= MEDIA_BUS_FMT_SGBRG12_1X12,
		.bpp		= 12,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGRBG12,
		.code		= MEDIA_BUS_FMT_SGRBG12_1X12,
		.bpp		= 12,
	}, {
		.fourcc		= V4L2_PIX_FMT_SRGGB12,
		.code		= MEDIA_BUS_FMT_SRGGB12_1X12,
		.bpp		= 12,
	},
};

/* Number of entries in cal_formats[], exported for format enumeration. */
const unsigned int cal_num_formats = ARRAY_SIZE(cal_formats);
158 
159 const struct cal_format_info *cal_format_by_fourcc(u32 fourcc)
160 {
161 	unsigned int i;
162 
163 	for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) {
164 		if (cal_formats[i].fourcc == fourcc)
165 			return &cal_formats[i];
166 	}
167 
168 	return NULL;
169 }
170 
171 const struct cal_format_info *cal_format_by_code(u32 code)
172 {
173 	unsigned int i;
174 
175 	for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) {
176 		if (cal_formats[i].code == code)
177 			return &cal_formats[i];
178 	}
179 
180 	return NULL;
181 }
182 
183 /* ------------------------------------------------------------------
184  *	Platform Data
185  * ------------------------------------------------------------------
186  */
187 
/*
 * Per-SoC CAMERARX (CSI-2 PHY) descriptions. Each entry lists the bit
 * ranges of the PHY control fields in the camerrx control register
 * (presumably { lsb, msb } pairs - see struct cal_camerarx_data in cal.h
 * to confirm) and the number of lanes wired to that PHY instance.
 */
static const struct cal_camerarx_data dra72x_cal_camerarx[] = {
	{
		.fields = {
			[F_CTRLCLKEN] = { 10, 10 },
			[F_CAMMODE] = { 11, 12 },
			[F_LANEENABLE] = { 13, 16 },
			[F_CSI_MODE] = { 17, 17 },
		},
		.num_lanes = 4,
	},
	{
		.fields = {
			[F_CTRLCLKEN] = { 0, 0 },
			[F_CAMMODE] = { 1, 2 },
			[F_LANEENABLE] = { 3, 4 },
			[F_CSI_MODE] = { 5, 5 },
		},
		.num_lanes = 2,
	},
};

static const struct cal_data dra72x_cal_data = {
	.camerarx = dra72x_cal_camerarx,
	.num_csi2_phy = ARRAY_SIZE(dra72x_cal_camerarx),
};

/* Pre-ES2 DRA72x silicon needs the LDO disable workaround flag. */
static const struct cal_data dra72x_es1_cal_data = {
	.camerarx = dra72x_cal_camerarx,
	.num_csi2_phy = ARRAY_SIZE(dra72x_cal_camerarx),
	.flags = DRA72_CAL_PRE_ES2_LDO_DISABLE,
};

static const struct cal_camerarx_data dra76x_cal_csi_phy[] = {
	{
		.fields = {
			[F_CTRLCLKEN] = { 8, 8 },
			[F_CAMMODE] = { 9, 10 },
			[F_CSI_MODE] = { 11, 11 },
			[F_LANEENABLE] = { 27, 31 },
		},
		.num_lanes = 5,
	},
	{
		.fields = {
			[F_CTRLCLKEN] = { 0, 0 },
			[F_CAMMODE] = { 1, 2 },
			[F_CSI_MODE] = { 3, 3 },
			[F_LANEENABLE] = { 24, 26 },
		},
		.num_lanes = 3,
	},
};

static const struct cal_data dra76x_cal_data = {
	.camerarx = dra76x_cal_csi_phy,
	.num_csi2_phy = ARRAY_SIZE(dra76x_cal_csi_phy),
};

/* AM654 has a single CSI-2 PHY instance and no F_CSI_MODE field. */
static const struct cal_camerarx_data am654_cal_csi_phy[] = {
	{
		.fields = {
			[F_CTRLCLKEN] = { 15, 15 },
			[F_CAMMODE] = { 24, 25 },
			[F_LANEENABLE] = { 0, 4 },
		},
		.num_lanes = 5,
	},
};

static const struct cal_data am654_cal_data = {
	.camerarx = am654_cal_csi_phy,
	.num_csi2_phy = ARRAY_SIZE(am654_cal_csi_phy),
};
261 
262 /* ------------------------------------------------------------------
263  *	I/O Register Accessors
264  * ------------------------------------------------------------------
265  */
266 
/*
 * cal_quickdump_regs - Hex-dump the CAL and per-PHY register spaces
 * @cal: The CAL device
 *
 * Debug helper: print the whole CAL register block, then the register block
 * of every CSI-2 PHY instance, to the kernel log.
 */
void cal_quickdump_regs(struct cal_dev *cal)
{
	unsigned int i;

	cal_info(cal, "CAL Registers @ 0x%pa:\n", &cal->res->start);
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
		       (__force const void *)cal->base,
		       resource_size(cal->res), false);

	for (i = 0; i < cal->data->num_csi2_phy; ++i) {
		struct cal_camerarx *phy = cal->phy[i];

		cal_info(cal, "CSI2 Core %u Registers @ %pa:\n", i,
			 &phy->res->start);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
			       (__force const void *)phy->base,
			       resource_size(phy->res),
			       false);
	}
}
287 
288 /* ------------------------------------------------------------------
289  *	Context Management
290  * ------------------------------------------------------------------
291  */
292 
/* Number of pixel processing contexts available in the hardware. */
#define CAL_MAX_PIX_PROC 4

/*
 * cal_reserve_pix_proc - Reserve a free pixel processing context
 * @cal: The CAL device
 *
 * Atomically claim one of the CAL_MAX_PIX_PROC slots, using the v4l2_dev
 * lock to serialize access to the reservation mask.
 *
 * Return the reserved slot number (0-based) on success, or -ENOSPC if all
 * slots are in use.
 */
static int cal_reserve_pix_proc(struct cal_dev *cal)
{
	unsigned long ret;

	spin_lock(&cal->v4l2_dev.lock);

	/* find_first_zero_bit() returns CAL_MAX_PIX_PROC when the mask is full. */
	ret = find_first_zero_bit(&cal->reserved_pix_proc_mask, CAL_MAX_PIX_PROC);

	if (ret == CAL_MAX_PIX_PROC) {
		spin_unlock(&cal->v4l2_dev.lock);
		return -ENOSPC;
	}

	cal->reserved_pix_proc_mask |= BIT(ret);

	spin_unlock(&cal->v4l2_dev.lock);

	return ret;
}
314 
/*
 * cal_release_pix_proc - Return a pixel processing slot to the pool
 * @cal: The CAL device
 * @pix_proc_num: Slot number previously returned by cal_reserve_pix_proc()
 */
static void cal_release_pix_proc(struct cal_dev *cal, unsigned int pix_proc_num)
{
	spin_lock(&cal->v4l2_dev.lock);

	cal->reserved_pix_proc_mask &= ~BIT(pix_proc_num);

	spin_unlock(&cal->v4l2_dev.lock);
}
323 
/*
 * cal_ctx_csi2_config - Program the CSI-2 context register for a context
 * @ctx: The CAL context
 *
 * Configure the CAL_CSI2_CTX register of the context's PHY instance with
 * the context port, datatype filter, virtual channel, line count and
 * pixel/line packing attributes.
 */
static void cal_ctx_csi2_config(struct cal_ctx *ctx)
{
	u32 val;

	val = cal_read(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx));
	cal_set_field(&val, ctx->cport, CAL_CSI2_CTX_CPORT_MASK);
	/*
	 * DT type: MIPI CSI-2 Specs
	 *   0x1: All - DT filter is disabled
	 *  0x24: RGB888 1 pixel  = 3 bytes
	 *  0x2B: RAW10  4 pixels = 5 bytes
	 *  0x2A: RAW8   1 pixel  = 1 byte
	 *  0x1E: YUV422 2 pixels = 4 bytes
	 */
	cal_set_field(&val, ctx->datatype, CAL_CSI2_CTX_DT_MASK);
	cal_set_field(&val, ctx->vc, CAL_CSI2_CTX_VC_MASK);
	cal_set_field(&val, ctx->v_fmt.fmt.pix.height, CAL_CSI2_CTX_LINES_MASK);
	cal_set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
	cal_set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
		      CAL_CSI2_CTX_PACK_MODE_MASK);
	cal_write(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx), val);
	ctx_dbg(3, ctx, "CAL_CSI2_CTX(%u, %u) = 0x%08x\n",
		ctx->phy->instance, ctx->csi2_ctx,
		cal_read(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx)));
}
349 
/*
 * cal_ctx_pix_proc_config - Program the pixel processing context
 * @ctx: The CAL context
 *
 * Select the extraction and packing modes that match the format's bits per
 * pixel, then enable the pixel processing context reserved for @ctx.
 */
static void cal_ctx_pix_proc_config(struct cal_ctx *ctx)
{
	u32 val, extract, pack;

	switch (ctx->fmtinfo->bpp) {
	case 8:
		extract = CAL_PIX_PROC_EXTRACT_B8;
		pack = CAL_PIX_PROC_PACK_B8;
		break;
	case 10:
		extract = CAL_PIX_PROC_EXTRACT_B10_MIPI;
		pack = CAL_PIX_PROC_PACK_B16;
		break;
	case 12:
		extract = CAL_PIX_PROC_EXTRACT_B12_MIPI;
		pack = CAL_PIX_PROC_PACK_B16;
		break;
	case 16:
		extract = CAL_PIX_PROC_EXTRACT_B16_LE;
		pack = CAL_PIX_PROC_PACK_B16;
		break;
	default:
		/*
		 * If you see this warning then it means that you added
		 * some new entry in the cal_formats[] array with a different
		 * bits per pixel value than the ones supported above.
		 * Either add support for the new bpp value above or adjust
		 * the new entry to use one of the values above.
		 *
		 * Instead of failing here just use 8 bpp as a default.
		 */
		dev_warn_once(ctx->cal->dev,
			      "%s:%d:%s: bpp:%d unsupported! Overwritten with 8.\n",
			      __FILE__, __LINE__, __func__, ctx->fmtinfo->bpp);
		extract = CAL_PIX_PROC_EXTRACT_B8;
		pack = CAL_PIX_PROC_PACK_B8;
		break;
	}

	val = cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc));
	cal_set_field(&val, extract, CAL_PIX_PROC_EXTRACT_MASK);
	cal_set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
	cal_set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
	cal_set_field(&val, pack, CAL_PIX_PROC_PACK_MASK);
	cal_set_field(&val, ctx->cport, CAL_PIX_PROC_CPORT_MASK);
	cal_set_field(&val, 1, CAL_PIX_PROC_EN_MASK);
	cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), val);
	ctx_dbg(3, ctx, "CAL_PIX_PROC(%u) = 0x%08x\n", ctx->pix_proc,
		cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc)));
}
400 
/*
 * cal_ctx_wr_dma_config - Program the write DMA context
 * @ctx: The CAL context
 *
 * Configure the write DMA control, line offset (stride) and X size
 * registers from the current V4L2 pixel format. The DMA is not started
 * here; cal_ctx_wr_dma_enable() does that.
 */
static void cal_ctx_wr_dma_config(struct cal_ctx *ctx)
{
	unsigned int stride = ctx->v_fmt.fmt.pix.bytesperline;
	u32 val;

	val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx));
	cal_set_field(&val, ctx->cport, CAL_WR_DMA_CTRL_CPORT_MASK);
	cal_set_field(&val, ctx->v_fmt.fmt.pix.height,
		      CAL_WR_DMA_CTRL_YSIZE_MASK);
	cal_set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
		      CAL_WR_DMA_CTRL_DTAG_MASK);
	cal_set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
		      CAL_WR_DMA_CTRL_PATTERN_MASK);
	cal_set_field(&val, 1, CAL_WR_DMA_CTRL_STALL_RD_MASK);
	cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val);
	ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->dma_ctx,
		cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx)));

	/* The line offset is expressed in 128-bit (16-byte) units. */
	cal_write_field(ctx->cal, CAL_WR_DMA_OFST(ctx->dma_ctx),
			stride / 16, CAL_WR_DMA_OFST_MASK);
	ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->dma_ctx,
		cal_read(ctx->cal, CAL_WR_DMA_OFST(ctx->dma_ctx)));

	val = cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx));
	/* 64 bit word means no skipping */
	cal_set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
	/*
	 * The XSIZE field is expressed in 64-bit units and prevents overflows
	 * in case of synchronization issues by limiting the number of bytes
	 * written per line.
	 */
	cal_set_field(&val, stride / 8, CAL_WR_DMA_XSIZE_MASK);
	cal_write(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx), val);
	ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->dma_ctx,
		cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx)));
}
437 
/*
 * cal_ctx_set_dma_addr - Program the write DMA destination address
 * @ctx: The CAL context
 * @addr: DMA address of the destination buffer
 */
void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr)
{
	cal_write(ctx->cal, CAL_WR_DMA_ADDR(ctx->dma_ctx), addr);
}
442 
443 static void cal_ctx_wr_dma_enable(struct cal_ctx *ctx)
444 {
445 	u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx));
446 
447 	cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
448 		      CAL_WR_DMA_CTRL_MODE_MASK);
449 	cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val);
450 }
451 
452 static void cal_ctx_wr_dma_disable(struct cal_ctx *ctx)
453 {
454 	u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx));
455 
456 	cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_DIS,
457 		      CAL_WR_DMA_CTRL_MODE_MASK);
458 	cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val);
459 }
460 
461 static bool cal_ctx_wr_dma_stopped(struct cal_ctx *ctx)
462 {
463 	bool stopped;
464 
465 	spin_lock_irq(&ctx->dma.lock);
466 	stopped = ctx->dma.state == CAL_DMA_STOPPED;
467 	spin_unlock_irq(&ctx->dma.lock);
468 
469 	return stopped;
470 }
471 
472 static int
473 cal_get_remote_frame_desc_entry(struct cal_camerarx *phy,
474 				struct v4l2_mbus_frame_desc_entry *entry)
475 {
476 	struct v4l2_mbus_frame_desc fd;
477 	int ret;
478 
479 	ret = cal_camerarx_get_remote_frame_desc(phy, &fd);
480 	if (ret) {
481 		if (ret != -ENOIOCTLCMD)
482 			dev_err(phy->cal->dev,
483 				"Failed to get remote frame desc: %d\n", ret);
484 		return ret;
485 	}
486 
487 	if (fd.num_entries == 0) {
488 		dev_err(phy->cal->dev,
489 			"No streams found in the remote frame descriptor\n");
490 
491 		return -ENODEV;
492 	}
493 
494 	if (fd.num_entries > 1)
495 		dev_dbg(phy->cal->dev,
496 			"Multiple streams not supported in remote frame descriptor, using the first one\n");
497 
498 	*entry = fd.entry[0];
499 
500 	return 0;
501 }
502 
503 int cal_ctx_prepare(struct cal_ctx *ctx)
504 {
505 	struct v4l2_mbus_frame_desc_entry entry;
506 	int ret;
507 
508 	ret = cal_get_remote_frame_desc_entry(ctx->phy, &entry);
509 
510 	if (ret == -ENOIOCTLCMD) {
511 		ctx->vc = 0;
512 		ctx->datatype = CAL_CSI2_CTX_DT_ANY;
513 	} else if (!ret) {
514 		ctx_dbg(2, ctx, "Framedesc: len %u, vc %u, dt %#x\n",
515 			entry.length, entry.bus.csi2.vc, entry.bus.csi2.dt);
516 
517 		ctx->vc = entry.bus.csi2.vc;
518 		ctx->datatype = entry.bus.csi2.dt;
519 	} else {
520 		return ret;
521 	}
522 
523 	ctx->use_pix_proc = !ctx->fmtinfo->meta;
524 
525 	if (ctx->use_pix_proc) {
526 		ret = cal_reserve_pix_proc(ctx->cal);
527 		if (ret < 0) {
528 			ctx_err(ctx, "Failed to reserve pix proc: %d\n", ret);
529 			return ret;
530 		}
531 
532 		ctx->pix_proc = ret;
533 	}
534 
535 	return 0;
536 }
537 
/*
 * cal_ctx_unprepare - Release resources reserved by cal_ctx_prepare()
 * @ctx: The CAL context
 */
void cal_ctx_unprepare(struct cal_ctx *ctx)
{
	if (ctx->use_pix_proc)
		cal_release_pix_proc(ctx->cal, ctx->pix_proc);
}
543 
/*
 * cal_ctx_start - Start streaming on a context
 * @ctx: The CAL context
 *
 * Reset the per-VC frame/sequence counters if this is the first context on
 * the virtual channel, program the CSI-2, pixel processing and write DMA
 * contexts, enable the WDMA interrupts and start the DMA.
 */
void cal_ctx_start(struct cal_ctx *ctx)
{
	struct cal_camerarx *phy = ctx->phy;

	/*
	 * Reset the frame number & sequence number, but only if the
	 * virtual channel is not already in use.
	 */

	spin_lock(&phy->vc_lock);

	if (phy->vc_enable_count[ctx->vc]++ == 0) {
		phy->vc_frame_number[ctx->vc] = 0;
		phy->vc_sequence[ctx->vc] = 0;
	}

	spin_unlock(&phy->vc_lock);

	ctx->dma.state = CAL_DMA_RUNNING;

	/* Configure the CSI-2, pixel processing and write DMA contexts. */
	cal_ctx_csi2_config(ctx);
	if (ctx->use_pix_proc)
		cal_ctx_pix_proc_config(ctx);
	cal_ctx_wr_dma_config(ctx);

	/* Enable IRQ_WDMA_END and IRQ_WDMA_START. */
	cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(1),
		  CAL_HL_IRQ_WDMA_END_MASK(ctx->dma_ctx));
	cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(2),
		  CAL_HL_IRQ_WDMA_START_MASK(ctx->dma_ctx));

	cal_ctx_wr_dma_enable(ctx);
}
578 
/*
 * cal_ctx_stop - Stop streaming on a context
 * @ctx: The CAL context
 *
 * Drop the virtual channel use count, request a DMA stop and wait (up to
 * 500ms) for the IRQ handler to confirm it, then disable the WDMA
 * interrupts and the CSI-2 / pixel processing contexts.
 */
void cal_ctx_stop(struct cal_ctx *ctx)
{
	struct cal_camerarx *phy = ctx->phy;
	long timeout;

	/*
	 * NOTE(review): the counter is read outside vc_lock here; harmless
	 * for a diagnostic warning, but racy - confirm intentional.
	 */
	WARN_ON(phy->vc_enable_count[ctx->vc] == 0);

	spin_lock(&phy->vc_lock);
	phy->vc_enable_count[ctx->vc]--;
	spin_unlock(&phy->vc_lock);

	/*
	 * Request DMA stop and wait until it completes. If completion times
	 * out, forcefully disable the DMA.
	 */
	spin_lock_irq(&ctx->dma.lock);
	ctx->dma.state = CAL_DMA_STOP_REQUESTED;
	spin_unlock_irq(&ctx->dma.lock);

	timeout = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx),
				     msecs_to_jiffies(500));
	if (!timeout) {
		ctx_err(ctx, "failed to disable dma cleanly\n");
		cal_ctx_wr_dma_disable(ctx);
	}

	/* Disable IRQ_WDMA_END and IRQ_WDMA_START. */
	cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(1),
		  CAL_HL_IRQ_WDMA_END_MASK(ctx->dma_ctx));
	cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(2),
		  CAL_HL_IRQ_WDMA_START_MASK(ctx->dma_ctx));

	ctx->dma.state = CAL_DMA_STOPPED;

	/* Disable CSI2 context */
	cal_write(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx), 0);

	/* Disable pix proc */
	if (ctx->use_pix_proc)
		cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), 0);
}
620 
621 /* ------------------------------------------------------------------
622  *	IRQ Handling
623  * ------------------------------------------------------------------
624  */
625 
626 /*
627  * Track a sequence number for each virtual channel, which is shared by
628  * all contexts using the same virtual channel. This is done using the
629  * CSI-2 frame number as a base.
630  */
631 static void cal_update_seq_number(struct cal_ctx *ctx)
632 {
633 	struct cal_dev *cal = ctx->cal;
634 	struct cal_camerarx *phy = ctx->phy;
635 	u16 prev_frame_num, frame_num;
636 	u8 vc = ctx->vc;
637 
638 	frame_num =
639 		cal_read(cal, CAL_CSI2_STATUS(phy->instance, ctx->csi2_ctx)) &
640 		0xffff;
641 
642 	if (phy->vc_frame_number[vc] != frame_num) {
643 		prev_frame_num = phy->vc_frame_number[vc];
644 
645 		if (prev_frame_num >= frame_num)
646 			phy->vc_sequence[vc] += 1;
647 		else
648 			phy->vc_sequence[vc] += frame_num - prev_frame_num;
649 
650 		phy->vc_frame_number[vc] = frame_num;
651 	}
652 }
653 
/*
 * cal_irq_wdma_start - Handle a write DMA start-of-frame interrupt
 * @ctx: The CAL context
 *
 * Either latch a pending stop request by disabling the DMA, or queue the
 * next buffer's address to the hardware, then update the per-VC sequence
 * counter.
 */
static inline void cal_irq_wdma_start(struct cal_ctx *ctx)
{
	spin_lock(&ctx->dma.lock);

	if (ctx->dma.state == CAL_DMA_STOP_REQUESTED) {
		/*
		 * If a stop is requested, disable the write DMA context
		 * immediately. The CAL_WR_DMA_CTRL_j.MODE field is shadowed,
		 * the current frame will complete and the DMA will then stop.
		 */
		cal_ctx_wr_dma_disable(ctx);
		ctx->dma.state = CAL_DMA_STOP_PENDING;
	} else if (!list_empty(&ctx->dma.queue) && !ctx->dma.pending) {
		/*
		 * Otherwise, if a new buffer is available, queue it to the
		 * hardware.
		 */
		struct cal_buffer *buf;
		dma_addr_t addr;

		buf = list_first_entry(&ctx->dma.queue, struct cal_buffer,
				       list);
		addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		cal_ctx_set_dma_addr(ctx, addr);

		ctx->dma.pending = buf;
		list_del(&buf->list);
	}

	spin_unlock(&ctx->dma.lock);

	cal_update_seq_number(ctx);
}
687 
/*
 * cal_irq_wdma_end - Handle a write DMA end-of-frame interrupt
 * @ctx: The CAL context
 *
 * Complete a pending stop request by waking the waiter in cal_ctx_stop(),
 * and if a new buffer was queued at start-of-frame, hand the finished
 * buffer back to vb2 with timestamp, field and sequence number filled in.
 */
static inline void cal_irq_wdma_end(struct cal_ctx *ctx)
{
	struct cal_buffer *buf = NULL;

	spin_lock(&ctx->dma.lock);

	/* If the DMA context was stopping, it is now stopped. */
	if (ctx->dma.state == CAL_DMA_STOP_PENDING) {
		ctx->dma.state = CAL_DMA_STOPPED;
		wake_up(&ctx->dma.wait);
	}

	/* If a new buffer was queued, complete the current buffer. */
	if (ctx->dma.pending) {
		buf = ctx->dma.active;
		ctx->dma.active = ctx->dma.pending;
		ctx->dma.pending = NULL;
	}

	spin_unlock(&ctx->dma.lock);

	/* Complete the buffer outside the spinlock. */
	if (buf) {
		buf->vb.vb2_buf.timestamp = ktime_get_ns();
		buf->vb.field = ctx->v_fmt.fmt.pix.field;
		buf->vb.sequence = ctx->phy->vc_sequence[ctx->vc];

		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
	}
}
717 
/*
 * cal_irq_handle_wdma - Dispatch WDMA start/end events in the right order
 * @ctx: The CAL context
 * @start: A WDMA start interrupt is pending for this context
 * @end: A WDMA end interrupt is pending for this context
 */
static void cal_irq_handle_wdma(struct cal_ctx *ctx, bool start, bool end)
{
	/*
	 * CAL HW interrupts are inherently racy. If we get both start and end
	 * interrupts, we don't know what has happened: did the DMA for a single
	 * frame start and end, or did one frame end and a new frame start?
	 *
	 * Usually for normal pixel frames we get the interrupts separately. If
	 * we do get both, we have to guess. The assumption in the code below is
	 * that the active vertical area is larger than the blanking vertical
	 * area, and thus it is more likely that we get the end of the old frame
	 * and the start of a new frame.
	 *
	 * However, for embedded data, which is only a few lines high, we always
	 * get both interrupts. Here the assumption is that we get both for the
	 * same frame.
	 */
	if (ctx->v_fmt.fmt.pix.height < 10) {
		/* Embedded data heuristic: start and end belong to one frame. */
		if (start)
			cal_irq_wdma_start(ctx);

		if (end)
			cal_irq_wdma_end(ctx);
	} else {
		/* Pixel data heuristic: old frame ended, new frame started. */
		if (end)
			cal_irq_wdma_end(ctx);

		if (start)
			cal_irq_wdma_start(ctx);
	}
}
749 
/*
 * cal_irq - Top-level CAL interrupt handler
 * @irq_cal: IRQ number (unused)
 * @data: The cal_dev pointer passed at request_irq time
 *
 * Read and acknowledge the three HL IRQ status registers, report OCPO,
 * complex I/O and virtual channel errors (status bank 0), then dispatch
 * per-context WDMA end (bank 1) and start (bank 2) events.
 */
static irqreturn_t cal_irq(int irq_cal, void *data)
{
	struct cal_dev *cal = data;
	u32 status[3];
	unsigned int i;

	/* Read and immediately acknowledge all pending interrupt banks. */
	for (i = 0; i < 3; ++i) {
		status[i] = cal_read(cal, CAL_HL_IRQSTATUS(i));
		if (status[i])
			cal_write(cal, CAL_HL_IRQSTATUS(i), status[i]);
	}

	if (status[0]) {
		if (status[0] & CAL_HL_IRQ_OCPO_ERR_MASK)
			dev_err_ratelimited(cal->dev, "OCPO ERROR\n");

		for (i = 0; i < cal->data->num_csi2_phy; ++i) {
			if (status[0] & CAL_HL_IRQ_CIO_MASK(i)) {
				u32 cio_stat = cal_read(cal,
							CAL_CSI2_COMPLEXIO_IRQSTATUS(i));

				dev_err_ratelimited(cal->dev,
						    "CIO%u error: %#08x\n", i, cio_stat);

				cal_write(cal, CAL_CSI2_COMPLEXIO_IRQSTATUS(i),
					  cio_stat);
			}

			if (status[0] & CAL_HL_IRQ_VC_MASK(i)) {
				u32 vc_stat = cal_read(cal, CAL_CSI2_VC_IRQSTATUS(i));

				dev_err_ratelimited(cal->dev,
						    "CIO%u VC error: %#08x\n",
						    i, vc_stat);

				cal_write(cal, CAL_CSI2_VC_IRQSTATUS(i), vc_stat);
			}
		}
	}

	for (i = 0; i < cal->num_contexts; ++i) {
		bool end = !!(status[1] & CAL_HL_IRQ_WDMA_END_MASK(i));
		bool start = !!(status[2] & CAL_HL_IRQ_WDMA_START_MASK(i));

		if (start || end)
			cal_irq_handle_wdma(cal->ctx[i], start, end);
	}

	return IRQ_HANDLED;
}
800 
801 /* ------------------------------------------------------------------
802  *	Asynchronous V4L2 subdev binding
803  * ------------------------------------------------------------------
804  */
805 
/* Async subdev wrapper associating a v4l2_async_subdev with its PHY. */
struct cal_v4l2_async_subdev {
	struct v4l2_async_subdev asd; /* Must be first */
	struct cal_camerarx *phy;
};

/* Downcast a generic async subdev to the CAL-specific wrapper. */
static inline struct cal_v4l2_async_subdev *
to_cal_asd(struct v4l2_async_subdev *asd)
{
	return container_of(asd, struct cal_v4l2_async_subdev, asd);
}
816 
/*
 * cal_async_notifier_bound - Handle a source subdev binding to a PHY
 * @notifier: The async notifier
 * @subdev: The newly bound source subdev
 * @asd: The matching async subdev (a cal_v4l2_async_subdev)
 *
 * Record the subdev as the PHY's source and create an immutable, enabled
 * media link from the source pad to the CAMERARX sink pad.
 *
 * Return 0 on success (or when a source is already bound, which is only
 * logged) or a negative error code.
 */
static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_subdev *asd)
{
	struct cal_camerarx *phy = to_cal_asd(asd)->phy;
	int pad;
	int ret;

	/* Only one source per PHY; keep the first one that bound. */
	if (phy->source) {
		phy_info(phy, "Rejecting subdev %s (Already set!!)",
			 subdev->name);
		return 0;
	}

	phy->source = subdev;
	phy_dbg(1, phy, "Using source %s for capture\n", subdev->name);

	/* Find the source pad connected to the endpoint from the DT. */
	pad = media_entity_get_fwnode_pad(&subdev->entity,
					  of_fwnode_handle(phy->source_ep_node),
					  MEDIA_PAD_FL_SOURCE);
	if (pad < 0) {
		phy_err(phy, "Source %s has no connected source pad\n",
			subdev->name);
		return pad;
	}

	ret = media_create_pad_link(&subdev->entity, pad,
				    &phy->subdev.entity, CAL_CAMERARX_PAD_SINK,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret) {
		phy_err(phy, "Failed to create media link for source %s\n",
			subdev->name);
		return ret;
	}

	return 0;
}
855 
/*
 * cal_async_notifier_complete - All subdevs bound, register video devices
 * @notifier: The async notifier (embedded in cal_dev)
 *
 * Register the V4L2 video device of every context and, when the MC API is
 * enabled, the subdev device nodes. On failure, unregister the contexts
 * registered so far.
 *
 * Return 0 on success or a negative error code.
 */
static int cal_async_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct cal_dev *cal = container_of(notifier, struct cal_dev, notifier);
	unsigned int i;
	int ret;

	for (i = 0; i < cal->num_contexts; ++i) {
		ret = cal_ctx_v4l2_register(cal->ctx[i]);
		if (ret)
			goto err_ctx_unreg;
	}

	if (!cal_mc_api)
		return 0;

	ret = v4l2_device_register_subdev_nodes(&cal->v4l2_dev);
	if (ret)
		goto err_ctx_unreg;

	return 0;

err_ctx_unreg:
	/* Unwind only the contexts that were successfully registered. */
	for (; i > 0; --i) {
		if (!cal->ctx[i - 1])
			continue;

		cal_ctx_v4l2_unregister(cal->ctx[i - 1]);
	}

	return ret;
}
887 
/* Link sources at bind time, register video devices once all are bound. */
static const struct v4l2_async_notifier_operations cal_async_notifier_ops = {
	.bound = cal_async_notifier_bound,
	.complete = cal_async_notifier_complete,
};
892 
/*
 * cal_async_notifier_register - Set up and register the async notifier
 * @cal: The CAL device
 *
 * Add one async subdev entry per PHY that has a source node in the device
 * tree, then register the notifier with the V4L2 device. On any failure
 * the notifier is cleaned up before returning.
 *
 * Return 0 on success or a negative error code.
 */
static int cal_async_notifier_register(struct cal_dev *cal)
{
	unsigned int i;
	int ret;

	v4l2_async_nf_init(&cal->notifier);
	cal->notifier.ops = &cal_async_notifier_ops;

	for (i = 0; i < cal->data->num_csi2_phy; ++i) {
		struct cal_camerarx *phy = cal->phy[i];
		struct cal_v4l2_async_subdev *casd;
		struct fwnode_handle *fwnode;

		/* Skip PHYs with no source described in the device tree. */
		if (!phy->source_node)
			continue;

		fwnode = of_fwnode_handle(phy->source_node);
		casd = v4l2_async_nf_add_fwnode(&cal->notifier,
						fwnode,
						struct cal_v4l2_async_subdev);
		if (IS_ERR(casd)) {
			phy_err(phy, "Failed to add subdev to notifier\n");
			ret = PTR_ERR(casd);
			goto error;
		}

		casd->phy = phy;
	}

	ret = v4l2_async_nf_register(&cal->v4l2_dev, &cal->notifier);
	if (ret) {
		cal_err(cal, "Error registering async notifier\n");
		goto error;
	}

	return 0;

error:
	v4l2_async_nf_cleanup(&cal->notifier);
	return ret;
}
934 
/*
 * cal_async_notifier_unregister - Unregister and clean up the notifier
 * @cal: The CAL device
 */
static void cal_async_notifier_unregister(struct cal_dev *cal)
{
	v4l2_async_nf_unregister(&cal->notifier);
	v4l2_async_nf_cleanup(&cal->notifier);
}
940 
941 /* ------------------------------------------------------------------
942  *	Media and V4L2 device handling
943  * ------------------------------------------------------------------
944  */
945 
946 /*
947  * Register user-facing devices. To be called at the end of the probe function
948  * when all resources are initialized and ready.
949  */
950 static int cal_media_register(struct cal_dev *cal)
951 {
952 	int ret;
953 
954 	ret = media_device_register(&cal->mdev);
955 	if (ret) {
956 		cal_err(cal, "Failed to register media device\n");
957 		return ret;
958 	}
959 
960 	/*
961 	 * Register the async notifier. This may trigger registration of the
962 	 * V4L2 video devices if all subdevs are ready.
963 	 */
964 	ret = cal_async_notifier_register(cal);
965 	if (ret) {
966 		media_device_unregister(&cal->mdev);
967 		return ret;
968 	}
969 
970 	return 0;
971 }
972 
973 /*
974  * Unregister the user-facing devices, but don't free memory yet. To be called
975  * at the beginning of the remove function, to disallow access from userspace.
976  */
/*
 * Unregister the user-facing devices, but don't free memory yet. To be called
 * at the beginning of the remove function, to disallow access from userspace.
 */
static void cal_media_unregister(struct cal_dev *cal)
{
	unsigned int i;

	/* Unregister all the V4L2 video devices. */
	for (i = 0; i < cal->num_contexts; i++)
		cal_ctx_v4l2_unregister(cal->ctx[i]);

	cal_async_notifier_unregister(cal);
	media_device_unregister(&cal->mdev);
}
988 
989 /*
990  * Initialize the in-kernel objects. To be called at the beginning of the probe
991  * function, before the V4L2 device is used by the driver.
992  */
/*
 * Initialize the in-kernel objects. To be called at the beginning of the probe
 * function, before the V4L2 device is used by the driver.
 */
static int cal_media_init(struct cal_dev *cal)
{
	struct media_device *mdev = &cal->mdev;
	int ret;

	mdev->dev = cal->dev;
	mdev->hw_revision = cal->revision;
	strscpy(mdev->model, "CAL", sizeof(mdev->model));
	media_device_init(mdev);

	/*
	 * Initialize the V4L2 device (despite the function name, this performs
	 * initialization, not registration).
	 */
	cal->v4l2_dev.mdev = mdev;
	ret = v4l2_device_register(cal->dev, &cal->v4l2_dev);
	if (ret) {
		cal_err(cal, "Failed to register V4L2 device\n");
		return ret;
	}

	/* CAL DMA addressing is limited to 32 bits. */
	vb2_dma_contig_set_max_seg_size(cal->dev, DMA_BIT_MASK(32));

	return 0;
}
1018 
1019 /*
1020  * Cleanup the in-kernel objects, freeing memory. To be called at the very end
1021  * of the remove sequence, when nothing (including userspace) can access the
1022  * objects anymore.
1023  */
/*
 * Cleanup the in-kernel objects, freeing memory. To be called at the very end
 * of the remove sequence, when nothing (including userspace) can access the
 * objects anymore.
 */
static void cal_media_cleanup(struct cal_dev *cal)
{
	v4l2_device_unregister(&cal->v4l2_dev);
	media_device_cleanup(&cal->mdev);

	vb2_dma_contig_clear_max_seg_size(cal->dev);
}
1031 
1032 /* ------------------------------------------------------------------
1033  *	Initialization and module stuff
1034  * ------------------------------------------------------------------
1035  */
1036 
1037 static struct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst)
1038 {
1039 	struct cal_ctx *ctx;
1040 	int ret;
1041 
1042 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1043 	if (!ctx)
1044 		return NULL;
1045 
1046 	ctx->cal = cal;
1047 	ctx->phy = cal->phy[inst];
1048 	ctx->dma_ctx = inst;
1049 	ctx->csi2_ctx = inst;
1050 	ctx->cport = inst;
1051 
1052 	ret = cal_ctx_v4l2_init(ctx);
1053 	if (ret) {
1054 		kfree(ctx);
1055 		return NULL;
1056 	}
1057 
1058 	return ctx;
1059 }
1060 
/*
 * cal_ctx_destroy - Clean up and free a context created by cal_ctx_create()
 * @ctx: The CAL context
 */
static void cal_ctx_destroy(struct cal_ctx *ctx)
{
	cal_ctx_v4l2_cleanup(ctx);

	kfree(ctx);
}
1067 
1068 static const struct of_device_id cal_of_match[] = {
1069 	{
1070 		.compatible = "ti,dra72-cal",
1071 		.data = (void *)&dra72x_cal_data,
1072 	},
1073 	{
1074 		.compatible = "ti,dra72-pre-es2-cal",
1075 		.data = (void *)&dra72x_es1_cal_data,
1076 	},
1077 	{
1078 		.compatible = "ti,dra76-cal",
1079 		.data = (void *)&dra76x_cal_data,
1080 	},
1081 	{
1082 		.compatible = "ti,am654-cal",
1083 		.data = (void *)&am654_cal_data,
1084 	},
1085 	{},
1086 };
1087 MODULE_DEVICE_TABLE(of, cal_of_match);
1088 
/* Get hardware revision and info. */

/* Expected value of the CAL_HL_HWINFO register; mismatch is only logged. */
#define CAL_HL_HWINFO_VALUE		0xa3c90469

/*
 * cal_get_hwinfo - Read and log the CAL hardware revision and HWINFO
 * @cal: The CAL device
 *
 * Store the revision in cal->revision and decode it when it uses the H08
 * scheme; a legacy or unknown scheme is reported raw. Also sanity-check
 * the HWINFO register against the expected constant.
 */
static void cal_get_hwinfo(struct cal_dev *cal)
{
	u32 hwinfo;

	cal->revision = cal_read(cal, CAL_HL_REVISION);
	switch (FIELD_GET(CAL_HL_REVISION_SCHEME_MASK, cal->revision)) {
	case CAL_HL_REVISION_SCHEME_H08:
		cal_dbg(3, cal, "CAL HW revision %lu.%lu.%lu (0x%08x)\n",
			FIELD_GET(CAL_HL_REVISION_MAJOR_MASK, cal->revision),
			FIELD_GET(CAL_HL_REVISION_MINOR_MASK, cal->revision),
			FIELD_GET(CAL_HL_REVISION_RTL_MASK, cal->revision),
			cal->revision);
		break;

	case CAL_HL_REVISION_SCHEME_LEGACY:
	default:
		cal_info(cal, "Unexpected CAL HW revision 0x%08x\n",
			 cal->revision);
		break;
	}

	hwinfo = cal_read(cal, CAL_HL_HWINFO);
	if (hwinfo != CAL_HL_HWINFO_VALUE)
		cal_info(cal, "CAL_HL_HWINFO = 0x%08x, expected 0x%08x\n",
			 hwinfo, CAL_HL_HWINFO_VALUE);
}
1119 
1120 static int cal_init_camerarx_regmap(struct cal_dev *cal)
1121 {
1122 	struct platform_device *pdev = to_platform_device(cal->dev);
1123 	struct device_node *np = cal->dev->of_node;
1124 	struct regmap_config config = { };
1125 	struct regmap *syscon;
1126 	struct resource *res;
1127 	unsigned int offset;
1128 	void __iomem *base;
1129 
1130 	syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,camerrx-control",
1131 						      1, &offset);
1132 	if (!IS_ERR(syscon)) {
1133 		cal->syscon_camerrx = syscon;
1134 		cal->syscon_camerrx_offset = offset;
1135 		return 0;
1136 	}
1137 
1138 	dev_warn(cal->dev, "failed to get ti,camerrx-control: %ld\n",
1139 		 PTR_ERR(syscon));
1140 
1141 	/*
1142 	 * Backward DTS compatibility. If syscon entry is not present then
1143 	 * check if the camerrx_control resource is present.
1144 	 */
1145 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1146 					   "camerrx_control");
1147 	base = devm_ioremap_resource(cal->dev, res);
1148 	if (IS_ERR(base)) {
1149 		cal_err(cal, "failed to ioremap camerrx_control\n");
1150 		return PTR_ERR(base);
1151 	}
1152 
1153 	cal_dbg(1, cal, "ioresource %s at %pa - %pa\n",
1154 		res->name, &res->start, &res->end);
1155 
1156 	config.reg_bits = 32;
1157 	config.reg_stride = 4;
1158 	config.val_bits = 32;
1159 	config.max_register = resource_size(res) - 4;
1160 
1161 	syscon = regmap_init_mmio(NULL, base, &config);
1162 	if (IS_ERR(syscon)) {
1163 		pr_err("regmap init failed\n");
1164 		return PTR_ERR(syscon);
1165 	}
1166 
1167 	/*
1168 	 * In this case the base already point to the direct CM register so no
1169 	 * need for an offset.
1170 	 */
1171 	cal->syscon_camerrx = syscon;
1172 	cal->syscon_camerrx_offset = 0;
1173 
1174 	return 0;
1175 }
1176 
1177 static int cal_probe(struct platform_device *pdev)
1178 {
1179 	struct cal_dev *cal;
1180 	bool connected = false;
1181 	unsigned int i;
1182 	int ret;
1183 	int irq;
1184 
1185 	cal = devm_kzalloc(&pdev->dev, sizeof(*cal), GFP_KERNEL);
1186 	if (!cal)
1187 		return -ENOMEM;
1188 
1189 	cal->data = of_device_get_match_data(&pdev->dev);
1190 	if (!cal->data) {
1191 		dev_err(&pdev->dev, "Could not get feature data based on compatible version\n");
1192 		return -ENODEV;
1193 	}
1194 
1195 	cal->dev = &pdev->dev;
1196 	platform_set_drvdata(pdev, cal);
1197 
1198 	/* Acquire resources: clocks, CAMERARX regmap, I/O memory and IRQ. */
1199 	cal->fclk = devm_clk_get(&pdev->dev, "fck");
1200 	if (IS_ERR(cal->fclk)) {
1201 		dev_err(&pdev->dev, "cannot get CAL fclk\n");
1202 		return PTR_ERR(cal->fclk);
1203 	}
1204 
1205 	ret = cal_init_camerarx_regmap(cal);
1206 	if (ret < 0)
1207 		return ret;
1208 
1209 	cal->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1210 						"cal_top");
1211 	cal->base = devm_ioremap_resource(&pdev->dev, cal->res);
1212 	if (IS_ERR(cal->base))
1213 		return PTR_ERR(cal->base);
1214 
1215 	cal_dbg(1, cal, "ioresource %s at %pa - %pa\n",
1216 		cal->res->name, &cal->res->start, &cal->res->end);
1217 
1218 	irq = platform_get_irq(pdev, 0);
1219 	cal_dbg(1, cal, "got irq# %d\n", irq);
1220 	ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME,
1221 			       cal);
1222 	if (ret)
1223 		return ret;
1224 
1225 	/* Read the revision and hardware info to verify hardware access. */
1226 	pm_runtime_enable(&pdev->dev);
1227 	ret = pm_runtime_resume_and_get(&pdev->dev);
1228 	if (ret)
1229 		goto error_pm_runtime;
1230 
1231 	cal_get_hwinfo(cal);
1232 	pm_runtime_put_sync(&pdev->dev);
1233 
1234 	/* Initialize the media device. */
1235 	ret = cal_media_init(cal);
1236 	if (ret < 0)
1237 		goto error_pm_runtime;
1238 
1239 	/* Create CAMERARX PHYs. */
1240 	for (i = 0; i < cal->data->num_csi2_phy; ++i) {
1241 		cal->phy[i] = cal_camerarx_create(cal, i);
1242 		if (IS_ERR(cal->phy[i])) {
1243 			ret = PTR_ERR(cal->phy[i]);
1244 			cal->phy[i] = NULL;
1245 			goto error_camerarx;
1246 		}
1247 
1248 		if (cal->phy[i]->source_node)
1249 			connected = true;
1250 	}
1251 
1252 	if (!connected) {
1253 		cal_err(cal, "Neither port is configured, no point in staying up\n");
1254 		ret = -ENODEV;
1255 		goto error_camerarx;
1256 	}
1257 
1258 	/* Create contexts. */
1259 	for (i = 0; i < cal->data->num_csi2_phy; ++i) {
1260 		if (!cal->phy[i]->source_node)
1261 			continue;
1262 
1263 		cal->ctx[cal->num_contexts] = cal_ctx_create(cal, i);
1264 		if (!cal->ctx[cal->num_contexts]) {
1265 			cal_err(cal, "Failed to create context %u\n", cal->num_contexts);
1266 			ret = -ENODEV;
1267 			goto error_context;
1268 		}
1269 
1270 		cal->num_contexts++;
1271 	}
1272 
1273 	/* Register the media device. */
1274 	ret = cal_media_register(cal);
1275 	if (ret)
1276 		goto error_context;
1277 
1278 	return 0;
1279 
1280 error_context:
1281 	for (i = 0; i < cal->num_contexts; i++)
1282 		cal_ctx_destroy(cal->ctx[i]);
1283 
1284 error_camerarx:
1285 	for (i = 0; i < cal->data->num_csi2_phy; i++)
1286 		cal_camerarx_destroy(cal->phy[i]);
1287 
1288 	cal_media_cleanup(cal);
1289 
1290 error_pm_runtime:
1291 	pm_runtime_disable(&pdev->dev);
1292 
1293 	return ret;
1294 }
1295 
/*
 * Remove the CAL device, unwinding probe in reverse order: unregister the
 * media device so userspace loses access first, then quiesce and destroy
 * the hardware-facing objects, and finally drop the runtime PM references.
 */
static int cal_remove(struct platform_device *pdev)
{
	struct cal_dev *cal = platform_get_drvdata(pdev);
	unsigned int i;
	int ret;

	cal_dbg(1, cal, "Removing %s\n", CAL_MODULE_NAME);

	/* Power the device up so the PHYs can be disabled below. */
	ret = pm_runtime_resume_and_get(&pdev->dev);

	cal_media_unregister(cal);

	for (i = 0; i < cal->data->num_csi2_phy; i++)
		cal_camerarx_disable(cal->phy[i]);

	for (i = 0; i < cal->num_contexts; i++)
		cal_ctx_destroy(cal->ctx[i]);

	for (i = 0; i < cal->data->num_csi2_phy; i++)
		cal_camerarx_destroy(cal->phy[i]);

	cal_media_cleanup(cal);

	/* Only drop the usage count if resume_and_get() above succeeded. */
	if (ret >= 0)
		pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
1325 
1326 static int cal_runtime_resume(struct device *dev)
1327 {
1328 	struct cal_dev *cal = dev_get_drvdata(dev);
1329 	unsigned int i;
1330 	u32 val;
1331 
1332 	if (cal->data->flags & DRA72_CAL_PRE_ES2_LDO_DISABLE) {
1333 		/*
1334 		 * Apply errata on both port everytime we (re-)enable
1335 		 * the clock
1336 		 */
1337 		for (i = 0; i < cal->data->num_csi2_phy; i++)
1338 			cal_camerarx_i913_errata(cal->phy[i]);
1339 	}
1340 
1341 	/*
1342 	 * Enable global interrupts that are not related to a particular
1343 	 * CAMERARAX or context.
1344 	 */
1345 	cal_write(cal, CAL_HL_IRQENABLE_SET(0), CAL_HL_IRQ_OCPO_ERR_MASK);
1346 
1347 	val = cal_read(cal, CAL_CTRL);
1348 	cal_set_field(&val, CAL_CTRL_BURSTSIZE_BURST128,
1349 		      CAL_CTRL_BURSTSIZE_MASK);
1350 	cal_set_field(&val, 0xf, CAL_CTRL_TAGCNT_MASK);
1351 	cal_set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
1352 		      CAL_CTRL_POSTED_WRITES_MASK);
1353 	cal_set_field(&val, 0xff, CAL_CTRL_MFLAGL_MASK);
1354 	cal_set_field(&val, 0xff, CAL_CTRL_MFLAGH_MASK);
1355 	cal_write(cal, CAL_CTRL, val);
1356 	cal_dbg(3, cal, "CAL_CTRL = 0x%08x\n", cal_read(cal, CAL_CTRL));
1357 
1358 	return 0;
1359 }
1360 
/* Runtime PM only: hardware state is reprogrammed on every resume. */
static const struct dev_pm_ops cal_pm_ops = {
	.runtime_resume = cal_runtime_resume,
};
1364 
/* Platform driver glue; matched against the cal_of_match table above. */
static struct platform_driver cal_pdrv = {
	.probe		= cal_probe,
	.remove		= cal_remove,
	.driver		= {
		.name	= CAL_MODULE_NAME,
		.pm	= &cal_pm_ops,
		.of_match_table = cal_of_match,
	},
};

module_platform_driver(cal_pdrv);
1376