1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * isp.c
4 *
5 * TI OMAP3 ISP - Core
6 *
7 * Copyright (C) 2006-2010 Nokia Corporation
8 * Copyright (C) 2007-2009 Texas Instruments, Inc.
9 *
10 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
11 * Sakari Ailus <sakari.ailus@iki.fi>
12 *
13 * Contributors:
14 * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
15 * Sakari Ailus <sakari.ailus@iki.fi>
16 * David Cohen <dacohen@gmail.com>
17 * Stanimir Varbanov <svarbanov@mm-sol.com>
18 * Vimarsh Zutshi <vimarsh.zutshi@gmail.com>
19 * Tuukka Toivonen <tuukkat76@gmail.com>
20 * Sergio Aguirre <saaguirre@ti.com>
21 * Antti Koskipaa <akoskipa@gmail.com>
22 * Ivan T. Ivanov <iivanov@mm-sol.com>
23 * RaniSuneela <r-m@ti.com>
24 * Atanas Filipov <afilipov@mm-sol.com>
25 * Gjorgji Rosikopulos <grosikopulos@mm-sol.com>
26 * Hiroshi DOYU <hiroshi.doyu@nokia.com>
27 * Nayden Kanchev <nkanchev@mm-sol.com>
28 * Phil Carmody <ext-phil.2.carmody@nokia.com>
29 * Artem Bityutskiy <artem.bityutskiy@nokia.com>
30 * Dominic Curran <dcurran@ti.com>
31 * Ilkka Myllyperkio <ilkka.myllyperkio@sofica.fi>
32 * Pallavi Kulkarni <p-kulkarni@ti.com>
33 * Vaibhav Hiremath <hvaibhav@ti.com>
34 * Mohit Jalori <mjalori@ti.com>
35 * Sameer Venkatraman <sameerv@ti.com>
36 * Senthilvadivu Guruswamy <svadivu@ti.com>
37 * Thara Gopinath <thara@ti.com>
38 * Toni Leinonen <toni.leinonen@nokia.com>
39 * Troy Laramy <t-laramy@ti.com>
40 */
41
42 #include <linux/clk.h>
43 #include <linux/clkdev.h>
44 #include <linux/delay.h>
45 #include <linux/device.h>
46 #include <linux/dma-mapping.h>
47 #include <linux/i2c.h>
48 #include <linux/interrupt.h>
49 #include <linux/mfd/syscon.h>
50 #include <linux/module.h>
51 #include <linux/omap-iommu.h>
52 #include <linux/platform_device.h>
53 #include <linux/property.h>
54 #include <linux/regulator/consumer.h>
55 #include <linux/slab.h>
56 #include <linux/sched.h>
57 #include <linux/vmalloc.h>
58
59 #ifdef CONFIG_ARM_DMA_USE_IOMMU
60 #include <asm/dma-iommu.h>
61 #endif
62
63 #include <media/v4l2-common.h>
64 #include <media/v4l2-fwnode.h>
65 #include <media/v4l2-device.h>
66 #include <media/v4l2-mc.h>
67
68 #include "isp.h"
69 #include "ispreg.h"
70 #include "ispccdc.h"
71 #include "isppreview.h"
72 #include "ispresizer.h"
73 #include "ispcsi2.h"
74 #include "ispccp2.h"
75 #include "isph3a.h"
76 #include "isphist.h"
77
78 static unsigned int autoidle;
79 module_param(autoidle, int, 0444);
80 MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
81
82 static void isp_save_ctx(struct isp_device *isp);
83
84 static void isp_restore_ctx(struct isp_device *isp);
85
86 static const struct isp_res_mapping isp_res_maps[] = {
87 {
88 .isp_rev = ISP_REVISION_2_0,
89 .offset = {
90 /* first MMIO area */
91 0x0000, /* base, len 0x0070 */
92 0x0400, /* ccp2, len 0x01f0 */
93 0x0600, /* ccdc, len 0x00a8 */
94 0x0a00, /* hist, len 0x0048 */
95 0x0c00, /* h3a, len 0x0060 */
96 0x0e00, /* preview, len 0x00a0 */
97 0x1000, /* resizer, len 0x00ac */
98 0x1200, /* sbl, len 0x00fc */
99 /* second MMIO area */
100 0x0000, /* csi2a, len 0x0170 */
101 0x0170, /* csiphy2, len 0x000c */
102 },
103 .phy_type = ISP_PHY_TYPE_3430,
104 },
105 {
106 .isp_rev = ISP_REVISION_15_0,
107 .offset = {
108 /* first MMIO area */
109 0x0000, /* base, len 0x0070 */
110 0x0400, /* ccp2, len 0x01f0 */
111 0x0600, /* ccdc, len 0x00a8 */
112 0x0a00, /* hist, len 0x0048 */
113 0x0c00, /* h3a, len 0x0060 */
114 0x0e00, /* preview, len 0x00a0 */
115 0x1000, /* resizer, len 0x00ac */
116 0x1200, /* sbl, len 0x00fc */
117 /* second MMIO area */
118 0x0000, /* csi2a, len 0x0170 (1st area) */
119 0x0170, /* csiphy2, len 0x000c */
120 0x01c0, /* csi2a, len 0x0040 (2nd area) */
121 0x0400, /* csi2c, len 0x0170 (1st area) */
122 0x0570, /* csiphy1, len 0x000c */
123 0x05c0, /* csi2c, len 0x0040 (2nd area) */
124 },
125 .phy_type = ISP_PHY_TYPE_3630,
126 },
127 };
128
129 /* Structure for saving/restoring ISP module registers */
130 static struct isp_reg isp_reg_list[] = {
131 {OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG, 0},
132 {OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, 0},
133 {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 0},
134 {0, ISP_TOK_TERM, 0}
135 };
136
137 /*
138 * omap3isp_flush - Post pending L3 bus writes by doing a register readback
139 * @isp: OMAP3 ISP device
140 *
141 * In order to force posting of pending writes, we need to write and
142 * readback the same register, in this case the revision register.
143 *
144 * See this link for reference:
145 * https://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
146 */
void omap3isp_flush(struct isp_device *isp)
148 {
149 isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
150 isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
151 }
152
153 /* -----------------------------------------------------------------------------
154 * XCLK
155 */
156
157 #define to_isp_xclk(_hw) container_of(_hw, struct isp_xclk, hw)
158
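/*
 * isp_xclk_update - Program the divider for one of the two external clocks
 * @xclk: ISP external clock (xclk A or B)
 * @divider: Divider value to program (isp_xclk_disable() passes 0)
 *
 * Update the DIVA or DIVB field of the ISP_TCTRL_CTRL register depending on
 * which clock @xclk describes.
 */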
static void isp_xclk_update(struct isp_xclk *xclk, u32 divider)
160 {
161 switch (xclk->id) {
162 case ISP_XCLK_A:
163 isp_reg_clr_set(xclk->isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
164 ISPTCTRL_CTRL_DIVA_MASK,
165 divider << ISPTCTRL_CTRL_DIVA_SHIFT);
166 break;
167 case ISP_XCLK_B:
168 isp_reg_clr_set(xclk->isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
169 ISPTCTRL_CTRL_DIVB_MASK,
170 divider << ISPTCTRL_CTRL_DIVB_SHIFT);
171 break;
172 }
173 }
174
static int isp_xclk_prepare(struct clk_hw *hw)
176 {
177 struct isp_xclk *xclk = to_isp_xclk(hw);
178
179 omap3isp_get(xclk->isp);
180
181 return 0;
182 }
183
static void isp_xclk_unprepare(struct clk_hw *hw)
185 {
186 struct isp_xclk *xclk = to_isp_xclk(hw);
187
188 omap3isp_put(xclk->isp);
189 }
190
static int isp_xclk_enable(struct clk_hw *hw)
192 {
193 struct isp_xclk *xclk = to_isp_xclk(hw);
194 unsigned long flags;
195
196 spin_lock_irqsave(&xclk->lock, flags);
197 isp_xclk_update(xclk, xclk->divider);
198 xclk->enabled = true;
199 spin_unlock_irqrestore(&xclk->lock, flags);
200
201 return 0;
202 }
203
static void isp_xclk_disable(struct clk_hw *hw)
205 {
206 struct isp_xclk *xclk = to_isp_xclk(hw);
207 unsigned long flags;
208
209 spin_lock_irqsave(&xclk->lock, flags);
210 isp_xclk_update(xclk, 0);
211 xclk->enabled = false;
212 spin_unlock_irqrestore(&xclk->lock, flags);
213 }
214
static unsigned long isp_xclk_recalc_rate(struct clk_hw *hw,
216 unsigned long parent_rate)
217 {
218 struct isp_xclk *xclk = to_isp_xclk(hw);
219
220 return parent_rate / xclk->divider;
221 }
222
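/*
 * isp_xclk_calc_divider - Compute the xclk divider for a requested rate
 * @rate: Requested clock rate, updated to the rate actually achievable
 * @parent_rate: Rate of the cam_mclk parent clock
 *
 * Rates at or above the parent rate select the bypass divider. Other rates
 * use the closest divider, clamped below the bypass value. For example, with
 * a 172.8 MHz parent clock, a 9.6 MHz request results in a divider of 18.
 */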
static u32 isp_xclk_calc_divider(unsigned long *rate, unsigned long parent_rate)
224 {
225 u32 divider;
226
227 if (*rate >= parent_rate) {
228 *rate = parent_rate;
229 return ISPTCTRL_CTRL_DIV_BYPASS;
230 }
231
232 if (*rate == 0)
233 *rate = 1;
234
235 divider = DIV_ROUND_CLOSEST(parent_rate, *rate);
236 if (divider >= ISPTCTRL_CTRL_DIV_BYPASS)
237 divider = ISPTCTRL_CTRL_DIV_BYPASS - 1;
238
239 *rate = parent_rate / divider;
240 return divider;
241 }
242
static long isp_xclk_round_rate(struct clk_hw *hw, unsigned long rate,
244 unsigned long *parent_rate)
245 {
246 isp_xclk_calc_divider(&rate, *parent_rate);
247 return rate;
248 }
249
static int isp_xclk_set_rate(struct clk_hw *hw, unsigned long rate,
251 unsigned long parent_rate)
252 {
253 struct isp_xclk *xclk = to_isp_xclk(hw);
254 unsigned long flags;
255 u32 divider;
256
257 divider = isp_xclk_calc_divider(&rate, parent_rate);
258
259 spin_lock_irqsave(&xclk->lock, flags);
260
261 xclk->divider = divider;
262 if (xclk->enabled)
263 isp_xclk_update(xclk, divider);
264
265 spin_unlock_irqrestore(&xclk->lock, flags);
266
267 dev_dbg(xclk->isp->dev, "%s: cam_xclk%c set to %lu Hz (div %u)\n",
268 __func__, xclk->id == ISP_XCLK_A ? 'a' : 'b', rate, divider);
269 return 0;
270 }
271
272 static const struct clk_ops isp_xclk_ops = {
273 .prepare = isp_xclk_prepare,
274 .unprepare = isp_xclk_unprepare,
275 .enable = isp_xclk_enable,
276 .disable = isp_xclk_disable,
277 .recalc_rate = isp_xclk_recalc_rate,
278 .round_rate = isp_xclk_round_rate,
279 .set_rate = isp_xclk_set_rate,
280 };
281
282 static const char *isp_xclk_parent_name = "cam_mclk";
283
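/*
 * isp_xclk_src_get - Clock provider callback for the external clocks
 * @clkspec: Clock specifier from the consumer
 * @data: ISP device passed to of_clk_add_provider()
 *
 * Map the specifier cell to cam_xclka (index 0) or cam_xclkb (index 1).
 */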
static struct clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data)
285 {
286 unsigned int idx = clkspec->args[0];
287 struct isp_device *isp = data;
288
289 if (idx >= ARRAY_SIZE(isp->xclks))
290 return ERR_PTR(-ENOENT);
291
292 return isp->xclks[idx].clk;
293 }
294
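/*
 * isp_xclk_init - Register the cam_xclka and cam_xclkb clocks
 * @isp: OMAP3 ISP device
 *
 * Register the two external clocks generated by the ISP with the common clock
 * framework and, when booted from DT, expose them through a clock provider so
 * that sensor nodes can reference them (typically with clocks = <&isp 0> for
 * cam_xclka).
 */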
static int isp_xclk_init(struct isp_device *isp)
296 {
297 struct device_node *np = isp->dev->of_node;
298 struct clk_init_data init = {};
299 unsigned int i;
300
301 for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
302 isp->xclks[i].clk = ERR_PTR(-EINVAL);
303
304 for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
305 struct isp_xclk *xclk = &isp->xclks[i];
306
307 xclk->isp = isp;
308 xclk->id = i == 0 ? ISP_XCLK_A : ISP_XCLK_B;
309 xclk->divider = 1;
310 spin_lock_init(&xclk->lock);
311
312 init.name = i == 0 ? "cam_xclka" : "cam_xclkb";
313 init.ops = &isp_xclk_ops;
314 init.parent_names = &isp_xclk_parent_name;
315 init.num_parents = 1;
316
317 xclk->hw.init = &init;
318 /*
319 * The first argument is NULL in order to avoid circular
320 * reference, as this driver takes reference on the
321 * sensor subdevice modules and the sensors would take
322 * reference on this module through clk_get().
323 */
324 xclk->clk = clk_register(NULL, &xclk->hw);
325 if (IS_ERR(xclk->clk))
326 return PTR_ERR(xclk->clk);
327 }
328
329 if (np)
330 of_clk_add_provider(np, isp_xclk_src_get, isp);
331
332 return 0;
333 }
334
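/*
 * isp_xclk_cleanup - Unregister the clock provider and the external clocks
 * @isp: OMAP3 ISP device
 */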
static void isp_xclk_cleanup(struct isp_device *isp)
336 {
337 struct device_node *np = isp->dev->of_node;
338 unsigned int i;
339
340 if (np)
341 of_clk_del_provider(np);
342
343 for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
344 struct isp_xclk *xclk = &isp->xclks[i];
345
346 if (!IS_ERR(xclk->clk))
347 clk_unregister(xclk->clk);
348 }
349 }
350
351 /* -----------------------------------------------------------------------------
352 * Interrupts
353 */
354
355 /*
356 * isp_enable_interrupts - Enable ISP interrupts.
357 * @isp: OMAP3 ISP device
358 */
static void isp_enable_interrupts(struct isp_device *isp)
360 {
361 static const u32 irq = IRQ0ENABLE_CSIA_IRQ
362 | IRQ0ENABLE_CSIB_IRQ
363 | IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ
364 | IRQ0ENABLE_CCDC_LSC_DONE_IRQ
365 | IRQ0ENABLE_CCDC_VD0_IRQ
366 | IRQ0ENABLE_CCDC_VD1_IRQ
367 | IRQ0ENABLE_HS_VS_IRQ
368 | IRQ0ENABLE_HIST_DONE_IRQ
369 | IRQ0ENABLE_H3A_AWB_DONE_IRQ
370 | IRQ0ENABLE_H3A_AF_DONE_IRQ
371 | IRQ0ENABLE_PRV_DONE_IRQ
372 | IRQ0ENABLE_RSZ_DONE_IRQ;
373
374 isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
375 isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
376 }
377
378 /*
379 * isp_disable_interrupts - Disable ISP interrupts.
380 * @isp: OMAP3 ISP device
381 */
static void isp_disable_interrupts(struct isp_device *isp)
383 {
384 isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
385 }
386
387 /*
388 * isp_core_init - ISP core settings
389 * @isp: OMAP3 ISP device
 * @idle: Use smart-standby when non-zero, force-standby otherwise.
391 *
392 * Set the power settings for the ISP and SBL bus and configure the HS/VS
393 * interrupt source.
394 *
395 * We need to configure the HS/VS interrupt source before interrupts get
396 * enabled, as the sensor might be free-running and the ISP default setting
397 * (HS edge) would put an unnecessary burden on the CPU.
398 */
static void isp_core_init(struct isp_device *isp, int idle)
400 {
401 isp_reg_writel(isp,
402 ((idle ? ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY :
403 ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY) <<
404 ISP_SYSCONFIG_MIDLEMODE_SHIFT) |
405 ((isp->revision == ISP_REVISION_15_0) ?
406 ISP_SYSCONFIG_AUTOIDLE : 0),
407 OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
408
409 isp_reg_writel(isp,
410 (isp->autoidle ? ISPCTRL_SBL_AUTOIDLE : 0) |
411 ISPCTRL_SYNC_DETECT_VSRISE,
412 OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
413 }
414
415 /*
416 * Configure the bridge and lane shifter. Valid inputs are
417 *
418 * CCDC_INPUT_PARALLEL: Parallel interface
419 * CCDC_INPUT_CSI2A: CSI2a receiver
420 * CCDC_INPUT_CCP2B: CCP2b receiver
421 * CCDC_INPUT_CSI2C: CSI2c receiver
422 *
423 * The bridge and lane shifter are configured according to the selected input
424 * and the ISP platform data.
425 */
void omap3isp_configure_bridge(struct isp_device *isp,
427 enum ccdc_input_entity input,
428 const struct isp_parallel_cfg *parcfg,
429 unsigned int shift, unsigned int bridge)
430 {
431 u32 ispctrl_val;
432
433 ispctrl_val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
434 ispctrl_val &= ~ISPCTRL_SHIFT_MASK;
435 ispctrl_val &= ~ISPCTRL_PAR_CLK_POL_INV;
436 ispctrl_val &= ~ISPCTRL_PAR_SER_CLK_SEL_MASK;
437 ispctrl_val &= ~ISPCTRL_PAR_BRIDGE_MASK;
438 ispctrl_val |= bridge;
439
440 switch (input) {
441 case CCDC_INPUT_PARALLEL:
442 ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
443 ispctrl_val |= parcfg->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
444 shift += parcfg->data_lane_shift;
445 break;
446
447 case CCDC_INPUT_CSI2A:
448 ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIA;
449 break;
450
451 case CCDC_INPUT_CCP2B:
452 ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIB;
453 break;
454
455 case CCDC_INPUT_CSI2C:
456 ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIC;
457 break;
458
459 default:
460 return;
461 }
462
463 ispctrl_val |= ((shift/2) << ISPCTRL_SHIFT_SHIFT) & ISPCTRL_SHIFT_MASK;
464
465 isp_reg_writel(isp, ispctrl_val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
466 }
467
void omap3isp_hist_dma_done(struct isp_device *isp)
469 {
470 if (omap3isp_ccdc_busy(&isp->isp_ccdc) ||
471 omap3isp_stat_pcr_busy(&isp->isp_hist)) {
472 /* Histogram cannot be enabled in this frame anymore */
473 atomic_set(&isp->isp_hist.buf_err, 1);
474 dev_dbg(isp->dev,
475 "hist: Out of synchronization with CCDC. Ignoring next buffer.\n");
476 }
477 }
478
static inline void __maybe_unused isp_isr_dbg(struct isp_device *isp,
480 u32 irqstatus)
481 {
482 static const char *name[] = {
483 "CSIA_IRQ",
484 "res1",
485 "res2",
486 "CSIB_LCM_IRQ",
487 "CSIB_IRQ",
488 "res5",
489 "res6",
490 "res7",
491 "CCDC_VD0_IRQ",
492 "CCDC_VD1_IRQ",
493 "CCDC_VD2_IRQ",
494 "CCDC_ERR_IRQ",
495 "H3A_AF_DONE_IRQ",
496 "H3A_AWB_DONE_IRQ",
497 "res14",
498 "res15",
499 "HIST_DONE_IRQ",
500 "CCDC_LSC_DONE",
501 "CCDC_LSC_PREFETCH_COMPLETED",
502 "CCDC_LSC_PREFETCH_ERROR",
503 "PRV_DONE_IRQ",
504 "CBUFF_IRQ",
505 "res22",
506 "res23",
507 "RSZ_DONE_IRQ",
508 "OVF_IRQ",
509 "res26",
510 "res27",
511 "MMU_ERR_IRQ",
512 "OCP_ERR_IRQ",
513 "SEC_ERR_IRQ",
514 "HS_VS_IRQ",
515 };
516 int i;
517
518 dev_dbg(isp->dev, "ISP IRQ: ");
519
520 for (i = 0; i < ARRAY_SIZE(name); i++) {
521 if ((1 << i) & irqstatus)
522 printk(KERN_CONT "%s ", name[i]);
523 }
524 printk(KERN_CONT "\n");
525 }
526
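/*
 * isp_isr_sbl - Handle shared buffer logic (SBL) overflows
 * @isp: OMAP3 ISP device
 *
 * Read and clear the SBL PCR overflow bits. A write buffer overflow marks the
 * corresponding pipeline as erroneous, while H3A AF/AEWB overflows are
 * reported to the statistics modules.
 */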
static void isp_isr_sbl(struct isp_device *isp)
528 {
529 struct device *dev = isp->dev;
530 struct isp_pipeline *pipe;
531 u32 sbl_pcr;
532
533 /*
534 * Handle shared buffer logic overflows for video buffers.
535 * ISPSBL_PCR_CCDCPRV_2_RSZ_OVF can be safely ignored.
536 */
537 sbl_pcr = isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
538 isp_reg_writel(isp, sbl_pcr, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
539 sbl_pcr &= ~ISPSBL_PCR_CCDCPRV_2_RSZ_OVF;
540
541 if (sbl_pcr)
542 dev_dbg(dev, "SBL overflow (PCR = 0x%08x)\n", sbl_pcr);
543
544 if (sbl_pcr & ISPSBL_PCR_CSIB_WBL_OVF) {
545 pipe = to_isp_pipeline(&isp->isp_ccp2.subdev.entity);
546 if (pipe != NULL)
547 pipe->error = true;
548 }
549
550 if (sbl_pcr & ISPSBL_PCR_CSIA_WBL_OVF) {
551 pipe = to_isp_pipeline(&isp->isp_csi2a.subdev.entity);
552 if (pipe != NULL)
553 pipe->error = true;
554 }
555
556 if (sbl_pcr & ISPSBL_PCR_CCDC_WBL_OVF) {
557 pipe = to_isp_pipeline(&isp->isp_ccdc.subdev.entity);
558 if (pipe != NULL)
559 pipe->error = true;
560 }
561
562 if (sbl_pcr & ISPSBL_PCR_PRV_WBL_OVF) {
563 pipe = to_isp_pipeline(&isp->isp_prev.subdev.entity);
564 if (pipe != NULL)
565 pipe->error = true;
566 }
567
568 if (sbl_pcr & (ISPSBL_PCR_RSZ1_WBL_OVF
569 | ISPSBL_PCR_RSZ2_WBL_OVF
570 | ISPSBL_PCR_RSZ3_WBL_OVF
571 | ISPSBL_PCR_RSZ4_WBL_OVF)) {
572 pipe = to_isp_pipeline(&isp->isp_res.subdev.entity);
573 if (pipe != NULL)
574 pipe->error = true;
575 }
576
577 if (sbl_pcr & ISPSBL_PCR_H3A_AF_WBL_OVF)
578 omap3isp_stat_sbl_overflow(&isp->isp_af);
579
580 if (sbl_pcr & ISPSBL_PCR_H3A_AEAWB_WBL_OVF)
581 omap3isp_stat_sbl_overflow(&isp->isp_aewb);
582 }
583
584 /*
585 * isp_isr - Interrupt Service Routine for Camera ISP module.
586 * @irq: Not used currently.
587 * @_isp: Pointer to the OMAP3 ISP device
588 *
589 * Handles the corresponding callback if plugged in.
590 */
static irqreturn_t isp_isr(int irq, void *_isp)
592 {
593 static const u32 ccdc_events = IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ |
594 IRQ0STATUS_CCDC_LSC_DONE_IRQ |
595 IRQ0STATUS_CCDC_VD0_IRQ |
596 IRQ0STATUS_CCDC_VD1_IRQ |
597 IRQ0STATUS_HS_VS_IRQ;
598 struct isp_device *isp = _isp;
599 u32 irqstatus;
600
601 irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
602 isp_reg_writel(isp, irqstatus, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
603
604 isp_isr_sbl(isp);
605
606 if (irqstatus & IRQ0STATUS_CSIA_IRQ)
607 omap3isp_csi2_isr(&isp->isp_csi2a);
608
609 if (irqstatus & IRQ0STATUS_CSIB_IRQ)
610 omap3isp_ccp2_isr(&isp->isp_ccp2);
611
612 if (irqstatus & IRQ0STATUS_CCDC_VD0_IRQ) {
613 if (isp->isp_ccdc.output & CCDC_OUTPUT_PREVIEW)
614 omap3isp_preview_isr_frame_sync(&isp->isp_prev);
615 if (isp->isp_ccdc.output & CCDC_OUTPUT_RESIZER)
616 omap3isp_resizer_isr_frame_sync(&isp->isp_res);
617 omap3isp_stat_isr_frame_sync(&isp->isp_aewb);
618 omap3isp_stat_isr_frame_sync(&isp->isp_af);
619 omap3isp_stat_isr_frame_sync(&isp->isp_hist);
620 }
621
622 if (irqstatus & ccdc_events)
623 omap3isp_ccdc_isr(&isp->isp_ccdc, irqstatus & ccdc_events);
624
625 if (irqstatus & IRQ0STATUS_PRV_DONE_IRQ) {
626 if (isp->isp_prev.output & PREVIEW_OUTPUT_RESIZER)
627 omap3isp_resizer_isr_frame_sync(&isp->isp_res);
628 omap3isp_preview_isr(&isp->isp_prev);
629 }
630
631 if (irqstatus & IRQ0STATUS_RSZ_DONE_IRQ)
632 omap3isp_resizer_isr(&isp->isp_res);
633
634 if (irqstatus & IRQ0STATUS_H3A_AWB_DONE_IRQ)
635 omap3isp_stat_isr(&isp->isp_aewb);
636
637 if (irqstatus & IRQ0STATUS_H3A_AF_DONE_IRQ)
638 omap3isp_stat_isr(&isp->isp_af);
639
640 if (irqstatus & IRQ0STATUS_HIST_DONE_IRQ)
641 omap3isp_stat_isr(&isp->isp_hist);
642
643 omap3isp_flush(isp);
644
645 #if defined(DEBUG) && defined(ISP_ISR_DEBUG)
646 isp_isr_dbg(isp, irqstatus);
647 #endif
648
649 return IRQ_HANDLED;
650 }
651
652 static const struct media_device_ops isp_media_ops = {
653 .link_notify = v4l2_pipeline_link_notify,
654 };
655
656 /* -----------------------------------------------------------------------------
657 * Pipeline stream management
658 */
659
660 /*
661 * isp_pipeline_enable - Enable streaming on a pipeline
662 * @pipe: ISP pipeline
663 * @mode: Stream mode (single shot or continuous)
664 *
665 * Walk the entities chain starting at the pipeline output video node and start
666 * all modules in the chain in the given mode.
667 *
668 * Return 0 if successful, or the return value of the failed video::s_stream
669 * operation otherwise.
670 */
static int isp_pipeline_enable(struct isp_pipeline *pipe,
672 enum isp_pipeline_stream_state mode)
673 {
674 struct isp_device *isp = pipe->output->isp;
675 struct media_entity *entity;
676 struct media_pad *pad;
677 struct v4l2_subdev *subdev;
678 unsigned long flags;
679 int ret;
680
681 /* Refuse to start streaming if an entity included in the pipeline has
682 * crashed. This check must be performed before the loop below to avoid
683 * starting entities if the pipeline won't start anyway (those entities
684 * would then likely fail to stop, making the problem worse).
685 */
686 if (media_entity_enum_intersects(&pipe->ent_enum, &isp->crashed))
687 return -EIO;
688
689 spin_lock_irqsave(&pipe->lock, flags);
690 pipe->state &= ~(ISP_PIPELINE_IDLE_INPUT | ISP_PIPELINE_IDLE_OUTPUT);
691 spin_unlock_irqrestore(&pipe->lock, flags);
692
693 pipe->do_propagation = false;
694
695 mutex_lock(&isp->media_dev.graph_mutex);
696
697 entity = &pipe->output->video.entity;
698 while (1) {
699 pad = &entity->pads[0];
700 if (!(pad->flags & MEDIA_PAD_FL_SINK))
701 break;
702
703 pad = media_pad_remote_pad_first(pad);
704 if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
705 break;
706
707 entity = pad->entity;
708 subdev = media_entity_to_v4l2_subdev(entity);
709
710 ret = v4l2_subdev_call(subdev, video, s_stream, mode);
711 if (ret < 0 && ret != -ENOIOCTLCMD) {
712 mutex_unlock(&isp->media_dev.graph_mutex);
713 return ret;
714 }
715
716 if (subdev == &isp->isp_ccdc.subdev) {
717 v4l2_subdev_call(&isp->isp_aewb.subdev, video,
718 s_stream, mode);
719 v4l2_subdev_call(&isp->isp_af.subdev, video,
720 s_stream, mode);
721 v4l2_subdev_call(&isp->isp_hist.subdev, video,
722 s_stream, mode);
723 pipe->do_propagation = true;
724 }
725
726 /* Stop at the first external sub-device. */
727 if (subdev->dev != isp->dev)
728 break;
729 }
730
731 mutex_unlock(&isp->media_dev.graph_mutex);
732
733 return 0;
734 }
735
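/*
 * The isp_pipeline_wait_*() helpers below return a non-zero value as long as
 * the corresponding module (or, for the CCDC, the statistics modules it feeds)
 * is still busy. They are used as predicates for isp_pipeline_wait().
 */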
static int isp_pipeline_wait_resizer(struct isp_device *isp)
737 {
738 return omap3isp_resizer_busy(&isp->isp_res);
739 }
740
static int isp_pipeline_wait_preview(struct isp_device *isp)
742 {
743 return omap3isp_preview_busy(&isp->isp_prev);
744 }
745
static int isp_pipeline_wait_ccdc(struct isp_device *isp)
747 {
748 return omap3isp_stat_busy(&isp->isp_af)
749 || omap3isp_stat_busy(&isp->isp_aewb)
750 || omap3isp_stat_busy(&isp->isp_hist)
751 || omap3isp_ccdc_busy(&isp->isp_ccdc);
752 }
753
754 #define ISP_STOP_TIMEOUT msecs_to_jiffies(1000)
755
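/*
 * isp_pipeline_wait - Busy-wait until a module becomes idle
 * @isp: OMAP3 ISP device
 * @busy: Predicate reporting whether the module is still busy
 *
 * Poll the predicate for up to ISP_STOP_TIMEOUT. Return 0 when the module has
 * become idle, or 1 on timeout.
 */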
static int isp_pipeline_wait(struct isp_device *isp,
757 int(*busy)(struct isp_device *isp))
758 {
759 unsigned long timeout = jiffies + ISP_STOP_TIMEOUT;
760
761 while (!time_after(jiffies, timeout)) {
762 if (!busy(isp))
763 return 0;
764 }
765
766 return 1;
767 }
768
769 /*
770 * isp_pipeline_disable - Disable streaming on a pipeline
771 * @pipe: ISP pipeline
772 *
773 * Walk the entities chain starting at the pipeline output video node and stop
774 * all modules in the chain. Wait synchronously for the modules to be stopped if
775 * necessary.
776 *
777 * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
778 * can't be stopped (in which case a software reset of the ISP is probably
779 * necessary).
780 */
static int isp_pipeline_disable(struct isp_pipeline *pipe)
782 {
783 struct isp_device *isp = pipe->output->isp;
784 struct media_entity *entity;
785 struct media_pad *pad;
786 struct v4l2_subdev *subdev;
787 int failure = 0;
788 int ret;
789
790 /*
791 * We need to stop all the modules after CCDC first or they'll
792 * never stop since they may not get a full frame from CCDC.
793 */
794 entity = &pipe->output->video.entity;
795 while (1) {
796 pad = &entity->pads[0];
797 if (!(pad->flags & MEDIA_PAD_FL_SINK))
798 break;
799
800 pad = media_pad_remote_pad_first(pad);
801 if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
802 break;
803
804 entity = pad->entity;
805 subdev = media_entity_to_v4l2_subdev(entity);
806
807 if (subdev == &isp->isp_ccdc.subdev) {
808 v4l2_subdev_call(&isp->isp_aewb.subdev,
809 video, s_stream, 0);
810 v4l2_subdev_call(&isp->isp_af.subdev,
811 video, s_stream, 0);
812 v4l2_subdev_call(&isp->isp_hist.subdev,
813 video, s_stream, 0);
814 }
815
816 ret = v4l2_subdev_call(subdev, video, s_stream, 0);
817
818 /* Stop at the first external sub-device. */
819 if (subdev->dev != isp->dev)
820 break;
821
822 if (subdev == &isp->isp_res.subdev)
823 ret |= isp_pipeline_wait(isp, isp_pipeline_wait_resizer);
824 else if (subdev == &isp->isp_prev.subdev)
825 ret |= isp_pipeline_wait(isp, isp_pipeline_wait_preview);
826 else if (subdev == &isp->isp_ccdc.subdev)
827 ret |= isp_pipeline_wait(isp, isp_pipeline_wait_ccdc);
828
829 /* Handle stop failures. An entity that fails to stop can
830 * usually just be restarted. Flag the stop failure nonetheless
831 * to trigger an ISP reset the next time the device is released,
832 * just in case.
833 *
834 * The preview engine is a special case. A failure to stop can
835 * mean a hardware crash. When that happens the preview engine
836 * won't respond to read/write operations on the L4 bus anymore,
837 * resulting in a bus fault and a kernel oops next time it gets
838 * accessed. Mark it as crashed to prevent pipelines including
839 * it from being started.
840 */
841 if (ret) {
842 dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
843 isp->stop_failure = true;
844 if (subdev == &isp->isp_prev.subdev)
845 media_entity_enum_set(&isp->crashed,
846 &subdev->entity);
847 failure = -ETIMEDOUT;
848 }
849 }
850
851 return failure;
852 }
853
854 /*
855 * omap3isp_pipeline_set_stream - Enable/disable streaming on a pipeline
856 * @pipe: ISP pipeline
857 * @state: Stream state (stopped, single shot or continuous)
858 *
859 * Set the pipeline to the given stream state. Pipelines can be started in
860 * single-shot or continuous mode.
861 *
862 * Return 0 if successful, or the return value of the failed video::s_stream
863 * operation otherwise. The pipeline state is not updated when the operation
864 * fails, except when stopping the pipeline.
865 */
int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
867 enum isp_pipeline_stream_state state)
868 {
869 int ret;
870
871 if (state == ISP_PIPELINE_STREAM_STOPPED)
872 ret = isp_pipeline_disable(pipe);
873 else
874 ret = isp_pipeline_enable(pipe, state);
875
876 if (ret == 0 || state == ISP_PIPELINE_STREAM_STOPPED)
877 pipe->stream_state = state;
878
879 return ret;
880 }
881
882 /*
883 * omap3isp_pipeline_cancel_stream - Cancel stream on a pipeline
884 * @pipe: ISP pipeline
885 *
 * Cancelling a stream marks all buffers on all video nodes in the pipeline as
 * erroneous and makes sure no new buffer can be queued. This function is called
888 * when a fatal error that prevents any further operation on the pipeline
889 * occurs.
890 */
void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe)
892 {
893 if (pipe->input)
894 omap3isp_video_cancel_stream(pipe->input);
895 if (pipe->output)
896 omap3isp_video_cancel_stream(pipe->output);
897 }
898
899 /*
900 * isp_pipeline_resume - Resume streaming on a pipeline
901 * @pipe: ISP pipeline
902 *
903 * Resume video output and input and re-enable pipeline.
904 */
static void isp_pipeline_resume(struct isp_pipeline *pipe)
906 {
907 int singleshot = pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT;
908
909 omap3isp_video_resume(pipe->output, !singleshot);
910 if (singleshot)
911 omap3isp_video_resume(pipe->input, 0);
912 isp_pipeline_enable(pipe, pipe->stream_state);
913 }
914
915 /*
916 * isp_pipeline_suspend - Suspend streaming on a pipeline
917 * @pipe: ISP pipeline
918 *
919 * Suspend pipeline.
920 */
static void isp_pipeline_suspend(struct isp_pipeline *pipe)
922 {
923 isp_pipeline_disable(pipe);
924 }
925
926 /*
927 * isp_pipeline_is_last - Verify if entity has an enabled link to the output
928 * video node
929 * @me: ISP module's media entity
930 *
 * Returns 1 if the entity has an enabled link to the output video node or 0
 * otherwise. This holds only as long as a pipeline can have at most one
 * output node.
934 */
static int isp_pipeline_is_last(struct media_entity *me)
936 {
937 struct isp_pipeline *pipe;
938 struct media_pad *pad;
939
940 pipe = to_isp_pipeline(me);
941 if (!pipe || pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED)
942 return 0;
943 pad = media_pad_remote_pad_first(&pipe->output->pad);
944 return pad->entity == me;
945 }
946
947 /*
 * isp_suspend_module_pipeline - Suspend the pipeline to which the module belongs
 * @me: ISP module's media entity
 *
 * Suspend the whole pipeline if the module's entity has an enabled link to the
 * output video node. This works only as long as a pipeline can have at most
 * one output node.
954 */
static void isp_suspend_module_pipeline(struct media_entity *me)
956 {
957 if (isp_pipeline_is_last(me))
958 isp_pipeline_suspend(to_isp_pipeline(me));
959 }
960
961 /*
 * isp_resume_module_pipeline - Resume the pipeline to which the module belongs
 * @me: ISP module's media entity
 *
 * Resume the whole pipeline if the module's entity has an enabled link to the
 * output video node. This works only as long as a pipeline can have at most
 * one output node.
968 */
static void isp_resume_module_pipeline(struct media_entity *me)
970 {
971 if (isp_pipeline_is_last(me))
972 isp_pipeline_resume(to_isp_pipeline(me));
973 }
974
975 /*
976 * isp_suspend_modules - Suspend ISP submodules.
977 * @isp: OMAP3 ISP device
978 *
 * Returns 0 if all submodules were properly left in the idle state by the
 * suspend, or 1 if a general reset is required to suspend the submodules.
981 */
static int __maybe_unused isp_suspend_modules(struct isp_device *isp)
983 {
984 unsigned long timeout;
985
986 omap3isp_stat_suspend(&isp->isp_aewb);
987 omap3isp_stat_suspend(&isp->isp_af);
988 omap3isp_stat_suspend(&isp->isp_hist);
989 isp_suspend_module_pipeline(&isp->isp_res.subdev.entity);
990 isp_suspend_module_pipeline(&isp->isp_prev.subdev.entity);
991 isp_suspend_module_pipeline(&isp->isp_ccdc.subdev.entity);
992 isp_suspend_module_pipeline(&isp->isp_csi2a.subdev.entity);
993 isp_suspend_module_pipeline(&isp->isp_ccp2.subdev.entity);
994
995 timeout = jiffies + ISP_STOP_TIMEOUT;
996 while (omap3isp_stat_busy(&isp->isp_af)
997 || omap3isp_stat_busy(&isp->isp_aewb)
998 || omap3isp_stat_busy(&isp->isp_hist)
999 || omap3isp_preview_busy(&isp->isp_prev)
1000 || omap3isp_resizer_busy(&isp->isp_res)
1001 || omap3isp_ccdc_busy(&isp->isp_ccdc)) {
1002 if (time_after(jiffies, timeout)) {
1003 dev_info(isp->dev, "can't stop modules.\n");
1004 return 1;
1005 }
1006 msleep(1);
1007 }
1008
1009 return 0;
1010 }
1011
1012 /*
1013 * isp_resume_modules - Resume ISP submodules.
1014 * @isp: OMAP3 ISP device
1015 */
static void __maybe_unused isp_resume_modules(struct isp_device *isp)
1017 {
1018 omap3isp_stat_resume(&isp->isp_aewb);
1019 omap3isp_stat_resume(&isp->isp_af);
1020 omap3isp_stat_resume(&isp->isp_hist);
1021 isp_resume_module_pipeline(&isp->isp_res.subdev.entity);
1022 isp_resume_module_pipeline(&isp->isp_prev.subdev.entity);
1023 isp_resume_module_pipeline(&isp->isp_ccdc.subdev.entity);
1024 isp_resume_module_pipeline(&isp->isp_csi2a.subdev.entity);
1025 isp_resume_module_pipeline(&isp->isp_ccp2.subdev.entity);
1026 }
1027
1028 /*
1029 * isp_reset - Reset ISP with a timeout wait for idle.
1030 * @isp: OMAP3 ISP device
1031 */
static int isp_reset(struct isp_device *isp)
1033 {
1034 unsigned long timeout = 0;
1035
1036 isp_reg_writel(isp,
1037 isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG)
1038 | ISP_SYSCONFIG_SOFTRESET,
1039 OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
1040 while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN,
1041 ISP_SYSSTATUS) & 0x1)) {
1042 if (timeout++ > 10000) {
1043 dev_alert(isp->dev, "cannot reset ISP\n");
1044 return -ETIMEDOUT;
1045 }
1046 udelay(1);
1047 }
1048
1049 isp->stop_failure = false;
1050 media_entity_enum_zero(&isp->crashed);
1051 return 0;
1052 }
1053
1054 /*
1055 * isp_save_context - Saves the values of the ISP module registers.
1056 * @isp: OMAP3 ISP device
 * @reg_list: Array of register address/value pairs, terminated by
 * ISP_TOK_TERM, in which the current register values are saved.
1059 */
1060 static void
isp_save_context(struct isp_device *isp, struct isp_reg *reg_list)
1062 {
1063 struct isp_reg *next = reg_list;
1064
1065 for (; next->reg != ISP_TOK_TERM; next++)
1066 next->val = isp_reg_readl(isp, next->mmio_range, next->reg);
1067 }
1068
1069 /*
1070 * isp_restore_context - Restores the values of the ISP module registers.
1071 * @isp: OMAP3 ISP device
1072 * @reg_list: Structure containing pairs of register address and value to
1073 * modify on OMAP.
1074 */
1075 static void
isp_restore_context(struct isp_device *isp, struct isp_reg *reg_list)
1077 {
1078 struct isp_reg *next = reg_list;
1079
1080 for (; next->reg != ISP_TOK_TERM; next++)
1081 isp_reg_writel(isp, next->val, next->mmio_range, next->reg);
1082 }
1083
1084 /*
1085 * isp_save_ctx - Saves ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
1086 * @isp: OMAP3 ISP device
1087 *
1088 * Routine for saving the context of each module in the ISP.
1089 * CCDC, HIST, H3A, PREV, RESZ and MMU.
1090 */
static void isp_save_ctx(struct isp_device *isp)
1092 {
1093 isp_save_context(isp, isp_reg_list);
1094 omap_iommu_save_ctx(isp->dev);
1095 }
1096
1097 /*
1098 * isp_restore_ctx - Restores ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
1099 * @isp: OMAP3 ISP device
1100 *
1101 * Routine for restoring the context of each module in the ISP.
1102 * CCDC, HIST, H3A, PREV, RESZ and MMU.
1103 */
static void isp_restore_ctx(struct isp_device *isp)
1105 {
1106 isp_restore_context(isp, isp_reg_list);
1107 omap_iommu_restore_ctx(isp->dev);
1108 omap3isp_ccdc_restore_context(isp);
1109 omap3isp_preview_restore_context(isp);
1110 }
1111
1112 /* -----------------------------------------------------------------------------
1113 * SBL resources management
1114 */
1115 #define OMAP3_ISP_SBL_READ (OMAP3_ISP_SBL_CSI1_READ | \
1116 OMAP3_ISP_SBL_CCDC_LSC_READ | \
1117 OMAP3_ISP_SBL_PREVIEW_READ | \
1118 OMAP3_ISP_SBL_RESIZER_READ)
1119 #define OMAP3_ISP_SBL_WRITE (OMAP3_ISP_SBL_CSI1_WRITE | \
1120 OMAP3_ISP_SBL_CSI2A_WRITE | \
1121 OMAP3_ISP_SBL_CSI2C_WRITE | \
1122 OMAP3_ISP_SBL_CCDC_WRITE | \
1123 OMAP3_ISP_SBL_PREVIEW_WRITE)
1124
void omap3isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res)
1126 {
1127 u32 sbl = 0;
1128
1129 isp->sbl_resources |= res;
1130
1131 if (isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ)
1132 sbl |= ISPCTRL_SBL_SHARED_RPORTA;
1133
1134 if (isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ)
1135 sbl |= ISPCTRL_SBL_SHARED_RPORTB;
1136
1137 if (isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE)
1138 sbl |= ISPCTRL_SBL_SHARED_WPORTC;
1139
1140 if (isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE)
1141 sbl |= ISPCTRL_SBL_WR0_RAM_EN;
1142
1143 if (isp->sbl_resources & OMAP3_ISP_SBL_WRITE)
1144 sbl |= ISPCTRL_SBL_WR1_RAM_EN;
1145
1146 if (isp->sbl_resources & OMAP3_ISP_SBL_READ)
1147 sbl |= ISPCTRL_SBL_RD_RAM_EN;
1148
1149 isp_reg_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
1150 }
1151
void omap3isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res)
1153 {
1154 u32 sbl = 0;
1155
1156 isp->sbl_resources &= ~res;
1157
1158 if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ))
1159 sbl |= ISPCTRL_SBL_SHARED_RPORTA;
1160
1161 if (!(isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ))
1162 sbl |= ISPCTRL_SBL_SHARED_RPORTB;
1163
1164 if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE))
1165 sbl |= ISPCTRL_SBL_SHARED_WPORTC;
1166
1167 if (!(isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE))
1168 sbl |= ISPCTRL_SBL_WR0_RAM_EN;
1169
1170 if (!(isp->sbl_resources & OMAP3_ISP_SBL_WRITE))
1171 sbl |= ISPCTRL_SBL_WR1_RAM_EN;
1172
1173 if (!(isp->sbl_resources & OMAP3_ISP_SBL_READ))
1174 sbl |= ISPCTRL_SBL_RD_RAM_EN;
1175
1176 isp_reg_clr(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
1177 }
1178
1179 /*
 * omap3isp_module_sync_idle - Helper to sync a module with its idle state
 * @me: ISP submodule's media entity
 * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
 * @stopping: flag which tells that the module wants to stop
 *
 * This function checks if the ISP submodule needs to wait for the next
 * interrupt. If so, it makes the caller sleep while waiting for that event.
1187 */
int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
1189 atomic_t *stopping)
1190 {
1191 struct isp_pipeline *pipe = to_isp_pipeline(me);
1192
1193 if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED ||
1194 (pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT &&
1195 !isp_pipeline_ready(pipe)))
1196 return 0;
1197
1198 /*
	 * atomic_set() doesn't include a memory barrier on ARM in the SMP
	 * case, so an explicit smp_mb() is needed here to avoid races.
1201 */
1202 atomic_set(stopping, 1);
1203 smp_mb();
1204
1205 /*
1206 * If module is the last one, it's writing to memory. In this case,
1207 * it's necessary to check if the module is already paused due to
1208 * DMA queue underrun or if it has to wait for next interrupt to be
1209 * idle.
	 * If it isn't the last one, the function won't sleep but *stopping
	 * will still be set to let the next interrupt know that the module
	 * wants to be idle.
1213 */
1214 if (isp_pipeline_is_last(me)) {
1215 struct isp_video *video = pipe->output;
1216 unsigned long flags;
1217 spin_lock_irqsave(&video->irqlock, flags);
1218 if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
1219 spin_unlock_irqrestore(&video->irqlock, flags);
1220 atomic_set(stopping, 0);
1221 smp_mb();
1222 return 0;
1223 }
1224 spin_unlock_irqrestore(&video->irqlock, flags);
1225 if (!wait_event_timeout(*wait, !atomic_read(stopping),
1226 msecs_to_jiffies(1000))) {
1227 atomic_set(stopping, 0);
1228 smp_mb();
1229 return -ETIMEDOUT;
1230 }
1231 }
1232
1233 return 0;
1234 }
1235
1236 /*
1237 * omap3isp_module_sync_is_stopping - Helper to verify if module was stopping
1238 * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
 * @stopping: flag which tells that the module wants to stop
 *
 * This function checks if the ISP submodule was stopping. If so, it notifies
 * the caller by setting *stopping to 0 and waking up the wait queue.
1243 * Returns 1 if it was stopping or 0 otherwise.
1244 */
int omap3isp_module_sync_is_stopping(wait_queue_head_t *wait,
1246 atomic_t *stopping)
1247 {
1248 if (atomic_cmpxchg(stopping, 1, 0)) {
1249 wake_up(wait);
1250 return 1;
1251 }
1252
1253 return 0;
1254 }
1255
1256 /* --------------------------------------------------------------------------
1257 * Clock management
1258 */
1259
1260 #define ISPCTRL_CLKS_MASK (ISPCTRL_H3A_CLK_EN | \
1261 ISPCTRL_HIST_CLK_EN | \
1262 ISPCTRL_RSZ_CLK_EN | \
1263 (ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN) | \
1264 (ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN))
1265
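/*
 * __isp_subclk_update - Apply the current subclock resource mask
 * @isp: OMAP3 ISP device
 *
 * Translate the subclk_resources bitmask into the corresponding clock enable
 * bits (and, for the CCDC and preview engine, RAM enable bits) of the
 * ISP_CTRL register.
 */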
static void __isp_subclk_update(struct isp_device *isp)
1267 {
1268 u32 clk = 0;
1269
1270 /* AEWB and AF share the same clock. */
1271 if (isp->subclk_resources &
1272 (OMAP3_ISP_SUBCLK_AEWB | OMAP3_ISP_SUBCLK_AF))
1273 clk |= ISPCTRL_H3A_CLK_EN;
1274
1275 if (isp->subclk_resources & OMAP3_ISP_SUBCLK_HIST)
1276 clk |= ISPCTRL_HIST_CLK_EN;
1277
1278 if (isp->subclk_resources & OMAP3_ISP_SUBCLK_RESIZER)
1279 clk |= ISPCTRL_RSZ_CLK_EN;
1280
1281 /* NOTE: For CCDC & Preview submodules, we need to affect internal
1282 * RAM as well.
1283 */
1284 if (isp->subclk_resources & OMAP3_ISP_SUBCLK_CCDC)
1285 clk |= ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN;
1286
1287 if (isp->subclk_resources & OMAP3_ISP_SUBCLK_PREVIEW)
1288 clk |= ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN;
1289
1290 isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
1291 ISPCTRL_CLKS_MASK, clk);
1292 }
1293
void omap3isp_subclk_enable(struct isp_device *isp,
1295 enum isp_subclk_resource res)
1296 {
1297 isp->subclk_resources |= res;
1298
1299 __isp_subclk_update(isp);
1300 }
1301
void omap3isp_subclk_disable(struct isp_device *isp,
1303 enum isp_subclk_resource res)
1304 {
1305 isp->subclk_resources &= ~res;
1306
1307 __isp_subclk_update(isp);
1308 }
1309
1310 /*
1311 * isp_enable_clocks - Enable ISP clocks
1312 * @isp: OMAP3 ISP device
1313 *
1314 * Return 0 if successful, or clk_prepare_enable return value if any of them
1315 * fails.
1316 */
static int isp_enable_clocks(struct isp_device *isp)
1318 {
1319 int r;
1320 unsigned long rate;
1321
1322 r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);
1323 if (r) {
1324 dev_err(isp->dev, "failed to enable cam_ick clock\n");
1325 goto out_clk_enable_ick;
1326 }
1327 r = clk_set_rate(isp->clock[ISP_CLK_CAM_MCLK], CM_CAM_MCLK_HZ);
1328 if (r) {
1329 dev_err(isp->dev, "clk_set_rate for cam_mclk failed\n");
1330 goto out_clk_enable_mclk;
1331 }
1332 r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]);
1333 if (r) {
1334 dev_err(isp->dev, "failed to enable cam_mclk clock\n");
1335 goto out_clk_enable_mclk;
1336 }
1337 rate = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
1338 if (rate != CM_CAM_MCLK_HZ)
1339 dev_warn(isp->dev, "unexpected cam_mclk rate:\n"
1340 " expected : %d\n"
1341 " actual : %ld\n", CM_CAM_MCLK_HZ, rate);
1342 r = clk_prepare_enable(isp->clock[ISP_CLK_CSI2_FCK]);
1343 if (r) {
1344 dev_err(isp->dev, "failed to enable csi2_fck clock\n");
1345 goto out_clk_enable_csi2_fclk;
1346 }
1347 return 0;
1348
1349 out_clk_enable_csi2_fclk:
1350 clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
1351 out_clk_enable_mclk:
1352 clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
1353 out_clk_enable_ick:
1354 return r;
1355 }
1356
1357 /*
1358 * isp_disable_clocks - Disable ISP clocks
1359 * @isp: OMAP3 ISP device
1360 */
static void isp_disable_clocks(struct isp_device *isp)
1362 {
1363 clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
1364 clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
1365 clk_disable_unprepare(isp->clock[ISP_CLK_CSI2_FCK]);
1366 }
1367
1368 static const char *isp_clocks[] = {
1369 "cam_ick",
1370 "cam_mclk",
1371 "csi2_96m_fck",
1372 "l3_ick",
1373 };
1374
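/*
 * isp_get_clocks - Look up the ISP clocks
 * @isp: OMAP3 ISP device
 *
 * Acquire the clocks listed in isp_clocks[] through devm_clk_get() and store
 * them in isp->clock[]. Return 0 on success or a negative error code if any
 * of them is missing.
 */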
static int isp_get_clocks(struct isp_device *isp)
1376 {
1377 struct clk *clk;
1378 unsigned int i;
1379
1380 for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
1381 clk = devm_clk_get(isp->dev, isp_clocks[i]);
1382 if (IS_ERR(clk)) {
1383 dev_err(isp->dev, "clk_get %s failed\n", isp_clocks[i]);
1384 return PTR_ERR(clk);
1385 }
1386
1387 isp->clock[i] = clk;
1388 }
1389
1390 return 0;
1391 }
1392
1393 /*
1394 * omap3isp_get - Acquire the ISP resource.
1395 *
1396 * Initializes the clocks for the first acquire.
1397 *
1398 * Increment the reference count on the ISP. If the first reference is taken,
1399 * enable clocks and power-up all submodules.
1400 *
1401 * Return a pointer to the ISP device structure, or NULL if an error occurred.
1402 */
static struct isp_device *__omap3isp_get(struct isp_device *isp, bool irq)
1404 {
1405 struct isp_device *__isp = isp;
1406
1407 if (isp == NULL)
1408 return NULL;
1409
1410 mutex_lock(&isp->isp_mutex);
1411 if (isp->ref_count > 0)
1412 goto out;
1413
1414 if (isp_enable_clocks(isp) < 0) {
1415 __isp = NULL;
1416 goto out;
1417 }
1418
1419 /* We don't want to restore context before saving it! */
1420 if (isp->has_context)
1421 isp_restore_ctx(isp);
1422
1423 if (irq)
1424 isp_enable_interrupts(isp);
1425
1426 out:
1427 if (__isp != NULL)
1428 isp->ref_count++;
1429 mutex_unlock(&isp->isp_mutex);
1430
1431 return __isp;
1432 }
1433
struct isp_device *omap3isp_get(struct isp_device *isp)
1435 {
1436 return __omap3isp_get(isp, true);
1437 }
1438
1439 /*
1440 * omap3isp_put - Release the ISP
1441 *
1442 * Decrement the reference count on the ISP. If the last reference is released,
1443 * power-down all submodules, disable clocks and free temporary buffers.
1444 */
static void __omap3isp_put(struct isp_device *isp, bool save_ctx)
1446 {
1447 if (isp == NULL)
1448 return;
1449
1450 mutex_lock(&isp->isp_mutex);
1451 BUG_ON(isp->ref_count == 0);
1452 if (--isp->ref_count == 0) {
1453 isp_disable_interrupts(isp);
1454 if (save_ctx) {
1455 isp_save_ctx(isp);
1456 isp->has_context = 1;
1457 }
1458 /* Reset the ISP if an entity has failed to stop. This is the
1459 * only way to recover from such conditions.
1460 */
1461 if (!media_entity_enum_empty(&isp->crashed) ||
1462 isp->stop_failure)
1463 isp_reset(isp);
1464 isp_disable_clocks(isp);
1465 }
1466 mutex_unlock(&isp->isp_mutex);
1467 }
1468
void omap3isp_put(struct isp_device *isp)
1470 {
1471 __omap3isp_put(isp, true);
1472 }
1473
1474 /* --------------------------------------------------------------------------
1475 * Platform device driver
1476 */
1477
1478 /*
1479 * omap3isp_print_status - Prints the values of the ISP Control Module registers
1480 * @isp: OMAP3 ISP device
1481 */
1482 #define ISP_PRINT_REGISTER(isp, name)\
1483 dev_dbg(isp->dev, "###ISP " #name "=0x%08x\n", \
1484 isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_##name))
1485 #define SBL_PRINT_REGISTER(isp, name)\
1486 dev_dbg(isp->dev, "###SBL " #name "=0x%08x\n", \
1487 isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_##name))
1488
void omap3isp_print_status(struct isp_device *isp)
1490 {
1491 dev_dbg(isp->dev, "-------------ISP Register dump--------------\n");
1492
1493 ISP_PRINT_REGISTER(isp, SYSCONFIG);
1494 ISP_PRINT_REGISTER(isp, SYSSTATUS);
1495 ISP_PRINT_REGISTER(isp, IRQ0ENABLE);
1496 ISP_PRINT_REGISTER(isp, IRQ0STATUS);
1497 ISP_PRINT_REGISTER(isp, TCTRL_GRESET_LENGTH);
1498 ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_REPLAY);
1499 ISP_PRINT_REGISTER(isp, CTRL);
1500 ISP_PRINT_REGISTER(isp, TCTRL_CTRL);
1501 ISP_PRINT_REGISTER(isp, TCTRL_FRAME);
1502 ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_DELAY);
1503 ISP_PRINT_REGISTER(isp, TCTRL_STRB_DELAY);
1504 ISP_PRINT_REGISTER(isp, TCTRL_SHUT_DELAY);
1505 ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_LENGTH);
1506 ISP_PRINT_REGISTER(isp, TCTRL_STRB_LENGTH);
1507 ISP_PRINT_REGISTER(isp, TCTRL_SHUT_LENGTH);
1508
1509 SBL_PRINT_REGISTER(isp, PCR);
1510 SBL_PRINT_REGISTER(isp, SDR_REQ_EXP);
1511
1512 dev_dbg(isp->dev, "--------------------------------------------\n");
1513 }
1514
1515 #ifdef CONFIG_PM
1516
1517 /*
1518 * Power management support.
1519 *
 * As the ISP can't properly handle an input video stream interruption on a
 * non-frame boundary, the ISP pipelines need to be stopped before sensors get
1522 * suspended. However, as suspending the sensors can require a running clock,
1523 * which can be provided by the ISP, the ISP can't be completely suspended
1524 * before the sensor.
1525 *
1526 * To solve this problem power management support is split into prepare/complete
1527 * and suspend/resume operations. The pipelines are stopped in prepare() and the
1528 * ISP clocks get disabled in suspend(). Similarly, the clocks are re-enabled in
1529 * resume(), and the pipelines are restarted in complete().
1530 *
1531 * TODO: PM dependencies between the ISP and sensors are not modelled explicitly
1532 * yet.
1533 */
static int isp_pm_prepare(struct device *dev)
1535 {
1536 struct isp_device *isp = dev_get_drvdata(dev);
1537 int reset;
1538
1539 WARN_ON(mutex_is_locked(&isp->isp_mutex));
1540
1541 if (isp->ref_count == 0)
1542 return 0;
1543
1544 reset = isp_suspend_modules(isp);
1545 isp_disable_interrupts(isp);
1546 isp_save_ctx(isp);
1547 if (reset)
1548 isp_reset(isp);
1549
1550 return 0;
1551 }
1552
static int isp_pm_suspend(struct device *dev)
1554 {
1555 struct isp_device *isp = dev_get_drvdata(dev);
1556
1557 WARN_ON(mutex_is_locked(&isp->isp_mutex));
1558
1559 if (isp->ref_count)
1560 isp_disable_clocks(isp);
1561
1562 return 0;
1563 }
1564
static int isp_pm_resume(struct device *dev)
1566 {
1567 struct isp_device *isp = dev_get_drvdata(dev);
1568
1569 if (isp->ref_count == 0)
1570 return 0;
1571
1572 return isp_enable_clocks(isp);
1573 }
1574
static void isp_pm_complete(struct device *dev)
1576 {
1577 struct isp_device *isp = dev_get_drvdata(dev);
1578
1579 if (isp->ref_count == 0)
1580 return;
1581
1582 isp_restore_ctx(isp);
1583 isp_enable_interrupts(isp);
1584 isp_resume_modules(isp);
1585 }
1586
1587 #else
1588
1589 #define isp_pm_prepare NULL
1590 #define isp_pm_suspend NULL
1591 #define isp_pm_resume NULL
1592 #define isp_pm_complete NULL
1593
1594 #endif /* CONFIG_PM */
1595
static void isp_unregister_entities(struct isp_device *isp)
1597 {
1598 media_device_unregister(&isp->media_dev);
1599
1600 omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
1601 omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
1602 omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
1603 omap3isp_preview_unregister_entities(&isp->isp_prev);
1604 omap3isp_resizer_unregister_entities(&isp->isp_res);
1605 omap3isp_stat_unregister_entities(&isp->isp_aewb);
1606 omap3isp_stat_unregister_entities(&isp->isp_af);
1607 omap3isp_stat_unregister_entities(&isp->isp_hist);
1608
1609 v4l2_device_unregister(&isp->v4l2_dev);
1610 media_device_cleanup(&isp->media_dev);
1611 }
1612
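/*
 * isp_link_entity - Link an external entity to the ISP input it is wired to
 * @isp: OMAP3 ISP device
 * @entity: External entity (typically a sensor subdev)
 * @interface: ISP interface the entity is connected to
 *
 * Create a media link from the entity's first source pad to the sink pad of
 * the CCDC, CSI2 or CCP2 receiver selected by @interface.
 */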
static int isp_link_entity(
1614 struct isp_device *isp, struct media_entity *entity,
1615 enum isp_interface_type interface)
1616 {
1617 struct media_entity *input;
1618 unsigned int flags;
1619 unsigned int pad;
1620 unsigned int i;
1621
1622 /* Connect the sensor to the correct interface module.
1623 * Parallel sensors are connected directly to the CCDC, while
1624 * serial sensors are connected to the CSI2a, CCP2b or CSI2c
1625 * receiver through CSIPHY1 or CSIPHY2.
1626 */
1627 switch (interface) {
1628 case ISP_INTERFACE_PARALLEL:
1629 input = &isp->isp_ccdc.subdev.entity;
1630 pad = CCDC_PAD_SINK;
1631 flags = 0;
1632 break;
1633
1634 case ISP_INTERFACE_CSI2A_PHY2:
1635 input = &isp->isp_csi2a.subdev.entity;
1636 pad = CSI2_PAD_SINK;
1637 flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
1638 break;
1639
1640 case ISP_INTERFACE_CCP2B_PHY1:
1641 case ISP_INTERFACE_CCP2B_PHY2:
1642 input = &isp->isp_ccp2.subdev.entity;
1643 pad = CCP2_PAD_SINK;
1644 flags = 0;
1645 break;
1646
1647 case ISP_INTERFACE_CSI2C_PHY1:
1648 input = &isp->isp_csi2c.subdev.entity;
1649 pad = CSI2_PAD_SINK;
1650 flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
1651 break;
1652
1653 default:
1654 dev_err(isp->dev, "%s: invalid interface type %u\n", __func__,
1655 interface);
1656 return -EINVAL;
1657 }
1658
1659 /*
1660 * Not all interfaces are available on all revisions of the
1661 * ISP. The sub-devices of those interfaces aren't initialised
1662 * in such a case. Check this by ensuring the num_pads is
1663 * non-zero.
1664 */
1665 if (!input->num_pads) {
1666 dev_err(isp->dev, "%s: invalid input %u\n", entity->name,
1667 interface);
1668 return -EINVAL;
1669 }
1670
1671 for (i = 0; i < entity->num_pads; i++) {
1672 if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE)
1673 break;
1674 }
1675 if (i == entity->num_pads) {
1676 dev_err(isp->dev, "%s: no source pad in external entity %s\n",
1677 __func__, entity->name);
1678 return -EINVAL;
1679 }
1680
1681 return media_create_pad_link(entity, i, input, pad, flags);
1682 }
1683
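/*
 * isp_register_entities - Register the media device, the V4L2 device and all
 * internal ISP entities
 * @isp: OMAP3 ISP device
 *
 * On failure, unregister everything that has been registered so far.
 */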
static int isp_register_entities(struct isp_device *isp)
1685 {
1686 int ret;
1687
1688 isp->media_dev.dev = isp->dev;
1689 strscpy(isp->media_dev.model, "TI OMAP3 ISP",
1690 sizeof(isp->media_dev.model));
1691 isp->media_dev.hw_revision = isp->revision;
1692 isp->media_dev.ops = &isp_media_ops;
1693 media_device_init(&isp->media_dev);
1694
1695 isp->v4l2_dev.mdev = &isp->media_dev;
1696 ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
1697 if (ret < 0) {
1698 dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n",
1699 __func__, ret);
1700 goto done;
1701 }
1702
1703 /* Register internal entities */
1704 ret = omap3isp_ccp2_register_entities(&isp->isp_ccp2, &isp->v4l2_dev);
1705 if (ret < 0)
1706 goto done;
1707
1708 ret = omap3isp_csi2_register_entities(&isp->isp_csi2a, &isp->v4l2_dev);
1709 if (ret < 0)
1710 goto done;
1711
1712 ret = omap3isp_ccdc_register_entities(&isp->isp_ccdc, &isp->v4l2_dev);
1713 if (ret < 0)
1714 goto done;
1715
1716 ret = omap3isp_preview_register_entities(&isp->isp_prev,
1717 &isp->v4l2_dev);
1718 if (ret < 0)
1719 goto done;
1720
1721 ret = omap3isp_resizer_register_entities(&isp->isp_res, &isp->v4l2_dev);
1722 if (ret < 0)
1723 goto done;
1724
1725 ret = omap3isp_stat_register_entities(&isp->isp_aewb, &isp->v4l2_dev);
1726 if (ret < 0)
1727 goto done;
1728
1729 ret = omap3isp_stat_register_entities(&isp->isp_af, &isp->v4l2_dev);
1730 if (ret < 0)
1731 goto done;
1732
1733 ret = omap3isp_stat_register_entities(&isp->isp_hist, &isp->v4l2_dev);
1734 if (ret < 0)
1735 goto done;
1736
1737 done:
1738 if (ret < 0)
1739 isp_unregister_entities(isp);
1740
1741 return ret;
1742 }
1743
1744 /*
1745 * isp_create_links() - Create links for internal and external ISP entities
1746 * @isp : Pointer to ISP device
1747 *
1748 * This function creates all links between ISP internal and external entities.
1749 *
1750 * Return: A negative error code on failure or zero on success. Possible error
1751 * codes are those returned by media_create_pad_link().
1752 */
static int isp_create_links(struct isp_device *isp)
1754 {
1755 int ret;
1756
1757 /* Create links between entities and video nodes. */
1758 ret = media_create_pad_link(
1759 &isp->isp_csi2a.subdev.entity, CSI2_PAD_SOURCE,
1760 &isp->isp_csi2a.video_out.video.entity, 0, 0);
1761 if (ret < 0)
1762 return ret;
1763
1764 ret = media_create_pad_link(
1765 &isp->isp_ccp2.video_in.video.entity, 0,
1766 &isp->isp_ccp2.subdev.entity, CCP2_PAD_SINK, 0);
1767 if (ret < 0)
1768 return ret;
1769
1770 ret = media_create_pad_link(
1771 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_OF,
1772 &isp->isp_ccdc.video_out.video.entity, 0, 0);
1773 if (ret < 0)
1774 return ret;
1775
1776 ret = media_create_pad_link(
1777 &isp->isp_prev.video_in.video.entity, 0,
1778 &isp->isp_prev.subdev.entity, PREV_PAD_SINK, 0);
1779 if (ret < 0)
1780 return ret;
1781
1782 ret = media_create_pad_link(
1783 &isp->isp_prev.subdev.entity, PREV_PAD_SOURCE,
1784 &isp->isp_prev.video_out.video.entity, 0, 0);
1785 if (ret < 0)
1786 return ret;
1787
1788 ret = media_create_pad_link(
1789 &isp->isp_res.video_in.video.entity, 0,
1790 &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
1791 if (ret < 0)
1792 return ret;
1793
1794 ret = media_create_pad_link(
1795 &isp->isp_res.subdev.entity, RESZ_PAD_SOURCE,
1796 &isp->isp_res.video_out.video.entity, 0, 0);
1797
1798 if (ret < 0)
1799 return ret;
1800
1801 /* Create links between entities. */
1802 ret = media_create_pad_link(
1803 &isp->isp_csi2a.subdev.entity, CSI2_PAD_SOURCE,
1804 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
1805 if (ret < 0)
1806 return ret;
1807
1808 ret = media_create_pad_link(
1809 &isp->isp_ccp2.subdev.entity, CCP2_PAD_SOURCE,
1810 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
1811 if (ret < 0)
1812 return ret;
1813
1814 ret = media_create_pad_link(
1815 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
1816 &isp->isp_prev.subdev.entity, PREV_PAD_SINK, 0);
1817 if (ret < 0)
1818 return ret;
1819
1820 ret = media_create_pad_link(
1821 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_OF,
1822 &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
1823 if (ret < 0)
1824 return ret;
1825
1826 ret = media_create_pad_link(
1827 &isp->isp_prev.subdev.entity, PREV_PAD_SOURCE,
1828 &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
1829 if (ret < 0)
1830 return ret;
1831
1832 ret = media_create_pad_link(
1833 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
1834 &isp->isp_aewb.subdev.entity, 0,
1835 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1836 if (ret < 0)
1837 return ret;
1838
1839 ret = media_create_pad_link(
1840 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
1841 &isp->isp_af.subdev.entity, 0,
1842 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1843 if (ret < 0)
1844 return ret;
1845
1846 ret = media_create_pad_link(
1847 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
1848 &isp->isp_hist.subdev.entity, 0,
1849 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1850 if (ret < 0)
1851 return ret;
1852
1853 return 0;
1854 }
1855
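/*
 * Summary of the links created by isp_create_links() above (video device
 * nodes in quotes; all links start disabled unless noted otherwise):
 *
 *   CSI2a source     -> "CSI2a output" video node and CCDC sink
 *   "CCP2 input"     -> CCP2 sink, CCP2 source -> CCDC sink
 *   CCDC OF source   -> "CCDC output" video node and resizer sink
 *   CCDC VP source   -> preview sink, and (enabled, immutable) to the
 *                       AEWB, AF and histogram statistics engines
 *   "preview input"  -> preview sink, preview source -> "preview output"
 *                       video node and resizer sink
 *   "resizer input"  -> resizer sink, resizer source -> "resizer output"
 */
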
static void isp_cleanup_modules(struct isp_device *isp)
{
	omap3isp_h3a_aewb_cleanup(isp);
	omap3isp_h3a_af_cleanup(isp);
	omap3isp_hist_cleanup(isp);
	omap3isp_resizer_cleanup(isp);
	omap3isp_preview_cleanup(isp);
	omap3isp_ccdc_cleanup(isp);
	omap3isp_ccp2_cleanup(isp);
	omap3isp_csi2_cleanup(isp);
	omap3isp_csiphy_cleanup(isp);
}

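/*
 * isp_initialize_modules - Initialize all ISP sub-modules
 * @isp: Pointer to ISP device
 *
 * The sub-modules are initialized in a fixed order: CSI PHY, CSI2, CCP2,
 * CCDC, preview engine, resizer, histogram, H3A AEWB and H3A AF. On failure
 * the modules initialized so far are cleaned up again through the error
 * labels below; isp_cleanup_modules() above performs the same teardown once
 * everything has been initialized.
 */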
static int isp_initialize_modules(struct isp_device *isp)
{
	int ret;

	ret = omap3isp_csiphy_init(isp);
	if (ret < 0) {
		dev_err(isp->dev, "CSI PHY initialization failed\n");
		return ret;
	}

	ret = omap3isp_csi2_init(isp);
	if (ret < 0) {
		dev_err(isp->dev, "CSI2 initialization failed\n");
		goto error_csi2;
	}

	ret = omap3isp_ccp2_init(isp);
	if (ret < 0) {
		dev_err_probe(isp->dev, ret, "CCP2 initialization failed\n");
		goto error_ccp2;
	}

	ret = omap3isp_ccdc_init(isp);
	if (ret < 0) {
		dev_err(isp->dev, "CCDC initialization failed\n");
		goto error_ccdc;
	}

	ret = omap3isp_preview_init(isp);
	if (ret < 0) {
		dev_err(isp->dev, "Preview initialization failed\n");
		goto error_preview;
	}

	ret = omap3isp_resizer_init(isp);
	if (ret < 0) {
		dev_err(isp->dev, "Resizer initialization failed\n");
		goto error_resizer;
	}

	ret = omap3isp_hist_init(isp);
	if (ret < 0) {
		dev_err(isp->dev, "Histogram initialization failed\n");
		goto error_hist;
	}

	ret = omap3isp_h3a_aewb_init(isp);
	if (ret < 0) {
		dev_err(isp->dev, "H3A AEWB initialization failed\n");
		goto error_h3a_aewb;
	}

	ret = omap3isp_h3a_af_init(isp);
	if (ret < 0) {
		dev_err(isp->dev, "H3A AF initialization failed\n");
		goto error_h3a_af;
	}

	return 0;

error_h3a_af:
	omap3isp_h3a_aewb_cleanup(isp);
error_h3a_aewb:
	omap3isp_hist_cleanup(isp);
error_hist:
	omap3isp_resizer_cleanup(isp);
error_resizer:
	omap3isp_preview_cleanup(isp);
error_preview:
	omap3isp_ccdc_cleanup(isp);
error_ccdc:
	omap3isp_ccp2_cleanup(isp);
error_ccp2:
	omap3isp_csi2_cleanup(isp);
error_csi2:
	omap3isp_csiphy_cleanup(isp);

	return ret;
}

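/*
 * The ISP accesses its buffers through an IOMMU. With the ARM DMA/IOMMU glue
 * available (CONFIG_ARM_DMA_USE_IOMMU), isp_attach_iommu() creates a 2 GiB
 * ARM IOMMU mapping starting at 1 GiB and attaches it to the ISP device so
 * that the generic DMA mapping API allocates ISP virtual addresses from it;
 * isp_detach_iommu() undoes both steps. Without that option the attach helper
 * returns -ENODEV and probing fails.
 */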
static void isp_detach_iommu(struct isp_device *isp)
{
#ifdef CONFIG_ARM_DMA_USE_IOMMU
	arm_iommu_detach_device(isp->dev);
	arm_iommu_release_mapping(isp->mapping);
	isp->mapping = NULL;
#endif
}

static int isp_attach_iommu(struct isp_device *isp)
{
#ifdef CONFIG_ARM_DMA_USE_IOMMU
	struct dma_iommu_mapping *mapping;
	int ret;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 */
	mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
	if (IS_ERR(mapping)) {
		dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
		return PTR_ERR(mapping);
	}

	isp->mapping = mapping;

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(isp->dev, mapping);
	if (ret < 0) {
		dev_err(isp->dev, "failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	arm_iommu_release_mapping(isp->mapping);
	isp->mapping = NULL;
	return ret;
#else
	return -ENODEV;
#endif
}

/*
 * isp_remove - Remove ISP platform device
 * @pdev: Pointer to ISP platform device
 */
static void isp_remove(struct platform_device *pdev)
{
	struct isp_device *isp = platform_get_drvdata(pdev);

	v4l2_async_nf_unregister(&isp->notifier);
	v4l2_async_nf_cleanup(&isp->notifier);
	isp_unregister_entities(isp);
	isp_cleanup_modules(isp);
	isp_xclk_cleanup(isp);

	__omap3isp_get(isp, false);
	isp_detach_iommu(isp);
	__omap3isp_put(isp, false);

	media_entity_enum_cleanup(&isp->crashed);

	kfree(isp);
}

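/*
 * Firmware graph port indices of the ISP, used as port numbers by
 * isp_parse_of_endpoints(): port 0 carries the parallel bus, ports 1 and 2
 * the serial buses behind CSIPHY1 and CSIPHY2 respectively.
 */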
enum isp_of_phy {
	ISP_OF_PHY_PARALLEL = 0,
	ISP_OF_PHY_CSIPHY1,
	ISP_OF_PHY_CSIPHY2,
};

static int isp_subdev_notifier_bound(struct v4l2_async_notifier *async,
				     struct v4l2_subdev *sd,
				     struct v4l2_async_connection *asc)
{
	struct isp_device *isp = container_of(async, struct isp_device,
					      notifier);
	struct isp_bus_cfg *bus_cfg =
		&container_of(asc, struct isp_async_subdev, asd)->bus;
	int ret;

	mutex_lock(&isp->media_dev.graph_mutex);
	ret = isp_link_entity(isp, &sd->entity, bus_cfg->interface);
	mutex_unlock(&isp->media_dev.graph_mutex);

	return ret;
}

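/*
 * isp_subdev_notifier_complete - Finalize registration once all subdevs bound
 * @async: V4L2 async notifier embedded in the ISP device
 *
 * Called by the V4L2 async framework when all external subdevs described in
 * the firmware have been bound. Initialize the "crashed" entity enumeration,
 * create the subdev device nodes and register the media device, making the
 * complete graph visible to userspace.
 */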
static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
{
	struct isp_device *isp = container_of(async, struct isp_device,
					      notifier);
	int ret;

	mutex_lock(&isp->media_dev.graph_mutex);
	ret = media_entity_enum_init(&isp->crashed, &isp->media_dev);
	mutex_unlock(&isp->media_dev.graph_mutex);
	if (ret)
		return ret;

	ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
	if (ret < 0)
		return ret;

	return media_device_register(&isp->media_dev);
}

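/*
 * isp_parse_of_parallel_endpoint - Parse a parallel bus endpoint
 *
 * Translate the fwnode parallel bus flags and data shift of @vep into the
 * ISP parallel bus configuration stored in @buscfg.
 */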
static void isp_parse_of_parallel_endpoint(struct device *dev,
					   struct v4l2_fwnode_endpoint *vep,
					   struct isp_bus_cfg *buscfg)
{
	buscfg->interface = ISP_INTERFACE_PARALLEL;
	buscfg->bus.parallel.data_lane_shift = vep->bus.parallel.data_shift;
	buscfg->bus.parallel.clk_pol =
		!!(vep->bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING);
	buscfg->bus.parallel.hs_pol =
		!!(vep->bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
	buscfg->bus.parallel.vs_pol =
		!!(vep->bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
	buscfg->bus.parallel.fld_pol =
		!!(vep->bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
	buscfg->bus.parallel.data_pol =
		!!(vep->bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
	buscfg->bus.parallel.bt656 = vep->bus_type == V4L2_MBUS_BT656;
}

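/*
 * isp_parse_of_csi2_endpoint - Parse a CSI-2 D-PHY endpoint
 *
 * Record the physical position and polarity of the clock lane and of each
 * data lane. In the fwnode description lane_polarities[0] refers to the
 * clock lane and entries 1..n to the data lanes, hence the i + 1 indexing
 * below.
 */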
static void isp_parse_of_csi2_endpoint(struct device *dev,
				       struct v4l2_fwnode_endpoint *vep,
				       struct isp_bus_cfg *buscfg)
{
	unsigned int i;

	buscfg->bus.csi2.lanecfg.clk.pos = vep->bus.mipi_csi2.clock_lane;
	buscfg->bus.csi2.lanecfg.clk.pol =
		vep->bus.mipi_csi2.lane_polarities[0];
	dev_dbg(dev, "clock lane polarity %u, pos %u\n",
		buscfg->bus.csi2.lanecfg.clk.pol,
		buscfg->bus.csi2.lanecfg.clk.pos);

	buscfg->bus.csi2.num_data_lanes = vep->bus.mipi_csi2.num_data_lanes;

	for (i = 0; i < buscfg->bus.csi2.num_data_lanes; i++) {
		buscfg->bus.csi2.lanecfg.data[i].pos =
			vep->bus.mipi_csi2.data_lanes[i];
		buscfg->bus.csi2.lanecfg.data[i].pol =
			vep->bus.mipi_csi2.lane_polarities[i + 1];
		dev_dbg(dev, "data lane %u polarity %u, pos %u\n", i,
			buscfg->bus.csi2.lanecfg.data[i].pol,
			buscfg->bus.csi2.lanecfg.data[i].pos);
	}

	/*
	 * FIXME: now we assume the CRC is always there. Implement a way to
	 * obtain this information from the sensor. Frame descriptors, perhaps?
	 */
	buscfg->bus.csi2.crc = 1;
}

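/*
 * isp_parse_of_csi1_endpoint - Parse a CSI-1 or CCP2 endpoint
 *
 * Both bus types are handled by the CCP2 receiver. Record the clock and data
 * lane positions and polarities; clock_inv and strobe select the strobe/clock
 * polarity and the PHY layer, and ccp2_mode records whether the endpoint uses
 * CCP2 rather than CSI-1 signalling.
 */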
static void isp_parse_of_csi1_endpoint(struct device *dev,
				       struct v4l2_fwnode_endpoint *vep,
				       struct isp_bus_cfg *buscfg)
{
	buscfg->bus.ccp2.lanecfg.clk.pos = vep->bus.mipi_csi1.clock_lane;
	buscfg->bus.ccp2.lanecfg.clk.pol = vep->bus.mipi_csi1.lane_polarity[0];
	dev_dbg(dev, "clock lane polarity %u, pos %u\n",
		buscfg->bus.ccp2.lanecfg.clk.pol,
		buscfg->bus.ccp2.lanecfg.clk.pos);

	buscfg->bus.ccp2.lanecfg.data[0].pos = vep->bus.mipi_csi1.data_lane;
	buscfg->bus.ccp2.lanecfg.data[0].pol =
		vep->bus.mipi_csi1.lane_polarity[1];
	dev_dbg(dev, "data lane polarity %u, pos %u\n",
		buscfg->bus.ccp2.lanecfg.data[0].pol,
		buscfg->bus.ccp2.lanecfg.data[0].pos);

	buscfg->bus.ccp2.strobe_clk_pol = vep->bus.mipi_csi1.clock_inv;
	buscfg->bus.ccp2.phy_layer = vep->bus.mipi_csi1.strobe;
	buscfg->bus.ccp2.ccp2_mode = vep->bus_type == V4L2_MBUS_CCP2;
	buscfg->bus.ccp2.vp_clk_pol = 1;

	buscfg->bus.ccp2.crc = 1;
}

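/*
 * Map each CSI PHY firmware port to the ISP interface used when the connected
 * endpoint is a CSI-2 bus (csi2_if) or a CSI-1/CCP2 bus (csi1_if): CSIPHY1
 * feeds the CSI2c/CCP2b receivers and CSIPHY2 the CSI2a/CCP2b receivers.
 */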
static struct {
	u32 phy;
	u32 csi2_if;
	u32 csi1_if;
} isp_bus_interfaces[2] = {
	{ ISP_OF_PHY_CSIPHY1,
	  ISP_INTERFACE_CSI2C_PHY1, ISP_INTERFACE_CCP2B_PHY1 },
	{ ISP_OF_PHY_CSIPHY2,
	  ISP_INTERFACE_CSI2A_PHY2, ISP_INTERFACE_CCP2B_PHY2 },
};

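/*
 * isp_parse_of_endpoints - Parse the ISP firmware ports
 * @isp: Pointer to ISP device
 *
 * Walk the parallel port and both CSI PHY ports, parse each connected
 * endpoint (trying CSI-2 D-PHY first, then CSI-1, then CCP2 on the serial
 * ports) and add the remote subdev to the async notifier together with its
 * parsed bus configuration.
 *
 * Illustrative device tree fragment only; node labels and lane numbers are
 * made up for the example and the ti,omap3-isp binding remains authoritative.
 * A CSI-2 sensor wired to CSIPHY2 would be described under port 2 of the ISP
 * node roughly as:
 *
 *	port@2 {
 *		reg = <2>;
 *		csi2a_ep: endpoint {
 *			remote-endpoint = <&sensor_out>;
 *			clock-lanes = <2>;
 *			data-lanes = <1 3>;
 *		};
 *	};
 */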
static int isp_parse_of_endpoints(struct isp_device *isp)
{
	struct fwnode_handle *ep;
	struct isp_async_subdev *isd = NULL;
	unsigned int i;

	ep = fwnode_graph_get_endpoint_by_id(
		dev_fwnode(isp->dev), ISP_OF_PHY_PARALLEL, 0,
		FWNODE_GRAPH_ENDPOINT_NEXT);

	if (ep) {
		struct v4l2_fwnode_endpoint vep = {
			.bus_type = V4L2_MBUS_PARALLEL
		};
		int ret;

		dev_dbg(isp->dev, "parsing parallel interface\n");

		ret = v4l2_fwnode_endpoint_parse(ep, &vep);

		if (!ret) {
			isd = v4l2_async_nf_add_fwnode_remote(&isp->notifier, ep,
							      struct isp_async_subdev);
			if (!IS_ERR(isd))
				isp_parse_of_parallel_endpoint(isp->dev, &vep,
							       &isd->bus);
		}

		fwnode_handle_put(ep);
	}

	for (i = 0; i < ARRAY_SIZE(isp_bus_interfaces); i++) {
		struct v4l2_fwnode_endpoint vep = {
			.bus_type = V4L2_MBUS_CSI2_DPHY
		};
		int ret;

		ep = fwnode_graph_get_endpoint_by_id(
			dev_fwnode(isp->dev), isp_bus_interfaces[i].phy, 0,
			FWNODE_GRAPH_ENDPOINT_NEXT);

		if (!ep)
			continue;

		dev_dbg(isp->dev, "parsing serial interface %u, node %pOF\n", i,
			to_of_node(ep));

		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
		if (ret == -ENXIO) {
			vep = (struct v4l2_fwnode_endpoint)
				{ .bus_type = V4L2_MBUS_CSI1 };
			ret = v4l2_fwnode_endpoint_parse(ep, &vep);

			if (ret == -ENXIO) {
				vep = (struct v4l2_fwnode_endpoint)
					{ .bus_type = V4L2_MBUS_CCP2 };
				ret = v4l2_fwnode_endpoint_parse(ep, &vep);
			}
		}

		if (!ret) {
			isd = v4l2_async_nf_add_fwnode_remote(&isp->notifier, ep,
							      struct isp_async_subdev);

			if (!IS_ERR(isd)) {
				switch (vep.bus_type) {
				case V4L2_MBUS_CSI2_DPHY:
					isd->bus.interface =
						isp_bus_interfaces[i].csi2_if;
					isp_parse_of_csi2_endpoint(isp->dev, &vep,
								   &isd->bus);
					break;
				case V4L2_MBUS_CSI1:
				case V4L2_MBUS_CCP2:
					isd->bus.interface =
						isp_bus_interfaces[i].csi1_if;
					isp_parse_of_csi1_endpoint(isp->dev, &vep,
								   &isd->bus);
					break;
				default:
					break;
				}
			}
		}

		fwnode_handle_put(ep);
	}

	return 0;
}

static const struct v4l2_async_notifier_operations isp_subdev_notifier_ops = {
	.bound = isp_subdev_notifier_bound,
	.complete = isp_subdev_notifier_complete,
};

/*
 * isp_probe - Probe ISP platform device
 * @pdev: Pointer to ISP platform device
 *
 * Returns 0 if successful,
 *   -ENOMEM if no memory is available,
 *   -ENODEV if no platform device resources are found
 *     or the registers can't be remapped,
 *   -EINVAL if the ISR can't be installed,
 *   or the error value returned by clk_get().
 */
static int isp_probe(struct platform_device *pdev)
{
	struct isp_device *isp;
	struct resource *mem;
	int ret;
	int i, m;

	isp = kzalloc(sizeof(*isp), GFP_KERNEL);
	if (!isp) {
		dev_err(&pdev->dev, "could not allocate memory\n");
		return -ENOMEM;
	}

	ret = fwnode_property_read_u32(of_fwnode_handle(pdev->dev.of_node),
				       "ti,phy-type", &isp->phy_type);
	if (ret)
		goto error_release_isp;

	isp->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "syscon");
	if (IS_ERR(isp->syscon)) {
		ret = PTR_ERR(isp->syscon);
		goto error_release_isp;
	}

	ret = of_property_read_u32_index(pdev->dev.of_node,
					 "syscon", 1, &isp->syscon_offset);
	if (ret)
		goto error_release_isp;

	isp->autoidle = autoidle;

	mutex_init(&isp->isp_mutex);
	spin_lock_init(&isp->stat_lock);
	isp->dev = &pdev->dev;

	isp->ref_count = 0;

	ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
	if (ret)
		goto error;

	platform_set_drvdata(pdev, isp);

	/* Regulators */
	isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy1");
	if (IS_ERR(isp->isp_csiphy1.vdd)) {
		ret = PTR_ERR(isp->isp_csiphy1.vdd);
		goto error;
	}

	isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy2");
	if (IS_ERR(isp->isp_csiphy2.vdd)) {
		ret = PTR_ERR(isp->isp_csiphy2.vdd);
		goto error;
	}

	/* Clocks
	 *
	 * The ISP clock tree is revision-dependent. We thus need to enable ICLK
	 * manually to read the revision before calling __omap3isp_get().
	 *
	 * Start by mapping the ISP MMIO area, which is in two pieces.
	 * The ISP IOMMU is in between. Map both now, and fill in the
	 * ISP revision specific portions a little later in the
	 * function.
	 */
	for (i = 0; i < 2; i++) {
		unsigned int map_idx = i ? OMAP3_ISP_IOMEM_CSI2A_REGS1 : 0;

		isp->mmio_base[map_idx] =
			devm_platform_get_and_ioremap_resource(pdev, i, &mem);
		if (IS_ERR(isp->mmio_base[map_idx])) {
			ret = PTR_ERR(isp->mmio_base[map_idx]);
			goto error;
		}
	}

	ret = isp_get_clocks(isp);
	if (ret < 0)
		goto error;

	ret = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
	if (ret < 0)
		goto error;

	isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
	dev_info(isp->dev, "Revision %d.%d found\n",
		 (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);

	clk_disable(isp->clock[ISP_CLK_CAM_ICK]);

	if (__omap3isp_get(isp, false) == NULL) {
		ret = -ENODEV;
		goto error;
	}

	ret = isp_reset(isp);
	if (ret < 0)
		goto error_isp;

	ret = isp_xclk_init(isp);
	if (ret < 0)
		goto error_isp;

	/* Memory resources */
	for (m = 0; m < ARRAY_SIZE(isp_res_maps); m++)
		if (isp->revision == isp_res_maps[m].isp_rev)
			break;

	if (m == ARRAY_SIZE(isp_res_maps)) {
		dev_err(isp->dev, "No resource map found for ISP rev %d.%d\n",
			(isp->revision & 0xf0) >> 4, isp->revision & 0xf);
		ret = -ENODEV;
		goto error_isp;
	}

	for (i = 1; i < OMAP3_ISP_IOMEM_CSI2A_REGS1; i++)
		isp->mmio_base[i] =
			isp->mmio_base[0] + isp_res_maps[m].offset[i];

	for (i = OMAP3_ISP_IOMEM_CSIPHY2; i < OMAP3_ISP_IOMEM_LAST; i++)
		isp->mmio_base[i] =
			isp->mmio_base[OMAP3_ISP_IOMEM_CSI2A_REGS1]
			+ isp_res_maps[m].offset[i];

	isp->mmio_hist_base_phys =
		mem->start + isp_res_maps[m].offset[OMAP3_ISP_IOMEM_HIST];

	/* IOMMU */
	ret = isp_attach_iommu(isp);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to attach to IOMMU\n");
		goto error_isp;
	}

	/* Interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto error_iommu;
	isp->irq_num = ret;

	if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
			     "OMAP3 ISP", isp)) {
		dev_err(isp->dev, "Unable to request IRQ\n");
		ret = -EINVAL;
		goto error_iommu;
	}

	/* Entities */
	ret = isp_initialize_modules(isp);
	if (ret < 0)
		goto error_iommu;

	ret = isp_register_entities(isp);
	if (ret < 0)
		goto error_modules;

	ret = isp_create_links(isp);
	if (ret < 0)
		goto error_register_entities;

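	/*
	 * Set up the async notifier: the external subdevs described in the
	 * firmware are added by isp_parse_of_endpoints(), linked to the ISP
	 * from the notifier's .bound() callback and the media device is
	 * registered from .complete() once all of them have probed.
	 */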
	isp->notifier.ops = &isp_subdev_notifier_ops;

	v4l2_async_nf_init(&isp->notifier, &isp->v4l2_dev);

	ret = isp_parse_of_endpoints(isp);
	if (ret < 0)
		goto error_register_entities;

	ret = v4l2_async_nf_register(&isp->notifier);
	if (ret)
		goto error_register_entities;

	isp_core_init(isp, 1);
	omap3isp_put(isp);

	return 0;

error_register_entities:
	v4l2_async_nf_cleanup(&isp->notifier);
	isp_unregister_entities(isp);
error_modules:
	isp_cleanup_modules(isp);
error_iommu:
	isp_detach_iommu(isp);
error_isp:
	isp_xclk_cleanup(isp);
	__omap3isp_put(isp, false);
error:
	mutex_destroy(&isp->isp_mutex);
error_release_isp:
	kfree(isp);

	return ret;
}

static const struct dev_pm_ops omap3isp_pm_ops = {
	.prepare = isp_pm_prepare,
	.suspend = isp_pm_suspend,
	.resume = isp_pm_resume,
	.complete = isp_pm_complete,
};

static const struct platform_device_id omap3isp_id_table[] = {
	{ "omap3isp", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, omap3isp_id_table);

static const struct of_device_id omap3isp_of_table[] = {
	{ .compatible = "ti,omap3-isp" },
	{ },
};
MODULE_DEVICE_TABLE(of, omap3isp_of_table);

static struct platform_driver omap3isp_driver = {
	.probe = isp_probe,
	.remove_new = isp_remove,
	.id_table = omap3isp_id_table,
	.driver = {
		.name = "omap3isp",
		.pm = &omap3isp_pm_ops,
		.of_match_table = omap3isp_of_table,
	},
};

module_platform_driver(omap3isp_driver);

MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("TI OMAP3 ISP driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ISP_VIDEO_DRIVER_VERSION);