1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2017,2020 Intel Corporation
4  *
5  * Based partially on Intel IPU4 driver written by
6  *  Sakari Ailus <sakari.ailus@linux.intel.com>
7  *  Samu Onkalo <samu.onkalo@intel.com>
8  *  Jouni Högander <jouni.hogander@intel.com>
9  *  Jouni Ukkonen <jouni.ukkonen@intel.com>
10  *  Antti Laakso <antti.laakso@intel.com>
11  * et al.
12  */
13 
14 #include <linux/bitops.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
18 #include <linux/mm.h>
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/pfn.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/property.h>
24 #include <linux/vmalloc.h>
25 
26 #include <media/ipu-bridge.h>
27 #include <media/v4l2-ctrls.h>
28 #include <media/v4l2-device.h>
29 #include <media/v4l2-event.h>
30 #include <media/v4l2-fwnode.h>
31 #include <media/v4l2-ioctl.h>
32 #include <media/videobuf2-dma-sg.h>
33 
34 #include "ipu3-cio2.h"
35 
36 struct ipu3_cio2_fmt {
37 	u32 mbus_code;
38 	u32 fourcc;
39 	u8 mipicode;
40 	u8 bpp;
41 };
42 
43 /*
44  * These are raw formats used in Intel's third generation of
45  * Image Processing Unit known as IPU3.
 * 10-bit raw Bayer, packed: 32 bytes for every 25 pixels, with the
 * last 6 bits (LSBs) of each 32-byte unit unused.
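 *
 * A sketch of the packing arithmetic: 25 pixels x 10 bits = 250 bits,
 * stored in a 32-byte (256-bit) unit with 6 bits left over; two such
 * units give the 64 bytes per 50 pixels used by cio2_bytesperline().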
48  */
49 static const struct ipu3_cio2_fmt formats[] = {
50 	{	/* put default entry at beginning */
51 		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
52 		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
53 		.mipicode	= 0x2b,
54 		.bpp		= 10,
55 	}, {
56 		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
57 		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
58 		.mipicode	= 0x2b,
59 		.bpp		= 10,
60 	}, {
61 		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
62 		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
63 		.mipicode	= 0x2b,
64 		.bpp		= 10,
65 	}, {
66 		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
67 		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
68 		.mipicode	= 0x2b,
69 		.bpp		= 10,
70 	}, {
71 		.mbus_code	= MEDIA_BUS_FMT_Y10_1X10,
72 		.fourcc		= V4L2_PIX_FMT_IPU3_Y10,
73 		.mipicode	= 0x2b,
74 		.bpp		= 10,
75 	},
76 };
77 
78 /*
 * cio2_find_format - look up a color format by fourcc and/or media bus code
80  * @pixelformat: fourcc to match, ignored if null
81  * @mbus_code: media bus code to match, ignored if null
82  */
static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
84 						    const u32 *mbus_code)
85 {
86 	unsigned int i;
87 
88 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
89 		if (pixelformat && *pixelformat != formats[i].fourcc)
90 			continue;
91 		if (mbus_code && *mbus_code != formats[i].mbus_code)
92 			continue;
93 
94 		return &formats[i];
95 	}
96 
97 	return NULL;
98 }
99 
static inline u32 cio2_bytesperline(const unsigned int width)
101 {
102 	/*
	 * 64 bytes for every 50 pixels; the line length
	 * in bytes is a multiple of 64 (line end alignment).
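	 *
	 * For example, the default 1936-pixel width used later in this
	 * driver gives DIV_ROUND_UP(1936, 50) = 39, i.e. 39 * 64 = 2496
	 * bytes per line.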
105 	 */
106 	return DIV_ROUND_UP(width, 50) * 64;
107 }
108 
109 /**************** FBPT operations ****************/
110 
static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
112 {
113 	struct device *dev = &cio2->pci_dev->dev;
114 
115 	if (cio2->dummy_lop) {
116 		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
117 				  cio2->dummy_lop_bus_addr);
118 		cio2->dummy_lop = NULL;
119 	}
120 	if (cio2->dummy_page) {
121 		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
122 				  cio2->dummy_page_bus_addr);
123 		cio2->dummy_page = NULL;
124 	}
125 }
126 
static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
128 {
129 	struct device *dev = &cio2->pci_dev->dev;
130 	unsigned int i;
131 
132 	cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
133 					      &cio2->dummy_page_bus_addr,
134 					      GFP_KERNEL);
135 	cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
136 					     &cio2->dummy_lop_bus_addr,
137 					     GFP_KERNEL);
138 	if (!cio2->dummy_page || !cio2->dummy_lop) {
139 		cio2_fbpt_exit_dummy(cio2);
140 		return -ENOMEM;
141 	}
142 	/*
	 * A List of Pointers (LOP) contains 1024 x 32-bit pointers, each
	 * referring to a 4 KiB page; initialize every entry to the
	 * dummy_page bus base address.
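	 * (Each 32-bit entry holds a page frame number, i.e. the page's
	 * bus address shifted right by PAGE_SHIFT, which is what
	 * PFN_DOWN() computes below.)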
145 	 */
146 	for (i = 0; i < CIO2_LOP_ENTRIES; i++)
147 		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
148 
149 	return 0;
150 }
151 
static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
153 				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
154 {
155 	/*
	 * The CPU first initializes some fields in the FBPT, then sets
	 * the VALID bit. This barrier ensures that the DMA (device) does
	 * not see the VALID bit enabled before the other fields have been
	 * initialized; otherwise it could lead to havoc.
160 	 */
161 	dma_wmb();
162 
163 	/*
	 * Request interrupts for start and completion.
	 * The VALID bit is applicable only to the first entry.
166 	 */
167 	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
168 		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
169 }
170 
/* Initialize FBPT entries to point to the dummy frame */
static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
173 				       struct cio2_fbpt_entry
174 				       entry[CIO2_MAX_LOPS])
175 {
176 	unsigned int i;
177 
178 	entry[0].first_entry.first_page_offset = 0;
179 	entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
180 	entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
181 
182 	for (i = 0; i < CIO2_MAX_LOPS; i++)
183 		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
184 
185 	cio2_fbpt_entry_enable(cio2, entry);
186 }
187 
/* Initialize FBPT entries to point to a given buffer */
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
190 				     struct cio2_buffer *b,
191 				     struct cio2_fbpt_entry
192 				     entry[CIO2_MAX_LOPS])
193 {
194 	struct vb2_buffer *vb = &b->vbb.vb2_buf;
195 	unsigned int length = vb->planes[0].length;
196 	int remaining, i;
197 
198 	entry[0].first_entry.first_page_offset = b->offset;
199 	remaining = length + entry[0].first_entry.first_page_offset;
200 	entry[1].second_entry.num_of_pages = PFN_UP(remaining);
201 	/*
202 	 * last_page_available_bytes has the offset of the last byte in the
203 	 * last page which is still accessible by DMA. DMA cannot access
204 	 * beyond this point. Valid range for this is from 0 to 4095.
205 	 * 0 indicates 1st byte in the page is DMA accessible.
206 	 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
207 	 * is available for DMA transfer.
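	 *
	 * For example, if the data ends exactly on a page boundary, the
	 * offset_in_page() call below evaluates to 0, remaining becomes
	 * PAGE_SIZE and the field is set to 4095; if the data ends one
	 * byte into the last page, the field is set to 0.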
208 	 */
209 	remaining = offset_in_page(remaining) ?: PAGE_SIZE;
210 	entry[1].second_entry.last_page_available_bytes = remaining - 1;
211 	/* Fill FBPT */
212 	remaining = length;
213 	i = 0;
214 	while (remaining > 0) {
215 		entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
216 		remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
217 		entry++;
218 		i++;
219 	}
220 
221 	/*
	 * The first unused FBPT entry should still point to a valid LOP
223 	 */
224 	entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
225 
226 	cio2_fbpt_entry_enable(cio2, entry);
227 }
228 
static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
230 {
231 	struct device *dev = &cio2->pci_dev->dev;
232 
233 	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
234 				     GFP_KERNEL);
235 	if (!q->fbpt)
236 		return -ENOMEM;
237 
238 	return 0;
239 }
240 
static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
242 {
243 	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
244 }
245 
246 /**************** CSI2 hardware setup ****************/
247 
248 /*
249  * The CSI2 receiver has several parameters affecting
250  * the receiver timings. These depend on the MIPI bus frequency
251  * F in Hz (sensor transmitter rate) as follows:
252  *     register value = (A/1e9 + B * UI) / COUNT_ACC
253  * where
254  *      UI = 1 / (2 * F) in seconds
255  *      COUNT_ACC = counter accuracy in seconds
 *      For IPU3, COUNT_ACC = 0.0625 ns (the code below uses its inverse,
 *      accinv = 16)
257  *
258  * A and B are coefficients from the table below,
 * depending on whether the register minimum or maximum value is
260  * calculated.
261  *                                     Minimum     Maximum
262  * Clock lane                          A     B     A     B
263  * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
264  * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
265  * Data lanes
266  * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
267  * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
268  * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
269  * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
270  * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
271  * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
272  * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
273  * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
274  *
275  * We use the minimum values of both A and B.
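 *
 * As an illustration, assume a (hypothetical) 400 MHz link frequency, so
 * UI = 1.25 ns, and take COUNT_ACC = 0.0625 ns as above. The minimum
 * clock lane settle value is then (95 - 8 * 1.25) / 0.0625 = 1360, which
 * is what cio2_rx_timing(95, -8, ...) below computes for that frequency.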
276  */
277 
278 /*
279  * shift for keeping value range suitable for 32-bit integer arithmetic
280  */
281 #define LIMIT_SHIFT	8
282 
static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
284 {
	const u32 accinv = 16; /* inverse of counter resolution */
286 	const u32 uiinv = 500000000; /* 1e9 / 2 */
287 	s32 r;
288 
289 	freq >>= LIMIT_SHIFT;
290 
291 	if (WARN_ON(freq <= 0 || freq > S32_MAX))
292 		return def;
293 	/*
	 * b could be 0, -2 or -8, so |accinv * b| is always
	 * less than (1 << LIMIT_SHIFT) and thus |r| < 500000000.
296 	 */
297 	r = accinv * b * (uiinv >> LIMIT_SHIFT);
298 	r = r / (s32)freq;
299 	/* max value of a is 95 */
300 	r += accinv * a;
301 
302 	return r;
303 };
304 
/* Calculate termination enable and settle delays for clock and data lanes */
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
307 				 struct cio2_csi2_timing *timing,
308 				 unsigned int bpp, unsigned int lanes)
309 {
310 	struct device *dev = &cio2->pci_dev->dev;
311 	s64 freq;
312 
313 	if (!q->sensor)
314 		return -ENODEV;
315 
316 	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
317 	if (freq < 0) {
318 		dev_err(dev, "error %lld, invalid link_freq\n", freq);
319 		return freq;
320 	}
321 
322 	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
323 					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
324 					    freq,
325 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
326 	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
327 					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
328 					    freq,
329 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
330 	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
331 					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
332 					    freq,
333 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
334 	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
335 					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
336 					    freq,
337 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
338 
339 	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
340 	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
341 	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
342 	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
343 
344 	return 0;
345 };
346 
static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
348 {
349 	static const int NUM_VCS = 4;
350 	static const int SID;	/* Stream id */
351 	static const int ENTRY;
352 	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
353 					CIO2_FBPT_SUBENTRY_UNIT);
354 	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
355 	const struct ipu3_cio2_fmt *fmt;
356 	void __iomem *const base = cio2->base;
357 	u8 lanes, csi2bus = q->csi2.port;
358 	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
359 	struct cio2_csi2_timing timing = { 0 };
360 	int i, r;
361 
362 	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
363 	if (!fmt)
364 		return -EINVAL;
365 
366 	lanes = q->csi2.lanes;
367 
368 	r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
369 	if (r)
370 		return r;
371 
372 	writel(timing.clk_termen, q->csi_rx_base +
373 		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
374 	writel(timing.clk_settle, q->csi_rx_base +
375 		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
376 
377 	for (i = 0; i < lanes; i++) {
378 		writel(timing.dat_termen, q->csi_rx_base +
379 			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
380 		writel(timing.dat_settle, q->csi_rx_base +
381 			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
382 	}
383 
384 	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
385 	       CIO2_PBM_WMCTRL1_MID1_2CK |
386 	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
387 	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
388 	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
389 	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
390 	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
391 	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
392 	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
393 	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
394 	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
395 	       CIO2_PBM_ARB_CTRL_LE_EN |
396 	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
397 	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
398 	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
399 	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
400 	       base + CIO2_REG_PBM_ARB_CTRL);
401 	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
402 	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
403 	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
404 	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
405 
406 	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
407 	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
408 
409 	/* Configure MIPI backend */
410 	for (i = 0; i < NUM_VCS; i++)
411 		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
412 
	/* There are 16 short packet LUT entries */
414 	for (i = 0; i < 16; i++)
415 		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
416 		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
417 	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
418 	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
419 
420 	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
421 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
422 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
423 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
424 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
425 	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
426 
427 	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
428 	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
429 	       base + CIO2_REG_INT_EN);
430 
431 	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
432 	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
433 	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
434 	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
435 	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
436 	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
437 	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
438 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
439 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
440 	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
441 
442 	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
443 	writel(CIO2_CGC_PRIM_TGE |
444 	       CIO2_CGC_SIDE_TGE |
445 	       CIO2_CGC_XOSC_TGE |
446 	       CIO2_CGC_D3I3_TGE |
447 	       CIO2_CGC_CSI2_INTERFRAME_TGE |
448 	       CIO2_CGC_CSI2_PORT_DCGE |
449 	       CIO2_CGC_SIDE_DCGE |
450 	       CIO2_CGC_PRIM_DCGE |
451 	       CIO2_CGC_ROSC_DCGE |
452 	       CIO2_CGC_XOSC_DCGE |
453 	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
454 	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
455 	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
456 	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
457 	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
458 	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
459 	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
460 	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
461 	       base + CIO2_REG_LTRVAL01);
462 	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
463 	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
464 	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
465 	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
466 	       base + CIO2_REG_LTRVAL23);
467 
468 	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
469 		writel(0, base + CIO2_REG_CDMABA(i));
470 		writel(0, base + CIO2_REG_CDMAC0(i));
471 		writel(0, base + CIO2_REG_CDMAC1(i));
472 	}
473 
474 	/* Enable DMA */
475 	writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
476 
477 	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
478 	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
479 	       CIO2_CDMAC0_DMA_INTR_ON_FE |
480 	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
481 	       CIO2_CDMAC0_DMA_EN |
482 	       CIO2_CDMAC0_DMA_INTR_ON_FS |
483 	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
484 
485 	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
486 	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
487 
488 	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
489 
490 	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
491 	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
492 	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
493 	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
494 	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
495 
496 	/* Clear interrupts */
497 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
498 	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
499 	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
500 	writel(~0, base + CIO2_REG_INT_STS);
501 
502 	/* Enable devices, starting from the last device in the pipe */
503 	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
504 	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
505 
506 	return 0;
507 }
508 
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
510 {
511 	struct device *dev = &cio2->pci_dev->dev;
512 	void __iomem *const base = cio2->base;
513 	unsigned int i;
514 	u32 value;
515 	int ret;
516 
517 	/* Disable CSI receiver and MIPI backend devices */
518 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
519 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
520 	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
521 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
522 
523 	/* Halt DMA */
524 	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
525 	ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
526 				 value, value & CIO2_CDMAC0_DMA_HALTED,
527 				 4000, 2000000);
528 	if (ret)
		dev_err(dev, "DMA %i cannot be halted\n", CIO2_DMA_CHAN);
530 
531 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
532 		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
533 		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
534 		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
535 		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
536 	}
537 }
538 
static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
540 {
541 	struct device *dev = &cio2->pci_dev->dev;
542 	struct cio2_queue *q = cio2->cur_queue;
543 	struct cio2_fbpt_entry *entry;
544 	u64 ns = ktime_get_ns();
545 
546 	if (dma_chan >= CIO2_QUEUES) {
547 		dev_err(dev, "bad DMA channel %i\n", dma_chan);
548 		return;
549 	}
550 
551 	entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
552 	if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
553 		dev_warn(dev, "no ready buffers found on DMA channel %u\n",
554 			 dma_chan);
555 		return;
556 	}
557 
558 	/* Find out which buffer(s) are ready */
559 	do {
560 		struct cio2_buffer *b;
561 
562 		b = q->bufs[q->bufs_first];
563 		if (b) {
564 			unsigned int received = entry[1].second_entry.num_of_bytes;
565 			unsigned long payload =
566 				vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
567 
568 			q->bufs[q->bufs_first] = NULL;
569 			atomic_dec(&q->bufs_queued);
570 			dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
571 
572 			b->vbb.vb2_buf.timestamp = ns;
573 			b->vbb.field = V4L2_FIELD_NONE;
574 			b->vbb.sequence = atomic_read(&q->frame_sequence);
575 			if (payload != received)
576 				dev_warn(dev,
577 					 "payload length is %lu, received %u\n",
578 					 payload, received);
579 			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
580 		}
581 		atomic_inc(&q->frame_sequence);
582 		cio2_fbpt_entry_init_dummy(cio2, entry);
583 		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
584 		entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
585 	} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
586 }
587 
static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
589 {
590 	/*
591 	 * For the user space camera control algorithms it is essential
592 	 * to know when the reception of a frame has begun. That's often
593 	 * the best timing information to get from the hardware.
594 	 */
595 	struct v4l2_event event = {
596 		.type = V4L2_EVENT_FRAME_SYNC,
597 		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
598 	};
599 
600 	v4l2_event_queue(q->subdev.devnode, &event);
601 }
602 
603 static const char *const cio2_irq_errs[] = {
604 	"single packet header error corrected",
605 	"multiple packet header errors detected",
606 	"payload checksum (CRC) error",
607 	"fifo overflow",
608 	"reserved short packet data type detected",
609 	"reserved long packet data type detected",
610 	"incomplete long packet detected",
611 	"frame sync error",
612 	"line sync error",
613 	"DPHY start of transmission error",
614 	"DPHY synchronization error",
615 	"escape mode error",
616 	"escape mode trigger event",
617 	"escape mode ultra-low power state for data lane(s)",
618 	"escape mode ultra-low power state exit for clock lane",
619 	"inter-frame short packet discarded",
620 	"inter-frame long packet discarded",
621 	"non-matching Long Packet stalled",
622 };
623 
static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
625 {
626 	unsigned long csi2_status = status;
627 	unsigned int i;
628 
629 	for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
630 		dev_err(dev, "CSI-2 receiver port %i: %s\n",
631 			port, cio2_irq_errs[i]);
632 
	if (fls_long(csi2_status) > ARRAY_SIZE(cio2_irq_errs))
634 		dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
635 			 csi2_status, port);
636 }
637 
638 static const char *const cio2_port_errs[] = {
639 	"ECC recoverable",
640 	"DPHY not recoverable",
641 	"ECC not recoverable",
642 	"CRC error",
643 	"INTERFRAMEDATA",
644 	"PKT2SHORT",
645 	"PKT2LONG",
646 };
647 
static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
649 {
650 	unsigned long port_status = status;
651 	unsigned int i;
652 
653 	for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
654 		dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
655 }
656 
static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
658 {
659 	struct device *dev = &cio2->pci_dev->dev;
660 	void __iomem *const base = cio2->base;
661 
662 	if (int_status & CIO2_INT_IOOE) {
663 		/*
664 		 * Interrupt on Output Error:
665 		 * 1) SRAM is full and FS received, or
666 		 * 2) An invalid bit detected by DMA.
667 		 */
668 		u32 oe_status, oe_clear;
669 
670 		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
671 		oe_status = oe_clear;
672 
673 		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
674 			dev_err(dev, "DMA output error: 0x%x\n",
675 				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
676 				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
677 			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
678 		}
679 		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
680 			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
681 				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
682 				>> CIO2_INT_EXT_OE_OES_SHIFT);
683 			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
684 		}
685 		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
686 		if (oe_status)
687 			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
688 				 oe_status);
689 		int_status &= ~CIO2_INT_IOOE;
690 	}
691 
692 	if (int_status & CIO2_INT_IOC_MASK) {
693 		/* DMA IO done -- frame ready */
694 		u32 clr = 0;
695 		unsigned int d;
696 
697 		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
698 			if (int_status & CIO2_INT_IOC(d)) {
699 				clr |= CIO2_INT_IOC(d);
700 				cio2_buffer_done(cio2, d);
701 			}
702 		int_status &= ~clr;
703 	}
704 
705 	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
706 		/* DMA IO starts or reached specified line */
707 		u32 clr = 0;
708 		unsigned int d;
709 
710 		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
711 			if (int_status & CIO2_INT_IOS_IOLN(d)) {
712 				clr |= CIO2_INT_IOS_IOLN(d);
713 				if (d == CIO2_DMA_CHAN)
714 					cio2_queue_event_sof(cio2,
715 							     cio2->cur_queue);
716 			}
717 		int_status &= ~clr;
718 	}
719 
720 	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
721 		/* CSI2 receiver (error) interrupt */
722 		unsigned int port;
723 		u32 ie_status;
724 
725 		ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
726 
727 		for (port = 0; port < CIO2_NUM_PORTS; port++) {
728 			u32 port_status = (ie_status >> (port * 8)) & 0xff;
729 
730 			cio2_irq_log_port_errs(dev, port, port_status);
731 
732 			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
733 				void __iomem *csi_rx_base =
734 						base + CIO2_REG_PIPE_BASE(port);
735 				u32 csi2_status;
736 
737 				csi2_status = readl(csi_rx_base +
738 						CIO2_REG_IRQCTRL_STATUS);
739 
740 				cio2_irq_log_irq_errs(dev, port, csi2_status);
741 
742 				writel(csi2_status,
743 				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
744 			}
745 		}
746 
747 		writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
748 
749 		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
750 	}
751 
752 	if (int_status)
753 		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
754 }
755 
static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
757 {
758 	struct cio2_device *cio2 = cio2_ptr;
759 	void __iomem *const base = cio2->base;
760 	struct device *dev = &cio2->pci_dev->dev;
761 	u32 int_status;
762 
763 	int_status = readl(base + CIO2_REG_INT_STS);
764 	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
765 	if (!int_status)
766 		return IRQ_NONE;
767 
768 	do {
769 		writel(int_status, base + CIO2_REG_INT_STS);
770 		cio2_irq_handle_once(cio2, int_status);
771 		int_status = readl(base + CIO2_REG_INT_STS);
772 		if (int_status)
773 			dev_dbg(dev, "pending status 0x%x\n", int_status);
774 	} while (int_status);
775 
776 	return IRQ_HANDLED;
777 }
778 
779 /**************** Videobuf2 interface ****************/
780 
static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
782 					enum vb2_buffer_state state)
783 {
784 	unsigned int i;
785 
786 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
787 		if (q->bufs[i]) {
788 			atomic_dec(&q->bufs_queued);
789 			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
790 					state);
791 			q->bufs[i] = NULL;
792 		}
793 	}
794 }
795 
static int cio2_vb2_queue_setup(struct vb2_queue *vq,
797 				unsigned int *num_buffers,
798 				unsigned int *num_planes,
799 				unsigned int sizes[],
800 				struct device *alloc_devs[])
801 {
802 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
803 	struct device *dev = &cio2->pci_dev->dev;
804 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
805 	unsigned int i;
806 
807 	if (*num_planes && *num_planes < q->format.num_planes)
808 		return -EINVAL;
809 
810 	for (i = 0; i < q->format.num_planes; ++i) {
811 		if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
812 			return -EINVAL;
813 		sizes[i] = q->format.plane_fmt[i].sizeimage;
814 		alloc_devs[i] = dev;
815 	}
816 
817 	*num_planes = q->format.num_planes;
818 	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
819 
820 	/* Initialize buffer queue */
821 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
822 		q->bufs[i] = NULL;
823 		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
824 	}
825 	atomic_set(&q->bufs_queued, 0);
826 	q->bufs_first = 0;
827 	q->bufs_next = 0;
828 
829 	return 0;
830 }
831 
832 /* Called after each buffer is allocated */
static int cio2_vb2_buf_init(struct vb2_buffer *vb)
834 {
835 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
836 	struct device *dev = &cio2->pci_dev->dev;
837 	struct cio2_buffer *b = to_cio2_buffer(vb);
838 	unsigned int pages = PFN_UP(vb->planes[0].length);
839 	unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
840 	struct sg_table *sg;
841 	struct sg_dma_page_iter sg_iter;
842 	unsigned int i, j;
843 
844 	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
845 		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
846 			vb->planes[0].length);
847 		return -ENOSPC;		/* Should never happen */
848 	}
849 
850 	memset(b->lop, 0, sizeof(b->lop));
851 	/* Allocate LOP table */
852 	for (i = 0; i < lops; i++) {
853 		b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
854 					       &b->lop_bus_addr[i], GFP_KERNEL);
855 		if (!b->lop[i])
856 			goto fail;
857 	}
858 
859 	/* Fill LOP */
860 	sg = vb2_dma_sg_plane_desc(vb, 0);
861 	if (!sg)
862 		return -ENOMEM;
863 
864 	if (sg->nents && sg->sgl)
865 		b->offset = sg->sgl->offset;
866 
867 	i = j = 0;
868 	for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
869 		if (!pages--)
870 			break;
871 		b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
872 		j++;
873 		if (j == CIO2_LOP_ENTRIES) {
874 			i++;
875 			j = 0;
876 		}
877 	}
878 
879 	b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
880 	return 0;
881 fail:
882 	while (i--)
883 		dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
884 	return -ENOMEM;
885 }
886 
887 /* Transfer buffer ownership to cio2 */
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
889 {
890 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
891 	struct device *dev = &cio2->pci_dev->dev;
892 	struct cio2_queue *q =
893 		container_of(vb->vb2_queue, struct cio2_queue, vbq);
894 	struct cio2_buffer *b = to_cio2_buffer(vb);
895 	struct cio2_fbpt_entry *entry;
896 	unsigned long flags;
897 	unsigned int i, j, next = q->bufs_next;
898 	int bufs_queued = atomic_inc_return(&q->bufs_queued);
899 	u32 fbpt_rp;
900 
901 	dev_dbg(dev, "queue buffer %d\n", vb->index);
902 
903 	/*
904 	 * This code queues the buffer to the CIO2 DMA engine, which starts
905 	 * running once streaming has started. It is possible that this code
	 * gets pre-empted due to increased CPU load. If that happens, the
	 * driver does not get an opportunity to queue new buffers to the
	 * CIO2 DMA engine. When the DMA engine encounters an FBPT entry
	 * without the VALID bit set, it halts, and a restart of both the
	 * DMA engine and the sensor is required to continue streaming.
	 * This is undesirable but also highly unlikely, given that the DMA
	 * engine has 32 FBPT entries to process before it can run into an
	 * FBPT entry without the VALID bit set. We try to mitigate this
	 * by disabling interrupts for the duration of this queueing.
915 	 */
916 	local_irq_save(flags);
917 
918 	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
919 		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
920 		   & CIO2_CDMARI_FBPT_RP_MASK;
921 
922 	/*
	 * fbpt_rp is the FBPT entry that the DMA is currently working
	 * on, but since it could jump to the next entry at any time,
	 * assume that we might already be there.
926 	 */
927 	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
928 
929 	if (bufs_queued <= 1 || fbpt_rp == next)
930 		/* Buffers were drained */
931 		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
932 
933 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
934 		/*
		 * We have allocated CIO2_MAX_BUFFERS circularly for the
		 * hardware, while the user has requested a queue of N
		 * buffers. The driver ensures N <= CIO2_MAX_BUFFERS and
		 * guarantees that whenever the user queues a buffer, there
		 * necessarily is a free slot.
939 		 */
940 		if (!q->bufs[next]) {
941 			q->bufs[next] = b;
942 			entry = &q->fbpt[next * CIO2_MAX_LOPS];
943 			cio2_fbpt_entry_init_buf(cio2, b, entry);
944 			local_irq_restore(flags);
945 			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
946 			for (j = 0; j < vb->num_planes; j++)
947 				vb2_set_plane_payload(vb, j,
948 					q->format.plane_fmt[j].sizeimage);
949 			return;
950 		}
951 
952 		dev_dbg(dev, "entry %i was full!\n", next);
953 		next = (next + 1) % CIO2_MAX_BUFFERS;
954 	}
955 
956 	local_irq_restore(flags);
957 	dev_err(dev, "error: all cio2 entries were full!\n");
958 	atomic_dec(&q->bufs_queued);
959 	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
960 }
961 
962 /* Called when each buffer is freed */
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
964 {
965 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
966 	struct device *dev = &cio2->pci_dev->dev;
967 	struct cio2_buffer *b = to_cio2_buffer(vb);
968 	unsigned int i;
969 
970 	/* Free LOP table */
971 	for (i = 0; i < CIO2_MAX_LOPS; i++) {
972 		if (b->lop[i])
973 			dma_free_coherent(dev, PAGE_SIZE,
974 					  b->lop[i], b->lop_bus_addr[i]);
975 	}
976 }
977 
static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
979 {
980 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
981 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
982 	struct device *dev = &cio2->pci_dev->dev;
983 	int r;
984 
985 	cio2->cur_queue = q;
986 	atomic_set(&q->frame_sequence, 0);
987 
988 	r = pm_runtime_resume_and_get(dev);
989 	if (r < 0) {
990 		dev_info(dev, "failed to set power %d\n", r);
991 		return r;
992 	}
993 
994 	r = video_device_pipeline_start(&q->vdev, &q->pipe);
995 	if (r)
996 		goto fail_pipeline;
997 
998 	r = cio2_hw_init(cio2, q);
999 	if (r)
1000 		goto fail_hw;
1001 
1002 	/* Start streaming on sensor */
1003 	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1004 	if (r)
1005 		goto fail_csi2_subdev;
1006 
1007 	cio2->streaming = true;
1008 
1009 	return 0;
1010 
1011 fail_csi2_subdev:
1012 	cio2_hw_exit(cio2, q);
1013 fail_hw:
1014 	video_device_pipeline_stop(&q->vdev);
1015 fail_pipeline:
1016 	dev_dbg(dev, "failed to start streaming (%d)\n", r);
1017 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1018 	pm_runtime_put(dev);
1019 
1020 	return r;
1021 }
1022 
static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1024 {
1025 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1026 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1027 	struct device *dev = &cio2->pci_dev->dev;
1028 
1029 	if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1030 		dev_err(dev, "failed to stop sensor streaming\n");
1031 
1032 	cio2_hw_exit(cio2, q);
1033 	synchronize_irq(cio2->pci_dev->irq);
1034 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1035 	video_device_pipeline_stop(&q->vdev);
1036 	pm_runtime_put(dev);
1037 	cio2->streaming = false;
1038 }
1039 
1040 static const struct vb2_ops cio2_vb2_ops = {
1041 	.buf_init = cio2_vb2_buf_init,
1042 	.buf_queue = cio2_vb2_buf_queue,
1043 	.buf_cleanup = cio2_vb2_buf_cleanup,
1044 	.queue_setup = cio2_vb2_queue_setup,
1045 	.start_streaming = cio2_vb2_start_streaming,
1046 	.stop_streaming = cio2_vb2_stop_streaming,
1047 	.wait_prepare = vb2_ops_wait_prepare,
1048 	.wait_finish = vb2_ops_wait_finish,
1049 };
1050 
1051 /**************** V4L2 interface ****************/
1052 
static int cio2_v4l2_querycap(struct file *file, void *fh,
1054 			      struct v4l2_capability *cap)
1055 {
1056 	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1057 	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1058 
1059 	return 0;
1060 }
1061 
static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1063 			      struct v4l2_fmtdesc *f)
1064 {
1065 	if (f->index >= ARRAY_SIZE(formats))
1066 		return -EINVAL;
1067 
1068 	f->pixelformat = formats[f->index].fourcc;
1069 
1070 	return 0;
1071 }
1072 
1073 /* The format is validated in cio2_video_link_validate() */
static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1075 {
1076 	struct cio2_queue *q = file_to_cio2_queue(file);
1077 
1078 	f->fmt.pix_mp = q->format;
1079 
1080 	return 0;
1081 }
1082 
static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1084 {
1085 	const struct ipu3_cio2_fmt *fmt;
1086 	struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1087 
1088 	fmt = cio2_find_format(&mpix->pixelformat, NULL);
1089 	if (!fmt)
1090 		fmt = &formats[0];
1091 
1092 	/* Only supports up to 4224x3136 */
1093 	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1094 		mpix->width = CIO2_IMAGE_MAX_WIDTH;
1095 	if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
1096 		mpix->height = CIO2_IMAGE_MAX_HEIGHT;
1097 
1098 	mpix->num_planes = 1;
1099 	mpix->pixelformat = fmt->fourcc;
1100 	mpix->colorspace = V4L2_COLORSPACE_RAW;
1101 	mpix->field = V4L2_FIELD_NONE;
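	/*
	 * Illustrative arithmetic: at the 4224x3136 maximum,
	 * bytesperline = DIV_ROUND_UP(4224, 50) * 64 = 5440 bytes and
	 * sizeimage = 5440 * 3136 = 17059840 bytes.
	 */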
1102 	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1103 	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1104 							mpix->height;
1105 
1106 	/* use default */
1107 	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1108 	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1109 	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1110 
1111 	return 0;
1112 }
1113 
static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1115 {
1116 	struct cio2_queue *q = file_to_cio2_queue(file);
1117 
1118 	cio2_v4l2_try_fmt(file, fh, f);
1119 	q->format = f->fmt.pix_mp;
1120 
1121 	return 0;
1122 }
1123 
1124 static int
cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1126 {
1127 	if (input->index > 0)
1128 		return -EINVAL;
1129 
1130 	strscpy(input->name, "camera", sizeof(input->name));
1131 	input->type = V4L2_INPUT_TYPE_CAMERA;
1132 
1133 	return 0;
1134 }
1135 
1136 static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1138 {
1139 	*input = 0;
1140 
1141 	return 0;
1142 }
1143 
1144 static int
cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1146 {
1147 	return input == 0 ? 0 : -EINVAL;
1148 }
1149 
1150 static const struct v4l2_file_operations cio2_v4l2_fops = {
1151 	.owner = THIS_MODULE,
1152 	.unlocked_ioctl = video_ioctl2,
1153 	.open = v4l2_fh_open,
1154 	.release = vb2_fop_release,
1155 	.poll = vb2_fop_poll,
1156 	.mmap = vb2_fop_mmap,
1157 };
1158 
1159 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1160 	.vidioc_querycap = cio2_v4l2_querycap,
1161 	.vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1162 	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1163 	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1164 	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1165 	.vidioc_reqbufs = vb2_ioctl_reqbufs,
1166 	.vidioc_create_bufs = vb2_ioctl_create_bufs,
1167 	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1168 	.vidioc_querybuf = vb2_ioctl_querybuf,
1169 	.vidioc_qbuf = vb2_ioctl_qbuf,
1170 	.vidioc_dqbuf = vb2_ioctl_dqbuf,
1171 	.vidioc_streamon = vb2_ioctl_streamon,
1172 	.vidioc_streamoff = vb2_ioctl_streamoff,
1173 	.vidioc_expbuf = vb2_ioctl_expbuf,
1174 	.vidioc_enum_input = cio2_video_enum_input,
1175 	.vidioc_g_input	= cio2_video_g_input,
1176 	.vidioc_s_input	= cio2_video_s_input,
1177 };
1178 
static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1180 				       struct v4l2_fh *fh,
1181 				       struct v4l2_event_subscription *sub)
1182 {
1183 	if (sub->type != V4L2_EVENT_FRAME_SYNC)
1184 		return -EINVAL;
1185 
	/* Line number. For now, only zero is accepted. */
1187 	if (sub->id != 0)
1188 		return -EINVAL;
1189 
1190 	return v4l2_event_subscribe(fh, sub, 0, NULL);
1191 }
1192 
static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1194 {
1195 	struct v4l2_mbus_framefmt *format;
1196 	const struct v4l2_mbus_framefmt fmt_default = {
1197 		.width = 1936,
1198 		.height = 1096,
1199 		.code = formats[0].mbus_code,
1200 		.field = V4L2_FIELD_NONE,
1201 		.colorspace = V4L2_COLORSPACE_RAW,
1202 		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1203 		.quantization = V4L2_QUANTIZATION_DEFAULT,
1204 		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
1205 	};
1206 
1207 	/* Initialize try_fmt */
1208 	format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
1209 	*format = fmt_default;
1210 
1211 	/* same as sink */
1212 	format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
1213 	*format = fmt_default;
1214 
1215 	return 0;
1216 }
1217 
1218 /*
1219  * cio2_subdev_get_fmt - Handle get format by pads subdev method
 * @sd: pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
1224  */
static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1226 			       struct v4l2_subdev_state *sd_state,
1227 			       struct v4l2_subdev_format *fmt)
1228 {
1229 	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1230 
1231 	mutex_lock(&q->subdev_lock);
1232 
1233 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1234 		fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
1235 							  fmt->pad);
1236 	else
1237 		fmt->format = q->subdev_fmt;
1238 
1239 	mutex_unlock(&q->subdev_lock);
1240 
1241 	return 0;
1242 }
1243 
1244 /*
1245  * cio2_subdev_set_fmt - Handle set format by pads subdev method
 * @sd: pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
1250  */
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1252 			       struct v4l2_subdev_state *sd_state,
1253 			       struct v4l2_subdev_format *fmt)
1254 {
1255 	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1256 	struct v4l2_mbus_framefmt *mbus;
1257 	u32 mbus_code = fmt->format.code;
1258 	unsigned int i;
1259 
1260 	/*
1261 	 * Only allow setting sink pad format;
1262 	 * source always propagates from sink
1263 	 */
1264 	if (fmt->pad == CIO2_PAD_SOURCE)
1265 		return cio2_subdev_get_fmt(sd, sd_state, fmt);
1266 
1267 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1268 		mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
1269 	else
1270 		mbus = &q->subdev_fmt;
1271 
1272 	fmt->format.code = formats[0].mbus_code;
1273 
1274 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
1275 		if (formats[i].mbus_code == mbus_code) {
1276 			fmt->format.code = mbus_code;
1277 			break;
1278 		}
1279 	}
1280 
1281 	fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
1282 	fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
1283 	fmt->format.field = V4L2_FIELD_NONE;
1284 
1285 	mutex_lock(&q->subdev_lock);
1286 	*mbus = fmt->format;
1287 	mutex_unlock(&q->subdev_lock);
1288 
1289 	return 0;
1290 }
1291 
static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1293 				      struct v4l2_subdev_state *sd_state,
1294 				      struct v4l2_subdev_mbus_code_enum *code)
1295 {
1296 	if (code->index >= ARRAY_SIZE(formats))
1297 		return -EINVAL;
1298 
1299 	code->code = formats[code->index].mbus_code;
1300 	return 0;
1301 }
1302 
static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1304 						struct v4l2_subdev_format *fmt)
1305 {
1306 	if (is_media_entity_v4l2_subdev(pad->entity)) {
1307 		struct v4l2_subdev *sd =
1308 			media_entity_to_v4l2_subdev(pad->entity);
1309 
1310 		memset(fmt, 0, sizeof(*fmt));
1311 		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1312 		fmt->pad = pad->index;
1313 		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1314 	}
1315 
1316 	return -EINVAL;
1317 }
1318 
static int cio2_video_link_validate(struct media_link *link)
1320 {
1321 	struct media_entity *entity = link->sink->entity;
1322 	struct video_device *vd = media_entity_to_video_device(entity);
1323 	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1324 	struct cio2_device *cio2 = video_get_drvdata(vd);
1325 	struct device *dev = &cio2->pci_dev->dev;
1326 	struct v4l2_subdev_format source_fmt;
1327 	int ret;
1328 
1329 	if (!media_pad_remote_pad_first(entity->pads)) {
1330 		dev_info(dev, "video node %s pad not connected\n", vd->name);
1331 		return -ENOTCONN;
1332 	}
1333 
1334 	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1335 	if (ret < 0)
1336 		return 0;
1337 
1338 	if (source_fmt.format.width != q->format.width ||
1339 	    source_fmt.format.height != q->format.height) {
1340 		dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
1341 			q->format.width, q->format.height,
1342 			source_fmt.format.width, source_fmt.format.height);
1343 		return -EINVAL;
1344 	}
1345 
1346 	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1347 		return -EINVAL;
1348 
1349 	return 0;
1350 }
1351 
1352 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1353 	.subscribe_event = cio2_subdev_subscribe_event,
1354 	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
1355 };
1356 
1357 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1358 	.open = cio2_subdev_open,
1359 };
1360 
1361 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1362 	.link_validate = v4l2_subdev_link_validate_default,
1363 	.get_fmt = cio2_subdev_get_fmt,
1364 	.set_fmt = cio2_subdev_set_fmt,
1365 	.enum_mbus_code = cio2_subdev_enum_mbus_code,
1366 };
1367 
1368 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1369 	.core = &cio2_subdev_core_ops,
1370 	.pad = &cio2_subdev_pad_ops,
1371 };
1372 
/******* V4L2 sub-device asynchronous registration callbacks *******/
1374 
1375 struct sensor_async_subdev {
1376 	struct v4l2_async_connection asd;
1377 	struct csi2_bus_info csi2;
1378 };
1379 
1380 #define to_sensor_asd(__asd)	\
1381 	container_of_const(__asd, struct sensor_async_subdev, asd)
1382 
1383 /* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1385 			       struct v4l2_subdev *sd,
1386 			       struct v4l2_async_connection *asd)
1387 {
1388 	struct cio2_device *cio2 = to_cio2_device(notifier);
1389 	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1390 	struct cio2_queue *q;
1391 	int ret;
1392 
1393 	if (cio2->queue[s_asd->csi2.port].sensor)
1394 		return -EBUSY;
1395 
1396 	ret = ipu_bridge_instantiate_vcm(sd->dev);
1397 	if (ret)
1398 		return ret;
1399 
1400 	q = &cio2->queue[s_asd->csi2.port];
1401 
1402 	q->csi2 = s_asd->csi2;
1403 	q->sensor = sd;
1404 	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1405 
1406 	return 0;
1407 }
1408 
1409 /* The .unbind callback */
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1411 				 struct v4l2_subdev *sd,
1412 				 struct v4l2_async_connection *asd)
1413 {
1414 	struct cio2_device *cio2 = to_cio2_device(notifier);
1415 	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1416 
1417 	cio2->queue[s_asd->csi2.port].sensor = NULL;
1418 }
1419 
1420 /* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1422 {
1423 	struct cio2_device *cio2 = to_cio2_device(notifier);
1424 	struct device *dev = &cio2->pci_dev->dev;
1425 	struct sensor_async_subdev *s_asd;
1426 	struct v4l2_async_connection *asd;
1427 	struct cio2_queue *q;
1428 	int ret;
1429 
1430 	list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
1431 		s_asd = to_sensor_asd(asd);
1432 		q = &cio2->queue[s_asd->csi2.port];
1433 
1434 		ret = media_entity_get_fwnode_pad(&q->sensor->entity,
1435 						  s_asd->asd.match.fwnode,
1436 						  MEDIA_PAD_FL_SOURCE);
1437 		if (ret < 0) {
1438 			dev_err(dev, "no pad for endpoint %pfw (%d)\n",
1439 				s_asd->asd.match.fwnode, ret);
1440 			return ret;
1441 		}
1442 
1443 		ret = media_create_pad_link(&q->sensor->entity, ret,
1444 					    &q->subdev.entity, CIO2_PAD_SINK,
1445 					    0);
1446 		if (ret) {
1447 			dev_err(dev, "failed to create link for %s (endpoint %pfw, error %d)\n",
1448 				q->sensor->name, s_asd->asd.match.fwnode, ret);
1449 			return ret;
1450 		}
1451 	}
1452 
1453 	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1454 }
1455 
1456 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1457 	.bound = cio2_notifier_bound,
1458 	.unbind = cio2_notifier_unbind,
1459 	.complete = cio2_notifier_complete,
1460 };
1461 
static int cio2_parse_firmware(struct cio2_device *cio2)
1463 {
1464 	struct device *dev = &cio2->pci_dev->dev;
1465 	unsigned int i;
1466 	int ret;
1467 
1468 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
1469 		struct v4l2_fwnode_endpoint vep = {
1470 			.bus_type = V4L2_MBUS_CSI2_DPHY
1471 		};
1472 		struct sensor_async_subdev *s_asd;
1473 		struct fwnode_handle *ep;
1474 
1475 		ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
1476 						FWNODE_GRAPH_ENDPOINT_NEXT);
1477 		if (!ep)
1478 			continue;
1479 
1480 		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1481 		if (ret)
1482 			goto err_parse;
1483 
1484 		s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
1485 							struct
1486 							sensor_async_subdev);
1487 		if (IS_ERR(s_asd)) {
1488 			ret = PTR_ERR(s_asd);
1489 			goto err_parse;
1490 		}
1491 
1492 		s_asd->csi2.port = vep.base.port;
1493 		s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1494 
1495 		fwnode_handle_put(ep);
1496 
1497 		continue;
1498 
1499 err_parse:
1500 		fwnode_handle_put(ep);
1501 		return ret;
1502 	}
1503 
1504 	/*
1505 	 * Proceed even without sensors connected to allow the device to
1506 	 * suspend.
1507 	 */
1508 	cio2->notifier.ops = &cio2_async_ops;
1509 	ret = v4l2_async_nf_register(&cio2->notifier);
1510 	if (ret)
		dev_err(dev, "failed to register async notifier: %d\n", ret);
1512 
1513 	return ret;
1514 }
1515 
1516 /**************** Queue initialization ****************/
1517 static const struct media_entity_operations cio2_media_ops = {
1518 	.link_validate = v4l2_subdev_link_validate,
1519 };
1520 
1521 static const struct media_entity_operations cio2_video_entity_ops = {
1522 	.link_validate = cio2_video_link_validate,
1523 };
1524 
static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1526 {
1527 	static const u32 default_width = 1936;
1528 	static const u32 default_height = 1096;
1529 	const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1530 	struct device *dev = &cio2->pci_dev->dev;
1531 	struct video_device *vdev = &q->vdev;
1532 	struct vb2_queue *vbq = &q->vbq;
1533 	struct v4l2_subdev *subdev = &q->subdev;
1534 	struct v4l2_mbus_framefmt *fmt;
1535 	int r;
1536 
1537 	/* Initialize miscellaneous variables */
1538 	mutex_init(&q->lock);
1539 	mutex_init(&q->subdev_lock);
1540 
1541 	/* Initialize formats to default values */
1542 	fmt = &q->subdev_fmt;
1543 	fmt->width = default_width;
1544 	fmt->height = default_height;
1545 	fmt->code = dflt_fmt.mbus_code;
1546 	fmt->field = V4L2_FIELD_NONE;
1547 
1548 	q->format.width = default_width;
1549 	q->format.height = default_height;
1550 	q->format.pixelformat = dflt_fmt.fourcc;
1551 	q->format.colorspace = V4L2_COLORSPACE_RAW;
1552 	q->format.field = V4L2_FIELD_NONE;
1553 	q->format.num_planes = 1;
1554 	q->format.plane_fmt[0].bytesperline =
1555 				cio2_bytesperline(q->format.width);
1556 	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1557 						q->format.height;
1558 
1559 	/* Initialize fbpt */
1560 	r = cio2_fbpt_init(cio2, q);
1561 	if (r)
1562 		goto fail_fbpt;
1563 
1564 	/* Initialize media entities */
1565 	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1566 		MEDIA_PAD_FL_MUST_CONNECT;
1567 	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1568 	subdev->entity.ops = &cio2_media_ops;
1569 	subdev->internal_ops = &cio2_subdev_internal_ops;
1570 	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1571 	if (r) {
		dev_err(dev, "failed to initialize subdev media entity (%d)\n", r);
1573 		goto fail_subdev_media_entity;
1574 	}
1575 
1576 	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1577 	vdev->entity.ops = &cio2_video_entity_ops;
1578 	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1579 	if (r) {
		dev_err(dev, "failed to initialize videodev media entity (%d)\n",
1581 			r);
1582 		goto fail_vdev_media_entity;
1583 	}
1584 
1585 	/* Initialize subdev */
1586 	v4l2_subdev_init(subdev, &cio2_subdev_ops);
1587 	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1588 	subdev->owner = THIS_MODULE;
1589 	snprintf(subdev->name, sizeof(subdev->name),
1590 		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1591 	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1592 	v4l2_set_subdevdata(subdev, cio2);
1593 	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1594 	if (r) {
		dev_err(dev, "failed to initialize subdev (%d)\n", r);
1596 		goto fail_subdev;
1597 	}

	/* Initialize vbq */
	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
	vbq->ops = &cio2_vb2_ops;
	vbq->mem_ops = &vb2_dma_sg_memops;
	vbq->buf_struct_size = sizeof(struct cio2_buffer);
	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
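	/*
	 * Descriptive note (assumption based on vb2 semantics): requiring one
	 * queued buffer means vb2 will not call start_streaming() until at
	 * least one buffer is available for the DMA engine.
	 */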
	vbq->min_buffers_needed = 1;
	vbq->drv_priv = cio2;
	vbq->lock = &q->lock;
	r = vb2_queue_init(vbq);
	if (r) {
		dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
		goto fail_subdev;
	}

	/* Initialize vdev */
	snprintf(vdev->name, sizeof(vdev->name),
		 "%s %td", CIO2_NAME, q - cio2->queue);
	vdev->release = video_device_release_empty;
	vdev->fops = &cio2_v4l2_fops;
	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
	vdev->lock = &cio2->lock;
	vdev->v4l2_dev = &cio2->v4l2_dev;
	vdev->queue = &q->vbq;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
	video_set_drvdata(vdev, cio2);
	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (r) {
		dev_err(dev, "failed to register video device (%d)\n", r);
		goto fail_vdev;
	}

	/* Create link from CIO2 subdev to output node */
	r = media_create_pad_link(
		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
	if (r)
		goto fail_link;

	return 0;

fail_link:
	vb2_video_unregister_device(&q->vdev);
fail_vdev:
	v4l2_device_unregister_subdev(subdev);
fail_subdev:
	media_entity_cleanup(&vdev->entity);
fail_vdev_media_entity:
	media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
	cio2_fbpt_exit(q, dev);
fail_fbpt:
	mutex_destroy(&q->subdev_lock);
	mutex_destroy(&q->lock);

	return r;
}

static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	vb2_video_unregister_device(&q->vdev);
	media_entity_cleanup(&q->vdev.entity);
	v4l2_device_unregister_subdev(&q->subdev);
	media_entity_cleanup(&q->subdev.entity);
	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
	mutex_destroy(&q->subdev_lock);
	mutex_destroy(&q->lock);
}

static int cio2_queues_init(struct cio2_device *cio2)
{
	int i, r;

	for (i = 0; i < CIO2_QUEUES; i++) {
		r = cio2_queue_init(cio2, &cio2->queue[i]);
		if (r)
			break;
	}

	if (i == CIO2_QUEUES)
		return 0;

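	/* Unwind the queues that were initialized before the failure. */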
	for (i--; i >= 0; i--)
		cio2_queue_exit(cio2, &cio2->queue[i]);

	return r;
}

static void cio2_queues_exit(struct cio2_device *cio2)
{
	unsigned int i;

	for (i = 0; i < CIO2_QUEUES; i++)
		cio2_queue_exit(cio2, &cio2->queue[i]);
}

static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *endpoint;

	if (IS_ERR_OR_NULL(fwnode))
		return -EINVAL;

	endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
	if (endpoint) {
		fwnode_handle_put(endpoint);
		return 0;
	}

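	/*
	 * No endpoints found on this node; fall back to checking the
	 * secondary fwnode (e.g. software nodes attached by the IPU bridge).
	 */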
	return cio2_check_fwnode_graph(fwnode->secondary);
}

/**************** PCI interface ****************/

static int cio2_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	struct device *dev = &pci_dev->dev;
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	struct cio2_device *cio2;
	int r;

	/*
	 * On some platforms no connections to sensors are defined in
	 * firmware. If the device has no endpoints, we can try to build them
	 * as software_nodes parsed from SSDB.
	 */
	r = cio2_check_fwnode_graph(fwnode);
	if (r) {
		if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
			dev_err(dev, "fwnode graph has no endpoints connected\n");
			return -EINVAL;
		}

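		/*
		 * Build the sensor connections as software nodes parsed from
		 * the SSDB via the IPU bridge (see the comment above).
		 */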
		r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
		if (r)
			return r;
	}

	cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
	if (!cio2)
		return -ENOMEM;
	cio2->pci_dev = pci_dev;

	r = pcim_enable_device(pci_dev);
	if (r) {
		dev_err(dev, "failed to enable device (%d)\n", r);
		return r;
	}

	dev_info(dev, "device 0x%x (rev: 0x%x)\n",
		 pci_dev->device, pci_dev->revision);

	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
	if (r) {
		dev_err(dev, "failed to remap I/O memory (%d)\n", r);
		return -ENODEV;
	}

	cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];

	pci_set_drvdata(pci_dev, cio2);

	pci_set_master(pci_dev);

	r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
	if (r) {
		dev_err(dev, "failed to set DMA mask (%d)\n", r);
		return -ENODEV;
	}

	r = pci_enable_msi(pci_dev);
	if (r) {
		dev_err(dev, "failed to enable MSI (%d)\n", r);
		return r;
	}

	r = cio2_fbpt_init_dummy(cio2);
	if (r)
		return r;

	mutex_init(&cio2->lock);

	cio2->media_dev.dev = dev;
	strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
		sizeof(cio2->media_dev.model));
	cio2->media_dev.hw_revision = 0;

	media_device_init(&cio2->media_dev);
	r = media_device_register(&cio2->media_dev);
	if (r < 0)
		goto fail_mutex_destroy;

	cio2->v4l2_dev.mdev = &cio2->media_dev;
	r = v4l2_device_register(dev, &cio2->v4l2_dev);
	if (r) {
		dev_err(dev, "failed to register V4L2 device (%d)\n", r);
		goto fail_media_device_unregister;
	}

	r = cio2_queues_init(cio2);
	if (r)
		goto fail_v4l2_device_unregister;

	v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);

	r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
			     CIO2_NAME, cio2);
	if (r) {
		dev_err(dev, "failed to request IRQ (%d)\n", r);
		goto fail_clean_notifier;
	}

	/* Register notifier for the subdevices we care about */
	r = cio2_parse_firmware(cio2);
	if (r)
		goto fail_clean_notifier;

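	/*
	 * Assumption (standard PCI runtime-PM idiom): drop the usage count
	 * taken for probe and allow runtime PM, so the device may enter
	 * runtime suspend when idle.
	 */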
	pm_runtime_put_noidle(dev);
	pm_runtime_allow(dev);

	return 0;

fail_clean_notifier:
	v4l2_async_nf_unregister(&cio2->notifier);
	v4l2_async_nf_cleanup(&cio2->notifier);
	cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
	v4l2_device_unregister(&cio2->v4l2_dev);
fail_media_device_unregister:
	media_device_unregister(&cio2->media_dev);
	media_device_cleanup(&cio2->media_dev);
fail_mutex_destroy:
	mutex_destroy(&cio2->lock);
	cio2_fbpt_exit_dummy(cio2);

	return r;
}

static void cio2_pci_remove(struct pci_dev *pci_dev)
{
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);

	media_device_unregister(&cio2->media_dev);
	v4l2_async_nf_unregister(&cio2->notifier);
	v4l2_async_nf_cleanup(&cio2->notifier);
	cio2_queues_exit(cio2);
	cio2_fbpt_exit_dummy(cio2);
	v4l2_device_unregister(&cio2->v4l2_dev);
	media_device_cleanup(&cio2->media_dev);
	mutex_destroy(&cio2->lock);

	pm_runtime_forbid(&pci_dev->dev);
	pm_runtime_get_noresume(&pci_dev->dev);
}

static int __maybe_unused cio2_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;
	u16 pm;

	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime suspend.\n");

	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
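	/* Clear the PMCSR power-state field, then request the D3 state. */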
	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
	pm |= CIO2_PMCSR_D3;
	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);

	return 0;
}

static int __maybe_unused cio2_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;
	u16 pm;

	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime resume.\n");

	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
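	/* Clear the PMCSR power-state field to return the device to D0. */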
	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);

	return 0;
}

/*
 * Helper function to advance all the elements of a circular buffer by "start"
 * positions
 */
static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
{
	struct {
		size_t begin, end;
	} arr[2] = {
		{ 0, start - 1 },
		{ start, elems - 1 },
	};

#define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)

	/* Loop as long as we have out-of-place entries */
	while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
		size_t size0, i;

		/*
		 * Find the number of entries that can be arranged on this
		 * iteration.
		 */
		size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));

		/* Swap the entries in two parts of the array. */
		for (i = 0; i < size0; i++) {
			u8 *d = ptr + elem_size * (arr[1].begin + i);
			u8 *s = ptr + elem_size * (arr[0].begin + i);
			size_t j;

			for (j = 0; j < elem_size; j++)
				swap(d[j], s[j]);
		}

		if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
			/* The end of the first array remains unarranged. */
			arr[0].begin += size0;
		} else {
			/*
			 * The first array is fully arranged so we proceed
			 * handling the next one.
			 */
			arr[0].begin = arr[1].begin;
			arr[0].end = arr[1].begin + size0 - 1;
			arr[1].begin += size0;
		}
	}
}
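
/*
 * Illustrative example (not part of the driver logic): given five elements
 * {A, B, C, D, E} and start = 2, arrange() rotates the array in place so it
 * becomes {C, D, E, A, B}, i.e. the element at index "start" ends up first.
 */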

static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
{
	unsigned int i, j;

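	/* Find the first queued buffer, scanning circularly from bufs_first. */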
	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
		i++, j = (j + 1) % CIO2_MAX_BUFFERS)
		if (q->bufs[j])
			break;

	if (i == CIO2_MAX_BUFFERS)
		return;

	if (j) {
		arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
			CIO2_MAX_BUFFERS, j);
		arrange(q->bufs, sizeof(struct cio2_buffer *),
			CIO2_MAX_BUFFERS, j);
	}

	/*
	 * DMA clears the valid bit when accessing the buffer.
	 * When stopping the stream in the suspend callback, some of the
	 * buffers may be left in an invalid state. After resume, when DMA
	 * meets an invalid buffer, it will halt and stop receiving new data.
	 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
	 */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
}

static int __maybe_unused cio2_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	struct cio2_queue *q = cio2->cur_queue;
	int r;

	dev_dbg(dev, "cio2 suspend\n");
	if (!cio2->streaming)
		return 0;

	/* Stop stream */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
	if (r) {
		dev_err(dev, "failed to stop sensor streaming\n");
		return r;
	}

	cio2_hw_exit(cio2, q);
	synchronize_irq(pci_dev->irq);

	pm_runtime_force_suspend(dev);

	/*
	 * Upon resume, hw starts to process the fbpt entries from the
	 * beginning, so relocate the queued buffers to the fbpt head before
	 * suspend.
	 */
	cio2_fbpt_rearrange(cio2, q);
	q->bufs_first = 0;
	q->bufs_next = 0;

	return 0;
}

static int __maybe_unused cio2_resume(struct device *dev)
{
	struct cio2_device *cio2 = dev_get_drvdata(dev);
	struct cio2_queue *q = cio2->cur_queue;
	int r;

	dev_dbg(dev, "cio2 resume\n");
	if (!cio2->streaming)
		return 0;
	/* Start stream */
	r = pm_runtime_force_resume(dev);
	if (r < 0) {
		dev_err(dev, "failed to set power (%d)\n", r);
		return r;
	}

	r = cio2_hw_init(cio2, q);
	if (r) {
		dev_err(dev, "failed to initialize cio2 hw\n");
		return r;
	}

	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
	if (r) {
		dev_err(dev, "failed to start sensor streaming\n");
		cio2_hw_exit(cio2, q);
	}

	return r;
}

static const struct dev_pm_ops cio2_pm_ops = {
	SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
};

static const struct pci_device_id cio2_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
	{ }
};

MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);

static struct pci_driver cio2_pci_driver = {
	.name = CIO2_NAME,
	.id_table = cio2_pci_id_table,
	.probe = cio2_pci_probe,
	.remove = cio2_pci_remove,
	.driver = {
		.pm = &cio2_pm_ops,
	},
};

module_pci_driver(cio2_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");
MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);