xref: /openbmc/linux/drivers/media/pci/intel/ipu3/ipu3-cio2.c (revision b181f7029bd71238ac2754ce7052dffd69432085)
1198109eaSBingbu Cao // SPDX-License-Identifier: GPL-2.0
2198109eaSBingbu Cao /*
3198109eaSBingbu Cao  * Copyright (C) 2017,2020 Intel Corporation
4198109eaSBingbu Cao  *
5198109eaSBingbu Cao  * Based partially on Intel IPU4 driver written by
6198109eaSBingbu Cao  *  Sakari Ailus <sakari.ailus@linux.intel.com>
7198109eaSBingbu Cao  *  Samu Onkalo <samu.onkalo@intel.com>
8198109eaSBingbu Cao  *  Jouni Högander <jouni.hogander@intel.com>
9198109eaSBingbu Cao  *  Jouni Ukkonen <jouni.ukkonen@intel.com>
10198109eaSBingbu Cao  *  Antti Laakso <antti.laakso@intel.com>
11198109eaSBingbu Cao  * et al.
12198109eaSBingbu Cao  */
13198109eaSBingbu Cao 
14198109eaSBingbu Cao #include <linux/bitops.h>
15198109eaSBingbu Cao #include <linux/delay.h>
16198109eaSBingbu Cao #include <linux/interrupt.h>
17198109eaSBingbu Cao #include <linux/iopoll.h>
18198109eaSBingbu Cao #include <linux/mm.h>
19198109eaSBingbu Cao #include <linux/module.h>
20198109eaSBingbu Cao #include <linux/pci.h>
21198109eaSBingbu Cao #include <linux/pfn.h>
22198109eaSBingbu Cao #include <linux/pm_runtime.h>
23198109eaSBingbu Cao #include <linux/property.h>
24198109eaSBingbu Cao #include <linux/vmalloc.h>
2521fabfb1SHans de Goede 
2621fabfb1SHans de Goede #include <media/ipu-bridge.h>
27198109eaSBingbu Cao #include <media/v4l2-ctrls.h>
28198109eaSBingbu Cao #include <media/v4l2-device.h>
29198109eaSBingbu Cao #include <media/v4l2-event.h>
30198109eaSBingbu Cao #include <media/v4l2-fwnode.h>
31198109eaSBingbu Cao #include <media/v4l2-ioctl.h>
32198109eaSBingbu Cao #include <media/videobuf2-dma-sg.h>
33198109eaSBingbu Cao 
34198109eaSBingbu Cao #include "ipu3-cio2.h"
35198109eaSBingbu Cao 
/*
 * struct ipu3_cio2_fmt - mapping between a media bus code, the matching
 * V4L2 pixel format and the CSI-2 properties needed to program the receiver.
 */
struct ipu3_cio2_fmt {
	u32 mbus_code;	/* media bus code (MEDIA_BUS_FMT_*) */
	u32 fourcc;	/* V4L2 pixel format (V4L2_PIX_FMT_*) */
	u8 mipicode;	/* MIPI CSI-2 data type code */
	u8 bpp;		/* bits per pixel on the bus */
};
42198109eaSBingbu Cao 
43198109eaSBingbu Cao /*
44198109eaSBingbu Cao  * These are raw formats used in Intel's third generation of
45198109eaSBingbu Cao  * Image Processing Unit known as IPU3.
46198109eaSBingbu Cao  * 10bit raw bayer packed, 32 bytes for every 25 pixels,
47198109eaSBingbu Cao  * last LSB 6 bits unused.
48198109eaSBingbu Cao  */
static const struct ipu3_cio2_fmt formats[] = {
	{	/* put default entry at beginning */
		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
		.mipicode	= 0x2b,	/* CSI-2 data type RAW10 */
		.bpp		= 10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
		.mipicode	= 0x2b,
		.bpp		= 10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
		.mipicode	= 0x2b,
		.bpp		= 10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
		.mipicode	= 0x2b,
		.bpp		= 10,
	}, {
		/* 10-bit greyscale uses the same RAW10 data type on the bus */
		.mbus_code	= MEDIA_BUS_FMT_Y10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_Y10,
		.mipicode	= 0x2b,
		.bpp		= 10,
	},
};
77198109eaSBingbu Cao 
78198109eaSBingbu Cao /*
79198109eaSBingbu Cao  * cio2_find_format - lookup color format by fourcc or/and media bus code
80198109eaSBingbu Cao  * @pixelformat: fourcc to match, ignored if null
81198109eaSBingbu Cao  * @mbus_code: media bus code to match, ignored if null
82198109eaSBingbu Cao  */
cio2_find_format(const u32 * pixelformat,const u32 * mbus_code)83198109eaSBingbu Cao static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
84198109eaSBingbu Cao 						    const u32 *mbus_code)
85198109eaSBingbu Cao {
86198109eaSBingbu Cao 	unsigned int i;
87198109eaSBingbu Cao 
88198109eaSBingbu Cao 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
89198109eaSBingbu Cao 		if (pixelformat && *pixelformat != formats[i].fourcc)
90198109eaSBingbu Cao 			continue;
91198109eaSBingbu Cao 		if (mbus_code && *mbus_code != formats[i].mbus_code)
92198109eaSBingbu Cao 			continue;
93198109eaSBingbu Cao 
94198109eaSBingbu Cao 		return &formats[i];
95198109eaSBingbu Cao 	}
96198109eaSBingbu Cao 
97198109eaSBingbu Cao 	return NULL;
98198109eaSBingbu Cao }
99198109eaSBingbu Cao 
cio2_bytesperline(const unsigned int width)100198109eaSBingbu Cao static inline u32 cio2_bytesperline(const unsigned int width)
101198109eaSBingbu Cao {
102198109eaSBingbu Cao 	/*
103198109eaSBingbu Cao 	 * 64 bytes for every 50 pixels, the line length
104198109eaSBingbu Cao 	 * in bytes is multiple of 64 (line end alignment).
105198109eaSBingbu Cao 	 */
106198109eaSBingbu Cao 	return DIV_ROUND_UP(width, 50) * 64;
107198109eaSBingbu Cao }
108198109eaSBingbu Cao 
109198109eaSBingbu Cao /**************** FBPT operations ****************/
110198109eaSBingbu Cao 
cio2_fbpt_exit_dummy(struct cio2_device * cio2)111198109eaSBingbu Cao static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
112198109eaSBingbu Cao {
113198109eaSBingbu Cao 	struct device *dev = &cio2->pci_dev->dev;
114198109eaSBingbu Cao 
115198109eaSBingbu Cao 	if (cio2->dummy_lop) {
116198109eaSBingbu Cao 		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
117198109eaSBingbu Cao 				  cio2->dummy_lop_bus_addr);
118198109eaSBingbu Cao 		cio2->dummy_lop = NULL;
119198109eaSBingbu Cao 	}
120198109eaSBingbu Cao 	if (cio2->dummy_page) {
121198109eaSBingbu Cao 		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
122198109eaSBingbu Cao 				  cio2->dummy_page_bus_addr);
123198109eaSBingbu Cao 		cio2->dummy_page = NULL;
124198109eaSBingbu Cao 	}
125198109eaSBingbu Cao }
126198109eaSBingbu Cao 
cio2_fbpt_init_dummy(struct cio2_device * cio2)127198109eaSBingbu Cao static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
128198109eaSBingbu Cao {
129198109eaSBingbu Cao 	struct device *dev = &cio2->pci_dev->dev;
130198109eaSBingbu Cao 	unsigned int i;
131198109eaSBingbu Cao 
132198109eaSBingbu Cao 	cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
133198109eaSBingbu Cao 					      &cio2->dummy_page_bus_addr,
134198109eaSBingbu Cao 					      GFP_KERNEL);
135198109eaSBingbu Cao 	cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
136198109eaSBingbu Cao 					     &cio2->dummy_lop_bus_addr,
137198109eaSBingbu Cao 					     GFP_KERNEL);
138198109eaSBingbu Cao 	if (!cio2->dummy_page || !cio2->dummy_lop) {
139198109eaSBingbu Cao 		cio2_fbpt_exit_dummy(cio2);
140198109eaSBingbu Cao 		return -ENOMEM;
141198109eaSBingbu Cao 	}
142198109eaSBingbu Cao 	/*
143198109eaSBingbu Cao 	 * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each
144198109eaSBingbu Cao 	 * Initialize each entry to dummy_page bus base address.
145198109eaSBingbu Cao 	 */
146198109eaSBingbu Cao 	for (i = 0; i < CIO2_LOP_ENTRIES; i++)
147198109eaSBingbu Cao 		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
148198109eaSBingbu Cao 
149198109eaSBingbu Cao 	return 0;
150198109eaSBingbu Cao }
151198109eaSBingbu Cao 
/*
 * Publish an initialized FBPT frame entry to the hardware by setting its
 * VALID bit (plus start/completion interrupt requests). Must be called
 * only after all other fields of @entry have been written.
 */
static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{
	/*
	 * The CPU first initializes some fields in fbpt, then sets
	 * the VALID bit, this barrier is to ensure that the DMA(device)
	 * does not see the VALID bit enabled before other fields are
	 * initialized; otherwise it could lead to havoc.
	 */
	dma_wmb();

	/*
	 * Request interrupts for start and completion
	 * Valid bit is applicable only to 1st entry
	 */
	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
}
170198109eaSBingbu Cao 
171198109eaSBingbu Cao /* Initialize fpbt entries to point to dummy frame */
cio2_fbpt_entry_init_dummy(struct cio2_device * cio2,struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])172198109eaSBingbu Cao static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
173198109eaSBingbu Cao 				       struct cio2_fbpt_entry
174198109eaSBingbu Cao 				       entry[CIO2_MAX_LOPS])
175198109eaSBingbu Cao {
176198109eaSBingbu Cao 	unsigned int i;
177198109eaSBingbu Cao 
178198109eaSBingbu Cao 	entry[0].first_entry.first_page_offset = 0;
179198109eaSBingbu Cao 	entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
180198109eaSBingbu Cao 	entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
181198109eaSBingbu Cao 
182198109eaSBingbu Cao 	for (i = 0; i < CIO2_MAX_LOPS; i++)
183198109eaSBingbu Cao 		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
184198109eaSBingbu Cao 
185198109eaSBingbu Cao 	cio2_fbpt_entry_enable(cio2, entry);
186198109eaSBingbu Cao }
187198109eaSBingbu Cao 
/*
 * Initialize FBPT entries to point to a given buffer.
 *
 * Fills in the frame header fields (start offset, page count, last-page
 * length) from the vb2 plane, then chains in the buffer's per-buffer LOP
 * pages. Any FBPT slot past the buffer's data is pointed at the dummy LOP
 * so the DMA never dereferences an uninitialized pointer.
 */
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
				     struct cio2_buffer *b,
				     struct cio2_fbpt_entry
				     entry[CIO2_MAX_LOPS])
{
	struct vb2_buffer *vb = &b->vbb.vb2_buf;
	unsigned int length = vb->planes[0].length;
	int remaining, i;

	entry[0].first_entry.first_page_offset = b->offset;
	/* Total span covered by DMA: payload plus the leading page offset. */
	remaining = length + entry[0].first_entry.first_page_offset;
	entry[1].second_entry.num_of_pages = PFN_UP(remaining);
	/*
	 * last_page_available_bytes has the offset of the last byte in the
	 * last page which is still accessible by DMA. DMA cannot access
	 * beyond this point. Valid range for this is from 0 to 4095.
	 * 0 indicates 1st byte in the page is DMA accessible.
	 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
	 * is available for DMA transfer.
	 */
	remaining = offset_in_page(remaining) ?: PAGE_SIZE;
	entry[1].second_entry.last_page_available_bytes = remaining - 1;
	/* Fill FBPT */
	remaining = length;
	i = 0;
	/* One LOP page maps CIO2_LOP_ENTRIES 4 KiB pages of payload. */
	while (remaining > 0) {
		entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
		remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
		entry++;
		i++;
	}

	/*
	 * The first not meaningful FBPT entry should point to a valid LOP
	 */
	entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);

	cio2_fbpt_entry_enable(cio2, entry);
}
228198109eaSBingbu Cao 
cio2_fbpt_init(struct cio2_device * cio2,struct cio2_queue * q)229198109eaSBingbu Cao static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
230198109eaSBingbu Cao {
231198109eaSBingbu Cao 	struct device *dev = &cio2->pci_dev->dev;
232198109eaSBingbu Cao 
233198109eaSBingbu Cao 	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
234198109eaSBingbu Cao 				     GFP_KERNEL);
235198109eaSBingbu Cao 	if (!q->fbpt)
236198109eaSBingbu Cao 		return -ENOMEM;
237198109eaSBingbu Cao 
238198109eaSBingbu Cao 	return 0;
239198109eaSBingbu Cao }
240198109eaSBingbu Cao 
/* Release the FBPT allocated by cio2_fbpt_init(). */
static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
{
	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
}
245198109eaSBingbu Cao 
246198109eaSBingbu Cao /**************** CSI2 hardware setup ****************/
247198109eaSBingbu Cao 
248198109eaSBingbu Cao /*
249198109eaSBingbu Cao  * The CSI2 receiver has several parameters affecting
250198109eaSBingbu Cao  * the receiver timings. These depend on the MIPI bus frequency
251198109eaSBingbu Cao  * F in Hz (sensor transmitter rate) as follows:
252198109eaSBingbu Cao  *     register value = (A/1e9 + B * UI) / COUNT_ACC
253198109eaSBingbu Cao  * where
254198109eaSBingbu Cao  *      UI = 1 / (2 * F) in seconds
255198109eaSBingbu Cao  *      COUNT_ACC = counter accuracy in seconds
256198109eaSBingbu Cao  *      For IPU3 COUNT_ACC = 0.0625
257198109eaSBingbu Cao  *
258198109eaSBingbu Cao  * A and B are coefficients from the table below,
259198109eaSBingbu Cao  * depending whether the register minimum or maximum value is
260198109eaSBingbu Cao  * calculated.
261198109eaSBingbu Cao  *                                     Minimum     Maximum
262198109eaSBingbu Cao  * Clock lane                          A     B     A     B
263198109eaSBingbu Cao  * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
264198109eaSBingbu Cao  * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
265198109eaSBingbu Cao  * Data lanes
266198109eaSBingbu Cao  * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
267198109eaSBingbu Cao  * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
268198109eaSBingbu Cao  * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
269198109eaSBingbu Cao  * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
270198109eaSBingbu Cao  * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
271198109eaSBingbu Cao  * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
272198109eaSBingbu Cao  * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
273198109eaSBingbu Cao  * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
274198109eaSBingbu Cao  *
275198109eaSBingbu Cao  * We use the minimum values of both A and B.
276198109eaSBingbu Cao  */
277198109eaSBingbu Cao 
278198109eaSBingbu Cao /*
279198109eaSBingbu Cao  * shift for keeping value range suitable for 32-bit integer arithmetic
280198109eaSBingbu Cao  */
281198109eaSBingbu Cao #define LIMIT_SHIFT	8
282198109eaSBingbu Cao 
/*
 * cio2_rx_timing - compute a CSI-2 receiver timing register value
 * @a: additive coefficient from the timing table above
 * @b: UI-multiplier coefficient from the timing table above
 * @freq: MIPI bus frequency F in Hz (sensor transmitter rate)
 * @def: fallback value used when @freq is out of range
 *
 * Evaluates (A/1e9 + B * UI) / COUNT_ACC with 32-bit arithmetic, where
 * UI = 1 / (2 * F) and COUNT_ACC = 0.0625 (so accinv = 16).
 *
 * Return: the register value, or @def if @freq is invalid (with a WARN).
 */
static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
{
	const u32 accinv = 16; /* invert of counter resolution */
	const u32 uiinv = 500000000; /* 1e9 / 2 */
	s32 r;

	/* Keep the value range suitable for 32-bit arithmetic below. */
	freq >>= LIMIT_SHIFT;

	if (WARN_ON(freq <= 0 || freq > S32_MAX))
		return def;
	/*
	 * b could be 0, -2 or -8, so |accinv * b| is always
	 * less than (1 << ds) and thus |r| < 500000000.
	 */
	r = accinv * b * (uiinv >> LIMIT_SHIFT);
	r = r / (s32)freq;
	/* max value of a is 95 */
	r += accinv * a;

	return r;
}
304198109eaSBingbu Cao 
305198109eaSBingbu Cao /* Calculate the delay value for termination enable of clock lane HS Rx */
cio2_csi2_calc_timing(struct cio2_device * cio2,struct cio2_queue * q,struct cio2_csi2_timing * timing,unsigned int bpp,unsigned int lanes)306198109eaSBingbu Cao static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
307198109eaSBingbu Cao 				 struct cio2_csi2_timing *timing,
308198109eaSBingbu Cao 				 unsigned int bpp, unsigned int lanes)
309198109eaSBingbu Cao {
310198109eaSBingbu Cao 	struct device *dev = &cio2->pci_dev->dev;
311198109eaSBingbu Cao 	s64 freq;
312198109eaSBingbu Cao 
313198109eaSBingbu Cao 	if (!q->sensor)
314198109eaSBingbu Cao 		return -ENODEV;
315198109eaSBingbu Cao 
316198109eaSBingbu Cao 	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
317198109eaSBingbu Cao 	if (freq < 0) {
318198109eaSBingbu Cao 		dev_err(dev, "error %lld, invalid link_freq\n", freq);
319198109eaSBingbu Cao 		return freq;
320198109eaSBingbu Cao 	}
321198109eaSBingbu Cao 
322198109eaSBingbu Cao 	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
323198109eaSBingbu Cao 					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
324198109eaSBingbu Cao 					    freq,
325198109eaSBingbu Cao 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
326198109eaSBingbu Cao 	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
327198109eaSBingbu Cao 					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
328198109eaSBingbu Cao 					    freq,
329198109eaSBingbu Cao 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
330198109eaSBingbu Cao 	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
331198109eaSBingbu Cao 					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
332198109eaSBingbu Cao 					    freq,
333198109eaSBingbu Cao 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
334198109eaSBingbu Cao 	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
335198109eaSBingbu Cao 					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
336198109eaSBingbu Cao 					    freq,
337198109eaSBingbu Cao 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
338198109eaSBingbu Cao 
339198109eaSBingbu Cao 	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
340198109eaSBingbu Cao 	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
341198109eaSBingbu Cao 	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
342198109eaSBingbu Cao 	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
343198109eaSBingbu Cao 
344198109eaSBingbu Cao 	return 0;
345198109eaSBingbu Cao };
346198109eaSBingbu Cao 
/*
 * cio2_hw_init - program the CIO2 hardware to stream on queue @q
 *
 * Configures the CSI-2 receiver lane timings, pixel buffer manager
 * watermarks/arbitration, MIPI backend LUTs, interrupt enables, clock
 * gating, LTR values and the frame DMA, then enables the receiver and
 * backend (last device in the pipe first).
 *
 * Return: 0 on success, -EINVAL if the subdev format has no matching
 * entry in formats[], or the error from cio2_csi2_calc_timing().
 */
static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const int NUM_VCS = 4;
	static const int SID;	/* Stream id */
	static const int ENTRY;	/* LP LUT entry index used for this stream */
	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
					CIO2_FBPT_SUBENTRY_UNIT);
	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
	const struct ipu3_cio2_fmt *fmt;
	void __iomem *const base = cio2->base;
	u8 lanes, csi2bus = q->csi2.port;
	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
	struct cio2_csi2_timing timing = { 0 };
	int i, r;

	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
	if (!fmt)
		return -EINVAL;

	lanes = q->csi2.lanes;

	r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
	if (r)
		return r;

	/* Program clock lane termination-enable and settle delays. */
	writel(timing.clk_termen, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
	writel(timing.clk_settle, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));

	/* Same delays for each active data lane. */
	for (i = 0; i < lanes; i++) {
		writel(timing.dat_termen, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
		writel(timing.dat_settle, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
	}

	/* Pixel buffer manager watermarks and arbitration. */
	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
	       CIO2_PBM_WMCTRL1_MID1_2CK |
	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
	       CIO2_PBM_ARB_CTRL_LE_EN |
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
	       base + CIO2_REG_PBM_ARB_CTRL);
	/* Clear any stale lane status bits (write-1-to-clear registers). */
	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);

	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);

	/* Configure MIPI backend */
	for (i = 0; i < NUM_VCS; i++)
		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));

	/* There are 16 short packet LUT entry */
	for (i = 0; i < 16; i++)
		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);

	/* Interrupt enables and IRQ control (level-triggered, unmasked). */
	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);

	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
	       base + CIO2_REG_INT_EN);

	/* Route this CSI-2 port/stream through the pixel crossbar. */
	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));

	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
	/* Clock gating and LTR (latency tolerance reporting) setup. */
	writel(CIO2_CGC_PRIM_TGE |
	       CIO2_CGC_SIDE_TGE |
	       CIO2_CGC_XOSC_TGE |
	       CIO2_CGC_D3I3_TGE |
	       CIO2_CGC_CSI2_INTERFRAME_TGE |
	       CIO2_CGC_CSI2_PORT_DCGE |
	       CIO2_CGC_SIDE_DCGE |
	       CIO2_CGC_PRIM_DCGE |
	       CIO2_CGC_ROSC_DCGE |
	       CIO2_CGC_XOSC_DCGE |
	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL01);
	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL23);

	/* Reset every DMA channel before enabling the one we use. */
	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
		writel(0, base + CIO2_REG_CDMABA(i));
		writel(0, base + CIO2_REG_CDMAC0(i));
		writel(0, base + CIO2_REG_CDMAC1(i));
	}

	/* Enable DMA */
	writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));

	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
	       CIO2_CDMAC0_DMA_INTR_ON_FE |
	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
	       CIO2_CDMAC0_DMA_EN |
	       CIO2_CDMAC0_DMA_INTR_ON_FS |
	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));

	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));

	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);

	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));

	/* Clear interrupts */
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
	writel(~0, base + CIO2_REG_INT_STS);

	/* Enable devices, starting from the last device in the pipe */
	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);

	return 0;
}
508198109eaSBingbu Cao 
/*
 * cio2_hw_exit - stop streaming on queue @q and quiesce the hardware
 *
 * Disables the CSI-2 receiver and MIPI backend, halts the frame DMA
 * (polling for the HALTED flag), then aborts any in-flight frames on
 * every port.
 */
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	struct device *dev = &cio2->pci_dev->dev;
	void __iomem *const base = cio2->base;
	unsigned int i;
	u32 value;
	int ret;

	/* Disable CSI receiver and MIPI backend devices */
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);

	/* Halt DMA */
	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
	/* Poll every 4 ms, give up after 2 s. */
	ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
				 value, value & CIO2_CDMAC0_DMA_HALTED,
				 4000, 2000000);
	if (ret)
		dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);

	/* Abort pending frames on all ports (read-modify-write abort bits). */
	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
	}
}
538198109eaSBingbu Cao 
/*
 * cio2_buffer_done - complete buffers whose FBPT entries the DMA finished
 * @cio2: the device
 * @dma_chan: DMA channel the interrupt fired on (validated, then the
 *            current queue is used)
 *
 * Walks the FBPT ring starting at bufs_first; an entry whose VALID bit the
 * hardware has cleared is done. Each done buffer is timestamped, handed to
 * vb2, and its ring slot is re-pointed at the dummy frame and re-armed.
 */
static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_queue *q = cio2->cur_queue;
	struct cio2_fbpt_entry *entry;
	u64 ns = ktime_get_ns();

	if (dma_chan >= CIO2_QUEUES) {
		dev_err(dev, "bad DMA channel %i\n", dma_chan);
		return;
	}

	entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
	/* VALID still set means the hardware has not consumed this entry. */
	if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
		dev_warn(dev, "no ready buffers found on DMA channel %u\n",
			 dma_chan);
		return;
	}

	/* Find out which buffer(s) are ready */
	do {
		struct cio2_buffer *b;

		b = q->bufs[q->bufs_first];
		if (b) {
			/* Byte count the hardware wrote for this frame. */
			unsigned int received = entry[1].second_entry.num_of_bytes;
			unsigned long payload =
				vb2_get_plane_payload(&b->vbb.vb2_buf, 0);

			q->bufs[q->bufs_first] = NULL;
			atomic_dec(&q->bufs_queued);
			dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);

			b->vbb.vb2_buf.timestamp = ns;
			b->vbb.field = V4L2_FIELD_NONE;
			b->vbb.sequence = atomic_read(&q->frame_sequence);
			if (payload != received)
				dev_warn(dev,
					 "payload length is %lu, received %u\n",
					 payload, received);
			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
		}
		/* Sequence advances even for slots with no queued buffer. */
		atomic_inc(&q->frame_sequence);
		cio2_fbpt_entry_init_dummy(cio2, entry);
		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
		entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
	} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
}
587198109eaSBingbu Cao 
cio2_queue_event_sof(struct cio2_device * cio2,struct cio2_queue * q)588198109eaSBingbu Cao static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
589198109eaSBingbu Cao {
590198109eaSBingbu Cao 	/*
591198109eaSBingbu Cao 	 * For the user space camera control algorithms it is essential
592198109eaSBingbu Cao 	 * to know when the reception of a frame has begun. That's often
593198109eaSBingbu Cao 	 * the best timing information to get from the hardware.
594198109eaSBingbu Cao 	 */
595198109eaSBingbu Cao 	struct v4l2_event event = {
596198109eaSBingbu Cao 		.type = V4L2_EVENT_FRAME_SYNC,
597198109eaSBingbu Cao 		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
598198109eaSBingbu Cao 	};
599198109eaSBingbu Cao 
600198109eaSBingbu Cao 	v4l2_event_queue(q->subdev.devnode, &event);
601198109eaSBingbu Cao }
602198109eaSBingbu Cao 
/*
 * CSI-2 receiver error descriptions, indexed by bit number of the
 * per-port IRQCTRL status word (see cio2_irq_log_irq_errs()).
 */
static const char *const cio2_irq_errs[] = {
	"single packet header error corrected",
	"multiple packet header errors detected",
	"payload checksum (CRC) error",
	"fifo overflow",
	"reserved short packet data type detected",
	"reserved long packet data type detected",
	"incomplete long packet detected",
	"frame sync error",
	"line sync error",
	"DPHY start of transmission error",
	"DPHY synchronization error",
	"escape mode error",
	"escape mode trigger event",
	"escape mode ultra-low power state for data lane(s)",
	"escape mode ultra-low power state exit for clock lane",
	"inter-frame short packet discarded",
	"inter-frame long packet discarded",
	"non-matching Long Packet stalled",
};
623198109eaSBingbu Cao 
/*
 * Log every decodable CSI-2 receiver error bit in @status for @port,
 * and warn if bits beyond the known set are present.
 */
static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
{
	unsigned long csi2_status = status;
	unsigned int i;

	for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
		dev_err(dev, "CSI-2 receiver port %i: %s\n",
			port, cio2_irq_errs[i]);

	/*
	 * fls_long() returns the 1-based position of the most significant
	 * set bit, so the highest known error bit (ARRAY_SIZE - 1) yields
	 * exactly ARRAY_SIZE. Comparing with ">=" here incorrectly flagged
	 * that known bit ("non-matching Long Packet stalled") as unknown;
	 * only bits at index >= ARRAY_SIZE are truly undecodable.
	 */
	if (fls_long(csi2_status) > ARRAY_SIZE(cio2_irq_errs))
		dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
			 csi2_status, port);
}
637198109eaSBingbu Cao 
/*
 * Per-port error descriptions, indexed by bit number of the 8-bit
 * per-port field of the INT_STS_EXT_IE register
 * (see cio2_irq_log_port_errs()).
 */
static const char *const cio2_port_errs[] = {
	"ECC recoverable",
	"DPHY not recoverable",
	"ECC not recoverable",
	"CRC error",
	"INTERFRAMEDATA",
	"PKT2SHORT",
	"PKT2LONG",
};
647198109eaSBingbu Cao 
/* Log one line per error bit set in the per-port status @status. */
static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
{
	unsigned long errs = status;
	unsigned int bit;

	for_each_set_bit(bit, &errs, ARRAY_SIZE(cio2_port_errs))
		dev_err(dev, "port %i error %s\n", port, cio2_port_errs[bit]);
}
656198109eaSBingbu Cao 
/*
 * Handle one snapshot of the main interrupt status register: decode,
 * act on and clear each known condition (output errors, frame done,
 * start-of-frame, CSI-2 receiver errors), warning about any status
 * bits left undecoded.
 */
static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{
	struct device *dev = &cio2->pci_dev->dev;
	void __iomem *const base = cio2->base;

	if (int_status & CIO2_INT_IOOE) {
		/*
		 * Interrupt on Output Error:
		 * 1) SRAM is full and FS received, or
		 * 2) An invalid bit detected by DMA.
		 */
		u32 oe_status, oe_clear;

		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
		oe_status = oe_clear;

		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
			dev_err(dev, "DMA output error: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
		}
		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
				>> CIO2_INT_EXT_OE_OES_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
		}
		/* Write back the unmodified snapshot to ack all OE bits. */
		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
		if (oe_status)
			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
				 oe_status);
		int_status &= ~CIO2_INT_IOOE;
	}

	if (int_status & CIO2_INT_IOC_MASK) {
		/* DMA IO done -- frame ready */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOC(d)) {
				clr |= CIO2_INT_IOC(d);
				cio2_buffer_done(cio2, d);
			}
		int_status &= ~clr;
	}

	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
		/* DMA IO starts or reached specified line */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOS_IOLN(d)) {
				clr |= CIO2_INT_IOS_IOLN(d);
				/* SOF events only for the active channel. */
				if (d == CIO2_DMA_CHAN)
					cio2_queue_event_sof(cio2,
							     cio2->cur_queue);
			}
		int_status &= ~clr;
	}

	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
		/* CSI2 receiver (error) interrupt */
		unsigned int port;
		u32 ie_status;

		ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);

		for (port = 0; port < CIO2_NUM_PORTS; port++) {
			/* Each port owns one byte of the IE status word. */
			u32 port_status = (ie_status >> (port * 8)) & 0xff;

			cio2_irq_log_port_errs(dev, port, port_status);

			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
				void __iomem *csi_rx_base =
						base + CIO2_REG_PIPE_BASE(port);
				u32 csi2_status;

				csi2_status = readl(csi_rx_base +
						CIO2_REG_IRQCTRL_STATUS);

				cio2_irq_log_irq_errs(dev, port, csi2_status);

				/* Ack the per-port receiver interrupts. */
				writel(csi2_status,
				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
			}
		}

		writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);

		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
	}

	if (int_status)
		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
}
755198109eaSBingbu Cao 
cio2_irq(int irq,void * cio2_ptr)756198109eaSBingbu Cao static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
757198109eaSBingbu Cao {
758198109eaSBingbu Cao 	struct cio2_device *cio2 = cio2_ptr;
759198109eaSBingbu Cao 	void __iomem *const base = cio2->base;
760198109eaSBingbu Cao 	struct device *dev = &cio2->pci_dev->dev;
761198109eaSBingbu Cao 	u32 int_status;
762198109eaSBingbu Cao 
763198109eaSBingbu Cao 	int_status = readl(base + CIO2_REG_INT_STS);
764198109eaSBingbu Cao 	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
765198109eaSBingbu Cao 	if (!int_status)
766198109eaSBingbu Cao 		return IRQ_NONE;
767198109eaSBingbu Cao 
768198109eaSBingbu Cao 	do {
769198109eaSBingbu Cao 		writel(int_status, base + CIO2_REG_INT_STS);
770198109eaSBingbu Cao 		cio2_irq_handle_once(cio2, int_status);
771198109eaSBingbu Cao 		int_status = readl(base + CIO2_REG_INT_STS);
772198109eaSBingbu Cao 		if (int_status)
773198109eaSBingbu Cao 			dev_dbg(dev, "pending status 0x%x\n", int_status);
774198109eaSBingbu Cao 	} while (int_status);
775198109eaSBingbu Cao 
776198109eaSBingbu Cao 	return IRQ_HANDLED;
777198109eaSBingbu Cao }
778198109eaSBingbu Cao 
779198109eaSBingbu Cao /**************** Videobuf2 interface ****************/
780198109eaSBingbu Cao 
cio2_vb2_return_all_buffers(struct cio2_queue * q,enum vb2_buffer_state state)781198109eaSBingbu Cao static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
782198109eaSBingbu Cao 					enum vb2_buffer_state state)
783198109eaSBingbu Cao {
784198109eaSBingbu Cao 	unsigned int i;
785198109eaSBingbu Cao 
786198109eaSBingbu Cao 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
787198109eaSBingbu Cao 		if (q->bufs[i]) {
788198109eaSBingbu Cao 			atomic_dec(&q->bufs_queued);
789198109eaSBingbu Cao 			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
790198109eaSBingbu Cao 					state);
791198109eaSBingbu Cao 			q->bufs[i] = NULL;
792198109eaSBingbu Cao 		}
793198109eaSBingbu Cao 	}
794198109eaSBingbu Cao }
795198109eaSBingbu Cao 
cio2_vb2_queue_setup(struct vb2_queue * vq,unsigned int * num_buffers,unsigned int * num_planes,unsigned int sizes[],struct device * alloc_devs[])796198109eaSBingbu Cao static int cio2_vb2_queue_setup(struct vb2_queue *vq,
797198109eaSBingbu Cao 				unsigned int *num_buffers,
798198109eaSBingbu Cao 				unsigned int *num_planes,
799198109eaSBingbu Cao 				unsigned int sizes[],
800198109eaSBingbu Cao 				struct device *alloc_devs[])
801198109eaSBingbu Cao {
802198109eaSBingbu Cao 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
803198109eaSBingbu Cao 	struct device *dev = &cio2->pci_dev->dev;
804198109eaSBingbu Cao 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
805198109eaSBingbu Cao 	unsigned int i;
806198109eaSBingbu Cao 
807198109eaSBingbu Cao 	if (*num_planes && *num_planes < q->format.num_planes)
808198109eaSBingbu Cao 		return -EINVAL;
809198109eaSBingbu Cao 
810198109eaSBingbu Cao 	for (i = 0; i < q->format.num_planes; ++i) {
811198109eaSBingbu Cao 		if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
812198109eaSBingbu Cao 			return -EINVAL;
813198109eaSBingbu Cao 		sizes[i] = q->format.plane_fmt[i].sizeimage;
814198109eaSBingbu Cao 		alloc_devs[i] = dev;
815198109eaSBingbu Cao 	}
816198109eaSBingbu Cao 
817198109eaSBingbu Cao 	*num_planes = q->format.num_planes;
818198109eaSBingbu Cao 	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
819198109eaSBingbu Cao 
820198109eaSBingbu Cao 	/* Initialize buffer queue */
821198109eaSBingbu Cao 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
822198109eaSBingbu Cao 		q->bufs[i] = NULL;
823198109eaSBingbu Cao 		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
824198109eaSBingbu Cao 	}
825198109eaSBingbu Cao 	atomic_set(&q->bufs_queued, 0);
826198109eaSBingbu Cao 	q->bufs_first = 0;
827198109eaSBingbu Cao 	q->bufs_next = 0;
828198109eaSBingbu Cao 
829198109eaSBingbu Cao 	return 0;
830198109eaSBingbu Cao }
831198109eaSBingbu Cao 
832198109eaSBingbu Cao /* Called after each buffer is allocated */
cio2_vb2_buf_init(struct vb2_buffer * vb)833198109eaSBingbu Cao static int cio2_vb2_buf_init(struct vb2_buffer *vb)
834198109eaSBingbu Cao {
835198109eaSBingbu Cao 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
836198109eaSBingbu Cao 	struct device *dev = &cio2->pci_dev->dev;
837198109eaSBingbu Cao 	struct cio2_buffer *b = to_cio2_buffer(vb);
838198109eaSBingbu Cao 	unsigned int pages = PFN_UP(vb->planes[0].length);
839198109eaSBingbu Cao 	unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
840198109eaSBingbu Cao 	struct sg_table *sg;
841198109eaSBingbu Cao 	struct sg_dma_page_iter sg_iter;
842198109eaSBingbu Cao 	unsigned int i, j;
843198109eaSBingbu Cao 
844198109eaSBingbu Cao 	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
845198109eaSBingbu Cao 		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
846198109eaSBingbu Cao 			vb->planes[0].length);
847198109eaSBingbu Cao 		return -ENOSPC;		/* Should never happen */
848198109eaSBingbu Cao 	}
849198109eaSBingbu Cao 
850198109eaSBingbu Cao 	memset(b->lop, 0, sizeof(b->lop));
851198109eaSBingbu Cao 	/* Allocate LOP table */
852198109eaSBingbu Cao 	for (i = 0; i < lops; i++) {
853198109eaSBingbu Cao 		b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
854198109eaSBingbu Cao 					       &b->lop_bus_addr[i], GFP_KERNEL);
855198109eaSBingbu Cao 		if (!b->lop[i])
856198109eaSBingbu Cao 			goto fail;
857198109eaSBingbu Cao 	}
858198109eaSBingbu Cao 
859198109eaSBingbu Cao 	/* Fill LOP */
860198109eaSBingbu Cao 	sg = vb2_dma_sg_plane_desc(vb, 0);
861198109eaSBingbu Cao 	if (!sg)
862198109eaSBingbu Cao 		return -ENOMEM;
863198109eaSBingbu Cao 
864198109eaSBingbu Cao 	if (sg->nents && sg->sgl)
865198109eaSBingbu Cao 		b->offset = sg->sgl->offset;
866198109eaSBingbu Cao 
867198109eaSBingbu Cao 	i = j = 0;
868198109eaSBingbu Cao 	for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
869198109eaSBingbu Cao 		if (!pages--)
870198109eaSBingbu Cao 			break;
871198109eaSBingbu Cao 		b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
872198109eaSBingbu Cao 		j++;
873198109eaSBingbu Cao 		if (j == CIO2_LOP_ENTRIES) {
874198109eaSBingbu Cao 			i++;
875198109eaSBingbu Cao 			j = 0;
876198109eaSBingbu Cao 		}
877198109eaSBingbu Cao 	}
878198109eaSBingbu Cao 
879198109eaSBingbu Cao 	b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
880198109eaSBingbu Cao 	return 0;
881198109eaSBingbu Cao fail:
882198109eaSBingbu Cao 	while (i--)
883198109eaSBingbu Cao 		dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
884198109eaSBingbu Cao 	return -ENOMEM;
885198109eaSBingbu Cao }
886198109eaSBingbu Cao 
/* Transfer buffer ownership to cio2 */
/*
 * vb2 buf_queue: install the buffer in the first free FBPT slot at or
 * after the DMA engine's read pointer. Runs with local interrupts
 * disabled to avoid racing the advancing hardware read pointer (see
 * the comment below).
 */
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_queue *q =
		container_of(vb->vb2_queue, struct cio2_queue, vbq);
	struct cio2_buffer *b = to_cio2_buffer(vb);
	struct cio2_fbpt_entry *entry;
	unsigned long flags;
	unsigned int i, j, next = q->bufs_next;
	int bufs_queued = atomic_inc_return(&q->bufs_queued);
	u32 fbpt_rp;

	dev_dbg(dev, "queue buffer %d\n", vb->index);

	/*
	 * This code queues the buffer to the CIO2 DMA engine, which starts
	 * running once streaming has started. It is possible that this code
	 * gets pre-empted due to increased CPU load. Upon this, the driver
	 * does not get an opportunity to queue new buffers to the CIO2 DMA
	 * engine. When the DMA engine encounters an FBPT entry without the
	 * VALID bit set, the DMA engine halts, which requires a restart of
	 * the DMA engine and sensor, to continue streaming.
	 * This is not desired and is highly unlikely given that there are
	 * 32 FBPT entries that the DMA engine needs to process, to run into
	 * an FBPT entry, without the VALID bit set. We try to mitigate this
	 * by disabling interrupts for the duration of this queueing.
	 */
	local_irq_save(flags);

	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
		   & CIO2_CDMARI_FBPT_RP_MASK;

	/*
	 * fbpt_rp is the fbpt entry that the dma is currently working
	 * on, but since it could jump to next entry at any time,
	 * assume that we might already be there.
	 */
	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	if (bufs_queued <= 1 || fbpt_rp == next)
		/* Buffers were drained */
		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		/*
		 * We have allocated CIO2_MAX_BUFFERS circularly for the
		 * hw, the user has requested N buffer queue. The driver
		 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
		 * user queues a buffer, there necessarily is a free buffer.
		 */
		if (!q->bufs[next]) {
			q->bufs[next] = b;
			entry = &q->fbpt[next * CIO2_MAX_LOPS];
			cio2_fbpt_entry_init_buf(cio2, b, entry);
			/* Slot armed; the rest can run with irqs enabled. */
			local_irq_restore(flags);
			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
			for (j = 0; j < vb->num_planes; j++)
				vb2_set_plane_payload(vb, j,
					q->format.plane_fmt[j].sizeimage);
			return;
		}

		dev_dbg(dev, "entry %i was full!\n", next);
		next = (next + 1) % CIO2_MAX_BUFFERS;
	}

	/* No free slot: undo the bufs_queued increment and fail the buffer. */
	local_irq_restore(flags);
	dev_err(dev, "error: all cio2 entries were full!\n");
	atomic_dec(&q->bufs_queued);
	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
961198109eaSBingbu Cao 
962198109eaSBingbu Cao /* Called when each buffer is freed */
cio2_vb2_buf_cleanup(struct vb2_buffer * vb)963198109eaSBingbu Cao static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
964198109eaSBingbu Cao {
965198109eaSBingbu Cao 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
966198109eaSBingbu Cao 	struct device *dev = &cio2->pci_dev->dev;
967198109eaSBingbu Cao 	struct cio2_buffer *b = to_cio2_buffer(vb);
968198109eaSBingbu Cao 	unsigned int i;
969198109eaSBingbu Cao 
970198109eaSBingbu Cao 	/* Free LOP table */
971198109eaSBingbu Cao 	for (i = 0; i < CIO2_MAX_LOPS; i++) {
972198109eaSBingbu Cao 		if (b->lop[i])
973198109eaSBingbu Cao 			dma_free_coherent(dev, PAGE_SIZE,
974198109eaSBingbu Cao 					  b->lop[i], b->lop_bus_addr[i]);
975198109eaSBingbu Cao 	}
976198109eaSBingbu Cao }
977198109eaSBingbu Cao 
/*
 * vb2 start_streaming: power up the device, start the media pipeline
 * and the CIO2 hardware, then finally start the sensor. On failure the
 * steps are unwound in reverse order and all queued buffers are given
 * back to vb2 in QUEUED state so streaming can be retried.
 */
static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct device *dev = &cio2->pci_dev->dev;
	int r;

	cio2->cur_queue = q;
	atomic_set(&q->frame_sequence, 0);

	r = pm_runtime_resume_and_get(dev);
	if (r < 0) {
		dev_info(dev, "failed to set power %d\n", r);
		return r;
	}

	r = video_device_pipeline_start(&q->vdev, &q->pipe);
	if (r)
		goto fail_pipeline;

	r = cio2_hw_init(cio2, q);
	if (r)
		goto fail_hw;

	/* Start streaming on sensor */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
	if (r)
		goto fail_csi2_subdev;

	cio2->streaming = true;

	return 0;

fail_csi2_subdev:
	cio2_hw_exit(cio2, q);
fail_hw:
	video_device_pipeline_stop(&q->vdev);
fail_pipeline:
	dev_dbg(dev, "failed to start streaming (%d)\n", r);
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
	pm_runtime_put(dev);

	return r;
}
1022198109eaSBingbu Cao 
/*
 * vb2 stop_streaming: stop the sensor first, then the CIO2 hardware,
 * wait for any in-flight interrupt handler via synchronize_irq()
 * before returning the remaining buffers as errored, and finally drop
 * the pipeline and runtime PM references.
 */
static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
{
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct device *dev = &cio2->pci_dev->dev;

	if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
		dev_err(dev, "failed to stop sensor streaming\n");

	cio2_hw_exit(cio2, q);
	synchronize_irq(cio2->pci_dev->irq);
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
	video_device_pipeline_stop(&q->vdev);
	pm_runtime_put(dev);
	cio2->streaming = false;
}
1039198109eaSBingbu Cao 
/* videobuf2 operations for the CIO2 capture queue. */
static const struct vb2_ops cio2_vb2_ops = {
	.buf_init = cio2_vb2_buf_init,
	.buf_queue = cio2_vb2_buf_queue,
	.buf_cleanup = cio2_vb2_buf_cleanup,
	.queue_setup = cio2_vb2_queue_setup,
	.start_streaming = cio2_vb2_start_streaming,
	.stop_streaming = cio2_vb2_stop_streaming,
	/* Queue-lock handling delegated to the stock vb2 helpers. */
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};
1050198109eaSBingbu Cao 
1051198109eaSBingbu Cao /**************** V4L2 interface ****************/
1052198109eaSBingbu Cao 
cio2_v4l2_querycap(struct file * file,void * fh,struct v4l2_capability * cap)1053198109eaSBingbu Cao static int cio2_v4l2_querycap(struct file *file, void *fh,
1054198109eaSBingbu Cao 			      struct v4l2_capability *cap)
1055198109eaSBingbu Cao {
1056198109eaSBingbu Cao 	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1057198109eaSBingbu Cao 	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1058198109eaSBingbu Cao 
1059198109eaSBingbu Cao 	return 0;
1060198109eaSBingbu Cao }
1061198109eaSBingbu Cao 
cio2_v4l2_enum_fmt(struct file * file,void * fh,struct v4l2_fmtdesc * f)1062198109eaSBingbu Cao static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1063198109eaSBingbu Cao 			      struct v4l2_fmtdesc *f)
1064198109eaSBingbu Cao {
1065198109eaSBingbu Cao 	if (f->index >= ARRAY_SIZE(formats))
1066198109eaSBingbu Cao 		return -EINVAL;
1067198109eaSBingbu Cao 
1068198109eaSBingbu Cao 	f->pixelformat = formats[f->index].fourcc;
1069198109eaSBingbu Cao 
1070198109eaSBingbu Cao 	return 0;
1071198109eaSBingbu Cao }
1072198109eaSBingbu Cao 
1073198109eaSBingbu Cao /* The format is validated in cio2_video_link_validate() */
cio2_v4l2_g_fmt(struct file * file,void * fh,struct v4l2_format * f)1074198109eaSBingbu Cao static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1075198109eaSBingbu Cao {
1076198109eaSBingbu Cao 	struct cio2_queue *q = file_to_cio2_queue(file);
1077198109eaSBingbu Cao 
1078198109eaSBingbu Cao 	f->fmt.pix_mp = q->format;
1079198109eaSBingbu Cao 
1080198109eaSBingbu Cao 	return 0;
1081198109eaSBingbu Cao }
1082198109eaSBingbu Cao 
cio2_v4l2_try_fmt(struct file * file,void * fh,struct v4l2_format * f)1083198109eaSBingbu Cao static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1084198109eaSBingbu Cao {
1085198109eaSBingbu Cao 	const struct ipu3_cio2_fmt *fmt;
1086198109eaSBingbu Cao 	struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1087198109eaSBingbu Cao 
1088198109eaSBingbu Cao 	fmt = cio2_find_format(&mpix->pixelformat, NULL);
1089198109eaSBingbu Cao 	if (!fmt)
1090198109eaSBingbu Cao 		fmt = &formats[0];
1091198109eaSBingbu Cao 
1092198109eaSBingbu Cao 	/* Only supports up to 4224x3136 */
1093198109eaSBingbu Cao 	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1094198109eaSBingbu Cao 		mpix->width = CIO2_IMAGE_MAX_WIDTH;
1095198109eaSBingbu Cao 	if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
1096198109eaSBingbu Cao 		mpix->height = CIO2_IMAGE_MAX_HEIGHT;
1097198109eaSBingbu Cao 
1098198109eaSBingbu Cao 	mpix->num_planes = 1;
1099198109eaSBingbu Cao 	mpix->pixelformat = fmt->fourcc;
1100198109eaSBingbu Cao 	mpix->colorspace = V4L2_COLORSPACE_RAW;
1101198109eaSBingbu Cao 	mpix->field = V4L2_FIELD_NONE;
1102198109eaSBingbu Cao 	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1103198109eaSBingbu Cao 	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1104198109eaSBingbu Cao 							mpix->height;
1105198109eaSBingbu Cao 
1106198109eaSBingbu Cao 	/* use default */
1107198109eaSBingbu Cao 	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1108198109eaSBingbu Cao 	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1109198109eaSBingbu Cao 	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1110198109eaSBingbu Cao 
1111198109eaSBingbu Cao 	return 0;
1112198109eaSBingbu Cao }
1113198109eaSBingbu Cao 
cio2_v4l2_s_fmt(struct file * file,void * fh,struct v4l2_format * f)1114198109eaSBingbu Cao static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1115198109eaSBingbu Cao {
1116198109eaSBingbu Cao 	struct cio2_queue *q = file_to_cio2_queue(file);
1117198109eaSBingbu Cao 
1118198109eaSBingbu Cao 	cio2_v4l2_try_fmt(file, fh, f);
1119198109eaSBingbu Cao 	q->format = f->fmt.pix_mp;
1120198109eaSBingbu Cao 
1121198109eaSBingbu Cao 	return 0;
1122198109eaSBingbu Cao }
1123198109eaSBingbu Cao 
1124198109eaSBingbu Cao static int
cio2_video_enum_input(struct file * file,void * fh,struct v4l2_input * input)1125198109eaSBingbu Cao cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1126198109eaSBingbu Cao {
1127198109eaSBingbu Cao 	if (input->index > 0)
1128198109eaSBingbu Cao 		return -EINVAL;
1129198109eaSBingbu Cao 
1130198109eaSBingbu Cao 	strscpy(input->name, "camera", sizeof(input->name));
1131198109eaSBingbu Cao 	input->type = V4L2_INPUT_TYPE_CAMERA;
1132198109eaSBingbu Cao 
1133198109eaSBingbu Cao 	return 0;
1134198109eaSBingbu Cao }
1135198109eaSBingbu Cao 
/* VIDIOC_G_INPUT: only input 0 exists. */
static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;
	return 0;
}
1143198109eaSBingbu Cao 
1144198109eaSBingbu Cao static int
cio2_video_s_input(struct file * file,void * fh,unsigned int input)1145198109eaSBingbu Cao cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1146198109eaSBingbu Cao {
1147198109eaSBingbu Cao 	return input == 0 ? 0 : -EINVAL;
1148198109eaSBingbu Cao }
1149198109eaSBingbu Cao 
/* V4L2 file operations; release/poll/mmap delegated to vb2 helpers. */
static const struct v4l2_file_operations cio2_v4l2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};
1158198109eaSBingbu Cao 
/* V4L2 ioctl handlers; buffer ioctls are delegated to the vb2 core. */
static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
	.vidioc_querycap = cio2_v4l2_querycap,
	.vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_enum_input = cio2_video_enum_input,
	.vidioc_g_input	= cio2_video_g_input,
	.vidioc_s_input	= cio2_video_s_input,
};
1178198109eaSBingbu Cao 
cio2_subdev_subscribe_event(struct v4l2_subdev * sd,struct v4l2_fh * fh,struct v4l2_event_subscription * sub)1179198109eaSBingbu Cao static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1180198109eaSBingbu Cao 				       struct v4l2_fh *fh,
1181198109eaSBingbu Cao 				       struct v4l2_event_subscription *sub)
1182198109eaSBingbu Cao {
1183198109eaSBingbu Cao 	if (sub->type != V4L2_EVENT_FRAME_SYNC)
1184198109eaSBingbu Cao 		return -EINVAL;
1185198109eaSBingbu Cao 
1186198109eaSBingbu Cao 	/* Line number. For now only zero accepted. */
1187198109eaSBingbu Cao 	if (sub->id != 0)
1188198109eaSBingbu Cao 		return -EINVAL;
1189198109eaSBingbu Cao 
1190198109eaSBingbu Cao 	return v4l2_event_subscribe(fh, sub, 0, NULL);
1191198109eaSBingbu Cao }
1192198109eaSBingbu Cao 
cio2_subdev_open(struct v4l2_subdev * sd,struct v4l2_subdev_fh * fh)1193198109eaSBingbu Cao static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1194198109eaSBingbu Cao {
1195198109eaSBingbu Cao 	struct v4l2_mbus_framefmt *format;
1196198109eaSBingbu Cao 	const struct v4l2_mbus_framefmt fmt_default = {
1197198109eaSBingbu Cao 		.width = 1936,
1198198109eaSBingbu Cao 		.height = 1096,
1199198109eaSBingbu Cao 		.code = formats[0].mbus_code,
1200198109eaSBingbu Cao 		.field = V4L2_FIELD_NONE,
1201198109eaSBingbu Cao 		.colorspace = V4L2_COLORSPACE_RAW,
1202198109eaSBingbu Cao 		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1203198109eaSBingbu Cao 		.quantization = V4L2_QUANTIZATION_DEFAULT,
1204198109eaSBingbu Cao 		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
1205198109eaSBingbu Cao 	};
1206198109eaSBingbu Cao 
1207198109eaSBingbu Cao 	/* Initialize try_fmt */
1208198109eaSBingbu Cao 	format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
1209198109eaSBingbu Cao 	*format = fmt_default;
1210198109eaSBingbu Cao 
1211198109eaSBingbu Cao 	/* same as sink */
1212198109eaSBingbu Cao 	format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
1213198109eaSBingbu Cao 	*format = fmt_default;
1214198109eaSBingbu Cao 
1215198109eaSBingbu Cao 	return 0;
1216198109eaSBingbu Cao }
1217198109eaSBingbu Cao 
/*
 * cio2_subdev_get_fmt - Handle get format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state (holds the per-file-handle try formats)
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *fmt)
{
	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);

	/* Serialize against cio2_subdev_set_fmt() updating subdev_fmt. */
	mutex_lock(&q->subdev_lock);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
		fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
							  fmt->pad);
	else
		fmt->format = q->subdev_fmt;

	mutex_unlock(&q->subdev_lock);

	return 0;
}
1243198109eaSBingbu Cao 
/*
 * cio2_subdev_set_fmt - Handle set format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state (holds the per-file-handle try formats)
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *fmt)
{
	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
	struct v4l2_mbus_framefmt *mbus;
	u32 mbus_code = fmt->format.code;
	unsigned int i;

	/*
	 * Only allow setting sink pad format;
	 * source always propagates from sink
	 */
	if (fmt->pad == CIO2_PAD_SOURCE)
		return cio2_subdev_get_fmt(sd, sd_state, fmt);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
		mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
	else
		mbus = &q->subdev_fmt;

	/* Fall back to the first supported code unless the request matches. */
	fmt->format.code = formats[0].mbus_code;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (formats[i].mbus_code == mbus_code) {
			fmt->format.code = mbus_code;
			break;
		}
	}

	/* Clamp dimensions to the hardware maximum; interlacing unsupported. */
	fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
	fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
	fmt->format.field = V4L2_FIELD_NONE;

	mutex_lock(&q->subdev_lock);
	*mbus = fmt->format;
	mutex_unlock(&q->subdev_lock);

	return 0;
}
1291198109eaSBingbu Cao 
cio2_subdev_enum_mbus_code(struct v4l2_subdev * sd,struct v4l2_subdev_state * sd_state,struct v4l2_subdev_mbus_code_enum * code)1292198109eaSBingbu Cao static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1293198109eaSBingbu Cao 				      struct v4l2_subdev_state *sd_state,
1294198109eaSBingbu Cao 				      struct v4l2_subdev_mbus_code_enum *code)
1295198109eaSBingbu Cao {
1296198109eaSBingbu Cao 	if (code->index >= ARRAY_SIZE(formats))
1297198109eaSBingbu Cao 		return -EINVAL;
1298198109eaSBingbu Cao 
1299198109eaSBingbu Cao 	code->code = formats[code->index].mbus_code;
1300198109eaSBingbu Cao 	return 0;
1301198109eaSBingbu Cao }
1302198109eaSBingbu Cao 
cio2_subdev_link_validate_get_format(struct media_pad * pad,struct v4l2_subdev_format * fmt)1303198109eaSBingbu Cao static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1304198109eaSBingbu Cao 						struct v4l2_subdev_format *fmt)
1305198109eaSBingbu Cao {
1306198109eaSBingbu Cao 	if (is_media_entity_v4l2_subdev(pad->entity)) {
1307198109eaSBingbu Cao 		struct v4l2_subdev *sd =
1308198109eaSBingbu Cao 			media_entity_to_v4l2_subdev(pad->entity);
1309198109eaSBingbu Cao 
1310198109eaSBingbu Cao 		memset(fmt, 0, sizeof(*fmt));
1311198109eaSBingbu Cao 		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1312198109eaSBingbu Cao 		fmt->pad = pad->index;
1313198109eaSBingbu Cao 		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1314198109eaSBingbu Cao 	}
1315198109eaSBingbu Cao 
1316198109eaSBingbu Cao 	return -EINVAL;
1317198109eaSBingbu Cao }
1318198109eaSBingbu Cao 
/*
 * Validate the subdev -> video node link: the remote source pad's active
 * format must match the capture format configured on the video node.
 */
static int cio2_video_link_validate(struct media_link *link)
{
	struct media_entity *entity = link->sink->entity;
	struct video_device *vd = media_entity_to_video_device(entity);
	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
	struct cio2_device *cio2 = video_get_drvdata(vd);
	struct device *dev = &cio2->pci_dev->dev;
	struct v4l2_subdev_format source_fmt;
	int ret;

	if (!media_pad_remote_pad_first(entity->pads)) {
		dev_info(dev, "video node %s pad not connected\n", vd->name);
		return -ENOTCONN;
	}

	/* A source that cannot report its format is not treated as an error. */
	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
	if (ret < 0)
		return 0;

	if (source_fmt.format.width != q->format.width ||
	    source_fmt.format.height != q->format.height) {
		/*
		 * NOTE(review): q->format is what validation expects, yet it
		 * is printed first while source_fmt is labelled "expected" —
		 * the argument order looks swapped; confirm intended output.
		 */
		dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
			q->format.width, q->format.height,
			source_fmt.format.width, source_fmt.format.height);
		return -EINVAL;
	}

	/* The pixel format must be producible from the source's mbus code. */
	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
		return -EINVAL;

	return 0;
}
1351198109eaSBingbu Cao 
/* Core subdev ops: only frame sync event (un)subscription is provided. */
static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
	.subscribe_event = cio2_subdev_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
1356198109eaSBingbu Cao 
/* Internal ops: initialize per-handle try formats on subdev node open. */
static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
	.open = cio2_subdev_open,
};
1360198109eaSBingbu Cao 
/* Pad-level subdev operations (format get/set/enumeration). */
static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
	.link_validate = v4l2_subdev_link_validate_default,
	.get_fmt = cio2_subdev_get_fmt,
	.set_fmt = cio2_subdev_set_fmt,
	.enum_mbus_code = cio2_subdev_enum_mbus_code,
};
1367198109eaSBingbu Cao 
/* Top-level subdev operations table. */
static const struct v4l2_subdev_ops cio2_subdev_ops = {
	.core = &cio2_subdev_core_ops,
	.pad = &cio2_subdev_pad_ops,
};
1372198109eaSBingbu Cao 
1373198109eaSBingbu Cao /******* V4L2 sub-device asynchronous registration callbacks***********/
1374198109eaSBingbu Cao 
/* Async connection bookkeeping plus the CSI-2 bus info parsed from fwnode. */
struct sensor_async_subdev {
	struct v4l2_async_connection asd;
	struct csi2_bus_info csi2;
};

/* Recover the wrapping sensor_async_subdev from its embedded connection. */
#define to_sensor_asd(__asd)	\
	container_of_const(__asd, struct sensor_async_subdev, asd)
1382198109eaSBingbu Cao 
/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *sd,
			       struct v4l2_async_connection *asd)
{
	struct cio2_device *cio2 = to_cio2_device(notifier);
	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
	struct cio2_queue *q;
	int ret;

	/* Only one sensor may be bound to each CSI-2 port. */
	if (cio2->queue[s_asd->csi2.port].sensor)
		return -EBUSY;

	/*
	 * Have the IPU bridge instantiate the sensor's VCM device —
	 * presumably its lens driver; see ipu-bridge for details.
	 */
	ret = ipu_bridge_instantiate_vcm(sd->dev);
	if (ret)
		return ret;

	q = &cio2->queue[s_asd->csi2.port];

	/* Record the bus info and map the per-port CSI-2 register block. */
	q->csi2 = s_asd->csi2;
	q->sensor = sd;
	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);

	return 0;
}
1408198109eaSBingbu Cao 
/* The .unbind callback */
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
				 struct v4l2_subdev *sd,
				 struct v4l2_async_connection *asd)
{
	struct cio2_device *cio2 = to_cio2_device(notifier);
	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);

	/* Drop the sensor reference so the port can be bound again. */
	cio2->queue[s_asd->csi2.port].sensor = NULL;
}
1419198109eaSBingbu Cao 
/* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct cio2_device *cio2 = to_cio2_device(notifier);
	struct device *dev = &cio2->pci_dev->dev;
	struct sensor_async_subdev *s_asd;
	struct v4l2_async_connection *asd;
	struct cio2_queue *q;
	int ret;

	/* Wire every bound sensor to the CIO2 subdev of its port. */
	list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
		s_asd = to_sensor_asd(asd);
		q = &cio2->queue[s_asd->csi2.port];

		/* Locate the sensor source pad that matches the endpoint. */
		ret = media_entity_get_fwnode_pad(&q->sensor->entity,
						  s_asd->asd.match.fwnode,
						  MEDIA_PAD_FL_SOURCE);
		if (ret < 0) {
			dev_err(dev, "no pad for endpoint %pfw (%d)\n",
				s_asd->asd.match.fwnode, ret);
			return ret;
		}

		/* Link sensor source pad (index = ret) to the CIO2 sink. */
		ret = media_create_pad_link(&q->sensor->entity, ret,
					    &q->subdev.entity, CIO2_PAD_SINK,
					    0);
		if (ret) {
			dev_err(dev, "failed to create link for %s (endpoint %pfw, error %d)\n",
				q->sensor->name, s_asd->asd.match.fwnode, ret);
			return ret;
		}
	}

	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
}
1455198109eaSBingbu Cao 
/* Async notifier callbacks for sensor discovery. */
static const struct v4l2_async_notifier_operations cio2_async_ops = {
	.bound = cio2_notifier_bound,
	.unbind = cio2_notifier_unbind,
	.complete = cio2_notifier_complete,
};
1461198109eaSBingbu Cao 
/*
 * Parse one CSI-2 endpoint per port from firmware, add each remote sensor
 * as an async connection, then register the notifier.
 */
static int cio2_parse_firmware(struct cio2_device *cio2)
{
	struct device *dev = &cio2->pci_dev->dev;
	unsigned int i;
	int ret;

	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		struct v4l2_fwnode_endpoint vep = {
			.bus_type = V4L2_MBUS_CSI2_DPHY
		};
		struct sensor_async_subdev *s_asd;
		struct fwnode_handle *ep;

		/* A port without an endpoint simply has no sensor attached. */
		ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
						FWNODE_GRAPH_ENDPOINT_NEXT);
		if (!ep)
			continue;

		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
		if (ret)
			goto err_parse;

		s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
							struct
							sensor_async_subdev);
		if (IS_ERR(s_asd)) {
			ret = PTR_ERR(s_asd);
			goto err_parse;
		}

		/* Stash port and lane count for cio2_notifier_bound(). */
		s_asd->csi2.port = vep.base.port;
		s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;

		/* Balance fwnode_graph_get_endpoint_by_id() above. */
		fwnode_handle_put(ep);

		continue;

err_parse:
		fwnode_handle_put(ep);
		return ret;
	}

	/*
	 * Proceed even without sensors connected to allow the device to
	 * suspend.
	 */
	cio2->notifier.ops = &cio2_async_ops;
	ret = v4l2_async_nf_register(&cio2->notifier);
	if (ret)
		dev_err(dev, "failed to register async notifier : %d\n", ret);

	return ret;
}
1515198109eaSBingbu Cao 
/**************** Queue initialization ****************/
/* Subdev entities use the default subdev link validation. */
static const struct media_entity_operations cio2_media_ops = {
	.link_validate = v4l2_subdev_link_validate,
};

/* Video nodes validate links against the configured capture format. */
static const struct media_entity_operations cio2_video_entity_ops = {
	.link_validate = cio2_video_link_validate,
};
1524198109eaSBingbu Cao 
/*
 * cio2_queue_init - set up one capture queue: default formats, FBPT, media
 * entities, CIO2 subdev, vb2 queue and video node, plus the immutable
 * subdev -> video-node link.  On failure, everything created so far is
 * undone in reverse order via the fail_* labels.
 */
static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const u32 default_width = 1936;
	static const u32 default_height = 1096;
	const struct ipu3_cio2_fmt dflt_fmt = formats[0];
	struct device *dev = &cio2->pci_dev->dev;
	struct video_device *vdev = &q->vdev;
	struct vb2_queue *vbq = &q->vbq;
	struct v4l2_subdev *subdev = &q->subdev;
	struct v4l2_mbus_framefmt *fmt;
	int r;

	/* Initialize miscellaneous variables */
	mutex_init(&q->lock);
	mutex_init(&q->subdev_lock);

	/* Initialize formats to default values */
	fmt = &q->subdev_fmt;
	fmt->width = default_width;
	fmt->height = default_height;
	fmt->code = dflt_fmt.mbus_code;
	fmt->field = V4L2_FIELD_NONE;

	q->format.width = default_width;
	q->format.height = default_height;
	q->format.pixelformat = dflt_fmt.fourcc;
	q->format.colorspace = V4L2_COLORSPACE_RAW;
	q->format.field = V4L2_FIELD_NONE;
	q->format.num_planes = 1;
	q->format.plane_fmt[0].bytesperline =
				cio2_bytesperline(q->format.width);
	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
						q->format.height;

	/* Initialize fbpt */
	r = cio2_fbpt_init(cio2, q);
	if (r)
		goto fail_fbpt;

	/* Initialize media entities */
	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
		MEDIA_PAD_FL_MUST_CONNECT;
	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
	subdev->entity.ops = &cio2_media_ops;
	subdev->internal_ops = &cio2_subdev_internal_ops;
	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
	if (r) {
		dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
		goto fail_subdev_media_entity;
	}

	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
	vdev->entity.ops = &cio2_video_entity_ops;
	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
	if (r) {
		dev_err(dev, "failed initialize videodev media entity (%d)\n",
			r);
		goto fail_vdev_media_entity;
	}

	/* Initialize subdev */
	v4l2_subdev_init(subdev, &cio2_subdev_ops);
	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	subdev->owner = THIS_MODULE;
	/* Name subdevs by their index within the queue array. */
	snprintf(subdev->name, sizeof(subdev->name),
		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	v4l2_set_subdevdata(subdev, cio2);
	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
	if (r) {
		dev_err(dev, "failed initialize subdev (%d)\n", r);
		goto fail_subdev;
	}

	/* Initialize vbq */
	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
	vbq->ops = &cio2_vb2_ops;
	vbq->mem_ops = &vb2_dma_sg_memops;
	vbq->buf_struct_size = sizeof(struct cio2_buffer);
	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	vbq->min_buffers_needed = 1;
	vbq->drv_priv = cio2;
	vbq->lock = &q->lock;
	r = vb2_queue_init(vbq);
	if (r) {
		dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
		goto fail_subdev;
	}

	/* Initialize vdev */
	snprintf(vdev->name, sizeof(vdev->name),
		 "%s %td", CIO2_NAME, q - cio2->queue);
	vdev->release = video_device_release_empty;
	vdev->fops = &cio2_v4l2_fops;
	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
	vdev->lock = &cio2->lock;
	vdev->v4l2_dev = &cio2->v4l2_dev;
	vdev->queue = &q->vbq;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
	video_set_drvdata(vdev, cio2);
	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (r) {
		dev_err(dev, "failed to register video device (%d)\n", r);
		goto fail_vdev;
	}

	/* Create link from CIO2 subdev to output node */
	r = media_create_pad_link(
		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
	if (r)
		goto fail_link;

	return 0;

fail_link:
	vb2_video_unregister_device(&q->vdev);
fail_vdev:
	v4l2_device_unregister_subdev(subdev);
fail_subdev:
	media_entity_cleanup(&vdev->entity);
fail_vdev_media_entity:
	media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
	cio2_fbpt_exit(q, dev);
fail_fbpt:
	mutex_destroy(&q->subdev_lock);
	mutex_destroy(&q->lock);

	return r;
}
1657198109eaSBingbu Cao 
/* Tear down a queue; mirrors cio2_queue_init() in reverse order. */
static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	vb2_video_unregister_device(&q->vdev);
	media_entity_cleanup(&q->vdev.entity);
	v4l2_device_unregister_subdev(&q->subdev);
	media_entity_cleanup(&q->subdev.entity);
	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
	mutex_destroy(&q->subdev_lock);
	mutex_destroy(&q->lock);
}
1668198109eaSBingbu Cao 
cio2_queues_init(struct cio2_device * cio2)1669198109eaSBingbu Cao static int cio2_queues_init(struct cio2_device *cio2)
1670198109eaSBingbu Cao {
1671198109eaSBingbu Cao 	int i, r;
1672198109eaSBingbu Cao 
1673198109eaSBingbu Cao 	for (i = 0; i < CIO2_QUEUES; i++) {
1674198109eaSBingbu Cao 		r = cio2_queue_init(cio2, &cio2->queue[i]);
1675198109eaSBingbu Cao 		if (r)
1676198109eaSBingbu Cao 			break;
1677198109eaSBingbu Cao 	}
1678198109eaSBingbu Cao 
1679198109eaSBingbu Cao 	if (i == CIO2_QUEUES)
1680198109eaSBingbu Cao 		return 0;
1681198109eaSBingbu Cao 
1682198109eaSBingbu Cao 	for (i--; i >= 0; i--)
1683198109eaSBingbu Cao 		cio2_queue_exit(cio2, &cio2->queue[i]);
1684198109eaSBingbu Cao 
1685198109eaSBingbu Cao 	return r;
1686198109eaSBingbu Cao }
1687198109eaSBingbu Cao 
cio2_queues_exit(struct cio2_device * cio2)1688198109eaSBingbu Cao static void cio2_queues_exit(struct cio2_device *cio2)
1689198109eaSBingbu Cao {
1690198109eaSBingbu Cao 	unsigned int i;
1691198109eaSBingbu Cao 
1692198109eaSBingbu Cao 	for (i = 0; i < CIO2_QUEUES; i++)
1693198109eaSBingbu Cao 		cio2_queue_exit(cio2, &cio2->queue[i]);
1694198109eaSBingbu Cao }
1695198109eaSBingbu Cao 
cio2_check_fwnode_graph(struct fwnode_handle * fwnode)1696198109eaSBingbu Cao static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
1697198109eaSBingbu Cao {
1698198109eaSBingbu Cao 	struct fwnode_handle *endpoint;
1699198109eaSBingbu Cao 
1700198109eaSBingbu Cao 	if (IS_ERR_OR_NULL(fwnode))
1701198109eaSBingbu Cao 		return -EINVAL;
1702198109eaSBingbu Cao 
1703198109eaSBingbu Cao 	endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
1704198109eaSBingbu Cao 	if (endpoint) {
1705198109eaSBingbu Cao 		fwnode_handle_put(endpoint);
1706198109eaSBingbu Cao 		return 0;
1707198109eaSBingbu Cao 	}
1708198109eaSBingbu Cao 
1709198109eaSBingbu Cao 	return cio2_check_fwnode_graph(fwnode->secondary);
1710198109eaSBingbu Cao }
1711198109eaSBingbu Cao 
1712198109eaSBingbu Cao /**************** PCI interface ****************/
1713198109eaSBingbu Cao 
cio2_pci_probe(struct pci_dev * pci_dev,const struct pci_device_id * id)1714198109eaSBingbu Cao static int cio2_pci_probe(struct pci_dev *pci_dev,
1715198109eaSBingbu Cao 			  const struct pci_device_id *id)
1716198109eaSBingbu Cao {
1717198109eaSBingbu Cao 	struct device *dev = &pci_dev->dev;
1718198109eaSBingbu Cao 	struct fwnode_handle *fwnode = dev_fwnode(dev);
1719198109eaSBingbu Cao 	struct cio2_device *cio2;
1720198109eaSBingbu Cao 	int r;
1721198109eaSBingbu Cao 
1722198109eaSBingbu Cao 	/*
1723198109eaSBingbu Cao 	 * On some platforms no connections to sensors are defined in firmware,
1724198109eaSBingbu Cao 	 * if the device has no endpoints then we can try to build those as
1725198109eaSBingbu Cao 	 * software_nodes parsed from SSDB.
1726198109eaSBingbu Cao 	 */
1727198109eaSBingbu Cao 	r = cio2_check_fwnode_graph(fwnode);
1728198109eaSBingbu Cao 	if (r) {
1729198109eaSBingbu Cao 		if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
1730198109eaSBingbu Cao 			dev_err(dev, "fwnode graph has no endpoints connected\n");
1731198109eaSBingbu Cao 			return -EINVAL;
1732198109eaSBingbu Cao 		}
1733198109eaSBingbu Cao 
1734998af180SHans de Goede 		r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
1735198109eaSBingbu Cao 		if (r)
1736198109eaSBingbu Cao 			return r;
1737198109eaSBingbu Cao 	}
1738198109eaSBingbu Cao 
1739198109eaSBingbu Cao 	cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
1740198109eaSBingbu Cao 	if (!cio2)
1741198109eaSBingbu Cao 		return -ENOMEM;
1742198109eaSBingbu Cao 	cio2->pci_dev = pci_dev;
1743198109eaSBingbu Cao 
1744198109eaSBingbu Cao 	r = pcim_enable_device(pci_dev);
1745198109eaSBingbu Cao 	if (r) {
1746198109eaSBingbu Cao 		dev_err(dev, "failed to enable device (%d)\n", r);
1747198109eaSBingbu Cao 		return r;
1748198109eaSBingbu Cao 	}
1749198109eaSBingbu Cao 
1750198109eaSBingbu Cao 	dev_info(dev, "device 0x%x (rev: 0x%x)\n",
1751198109eaSBingbu Cao 		 pci_dev->device, pci_dev->revision);
1752198109eaSBingbu Cao 
1753198109eaSBingbu Cao 	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1754198109eaSBingbu Cao 	if (r) {
1755198109eaSBingbu Cao 		dev_err(dev, "failed to remap I/O memory (%d)\n", r);
1756198109eaSBingbu Cao 		return -ENODEV;
1757198109eaSBingbu Cao 	}
1758198109eaSBingbu Cao 
1759198109eaSBingbu Cao 	cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
1760198109eaSBingbu Cao 
1761198109eaSBingbu Cao 	pci_set_drvdata(pci_dev, cio2);
1762198109eaSBingbu Cao 
1763198109eaSBingbu Cao 	pci_set_master(pci_dev);
1764198109eaSBingbu Cao 
1765198109eaSBingbu Cao 	r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
1766198109eaSBingbu Cao 	if (r) {
1767198109eaSBingbu Cao 		dev_err(dev, "failed to set DMA mask (%d)\n", r);
1768198109eaSBingbu Cao 		return -ENODEV;
1769198109eaSBingbu Cao 	}
1770198109eaSBingbu Cao 
1771198109eaSBingbu Cao 	r = pci_enable_msi(pci_dev);
1772198109eaSBingbu Cao 	if (r) {
1773198109eaSBingbu Cao 		dev_err(dev, "failed to enable MSI (%d)\n", r);
1774198109eaSBingbu Cao 		return r;
1775198109eaSBingbu Cao 	}
1776198109eaSBingbu Cao 
1777198109eaSBingbu Cao 	r = cio2_fbpt_init_dummy(cio2);
1778198109eaSBingbu Cao 	if (r)
1779198109eaSBingbu Cao 		return r;
1780198109eaSBingbu Cao 
1781198109eaSBingbu Cao 	mutex_init(&cio2->lock);
1782198109eaSBingbu Cao 
1783198109eaSBingbu Cao 	cio2->media_dev.dev = dev;
1784198109eaSBingbu Cao 	strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1785198109eaSBingbu Cao 		sizeof(cio2->media_dev.model));
1786198109eaSBingbu Cao 	cio2->media_dev.hw_revision = 0;
1787198109eaSBingbu Cao 
1788198109eaSBingbu Cao 	media_device_init(&cio2->media_dev);
1789198109eaSBingbu Cao 	r = media_device_register(&cio2->media_dev);
1790198109eaSBingbu Cao 	if (r < 0)
1791198109eaSBingbu Cao 		goto fail_mutex_destroy;
1792198109eaSBingbu Cao 
1793198109eaSBingbu Cao 	cio2->v4l2_dev.mdev = &cio2->media_dev;
1794198109eaSBingbu Cao 	r = v4l2_device_register(dev, &cio2->v4l2_dev);
1795198109eaSBingbu Cao 	if (r) {
1796198109eaSBingbu Cao 		dev_err(dev, "failed to register V4L2 device (%d)\n", r);
1797198109eaSBingbu Cao 		goto fail_media_device_unregister;
1798198109eaSBingbu Cao 	}
1799198109eaSBingbu Cao 
1800198109eaSBingbu Cao 	r = cio2_queues_init(cio2);
1801198109eaSBingbu Cao 	if (r)
1802198109eaSBingbu Cao 		goto fail_v4l2_device_unregister;
1803198109eaSBingbu Cao 
1804b8ec754aSSakari Ailus 	v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
1805198109eaSBingbu Cao 
1806198109eaSBingbu Cao 	r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
1807198109eaSBingbu Cao 			     CIO2_NAME, cio2);
1808198109eaSBingbu Cao 	if (r) {
1809198109eaSBingbu Cao 		dev_err(dev, "failed to request IRQ (%d)\n", r);
1810198109eaSBingbu Cao 		goto fail_clean_notifier;
1811198109eaSBingbu Cao 	}
1812198109eaSBingbu Cao 
1813*db572c39SSakari Ailus 	/* Register notifier for subdevices we care */
1814*db572c39SSakari Ailus 	r = cio2_parse_firmware(cio2);
1815*db572c39SSakari Ailus 	if (r)
1816*db572c39SSakari Ailus 		goto fail_clean_notifier;
1817*db572c39SSakari Ailus 
1818198109eaSBingbu Cao 	pm_runtime_put_noidle(dev);
1819198109eaSBingbu Cao 	pm_runtime_allow(dev);
1820198109eaSBingbu Cao 
1821198109eaSBingbu Cao 	return 0;
1822198109eaSBingbu Cao 
1823198109eaSBingbu Cao fail_clean_notifier:
1824198109eaSBingbu Cao 	v4l2_async_nf_unregister(&cio2->notifier);
1825198109eaSBingbu Cao 	v4l2_async_nf_cleanup(&cio2->notifier);
1826198109eaSBingbu Cao 	cio2_queues_exit(cio2);
1827198109eaSBingbu Cao fail_v4l2_device_unregister:
1828198109eaSBingbu Cao 	v4l2_device_unregister(&cio2->v4l2_dev);
1829198109eaSBingbu Cao fail_media_device_unregister:
1830198109eaSBingbu Cao 	media_device_unregister(&cio2->media_dev);
1831198109eaSBingbu Cao 	media_device_cleanup(&cio2->media_dev);
1832198109eaSBingbu Cao fail_mutex_destroy:
1833198109eaSBingbu Cao 	mutex_destroy(&cio2->lock);
1834198109eaSBingbu Cao 	cio2_fbpt_exit_dummy(cio2);
1835198109eaSBingbu Cao 
1836198109eaSBingbu Cao 	return r;
1837198109eaSBingbu Cao }
1838198109eaSBingbu Cao 
static void cio2_pci_remove(struct pci_dev *pci_dev)
{
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);

	/*
	 * Tear down in (roughly) the reverse order of probe: make the
	 * device unreachable from userspace first (media device), then
	 * drop the async notifier, queues, dummy FBPT and the remaining
	 * V4L2/media core state.
	 */
	media_device_unregister(&cio2->media_dev);
	v4l2_async_nf_unregister(&cio2->notifier);
	v4l2_async_nf_cleanup(&cio2->notifier);
	cio2_queues_exit(cio2);
	cio2_fbpt_exit_dummy(cio2);
	v4l2_device_unregister(&cio2->v4l2_dev);
	media_device_cleanup(&cio2->media_dev);
	mutex_destroy(&cio2->lock);

	/* Balance the pm_runtime_put_noidle()/pm_runtime_allow() in probe. */
	pm_runtime_forbid(&pci_dev->dev);
	pm_runtime_get_noresume(&pci_dev->dev);
}
1855198109eaSBingbu Cao 
cio2_runtime_suspend(struct device * dev)1856198109eaSBingbu Cao static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1857198109eaSBingbu Cao {
1858198109eaSBingbu Cao 	struct pci_dev *pci_dev = to_pci_dev(dev);
1859198109eaSBingbu Cao 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1860198109eaSBingbu Cao 	void __iomem *const base = cio2->base;
1861198109eaSBingbu Cao 	u16 pm;
1862198109eaSBingbu Cao 
1863198109eaSBingbu Cao 	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1864198109eaSBingbu Cao 	dev_dbg(dev, "cio2 runtime suspend.\n");
1865198109eaSBingbu Cao 
1866198109eaSBingbu Cao 	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1867198109eaSBingbu Cao 	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1868198109eaSBingbu Cao 	pm |= CIO2_PMCSR_D3;
1869198109eaSBingbu Cao 	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1870198109eaSBingbu Cao 
1871198109eaSBingbu Cao 	return 0;
1872198109eaSBingbu Cao }
1873198109eaSBingbu Cao 
cio2_runtime_resume(struct device * dev)1874198109eaSBingbu Cao static int __maybe_unused cio2_runtime_resume(struct device *dev)
1875198109eaSBingbu Cao {
1876198109eaSBingbu Cao 	struct pci_dev *pci_dev = to_pci_dev(dev);
1877198109eaSBingbu Cao 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1878198109eaSBingbu Cao 	void __iomem *const base = cio2->base;
1879198109eaSBingbu Cao 	u16 pm;
1880198109eaSBingbu Cao 
1881198109eaSBingbu Cao 	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1882198109eaSBingbu Cao 	dev_dbg(dev, "cio2 runtime resume.\n");
1883198109eaSBingbu Cao 
1884198109eaSBingbu Cao 	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1885198109eaSBingbu Cao 	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1886198109eaSBingbu Cao 	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1887198109eaSBingbu Cao 
1888198109eaSBingbu Cao 	return 0;
1889198109eaSBingbu Cao }
1890198109eaSBingbu Cao 
1891198109eaSBingbu Cao /*
1892198109eaSBingbu Cao  * Helper function to advance all the elements of a circular buffer by "start"
1893198109eaSBingbu Cao  * positions
1894198109eaSBingbu Cao  */
arrange(void * ptr,size_t elem_size,size_t elems,size_t start)1895198109eaSBingbu Cao static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1896198109eaSBingbu Cao {
1897198109eaSBingbu Cao 	struct {
1898198109eaSBingbu Cao 		size_t begin, end;
1899198109eaSBingbu Cao 	} arr[2] = {
1900198109eaSBingbu Cao 		{ 0, start - 1 },
1901198109eaSBingbu Cao 		{ start, elems - 1 },
1902198109eaSBingbu Cao 	};
1903198109eaSBingbu Cao 
1904198109eaSBingbu Cao #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1905198109eaSBingbu Cao 
1906198109eaSBingbu Cao 	/* Loop as long as we have out-of-place entries */
1907198109eaSBingbu Cao 	while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1908198109eaSBingbu Cao 		size_t size0, i;
1909198109eaSBingbu Cao 
1910198109eaSBingbu Cao 		/*
1911198109eaSBingbu Cao 		 * Find the number of entries that can be arranged on this
1912198109eaSBingbu Cao 		 * iteration.
1913198109eaSBingbu Cao 		 */
1914198109eaSBingbu Cao 		size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1915198109eaSBingbu Cao 
1916198109eaSBingbu Cao 		/* Swap the entries in two parts of the array. */
1917198109eaSBingbu Cao 		for (i = 0; i < size0; i++) {
1918198109eaSBingbu Cao 			u8 *d = ptr + elem_size * (arr[1].begin + i);
1919198109eaSBingbu Cao 			u8 *s = ptr + elem_size * (arr[0].begin + i);
1920198109eaSBingbu Cao 			size_t j;
1921198109eaSBingbu Cao 
1922198109eaSBingbu Cao 			for (j = 0; j < elem_size; j++)
1923198109eaSBingbu Cao 				swap(d[j], s[j]);
1924198109eaSBingbu Cao 		}
1925198109eaSBingbu Cao 
1926198109eaSBingbu Cao 		if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
1927198109eaSBingbu Cao 			/* The end of the first array remains unarranged. */
1928198109eaSBingbu Cao 			arr[0].begin += size0;
1929198109eaSBingbu Cao 		} else {
1930198109eaSBingbu Cao 			/*
1931198109eaSBingbu Cao 			 * The first array is fully arranged so we proceed
1932198109eaSBingbu Cao 			 * handling the next one.
1933198109eaSBingbu Cao 			 */
1934198109eaSBingbu Cao 			arr[0].begin = arr[1].begin;
1935198109eaSBingbu Cao 			arr[0].end = arr[1].begin + size0 - 1;
1936198109eaSBingbu Cao 			arr[1].begin += size0;
1937198109eaSBingbu Cao 		}
1938198109eaSBingbu Cao 	}
1939198109eaSBingbu Cao }
1940198109eaSBingbu Cao 
cio2_fbpt_rearrange(struct cio2_device * cio2,struct cio2_queue * q)1941198109eaSBingbu Cao static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1942198109eaSBingbu Cao {
1943198109eaSBingbu Cao 	unsigned int i, j;
1944198109eaSBingbu Cao 
1945198109eaSBingbu Cao 	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1946198109eaSBingbu Cao 		i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1947198109eaSBingbu Cao 		if (q->bufs[j])
1948198109eaSBingbu Cao 			break;
1949198109eaSBingbu Cao 
1950198109eaSBingbu Cao 	if (i == CIO2_MAX_BUFFERS)
1951198109eaSBingbu Cao 		return;
1952198109eaSBingbu Cao 
1953198109eaSBingbu Cao 	if (j) {
1954198109eaSBingbu Cao 		arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1955198109eaSBingbu Cao 			CIO2_MAX_BUFFERS, j);
1956198109eaSBingbu Cao 		arrange(q->bufs, sizeof(struct cio2_buffer *),
1957198109eaSBingbu Cao 			CIO2_MAX_BUFFERS, j);
1958198109eaSBingbu Cao 	}
1959198109eaSBingbu Cao 
1960198109eaSBingbu Cao 	/*
1961198109eaSBingbu Cao 	 * DMA clears the valid bit when accessing the buffer.
1962198109eaSBingbu Cao 	 * When stopping stream in suspend callback, some of the buffers
1963198109eaSBingbu Cao 	 * may be in invalid state. After resume, when DMA meets the invalid
1964198109eaSBingbu Cao 	 * buffer, it will halt and stop receiving new data.
1965198109eaSBingbu Cao 	 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
1966198109eaSBingbu Cao 	 */
1967198109eaSBingbu Cao 	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1968198109eaSBingbu Cao 		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1969198109eaSBingbu Cao }
1970198109eaSBingbu Cao 
static int __maybe_unused cio2_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	struct cio2_queue *q = cio2->cur_queue;
	int r;

	dev_dbg(dev, "cio2 suspend\n");
	/* Nothing to quiesce if no capture is in progress. */
	if (!cio2->streaming)
		return 0;

	/* Stop stream */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
	if (r) {
		dev_err(dev, "failed to stop sensor streaming\n");
		return r;
	}

	/* Shut the CSI-2 receiver down only after the sensor is quiet. */
	cio2_hw_exit(cio2, q);
	/* Make sure no interrupt handler is still touching the hardware. */
	synchronize_irq(pci_dev->irq);

	pm_runtime_force_suspend(dev);

	/*
	 * Upon resume, hw starts to process the fbpt entries from beginning,
	 * so relocate the queued buffs to the fbpt head before suspend.
	 */
	cio2_fbpt_rearrange(cio2, q);
	q->bufs_first = 0;
	q->bufs_next = 0;

	return 0;
}
2004198109eaSBingbu Cao 
cio2_resume(struct device * dev)2005198109eaSBingbu Cao static int __maybe_unused cio2_resume(struct device *dev)
2006198109eaSBingbu Cao {
2007198109eaSBingbu Cao 	struct cio2_device *cio2 = dev_get_drvdata(dev);
2008198109eaSBingbu Cao 	struct cio2_queue *q = cio2->cur_queue;
2009198109eaSBingbu Cao 	int r;
2010198109eaSBingbu Cao 
2011198109eaSBingbu Cao 	dev_dbg(dev, "cio2 resume\n");
2012198109eaSBingbu Cao 	if (!cio2->streaming)
2013198109eaSBingbu Cao 		return 0;
2014198109eaSBingbu Cao 	/* Start stream */
2015198109eaSBingbu Cao 	r = pm_runtime_force_resume(dev);
2016198109eaSBingbu Cao 	if (r < 0) {
2017198109eaSBingbu Cao 		dev_err(dev, "failed to set power %d\n", r);
2018198109eaSBingbu Cao 		return r;
2019198109eaSBingbu Cao 	}
2020198109eaSBingbu Cao 
2021198109eaSBingbu Cao 	r = cio2_hw_init(cio2, q);
2022198109eaSBingbu Cao 	if (r) {
2023198109eaSBingbu Cao 		dev_err(dev, "fail to init cio2 hw\n");
2024198109eaSBingbu Cao 		return r;
2025198109eaSBingbu Cao 	}
2026198109eaSBingbu Cao 
2027198109eaSBingbu Cao 	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
2028198109eaSBingbu Cao 	if (r) {
2029198109eaSBingbu Cao 		dev_err(dev, "fail to start sensor streaming\n");
2030198109eaSBingbu Cao 		cio2_hw_exit(cio2, q);
2031198109eaSBingbu Cao 	}
2032198109eaSBingbu Cao 
2033198109eaSBingbu Cao 	return r;
2034198109eaSBingbu Cao }
2035198109eaSBingbu Cao 
/* Runtime-PM and system-sleep callbacks wired into the PCI driver below. */
static const struct dev_pm_ops cio2_pm_ops = {
	SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
};

/* PCI IDs this driver binds to: Intel CIO2 (IPU3 CSI-2 host controller). */
static const struct pci_device_id cio2_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
	{ }
};

MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);

static struct pci_driver cio2_pci_driver = {
	.name = CIO2_NAME,
	.id_table = cio2_pci_id_table,
	.probe = cio2_pci_probe,
	.remove = cio2_pci_remove,
	.driver = {
		.pm = &cio2_pm_ops,
	},
};

/* Generates the module init/exit boilerplate for a PCI driver. */
module_pci_driver(cio2_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");
MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
2068