xref: /openbmc/linux/drivers/media/v4l2-core/v4l2-common.c (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   *	Video for Linux Two
4   *
5   *	A generic video device interface for the LINUX operating system
6   *	using a set of device structures/vectors for low level operations.
7   *
8   *	This file replaces the videodev.c file that comes with the
9   *	regular kernel distribution.
10   *
11   * Author:	Bill Dirks <bill@thedirks.org>
12   *		based on code by Alan Cox, <alan@cymru.net>
13   */
14  
15  /*
16   * Video capture interface for Linux
17   *
18   *	A generic video device interface for the LINUX operating system
19   *	using a set of device structures/vectors for low level operations.
20   *
21   * Author:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
22   *
23   * Fixes:
24   */
25  
26  /*
27   * Video4linux 1/2 integration by Justin Schoeman
28   * <justin@suntiger.ee.up.ac.za>
29   * 2.4 PROCFS support ported from 2.4 kernels by
30   *  Iñaki García Etxebarria <garetxe@euskalnet.net>
31   * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
32   * 2.4 devfs support ported from 2.4 kernels by
33   *  Dan Merillat <dan@merillat.org>
34   * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
35   */
36  
37  #include <linux/module.h>
38  #include <linux/types.h>
39  #include <linux/kernel.h>
40  #include <linux/mm.h>
41  #include <linux/string.h>
42  #include <linux/errno.h>
43  #include <linux/uaccess.h>
44  #include <asm/io.h>
45  #include <asm/div64.h>
46  #include <media/v4l2-common.h>
47  #include <media/v4l2-device.h>
48  #include <media/v4l2-ctrls.h>
49  
50  #include <linux/videodev2.h>
51  
52  /*
53   *
54   *	V 4 L 2   D R I V E R   H E L P E R   A P I
55   *
56   */
57  
58  /*
59   *  Video Standard Operations (contributed by Michael Schimek)
60   */
61  
62  /* Helper functions for control handling			     */
63  
64  /* Fill in a struct v4l2_queryctrl */
/*
 * v4l2_ctrl_query_fill() - populate a struct v4l2_queryctrl for a control.
 *
 * The control framework (v4l2_ctrl_fill()) supplies name, type and flags
 * for qctrl->id; the caller-provided range/step/default seed the values
 * passed in. Returns 0 on success or -EINVAL for an unknown control ID.
 */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
{
	s64 lo = _min;
	s64 hi = _max;
	u64 stp = _step;
	s64 dflt = _def;
	const char *name;

	/* Look up per-ID metadata; range values are passed by reference. */
	v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
		       &lo, &hi, &stp, &dflt, &qctrl->flags);
	if (!name)
		return -EINVAL;

	qctrl->minimum = lo;
	qctrl->maximum = hi;
	qctrl->step = stp;
	qctrl->default_value = dflt;
	qctrl->reserved[0] = 0;
	qctrl->reserved[1] = 0;
	strscpy(qctrl->name, name, sizeof(qctrl->name));
	return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
88  
/*
 * Clamp x into [min, max] and round it to the nearest multiple of
 * 2^align. min and max need not themselves be aligned, but at least one
 * aligned value must lie in the range: min=17, max=31, align=4 is
 * invalid, as no multiple of 16 falls between 17 and 31.
 */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	unsigned int step = 1 << align;
	/* Low bits that any aligned value must have clear. */
	unsigned int mask = ~(step - 1);
	unsigned int lo = (min + step - 1) & mask;	/* min rounded up */
	unsigned int hi = max & mask;			/* max rounded down */

	x = clamp(x, lo, hi);

	/* Round to the nearest aligned value (half a step rounds up). */
	if (align)
		x = (x + (step >> 1)) & mask;

	return x;
}
108  
/* Clamp x into [min, max], then round up to a multiple of alignment
 * (0 means no alignment constraint). */
static unsigned int clamp_roundup(unsigned int x, unsigned int min,
				   unsigned int max, unsigned int alignment)
{
	unsigned int v = clamp(x, min, max);

	return alignment ? round_up(v, alignment) : v;
}
118  
/*
 * v4l_bound_align_image() - clamp and align an image size in place.
 * @w, @h:	in/out width and height in pixels
 * @wmin/@wmax, @hmin/@hmax: allowed ranges for width and height
 * @walign, @halign: width/height alignment as log2 (multiples of 2^align)
 * @salign:	combined size alignment as log2: on return the product
 *		w * h is divisible by 2^salign; 0 disables this constraint
 */
void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
			   unsigned int walign,
			   u32 *h, unsigned int hmin, unsigned int hmax,
			   unsigned int halign, unsigned int salign)
{
	*w = clamp_align(*w, wmin, wmax, walign);
	*h = clamp_align(*h, hmin, hmax, halign);

	/* Usually we don't need to align the size and are done now. */
	if (!salign)
		return;

	/* How much alignment do we have? (__ffs = lowest set bit index) */
	walign = __ffs(*w);
	halign = __ffs(*h);
	/* Enough to satisfy the image alignment? */
	if (walign + halign < salign) {
		/*
		 * Max walign where there is still a valid width.
		 * NOTE(review): __fls(wmax ^ (wmin - 1)) finds the highest
		 * bit that differs across the range, which bounds how far
		 * the width alignment can be raised — confirm against the
		 * original design if modifying.
		 */
		unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
		/* Max halign where there is still a valid height */
		unsigned int hmaxa = __fls(hmax ^ (hmin - 1));

		/* up the smaller alignment until we have enough */
		do {
			/* Prefer raising whichever axis has room left. */
			if (halign >= hmaxa ||
			    (walign <= halign && walign < wmaxa)) {
				*w = clamp_align(*w, wmin, wmax, walign + 1);
				walign = __ffs(*w);
			} else {
				*h = clamp_align(*h, hmin, hmax, halign + 1);
				halign = __ffs(*h);
			}
		} while (halign + walign < salign);
	}
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
155  
156  const void *
__v4l2_find_nearest_size(const void * array,size_t array_size,size_t entry_size,size_t width_offset,size_t height_offset,s32 width,s32 height)157  __v4l2_find_nearest_size(const void *array, size_t array_size,
158  			 size_t entry_size, size_t width_offset,
159  			 size_t height_offset, s32 width, s32 height)
160  {
161  	u32 error, min_error = U32_MAX;
162  	const void *best = NULL;
163  	unsigned int i;
164  
165  	if (!array)
166  		return NULL;
167  
168  	for (i = 0; i < array_size; i++, array += entry_size) {
169  		const u32 *entry_width = array + width_offset;
170  		const u32 *entry_height = array + height_offset;
171  
172  		error = abs(*entry_width - width) + abs(*entry_height - height);
173  		if (error > min_error)
174  			continue;
175  
176  		min_error = error;
177  		best = array;
178  		if (!error)
179  			break;
180  	}
181  
182  	return best;
183  }
184  EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);
185  
v4l2_g_parm_cap(struct video_device * vdev,struct v4l2_subdev * sd,struct v4l2_streamparm * a)186  int v4l2_g_parm_cap(struct video_device *vdev,
187  		    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
188  {
189  	struct v4l2_subdev_frame_interval ival = { 0 };
190  	int ret;
191  
192  	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
193  	    a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
194  		return -EINVAL;
195  
196  	if (vdev->device_caps & V4L2_CAP_READWRITE)
197  		a->parm.capture.readbuffers = 2;
198  	if (v4l2_subdev_has_op(sd, video, g_frame_interval))
199  		a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
200  	ret = v4l2_subdev_call(sd, video, g_frame_interval, &ival);
201  	if (!ret)
202  		a->parm.capture.timeperframe = ival.interval;
203  	return ret;
204  }
205  EXPORT_SYMBOL_GPL(v4l2_g_parm_cap);
206  
v4l2_s_parm_cap(struct video_device * vdev,struct v4l2_subdev * sd,struct v4l2_streamparm * a)207  int v4l2_s_parm_cap(struct video_device *vdev,
208  		    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
209  {
210  	struct v4l2_subdev_frame_interval ival = {
211  		.interval = a->parm.capture.timeperframe
212  	};
213  	int ret;
214  
215  	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
216  	    a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
217  		return -EINVAL;
218  
219  	memset(&a->parm, 0, sizeof(a->parm));
220  	if (vdev->device_caps & V4L2_CAP_READWRITE)
221  		a->parm.capture.readbuffers = 2;
222  	else
223  		a->parm.capture.readbuffers = 0;
224  
225  	if (v4l2_subdev_has_op(sd, video, g_frame_interval))
226  		a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
227  	ret = v4l2_subdev_call(sd, video, s_frame_interval, &ival);
228  	if (!ret)
229  		a->parm.capture.timeperframe = ival.interval;
230  	return ret;
231  }
232  EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);
233  
/**
 * v4l2_format_info - look up the format description for a fourcc code
 * @format: a V4L2_PIX_FMT_* pixel format code
 *
 * Returns a pointer to a static &struct v4l2_format_info entry describing
 * the format: pixel encoding class, memory- and component-plane counts,
 * per-plane bytes-per-pixel as a bpp/bpp_div fraction, horizontal and
 * vertical chroma subsampling divisors, and (for tiled formats) the tile
 * block dimensions. Returns NULL if @format is not in the table.
 */
const struct v4l2_format_info *v4l2_format_info(u32 format)
{
	static const struct v4l2_format_info formats[] = {
		/* RGB formats */
		{ .format = V4L2_PIX_FMT_BGR24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_HSV24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_XBGR32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGRX32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_XRGB32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBX32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_HSV32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ARGB32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBA32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ABGR32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGRA32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB565,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB555,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR666,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },

		/* YUV packed formats */
		{ .format = V4L2_PIX_FMT_YUYV,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVYU,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_UYVY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_VYUY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_Y212,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		/* 10-bit Mediatek tiled formats: bpp expressed as 5/4 and 10/4 bytes. */
		{ .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
		  .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
		{ .format = V4L2_PIX_FMT_MT2110R, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
		  .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},

		/* YUV planar formats */
		{ .format = V4L2_PIX_FMT_NV12,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV21,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV16,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV61,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV24,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV42,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P010,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P012,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },

		{ .format = V4L2_PIX_FMT_YUV410,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
		{ .format = V4L2_PIX_FMT_YVU410,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
		{ .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV420,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YVU420,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_GREY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },

		/* Tiled YUV formats */
		{ .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV15_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
		  .block_w = { 4, 2, 0, 0 }, .block_h = { 1, 1, 0, 0 }},
		{ .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },

		/* YUV planar formats, non contiguous variant */
		{ .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },

		{ .format = V4L2_PIX_FMT_NV12M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV21M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV16M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV61M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P012M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },

		/* Bayer RGB formats */
		{ .format = V4L2_PIX_FMT_SBGGR8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10ALAW8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10ALAW8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10ALAW8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10ALAW8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10DPCM8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10DPCM8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10DPCM8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10DPCM8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR12,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG12,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG12,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB12,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
	};
	unsigned int i;

	/* Linear search through the static table. */
	for (i = 0; i < ARRAY_SIZE(formats); ++i)
		if (formats[i].format == format)
			return &formats[i];
	return NULL;
}
EXPORT_SYMBOL(v4l2_format_info);
338  
v4l2_format_block_width(const struct v4l2_format_info * info,int plane)339  static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
340  {
341  	if (!info->block_w[plane])
342  		return 1;
343  	return info->block_w[plane];
344  }
345  
v4l2_format_block_height(const struct v4l2_format_info * info,int plane)346  static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
347  {
348  	if (!info->block_h[plane])
349  		return 1;
350  	return info->block_h[plane];
351  }
352  
/*
 * v4l2_apply_frmsize_constraints() - bound a frame size to a stepwise
 * constraint: clamp width/height to the advertised min/max and round
 * each up to its step (e.g. macroblock) size. A NULL @frmsize leaves
 * the size untouched.
 */
void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
				    const struct v4l2_frmsize_stepwise *frmsize)
{
	u32 w, h;

	if (!frmsize)
		return;

	w = clamp_roundup(*width, frmsize->min_width,
			  frmsize->max_width, frmsize->step_width);
	h = clamp_roundup(*height, frmsize->min_height,
			  frmsize->max_height, frmsize->step_height);

	*width = w;
	*height = h;
}
EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);
369  
/**
 * v4l2_fill_pixfmt_mp - fill in a multi-planar pixel format structure
 * @pixfmt: the &struct v4l2_pix_format_mplane to fill in
 * @pixelformat: a V4L2_PIX_FMT_* fourcc known to v4l2_format_info()
 * @width: image width in pixels
 * @height: image height in pixels
 *
 * Sets width/height/pixelformat/num_planes and computes bytesperline and
 * sizeimage per memory plane from the v4l2_format_info() table.
 *
 * Return: 0 on success, -EINVAL if @pixelformat is unknown.
 */
int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
			u32 pixelformat, u32 width, u32 height)
{
	const struct v4l2_format_info *info;
	struct v4l2_plane_pix_format *plane;
	int i;

	info = v4l2_format_info(pixelformat);
	if (!info)
		return -EINVAL;

	pixfmt->width = width;
	pixfmt->height = height;
	pixfmt->pixelformat = pixelformat;
	pixfmt->num_planes = info->mem_planes;

	if (info->mem_planes == 1) {
		/*
		 * Contiguous case: one memory plane holds all component
		 * planes, so sizeimage accumulates every component while
		 * bytesperline describes component plane 0 only.
		 */
		plane = &pixfmt->plane_fmt[0];
		plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
		plane->sizeimage = 0;

		for (i = 0; i < info->comp_planes; i++) {
			/* Chroma subsampling never applies to plane 0. */
			unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
			unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
			unsigned int aligned_width;
			unsigned int aligned_height;

			/* Round dimensions up to the plane's tile block. */
			aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
			aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

			plane->sizeimage += info->bpp[i] *
				DIV_ROUND_UP(aligned_width, hdiv) *
				DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
		}
	} else {
		/* Non-contiguous case: one memory plane per component. */
		for (i = 0; i < info->comp_planes; i++) {
			unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
			unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
			unsigned int aligned_width;
			unsigned int aligned_height;

			aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
			aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

			plane = &pixfmt->plane_fmt[i];
			plane->bytesperline =
				info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv) / info->bpp_div[i];
			plane->sizeimage =
				plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);
424  
/**
 * v4l2_fill_pixfmt - fill in a single-planar pixel format structure
 * @pixfmt: the &struct v4l2_pix_format to fill in
 * @pixelformat: a V4L2_PIX_FMT_* fourcc known to v4l2_format_info()
 * @width: image width in pixels
 * @height: image height in pixels
 *
 * Sets width/height/pixelformat and computes bytesperline (component
 * plane 0) and sizeimage (sum of all component planes) from the
 * v4l2_format_info() table.
 *
 * Return: 0 on success, -EINVAL if @pixelformat is unknown or describes
 * a multi-memory-plane format (which needs v4l2_fill_pixfmt_mp()).
 */
int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
		     u32 width, u32 height)
{
	const struct v4l2_format_info *info;
	int i;

	info = v4l2_format_info(pixelformat);
	if (!info)
		return -EINVAL;

	/* Single planar API cannot be used for multi plane formats. */
	if (info->mem_planes > 1)
		return -EINVAL;

	pixfmt->width = width;
	pixfmt->height = height;
	pixfmt->pixelformat = pixelformat;
	pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
	pixfmt->sizeimage = 0;

	for (i = 0; i < info->comp_planes; i++) {
		/* Chroma subsampling never applies to plane 0. */
		unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
		unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
		unsigned int aligned_width;
		unsigned int aligned_height;

		/* Round dimensions up to the plane's tile block. */
		aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
		aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

		pixfmt->sizeimage += info->bpp[i] *
			DIV_ROUND_UP(aligned_width, hdiv) *
			DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
461  
/**
 * v4l2_get_link_freq - obtain the transmitter's link frequency
 * @handler: the transmitter's control handler
 * @mul: multiplier applied to the pixel rate in the fallback estimate
 * @div: divisor applied to the pixel rate in the fallback estimate
 *
 * Prefers the V4L2_CID_LINK_FREQ menu control; failing that, estimates
 * the frequency as pixel_rate * @mul / @div from V4L2_CID_PIXEL_RATE
 * (presumably @mul/@div encode bits-per-sample and lane factors — see
 * the v4l2_get_link_freq() kernel documentation for the exact contract).
 *
 * Return: the positive link frequency in Hz, -ENOENT if neither control
 * is usable (or @mul/@div is zero), or -EINVAL for a non-positive value.
 */
s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
		       unsigned int div)
{
	struct v4l2_ctrl *ctrl;
	s64 freq;

	/* Preferred source: the LINK_FREQ integer-menu control. */
	ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ);
	if (ctrl) {
		struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
		int ret;

		/* The control's current value is an index into the menu. */
		qm.index = v4l2_ctrl_g_ctrl(ctrl);

		ret = v4l2_querymenu(handler, &qm);
		if (ret)
			return -ENOENT;

		freq = qm.value;
	} else {
		/* Fallback: estimate from the pixel rate control. */
		if (!mul || !div)
			return -ENOENT;

		ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE);
		if (!ctrl)
			return -ENOENT;

		freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);

		pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
			__func__);
		pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
			__func__);
	}

	/* A zero or negative frequency is treated as invalid. */
	return freq > 0 ? freq : -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
499  
/*
 * Simplify a fraction using a simple continued fraction decomposition. The
 * idea here is to convert fractions such as 333333/10000000 to 1/30 using
 * 32 bit arithmetic only. The algorithm is not perfect and relies upon two
 * arbitrary parameters to remove non-significative terms from the simple
 * continued fraction decomposition. Using 8 and 333 for n_terms and threshold
 * respectively seems to give nice results.
 *
 * @numerator/@denominator are updated in place; on kmalloc failure the
 * fraction is left untouched.
 */
void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
		unsigned int n_terms, unsigned int threshold)
{
	u32 *an;		/* continued-fraction terms a0..a(n-1) */
	u32 x, y, r;
	unsigned int i, n;

	an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
	if (an == NULL)
		return;

	/*
	 * Convert the fraction to a simple continued fraction. See
	 * https://en.wikipedia.org/wiki/Continued_fraction
	 * Stop if the current term is bigger than or equal to the given
	 * threshold.
	 */
	x = *numerator;
	y = *denominator;

	for (n = 0; n < n_terms && y != 0; ++n) {
		an[n] = x / y;
		if (an[n] >= threshold) {
			/*
			 * Keep a big first or second term (n < 2) so small
			 * fractions don't collapse; otherwise the big term
			 * is dropped as non-significative.
			 */
			if (n < 2)
				n++;
			break;
		}

		r = x - an[n] * y;	/* remainder, i.e. x % y */
		x = y;
		y = r;
	}

	/* Expand the simple continued fraction back to an integer fraction. */
	x = 0;
	y = 1;

	for (i = n; i > 0; --i) {
		r = y;
		y = an[i-1] * y + x;
		x = r;
	}

	*numerator = y;
	*denominator = x;
	kfree(an);
}
EXPORT_SYMBOL_GPL(v4l2_simplify_fraction);
556  
557  /*
558   * Convert a fraction to a frame interval in 100ns multiples. The idea here is
559   * to compute numerator / denominator * 10000000 using 32 bit fixed point
560   * arithmetic only.
561   */
v4l2_fraction_to_interval(u32 numerator,u32 denominator)562  u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator)
563  {
564  	u32 multiplier;
565  
566  	/* Saturate the result if the operation would overflow. */
567  	if (denominator == 0 ||
568  	    numerator/denominator >= ((u32)-1)/10000000)
569  		return (u32)-1;
570  
571  	/*
572  	 * Divide both the denominator and the multiplier by two until
573  	 * numerator * multiplier doesn't overflow. If anyone knows a better
574  	 * algorithm please let me know.
575  	 */
576  	multiplier = 10000000;
577  	while (numerator > ((u32)-1)/multiplier) {
578  		multiplier /= 2;
579  		denominator /= 2;
580  	}
581  
582  	return denominator ? numerator * multiplier / denominator : 0;
583  }
584  EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval);
585