// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Video for Linux Two
 *
 *	A generic video device interface for the LINUX operating system
 *	using a set of device structures/vectors for low level operations.
 *
 *	This file replaces the videodev.c file that comes with the
 *	regular kernel distribution.
 *
 * Author:	Bill Dirks <bill@thedirks.org>
 *		based on code by Alan Cox, <alan@cymru.net>
 */

/*
 * Video capture interface for Linux
 *
 *	A generic video device interface for the LINUX operating system
 *	using a set of device structures/vectors for low level operations.
 *
 * Author:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 */

/*
 * Video4linux 1/2 integration by Justin Schoeman
 * <justin@suntiger.ee.up.ac.za>
 * 2.4 PROCFS support ported from 2.4 kernels by
 *  Iñaki García Etxebarria <garetxe@euskalnet.net>
 * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
 * 2.4 devfs support ported from 2.4 kernels by
 *  Dan Merillat <dan@merillat.org>
 * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

#include <linux/videodev2.h>

/*
 *
 *	V 4 L 2   D R I V E R   H E L P E R   A P I
 *
 */

/*
 *  Video Standard Operations (contributed by Michael Schimek)
 */

/* Helper functions for control handling */

/* Fill in a struct v4l2_queryctrl */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
{
	const char *name;
	s64 min = _min;
	s64 max = _max;
	u64 step = _step;
	s64 def = _def;

	v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
		       &min, &max, &step, &def, &qctrl->flags);

	if (name == NULL)
		return -EINVAL;

	qctrl->minimum = min;
	qctrl->maximum = max;
	qctrl->step = step;
	qctrl->default_value = def;
	qctrl->reserved[0] = qctrl->reserved[1] = 0;
	strscpy(qctrl->name, name, sizeof(qctrl->name));
	return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
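/*
 * Illustrative sketch (not part of this file): a hypothetical driver that
 * still implements VIDIOC_QUERYCTRL by hand could use the helper above to
 * fill in the range of a control it recognises ("my_queryctrl" and the
 * brightness range are placeholders; drivers using the control framework
 * do not need this):
 *
 *	static int my_queryctrl(struct file *file, void *fh,
 *				struct v4l2_queryctrl *qctrl)
 *	{
 *		switch (qctrl->id) {
 *		case V4L2_CID_BRIGHTNESS:
 *			return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 128);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */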
/* Clamp x to be between min and max, aligned to a multiple of 2^align.  min
 * and max don't have to be aligned, but there must be at least one valid
 * value.  E.g., min=17, max=31, align=4 is not allowed as there are no
 * multiples of 16 between 17 and 31.
 */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);

	/* Clamp to aligned min and max */
	x = clamp(x, (min + ~mask) & mask, max & mask);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	return x;
}

static unsigned int clamp_roundup(unsigned int x, unsigned int min,
				  unsigned int max, unsigned int alignment)
{
	x = clamp(x, min, max);
	if (alignment)
		x = round_up(x, alignment);

	return x;
}

void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
			   unsigned int walign,
			   u32 *h, unsigned int hmin, unsigned int hmax,
			   unsigned int halign, unsigned int salign)
{
	*w = clamp_align(*w, wmin, wmax, walign);
	*h = clamp_align(*h, hmin, hmax, halign);

	/* Usually we don't need to align the size and are done now. */
	if (!salign)
		return;

	/* How much alignment do we have? */
	walign = __ffs(*w);
	halign = __ffs(*h);
	/* Enough to satisfy the image alignment? */
	if (walign + halign < salign) {
		/* Max walign where there is still a valid width */
		unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
		/* Max halign where there is still a valid height */
		unsigned int hmaxa = __fls(hmax ^ (hmin - 1));

		/* up the smaller alignment until we have enough */
		do {
			if (halign >= hmaxa ||
			    (walign <= halign && walign < wmaxa)) {
				*w = clamp_align(*w, wmin, wmax, walign + 1);
				walign = __ffs(*w);
			} else {
				*h = clamp_align(*h, hmin, hmax, halign + 1);
				halign = __ffs(*h);
			}
		} while (halign + walign < salign);
	}
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
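/*
 * Illustrative sketch (not part of this file): how a try_fmt path might
 * constrain a requested resolution with v4l_bound_align_image().  The limits
 * below are made up; walign = 4 and halign = 1 request multiples of 16 and 2
 * respectively, and salign = 0 means the total size needs no extra alignment:
 *
 *	u32 width = 1921, height = 1081;
 *
 *	v4l_bound_align_image(&width, 48, 4096, 4,
 *			      &height, 32, 2160, 1, 0);
 *
 * Afterwards width is 1920 and height is 1082: each value is clamped to its
 * range and rounded to the nearest allowed multiple.
 */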
const void *
__v4l2_find_nearest_size(const void *array, size_t array_size,
			 size_t entry_size, size_t width_offset,
			 size_t height_offset, s32 width, s32 height)
{
	u32 error, min_error = U32_MAX;
	const void *best = NULL;
	unsigned int i;

	if (!array)
		return NULL;

	for (i = 0; i < array_size; i++, array += entry_size) {
		const u32 *entry_width = array + width_offset;
		const u32 *entry_height = array + height_offset;

		error = abs(*entry_width - width) + abs(*entry_height - height);
		if (error > min_error)
			continue;

		min_error = error;
		best = array;
		if (!error)
			break;
	}

	return best;
}
EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);

int v4l2_g_parm_cap(struct video_device *vdev,
		    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
	struct v4l2_subdev_frame_interval ival = { 0 };
	int ret;

	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		return -EINVAL;

	if (vdev->device_caps & V4L2_CAP_READWRITE)
		a->parm.capture.readbuffers = 2;
	if (v4l2_subdev_has_op(sd, video, g_frame_interval))
		a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	ret = v4l2_subdev_call(sd, video, g_frame_interval, &ival);
	if (!ret)
		a->parm.capture.timeperframe = ival.interval;
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_g_parm_cap);

int v4l2_s_parm_cap(struct video_device *vdev,
		    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
	struct v4l2_subdev_frame_interval ival = {
		.interval = a->parm.capture.timeperframe
	};
	int ret;

	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		return -EINVAL;

	memset(&a->parm, 0, sizeof(a->parm));
	if (vdev->device_caps & V4L2_CAP_READWRITE)
		a->parm.capture.readbuffers = 2;
	else
		a->parm.capture.readbuffers = 0;

	if (v4l2_subdev_has_op(sd, video, g_frame_interval))
		a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	ret = v4l2_subdev_call(sd, video, s_frame_interval, &ival);
	if (!ret)
		a->parm.capture.timeperframe = ival.interval;
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);
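/*
 * Illustrative sketch (not part of this file): a hypothetical bridge driver
 * with a single sensor subdev could forward VIDIOC_G_PARM to the helper
 * above ("struct my_device" and its "sensor" pointer are placeholders, and
 * VIDIOC_S_PARM would be wired to v4l2_s_parm_cap() the same way):
 *
 *	static int my_g_parm(struct file *file, void *fh,
 *			     struct v4l2_streamparm *parm)
 *	{
 *		struct my_device *dev = video_drvdata(file);
 *
 *		return v4l2_g_parm_cap(video_devdata(file), dev->sensor, parm);
 *	}
 */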
const struct v4l2_format_info *v4l2_format_info(u32 format)
{
	static const struct v4l2_format_info formats[] = {
		/* RGB formats */
		{ .format = V4L2_PIX_FMT_BGR24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_HSV24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_XBGR32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGRX32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_XRGB32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBX32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_HSV32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ARGB32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBA32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ABGR32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGRA32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB565,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB555,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR666,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },

		/* YUV packed formats */
		{ .format = V4L2_PIX_FMT_YUYV,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVYU,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_UYVY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_VYUY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },

		/* YUV planar formats */
		{ .format = V4L2_PIX_FMT_NV12,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV21,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV16,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV61,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV24,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV42,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P010,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },

		{ .format = V4L2_PIX_FMT_YUV410,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
		{ .format = V4L2_PIX_FMT_YVU410,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
		{ .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV420,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YVU420,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_GREY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },

		/* Tiled YUV formats */
		{ .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .hdiv = 2, .vdiv = 2 },

		/* YUV planar formats, non contiguous variant */
		{ .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },

		{ .format = V4L2_PIX_FMT_NV12M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV21M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV16M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV61M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },

		/* Bayer RGB formats */
		{ .format = V4L2_PIX_FMT_SBGGR8,       .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG8,       .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG8,       .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB8,       .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR12,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG12,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG12,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB12,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); ++i)
		if (formats[i].format == format)
			return &formats[i];
	return NULL;
}
EXPORT_SYMBOL(v4l2_format_info);

static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
{
	if (!info->block_w[plane])
		return 1;
	return info->block_w[plane];
}

static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
{
	if (!info->block_h[plane])
		return 1;
	return info->block_h[plane];
}

void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
				    const struct v4l2_frmsize_stepwise *frmsize)
{
	if (!frmsize)
		return;

	/*
	 * Clamp width/height to meet min/max constraints and round it up to
	 * macroblock alignment.
	 */
	*width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width,
			       frmsize->step_width);
	*height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height,
				frmsize->step_height);
}
EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);
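/*
 * Illustrative sketch (not part of this file): clamping a requested size to
 * a stepwise frame-size range with v4l2_apply_frmsize_constraints().  The
 * range below is made up:
 *
 *	static const struct v4l2_frmsize_stepwise my_frmsize = {
 *		.min_width = 32,  .max_width = 1920, .step_width = 16,
 *		.min_height = 32, .max_height = 1088, .step_height = 16,
 *	};
 *
 *	u32 width = 1000, height = 500;
 *
 *	v4l2_apply_frmsize_constraints(&width, &height, &my_frmsize);
 *
 * Afterwards width is 1008 and height is 512: both are clamped to the range
 * and rounded up to the next multiple of the step.
 */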
int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
			u32 pixelformat, u32 width, u32 height)
{
	const struct v4l2_format_info *info;
	struct v4l2_plane_pix_format *plane;
	int i;

	info = v4l2_format_info(pixelformat);
	if (!info)
		return -EINVAL;

	pixfmt->width = width;
	pixfmt->height = height;
	pixfmt->pixelformat = pixelformat;
	pixfmt->num_planes = info->mem_planes;

	if (info->mem_planes == 1) {
		plane = &pixfmt->plane_fmt[0];
		plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
		plane->sizeimage = 0;

		for (i = 0; i < info->comp_planes; i++) {
			unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
			unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
			unsigned int aligned_width;
			unsigned int aligned_height;

			aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
			aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

			plane->sizeimage += info->bpp[i] *
				DIV_ROUND_UP(aligned_width, hdiv) *
				DIV_ROUND_UP(aligned_height, vdiv);
		}
	} else {
		for (i = 0; i < info->comp_planes; i++) {
			unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
			unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
			unsigned int aligned_width;
			unsigned int aligned_height;

			aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
			aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

			plane = &pixfmt->plane_fmt[i];
			plane->bytesperline =
				info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv);
			plane->sizeimage =
				plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);

int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
		     u32 width, u32 height)
{
	const struct v4l2_format_info *info;
	int i;

	info = v4l2_format_info(pixelformat);
	if (!info)
		return -EINVAL;

	/* Single planar API cannot be used for multi plane formats. */
	if (info->mem_planes > 1)
		return -EINVAL;

	pixfmt->width = width;
	pixfmt->height = height;
	pixfmt->pixelformat = pixelformat;
	pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
	pixfmt->sizeimage = 0;

	for (i = 0; i < info->comp_planes; i++) {
		unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
		unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
		unsigned int aligned_width;
		unsigned int aligned_height;

		aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
		aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

		pixfmt->sizeimage += info->bpp[i] *
			DIV_ROUND_UP(aligned_width, hdiv) *
			DIV_ROUND_UP(aligned_height, vdiv);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
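/*
 * Illustrative sketch (not part of this file): using v4l2_fill_pixfmt() to
 * derive the layout of a single-planar NV12 buffer.  For 1920x1080 the Y
 * plane takes 1 byte per pixel and the interleaved CbCr plane is subsampled
 * by 2 in both directions with 2 bytes per sample pair:
 *
 *	struct v4l2_pix_format pix;
 *
 *	v4l2_fill_pixfmt(&pix, V4L2_PIX_FMT_NV12, 1920, 1080);
 *
 * This sets pix.bytesperline to 1920 (the Y plane stride) and pix.sizeimage
 * to 1920 * 1080 + 2 * 960 * 540 = 3110400 bytes (Y plus CbCr).
 */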
s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
		       unsigned int div)
{
	struct v4l2_ctrl *ctrl;
	s64 freq;

	ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ);
	if (ctrl) {
		struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
		int ret;

		qm.index = v4l2_ctrl_g_ctrl(ctrl);

		ret = v4l2_querymenu(handler, &qm);
		if (ret)
			return -ENOENT;

		freq = qm.value;
	} else {
		if (!mul || !div)
			return -ENOENT;

		ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE);
		if (!ctrl)
			return -ENOENT;

		freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);

		pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
			__func__);
		pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
			__func__);
	}

	return freq > 0 ? freq : -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_get_link_freq);

/*
 * Simplify a fraction using a simple continued fraction decomposition. The
 * idea here is to convert fractions such as 333333/10000000 to 1/30 using
 * 32 bit arithmetic only. The algorithm is not perfect and relies upon two
 * arbitrary parameters to remove insignificant terms from the simple
 * continued fraction decomposition. Using 8 and 333 for n_terms and threshold
 * respectively seems to give nice results.
 */
void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
			    unsigned int n_terms, unsigned int threshold)
{
	u32 *an;
	u32 x, y, r;
	unsigned int i, n;

	an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
	if (an == NULL)
		return;

	/*
	 * Convert the fraction to a simple continued fraction. See
	 * https://en.wikipedia.org/wiki/Continued_fraction
	 * Stop if the current term is bigger than or equal to the given
	 * threshold.
	 */
	x = *numerator;
	y = *denominator;

	for (n = 0; n < n_terms && y != 0; ++n) {
		an[n] = x / y;
		if (an[n] >= threshold) {
			if (n < 2)
				n++;
			break;
		}

		r = x - an[n] * y;
		x = y;
		y = r;
	}

	/* Expand the simple continued fraction back to an integer fraction. */
	x = 0;
	y = 1;

	for (i = n; i > 0; --i) {
		r = y;
		y = an[i - 1] * y + x;
		x = r;
	}

	*numerator = y;
	*denominator = x;
	kfree(an);
}
EXPORT_SYMBOL_GPL(v4l2_simplify_fraction);

/*
 * Convert a fraction to a frame interval in 100ns multiples. The idea here is
 * to compute numerator / denominator * 10000000 using 32 bit fixed point
 * arithmetic only.
 */
u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator)
{
	u32 multiplier;

	/* Saturate the result if the operation would overflow. */
	if (denominator == 0 ||
	    numerator / denominator >= ((u32)-1) / 10000000)
		return (u32)-1;

	/*
	 * Divide both the denominator and the multiplier by two until
	 * numerator * multiplier doesn't overflow. If anyone knows a better
	 * algorithm please let me know.
	 */
	multiplier = 10000000;
	while (numerator > ((u32)-1) / multiplier) {
		multiplier /= 2;
		denominator /= 2;
	}

	return denominator ? numerator * multiplier / denominator : 0;
}
EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval);
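/*
 * Illustrative sketch (not part of this file): the two fraction helpers are
 * typically used when converting between frame intervals and frame rates.
 * Using the example from the v4l2_simplify_fraction() comment:
 *
 *	u32 num = 333333, den = 10000000;
 *
 *	v4l2_simplify_fraction(&num, &den, 8, 333);
 *
 * leaves num == 1 and den == 30, i.e. a 30 fps interval, and
 * v4l2_fraction_to_interval(1, 30) returns 333333, i.e. 1/30 s expressed in
 * 100 ns units (rounded down).
 */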