// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Video for Linux Two
 *
 * A generic video device interface for the LINUX operating system
 * using a set of device structures/vectors for low level operations.
 *
 * This file replaces the videodev.c file that comes with the
 * regular kernel distribution.
 *
 * Author:	Bill Dirks <bill@thedirks.org>
 *		based on code by Alan Cox, <alan@cymru.net>
 */

/*
 * Video capture interface for Linux
 *
 * A generic video device interface for the LINUX operating system
 * using a set of device structures/vectors for low level operations.
 *
 * Author:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 */

/*
 * Video4linux 1/2 integration by Justin Schoeman
 * <justin@suntiger.ee.up.ac.za>
 * 2.4 PROCFS support ported from 2.4 kernels by
 * Iñaki García Etxebarria <garetxe@euskalnet.net>
 * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
 * 2.4 devfs support ported from 2.4 kernels by
 * Dan Merillat <dan@merillat.org>
 * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

#include <linux/videodev2.h>

/*
 *
 *	V 4 L 2   D R I V E R   H E L P E R   A P I
 *
 */

/*
 *  Video Standard Operations (contributed by Michael Schimek)
 */

/* Helper functions for control handling */

/* Fill in a struct v4l2_queryctrl */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
{
        const char *name;
        s64 min = _min;
        s64 max = _max;
        u64 step = _step;
        s64 def = _def;

        v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
                       &min, &max, &step, &def, &qctrl->flags);

        if (name == NULL)
                return -EINVAL;

        qctrl->minimum = min;
        qctrl->maximum = max;
        qctrl->step = step;
        qctrl->default_value = def;
        qctrl->reserved[0] = qctrl->reserved[1] = 0;
        strscpy(qctrl->name, name, sizeof(qctrl->name));
        return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
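
/*
 * Illustrative example (not part of the original source): a driver's
 * vidioc_queryctrl handler for a standard integer control can pass only its
 * own range and let v4l2_ctrl_fill() provide the name, type and flags, e.g.
 *
 *	qctrl->id = V4L2_CID_BRIGHTNESS;
 *	err = v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 128);
 *
 * An unknown control ID results in -EINVAL.
 */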

/* Clamp x to be between min and max, aligned to a multiple of 2^align.  min
 * and max don't have to be aligned, but there must be at least one valid
 * value.  E.g., min=17, max=31, align=4 is not allowed as there are no
 * multiples of 16 between 17 and 31.
 */
static unsigned int clamp_align(unsigned int x, unsigned int min,
                                unsigned int max, unsigned int align)
{
        /* Bits that must be zero to be aligned */
        unsigned int mask = ~((1 << align) - 1);

        /* Clamp to aligned min and max */
        x = clamp(x, (min + ~mask) & mask, max & mask);

        /* Round to nearest aligned value */
        if (align)
                x = (x + (1 << (align - 1))) & mask;

        return x;
}

static unsigned int clamp_roundup(unsigned int x, unsigned int min,
                                  unsigned int max, unsigned int alignment)
{
        x = clamp(x, min, max);
        if (alignment)
                x = round_up(x, alignment);

        return x;
}

void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
                           unsigned int walign,
                           u32 *h, unsigned int hmin, unsigned int hmax,
                           unsigned int halign, unsigned int salign)
{
        *w = clamp_align(*w, wmin, wmax, walign);
        *h = clamp_align(*h, hmin, hmax, halign);

        /* Usually we don't need to align the size and are done now. */
        if (!salign)
                return;

        /* How much alignment do we have? */
        walign = __ffs(*w);
        halign = __ffs(*h);
        /* Enough to satisfy the image alignment? */
        if (walign + halign < salign) {
                /* Max walign where there is still a valid width */
                unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
                /* Max halign where there is still a valid height */
                unsigned int hmaxa = __fls(hmax ^ (hmin - 1));

                /* up the smaller alignment until we have enough */
                do {
                        if (halign >= hmaxa ||
                            (walign <= halign && walign < wmaxa)) {
                                *w = clamp_align(*w, wmin, wmax, walign + 1);
                                walign = __ffs(*w);
                        } else {
                                *h = clamp_align(*h, hmin, hmax, halign + 1);
                                halign = __ffs(*h);
                        }
                } while (halign + walign < salign);
        }
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
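
/*
 * Illustrative example (not part of the original source): a try_fmt handler
 * that needs the width to be a multiple of 16 pixels and the height a
 * multiple of 2 lines could call
 *
 *	v4l_bound_align_image(&pix->width, 48, 1920, 4,
 *			      &pix->height, 32, 1080, 1, 0);
 *
 * The alignments are passed as powers of two (4 -> 16, 1 -> 2), and
 * salign = 0 means there is no additional constraint on width * height.
 */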

const void *
__v4l2_find_nearest_size(const void *array, size_t array_size,
                         size_t entry_size, size_t width_offset,
                         size_t height_offset, s32 width, s32 height)
{
        u32 error, min_error = U32_MAX;
        const void *best = NULL;
        unsigned int i;

        if (!array)
                return NULL;

        for (i = 0; i < array_size; i++, array += entry_size) {
                const u32 *entry_width = array + width_offset;
                const u32 *entry_height = array + height_offset;

                error = abs(*entry_width - width) + abs(*entry_height - height);
                if (error > min_error)
                        continue;

                min_error = error;
                best = array;
                if (!error)
                        break;
        }

        return best;
}
EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);

int v4l2_g_parm_cap(struct video_device *vdev,
                    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
        struct v4l2_subdev_frame_interval ival = { 0 };
        int ret;

        if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
            a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
                return -EINVAL;

        if (vdev->device_caps & V4L2_CAP_READWRITE)
                a->parm.capture.readbuffers = 2;
        if (v4l2_subdev_has_op(sd, video, g_frame_interval))
                a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
        ret = v4l2_subdev_call(sd, video, g_frame_interval, &ival);
        if (!ret)
                a->parm.capture.timeperframe = ival.interval;
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_g_parm_cap);

int v4l2_s_parm_cap(struct video_device *vdev,
                    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
        struct v4l2_subdev_frame_interval ival = {
                .interval = a->parm.capture.timeperframe
        };
        int ret;

        if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
            a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
                return -EINVAL;

        memset(&a->parm, 0, sizeof(a->parm));
        if (vdev->device_caps & V4L2_CAP_READWRITE)
                a->parm.capture.readbuffers = 2;
        else
                a->parm.capture.readbuffers = 0;

        if (v4l2_subdev_has_op(sd, video, g_frame_interval))
                a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
        ret = v4l2_subdev_call(sd, video, s_frame_interval, &ival);
        if (!ret)
                a->parm.capture.timeperframe = ival.interval;
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);
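
/*
 * Illustrative example (not part of the original source): drivers normally
 * use the v4l2_find_nearest_size() wrapper from media/v4l2-common.h rather
 * than calling __v4l2_find_nearest_size() directly, e.g.
 *
 *	static const struct v4l2_frmsize_discrete sizes[] = {
 *		{  640,  480 },
 *		{ 1280,  720 },
 *		{ 1920, 1080 },
 *	};
 *	const struct v4l2_frmsize_discrete *sz =
 *		v4l2_find_nearest_size(sizes, ARRAY_SIZE(sizes),
 *				       width, height,
 *				       pix->width, pix->height);
 *
 * which picks the entry with the smallest summed width/height distance.
 */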

const struct v4l2_format_info *v4l2_format_info(u32 format)
{
        static const struct v4l2_format_info formats[] = {
                /* RGB formats */
                { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },

                /* YUV packed formats */
                { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },

                /* YUV planar formats */
                { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_P012, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .hdiv = 2, .vdiv = 2 },

                { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
                { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
                { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },

                /* Tiled YUV formats */
                { .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .hdiv = 2, .vdiv = 2 },

                /* YUV planar formats, non contiguous variant */
                { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },

                { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .hdiv = 2, .vdiv = 2 },

                /* Bayer RGB formats */
                { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
        };
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(formats); ++i)
                if (formats[i].format == format)
                        return &formats[i];
        return NULL;
}
EXPORT_SYMBOL(v4l2_format_info);
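
/*
 * Illustrative example (not part of the original source): for
 * V4L2_PIX_FMT_NV12 the table above yields one memory plane, two component
 * planes, bpp = { 1, 2 } and hdiv = vdiv = 2. A 1920x1080 image therefore
 * needs 1920 * 1080 bytes of luma plus 2 * (1920/2) * (1080/2) bytes of
 * interleaved chroma, i.e. sizeimage = 1920 * 1080 * 3 / 2.
 */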

static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
{
        if (!info->block_w[plane])
                return 1;
        return info->block_w[plane];
}

static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
{
        if (!info->block_h[plane])
                return 1;
        return info->block_h[plane];
}

void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
                                    const struct v4l2_frmsize_stepwise *frmsize)
{
        if (!frmsize)
                return;

        /*
         * Clamp width/height to meet min/max constraints and round it up to
         * macroblock alignment.
         */
        *width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width,
                               frmsize->step_width);
        *height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height,
                                frmsize->step_height);
}
EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);
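
/*
 * Illustrative example (not part of the original source): a codec driver
 * working on 16x16 macroblocks could declare
 *
 *	static const struct v4l2_frmsize_stepwise frmsize = {
 *		.min_width = 64, .max_width = 1920, .step_width = 16,
 *		.min_height = 64, .max_height = 1088, .step_height = 16,
 *	};
 *
 * and call v4l2_apply_frmsize_constraints(&pix->width, &pix->height, &frmsize)
 * from its try_fmt handler; a requested 1912x1080 would then become 1920x1088.
 */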

int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
                        u32 pixelformat, u32 width, u32 height)
{
        const struct v4l2_format_info *info;
        struct v4l2_plane_pix_format *plane;
        int i;

        info = v4l2_format_info(pixelformat);
        if (!info)
                return -EINVAL;

        pixfmt->width = width;
        pixfmt->height = height;
        pixfmt->pixelformat = pixelformat;
        pixfmt->num_planes = info->mem_planes;

        if (info->mem_planes == 1) {
                plane = &pixfmt->plane_fmt[0];
                plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
                plane->sizeimage = 0;

                for (i = 0; i < info->comp_planes; i++) {
                        unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
                        unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
                        unsigned int aligned_width;
                        unsigned int aligned_height;

                        aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
                        aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

                        plane->sizeimage += info->bpp[i] *
                                DIV_ROUND_UP(aligned_width, hdiv) *
                                DIV_ROUND_UP(aligned_height, vdiv);
                }
        } else {
                for (i = 0; i < info->comp_planes; i++) {
                        unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
                        unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
                        unsigned int aligned_width;
                        unsigned int aligned_height;

                        aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
                        aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

                        plane = &pixfmt->plane_fmt[i];
                        plane->bytesperline =
                                info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv);
                        plane->sizeimage =
                                plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);
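
/*
 * Illustrative example (not part of the original source): a multi-planar
 * try_fmt handler can derive num_planes, bytesperline and sizeimage from the
 * format table alone, e.g.
 *
 *	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
 *
 *	return v4l2_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat,
 *				   pix_mp->width, pix_mp->height);
 *
 * Drivers with stricter stride or padding requirements may need to adjust
 * the computed bytesperline/sizeimage afterwards.
 */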

int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
                     u32 width, u32 height)
{
        const struct v4l2_format_info *info;
        int i;

        info = v4l2_format_info(pixelformat);
        if (!info)
                return -EINVAL;

        /* Single planar API cannot be used for multi plane formats. */
        if (info->mem_planes > 1)
                return -EINVAL;

        pixfmt->width = width;
        pixfmt->height = height;
        pixfmt->pixelformat = pixelformat;
        pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
        pixfmt->sizeimage = 0;

        for (i = 0; i < info->comp_planes; i++) {
                unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
                unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
                unsigned int aligned_width;
                unsigned int aligned_height;

                aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
                aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

                pixfmt->sizeimage += info->bpp[i] *
                        DIV_ROUND_UP(aligned_width, hdiv) *
                        DIV_ROUND_UP(aligned_height, vdiv);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);

s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
                       unsigned int div)
{
        struct v4l2_ctrl *ctrl;
        s64 freq;

        ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ);
        if (ctrl) {
                struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
                int ret;

                qm.index = v4l2_ctrl_g_ctrl(ctrl);

                ret = v4l2_querymenu(handler, &qm);
                if (ret)
                        return -ENOENT;

                freq = qm.value;
        } else {
                if (!mul || !div)
                        return -ENOENT;

                ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE);
                if (!ctrl)
                        return -ENOENT;

                freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);

                pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
                        __func__);
                pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
                        __func__);
        }

        return freq > 0 ? freq : -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
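
/*
 * Illustrative example (not part of the original source): a CSI-2 receiver
 * can estimate the link frequency from the transmitter's pixel rate by
 * passing the bits per sample as mul and, assuming data is sampled on both
 * clock edges, 2 * number_of_lanes as div, e.g. for RAW10 on two lanes:
 *
 *	s64 freq = v4l2_get_link_freq(sensor_sd->ctrl_handler, 10, 2 * 2);
 *
 *	if (freq < 0)
 *		return freq;
 *
 * If the transmitter implements V4L2_CID_LINK_FREQ, mul and div are ignored
 * and the selected menu value is returned directly.
 */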

/*
 * Simplify a fraction using a simple continued fraction decomposition. The
 * idea here is to convert fractions such as 333333/10000000 to 1/30 using
 * 32 bit arithmetic only. The algorithm is not perfect and relies upon two
 * arbitrary parameters to remove non-significative terms from the simple
 * continued fraction decomposition. Using 8 and 333 for n_terms and threshold
 * respectively seems to give nice results.
 */
void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
                unsigned int n_terms, unsigned int threshold)
{
        u32 *an;
        u32 x, y, r;
        unsigned int i, n;

        an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
        if (an == NULL)
                return;

        /*
         * Convert the fraction to a simple continued fraction. See
         * https://en.wikipedia.org/wiki/Continued_fraction
         * Stop if the current term is bigger than or equal to the given
         * threshold.
         */
        x = *numerator;
        y = *denominator;

        for (n = 0; n < n_terms && y != 0; ++n) {
                an[n] = x / y;
                if (an[n] >= threshold) {
                        if (n < 2)
                                n++;
                        break;
                }

                r = x - an[n] * y;
                x = y;
                y = r;
        }

        /* Expand the simple continued fraction back to an integer fraction. */
        x = 0;
        y = 1;

        for (i = n; i > 0; --i) {
                r = y;
                y = an[i-1] * y + x;
                x = r;
        }

        *numerator = y;
        *denominator = x;
        kfree(an);
}
EXPORT_SYMBOL_GPL(v4l2_simplify_fraction);

/*
 * Convert a fraction to a frame interval in 100ns multiples. The idea here is
 * to compute numerator / denominator * 10000000 using 32 bit fixed point
 * arithmetic only.
 */
u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator)
{
        u32 multiplier;

        /* Saturate the result if the operation would overflow. */
        if (denominator == 0 ||
            numerator / denominator >= ((u32)-1) / 10000000)
                return (u32)-1;

        /*
         * Divide both the denominator and the multiplier by two until
         * numerator * multiplier doesn't overflow. If anyone knows a better
         * algorithm please let me know.
         */
        multiplier = 10000000;
        while (numerator > ((u32)-1) / multiplier) {
                multiplier /= 2;
                denominator /= 2;
        }

        return denominator ? numerator * multiplier / denominator : 0;
}
EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval);
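
/*
 * Illustrative example (not part of the original source): for a device
 * reporting a frame interval of 333333/10000000 s,
 *
 *	u32 num = 333333, den = 10000000;
 *
 *	v4l2_simplify_fraction(&num, &den, 8, 333);
 *	// num = 1, den = 30
 *	interval = v4l2_fraction_to_interval(num, den);
 *	// interval = 333333 (100ns units, i.e. ~33.3 ms)
 *
 * so the round trip recovers the canonical 1/30 s frame interval.
 */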