// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Video for Linux Two
 *
 * A generic video device interface for the LINUX operating system
 * using a set of device structures/vectors for low level operations.
 *
 * This file replaces the videodev.c file that comes with the
 * regular kernel distribution.
 *
 * Author: Bill Dirks <bill@thedirks.org>
 *         based on code by Alan Cox, <alan@cymru.net>
 */

/*
 * Video capture interface for Linux
 *
 * A generic video device interface for the LINUX operating system
 * using a set of device structures/vectors for low level operations.
 *
 * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 */

/*
 * Video4linux 1/2 integration by Justin Schoeman
 * <justin@suntiger.ee.up.ac.za>
 * 2.4 PROCFS support ported from 2.4 kernels by
 * Iñaki García Etxebarria <garetxe@euskalnet.net>
 * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
 * 2.4 devfs support ported from 2.4 kernels by
 * Dan Merillat <dan@merillat.org>
 * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

#include <linux/videodev2.h>

/*
 *
 *      V 4 L 2   D R I V E R   H E L P E R   A P I
 *
 */

/*
 * Video Standard Operations (contributed by Michael Schimek)
 */

/* Helper functions for control handling */

/* Fill in a struct v4l2_queryctrl */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
{
        const char *name;
        s64 min = _min;
        s64 max = _max;
        u64 step = _step;
        s64 def = _def;

        v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
                       &min, &max, &step, &def, &qctrl->flags);

        if (name == NULL)
                return -EINVAL;

        qctrl->minimum = min;
        qctrl->maximum = max;
        qctrl->step = step;
        qctrl->default_value = def;
        qctrl->reserved[0] = qctrl->reserved[1] = 0;
        strscpy(qctrl->name, name, sizeof(qctrl->name));
        return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
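/*
 * Usage sketch: a driver that implements VIDIOC_QUERYCTRL itself (rather than
 * through a control handler) can call v4l2_ctrl_query_fill() once per
 * supported control and only supply range/step/default; the name, type and
 * flags come from v4l2_ctrl_fill(). The mydrv_queryctrl() handler and its
 * ranges below are hypothetical:
 *
 *      static int mydrv_queryctrl(struct file *file, void *fh,
 *                                 struct v4l2_queryctrl *qc)
 *      {
 *              switch (qc->id) {
 *              case V4L2_CID_BRIGHTNESS:
 *                      return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
 *              case V4L2_CID_CONTRAST:
 *                      return v4l2_ctrl_query_fill(qc, 0, 127, 1, 64);
 *              default:
 *                      return -EINVAL;
 *              }
 *      }
 */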
/* Clamp x to be between min and max, aligned to a multiple of 2^align.  min
 * and max don't have to be aligned, but there must be at least one valid
 * value.  E.g., min=17,max=31,align=4 is not allowed as there are no multiples
 * of 16 between 17 and 31.  */
static unsigned int clamp_align(unsigned int x, unsigned int min,
                                unsigned int max, unsigned int align)
{
        /* Bits that must be zero to be aligned */
        unsigned int mask = ~((1 << align) - 1);

        /* Clamp to aligned min and max */
        x = clamp(x, (min + ~mask) & mask, max & mask);

        /* Round to nearest aligned value */
        if (align)
                x = (x + (1 << (align - 1))) & mask;

        return x;
}

static unsigned int clamp_roundup(unsigned int x, unsigned int min,
                                  unsigned int max, unsigned int alignment)
{
        x = clamp(x, min, max);
        if (alignment)
                x = round_up(x, alignment);

        return x;
}

void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
                           unsigned int walign,
                           u32 *h, unsigned int hmin, unsigned int hmax,
                           unsigned int halign, unsigned int salign)
{
        *w = clamp_align(*w, wmin, wmax, walign);
        *h = clamp_align(*h, hmin, hmax, halign);

        /* Usually we don't need to align the size and are done now. */
        if (!salign)
                return;

        /* How much alignment do we have? */
        walign = __ffs(*w);
        halign = __ffs(*h);
        /* Enough to satisfy the image alignment? */
        if (walign + halign < salign) {
                /* Max walign where there is still a valid width */
                unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
                /* Max halign where there is still a valid height */
                unsigned int hmaxa = __fls(hmax ^ (hmin - 1));

                /* up the smaller alignment until we have enough */
                do {
                        if (halign >= hmaxa ||
                            (walign <= halign && walign < wmaxa)) {
                                *w = clamp_align(*w, wmin, wmax, walign + 1);
                                walign = __ffs(*w);
                        } else {
                                *h = clamp_align(*h, hmin, hmax, halign + 1);
                                halign = __ffs(*h);
                        }
                } while (halign + walign < salign);
        }
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
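/*
 * Usage sketch: a driver's try_fmt handler can use v4l_bound_align_image() to
 * clamp a user-requested resolution to hardware limits. The limits below are
 * hypothetical: width 48..4096 on a multiple of 4 (walign = 2), height
 * 32..2160 on a multiple of 2 (halign = 1), and no additional constraint on
 * the combined alignment (salign = 0). Note that the *align arguments are
 * exponents, i.e. the step is 2^align.
 *
 *      v4l_bound_align_image(&pix->width, 48, 4096, 2,
 *                            &pix->height, 32, 2160, 1, 0);
 */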
const void *
__v4l2_find_nearest_size(const void *array, size_t array_size,
                         size_t entry_size, size_t width_offset,
                         size_t height_offset, s32 width, s32 height)
{
        u32 error, min_error = U32_MAX;
        const void *best = NULL;
        unsigned int i;

        if (!array)
                return NULL;

        for (i = 0; i < array_size; i++, array += entry_size) {
                const u32 *entry_width = array + width_offset;
                const u32 *entry_height = array + height_offset;

                error = abs(*entry_width - width) + abs(*entry_height - height);
                if (error > min_error)
                        continue;

                min_error = error;
                best = array;
                if (!error)
                        break;
        }

        return best;
}
EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);
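/*
 * Usage sketch: drivers normally reach this helper through the
 * v4l2_find_nearest_size() macro in <media/v4l2-common.h>, which derives the
 * entry size and field offsets from a driver-owned table. The mydrv_sizes[]
 * table and the surrounding variables below are hypothetical:
 *
 *      static const struct v4l2_frmsize_discrete mydrv_sizes[] = {
 *              {  640,  480 },
 *              { 1280,  720 },
 *              { 1920, 1080 },
 *      };
 *
 *      const struct v4l2_frmsize_discrete *sz =
 *              v4l2_find_nearest_size(mydrv_sizes, ARRAY_SIZE(mydrv_sizes),
 *                                     width, height,
 *                                     pix->width, pix->height);
 */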
int v4l2_g_parm_cap(struct video_device *vdev,
                    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
        struct v4l2_subdev_frame_interval ival = { 0 };
        int ret;

        if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
            a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
                return -EINVAL;

        if (vdev->device_caps & V4L2_CAP_READWRITE)
                a->parm.capture.readbuffers = 2;
        if (v4l2_subdev_has_op(sd, video, g_frame_interval))
                a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
        ret = v4l2_subdev_call(sd, video, g_frame_interval, &ival);
        if (!ret)
                a->parm.capture.timeperframe = ival.interval;
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_g_parm_cap);

int v4l2_s_parm_cap(struct video_device *vdev,
                    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
        struct v4l2_subdev_frame_interval ival = {
                .interval = a->parm.capture.timeperframe
        };
        int ret;

        if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
            a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
                return -EINVAL;

        memset(&a->parm, 0, sizeof(a->parm));
        if (vdev->device_caps & V4L2_CAP_READWRITE)
                a->parm.capture.readbuffers = 2;
        else
                a->parm.capture.readbuffers = 0;

        if (v4l2_subdev_has_op(sd, video, g_frame_interval))
                a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
        ret = v4l2_subdev_call(sd, video, s_frame_interval, &ival);
        if (!ret)
                a->parm.capture.timeperframe = ival.interval;
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);
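/*
 * Usage sketch: a bridge driver that delegates frame-interval handling to a
 * sensor subdevice can implement VIDIOC_G_PARM/VIDIOC_S_PARM on top of these
 * two helpers. The mydrv structure, its vdev/sensor fields and the handlers
 * below are hypothetical:
 *
 *      static int mydrv_g_parm(struct file *file, void *fh,
 *                              struct v4l2_streamparm *a)
 *      {
 *              struct mydrv *priv = video_drvdata(file);
 *
 *              return v4l2_g_parm_cap(&priv->vdev, priv->sensor, a);
 *      }
 *
 *      static int mydrv_s_parm(struct file *file, void *fh,
 *                              struct v4l2_streamparm *a)
 *      {
 *              struct mydrv *priv = video_drvdata(file);
 *
 *              return v4l2_s_parm_cap(&priv->vdev, priv->sensor, a);
 *      }
 */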
const struct v4l2_format_info *v4l2_format_info(u32 format)
{
        static const struct v4l2_format_info formats[] = {
                /* RGB formats */
                { .format = V4L2_PIX_FMT_BGR24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_HSV24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGR32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_XBGR32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGRX32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_XRGB32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGBX32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_HSV32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_ARGB32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGBA32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_ABGR32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGRA32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB565,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB555,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGR666,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },

                /* YUV packed formats */
                { .format = V4L2_PIX_FMT_YUYV,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YVYU,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_UYVY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_VYUY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },

                /* YUV planar formats */
                { .format = V4L2_PIX_FMT_NV12,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV21,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV16,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV61,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV24,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV42,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },

                { .format = V4L2_PIX_FMT_YUV410,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
                { .format = V4L2_PIX_FMT_YVU410,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
                { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YUV420,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YVU420,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_GREY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },

                /* YUV planar formats, non contiguous variant */
                { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },

                { .format = V4L2_PIX_FMT_NV12M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV21M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV16M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV61M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },

                /* Bayer RGB formats */
                { .format = V4L2_PIX_FMT_SBGGR8,       .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG8,       .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG8,       .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB8,       .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR10,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG10,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG10,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB10,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR12,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG12,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG12,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB12,      .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
        };
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(formats); ++i)
                if (formats[i].format == format)
                        return &formats[i];
        return NULL;
}
EXPORT_SYMBOL(v4l2_format_info);
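/*
 * Example: v4l2_format_info(V4L2_PIX_FMT_NV12) returns the entry with
 * .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 } and
 * .hdiv = .vdiv = 2, i.e. a single contiguous buffer holding a
 * 1-byte-per-pixel luma plane followed by an interleaved CbCr plane
 * (2 bytes per sample position) subsampled by two in both directions.
 * Formats missing from the table above yield NULL, so callers must check:
 *
 *      const struct v4l2_format_info *info = v4l2_format_info(pixelformat);
 *
 *      if (!info)
 *              return -EINVAL;
 */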
static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
{
        if (!info->block_w[plane])
                return 1;
        return info->block_w[plane];
}

static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
{
        if (!info->block_h[plane])
                return 1;
        return info->block_h[plane];
}

void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
                                    const struct v4l2_frmsize_stepwise *frmsize)
{
        if (!frmsize)
                return;

        /*
         * Clamp width/height to meet min/max constraints and round it up to
         * macroblock alignment.
         */
        *width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width,
                               frmsize->step_width);
        *height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height,
                                frmsize->step_height);
}
EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);

int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
                        u32 pixelformat, u32 width, u32 height)
{
        const struct v4l2_format_info *info;
        struct v4l2_plane_pix_format *plane;
        int i;

        info = v4l2_format_info(pixelformat);
        if (!info)
                return -EINVAL;

        pixfmt->width = width;
        pixfmt->height = height;
        pixfmt->pixelformat = pixelformat;
        pixfmt->num_planes = info->mem_planes;

        if (info->mem_planes == 1) {
                plane = &pixfmt->plane_fmt[0];
                plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
                plane->sizeimage = 0;

                for (i = 0; i < info->comp_planes; i++) {
                        unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
                        unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
                        unsigned int aligned_width;
                        unsigned int aligned_height;

                        aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
                        aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

                        plane->sizeimage += info->bpp[i] *
                                DIV_ROUND_UP(aligned_width, hdiv) *
                                DIV_ROUND_UP(aligned_height, vdiv);
                }
        } else {
                for (i = 0; i < info->comp_planes; i++) {
                        unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
                        unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
                        unsigned int aligned_width;
                        unsigned int aligned_height;

                        aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
                        aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

                        plane = &pixfmt->plane_fmt[i];
                        plane->bytesperline =
                                info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv);
                        plane->sizeimage =
                                plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);
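/*
 * Worked example: for V4L2_PIX_FMT_NV12M at 1920x1080 (mem_planes = 2,
 * bpp = { 1, 2 }, hdiv = vdiv = 2, no block alignment) the non-contiguous
 * branch above produces two memory planes:
 *
 *      plane_fmt[0].bytesperline = 1 * 1920 = 1920
 *      plane_fmt[0].sizeimage    = 1920 * 1080 = 2073600
 *      plane_fmt[1].bytesperline = 2 * DIV_ROUND_UP(1920, 2) = 1920
 *      plane_fmt[1].sizeimage    = 1920 * DIV_ROUND_UP(1080, 2) = 1036800
 */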
int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
                     u32 width, u32 height)
{
        const struct v4l2_format_info *info;
        int i;

        info = v4l2_format_info(pixelformat);
        if (!info)
                return -EINVAL;

        /* Single planar API cannot be used for multi plane formats. */
        if (info->mem_planes > 1)
                return -EINVAL;

        pixfmt->width = width;
        pixfmt->height = height;
        pixfmt->pixelformat = pixelformat;
        pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
        pixfmt->sizeimage = 0;

        for (i = 0; i < info->comp_planes; i++) {
                unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
                unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
                unsigned int aligned_width;
                unsigned int aligned_height;

                aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
                aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

                pixfmt->sizeimage += info->bpp[i] *
                        DIV_ROUND_UP(aligned_width, hdiv) *
                        DIV_ROUND_UP(aligned_height, vdiv);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
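/*
 * Worked example: for the contiguous V4L2_PIX_FMT_NV12 at 1920x1080
 * (mem_planes = 1, comp_planes = 2, bpp = { 1, 2 }, hdiv = vdiv = 2),
 * bytesperline describes the first (luma) component plane only, while
 * sizeimage accumulates both component planes:
 *
 *      pixfmt->bytesperline = 1920 * 1 = 1920
 *      pixfmt->sizeimage    = 1 * 1920 * 1080
 *                           + 2 * DIV_ROUND_UP(1920, 2) * DIV_ROUND_UP(1080, 2)
 *                           = 2073600 + 1036800 = 3110400
 */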