1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * vivid-vid-cap.c - video capture support functions. 4 * 5 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 6 */ 7 8 #include <linux/errno.h> 9 #include <linux/kernel.h> 10 #include <linux/sched.h> 11 #include <linux/vmalloc.h> 12 #include <linux/videodev2.h> 13 #include <linux/v4l2-dv-timings.h> 14 #include <media/v4l2-common.h> 15 #include <media/v4l2-event.h> 16 #include <media/v4l2-dv-timings.h> 17 #include <media/v4l2-rect.h> 18 19 #include "vivid-core.h" 20 #include "vivid-vid-common.h" 21 #include "vivid-kthread-cap.h" 22 #include "vivid-vid-cap.h" 23 24 /* Sizes must be in increasing order */ 25 static const struct v4l2_frmsize_discrete webcam_sizes[] = { 26 { 320, 180 }, 27 { 640, 360 }, 28 { 640, 480 }, 29 { 1280, 720 }, 30 { 1920, 1080 }, 31 { 3840, 2160 }, 32 }; 33 34 /* 35 * Intervals must be in increasing order and there must be twice as many 36 * elements in this array as there are in webcam_sizes. 37 */ 38 static const struct v4l2_fract webcam_intervals[] = { 39 { 1, 1 }, 40 { 1, 2 }, 41 { 1, 4 }, 42 { 1, 5 }, 43 { 1, 10 }, 44 { 2, 25 }, 45 { 1, 15 }, /* 7 - maximum for 2160p */ 46 { 1, 25 }, 47 { 1, 30 }, /* 9 - maximum for 1080p */ 48 { 1, 40 }, 49 { 1, 50 }, 50 { 1, 60 }, /* 12 - maximum for 720p */ 51 { 1, 120 }, 52 }; 53 54 /* Limit maximum FPS rates for high resolutions */ 55 #define IVAL_COUNT_720P 12 /* 720p and up is limited to 60 fps */ 56 #define IVAL_COUNT_1080P 9 /* 1080p and up is limited to 30 fps */ 57 #define IVAL_COUNT_2160P 7 /* 2160p and up is limited to 15 fps */ 58 59 static inline unsigned int webcam_ival_count(const struct vivid_dev *dev, 60 unsigned int frmsize_idx) 61 { 62 if (webcam_sizes[frmsize_idx].height >= 2160) 63 return IVAL_COUNT_2160P; 64 65 if (webcam_sizes[frmsize_idx].height >= 1080) 66 return IVAL_COUNT_1080P; 67 68 if (webcam_sizes[frmsize_idx].height >= 720) 69 return IVAL_COUNT_720P; 70 71 /* For low resolutions, allow all FPS rates */ 72 return ARRAY_SIZE(webcam_intervals); 73 } 74 75 static int vid_cap_queue_setup(struct vb2_queue *vq, 76 unsigned *nbuffers, unsigned *nplanes, 77 unsigned sizes[], struct device *alloc_devs[]) 78 { 79 struct vivid_dev *dev = vb2_get_drv_priv(vq); 80 unsigned buffers = tpg_g_buffers(&dev->tpg); 81 unsigned h = dev->fmt_cap_rect.height; 82 unsigned p; 83 84 if (dev->field_cap == V4L2_FIELD_ALTERNATE) { 85 /* 86 * You cannot use read() with FIELD_ALTERNATE since the field 87 * information (TOP/BOTTOM) cannot be passed back to the user. 88 */ 89 if (vb2_fileio_is_active(vq)) 90 return -EINVAL; 91 } 92 93 if (dev->queue_setup_error) { 94 /* 95 * Error injection: test what happens if queue_setup() returns 96 * an error. 97 */ 98 dev->queue_setup_error = false; 99 return -EINVAL; 100 } 101 if (*nplanes) { 102 /* 103 * Check if the number of requested planes match 104 * the number of buffers in the current format. You can't mix that. 105 */ 106 if (*nplanes != buffers) 107 return -EINVAL; 108 for (p = 0; p < buffers; p++) { 109 if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h + 110 dev->fmt_cap->data_offset[p]) 111 return -EINVAL; 112 } 113 } else { 114 for (p = 0; p < buffers; p++) 115 sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) / 116 dev->fmt_cap->vdownsampling[p] + 117 dev->fmt_cap->data_offset[p]; 118 } 119 120 if (vq->num_buffers + *nbuffers < 2) 121 *nbuffers = 2 - vq->num_buffers; 122 123 *nplanes = buffers; 124 125 dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers); 126 for (p = 0; p < buffers; p++) 127 dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]); 128 129 return 0; 130 } 131 132 static int vid_cap_buf_prepare(struct vb2_buffer *vb) 133 { 134 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 135 unsigned long size; 136 unsigned buffers = tpg_g_buffers(&dev->tpg); 137 unsigned p; 138 139 dprintk(dev, 1, "%s\n", __func__); 140 141 if (WARN_ON(NULL == dev->fmt_cap)) 142 return -EINVAL; 143 144 if (dev->buf_prepare_error) { 145 /* 146 * Error injection: test what happens if buf_prepare() returns 147 * an error. 148 */ 149 dev->buf_prepare_error = false; 150 return -EINVAL; 151 } 152 for (p = 0; p < buffers; p++) { 153 size = (tpg_g_line_width(&dev->tpg, p) * 154 dev->fmt_cap_rect.height) / 155 dev->fmt_cap->vdownsampling[p] + 156 dev->fmt_cap->data_offset[p]; 157 158 if (vb2_plane_size(vb, p) < size) { 159 dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n", 160 __func__, p, vb2_plane_size(vb, p), size); 161 return -EINVAL; 162 } 163 164 vb2_set_plane_payload(vb, p, size); 165 vb->planes[p].data_offset = dev->fmt_cap->data_offset[p]; 166 } 167 168 return 0; 169 } 170 171 static void vid_cap_buf_finish(struct vb2_buffer *vb) 172 { 173 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); 174 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 175 struct v4l2_timecode *tc = &vbuf->timecode; 176 unsigned fps = 25; 177 unsigned seq = vbuf->sequence; 178 179 if (!vivid_is_sdtv_cap(dev)) 180 return; 181 182 /* 183 * Set the timecode. Rarely used, so it is interesting to 184 * test this. 185 */ 186 vbuf->flags |= V4L2_BUF_FLAG_TIMECODE; 187 if (dev->std_cap[dev->input] & V4L2_STD_525_60) 188 fps = 30; 189 tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS; 190 tc->flags = 0; 191 tc->frames = seq % fps; 192 tc->seconds = (seq / fps) % 60; 193 tc->minutes = (seq / (60 * fps)) % 60; 194 tc->hours = (seq / (60 * 60 * fps)) % 24; 195 } 196 197 static void vid_cap_buf_queue(struct vb2_buffer *vb) 198 { 199 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); 200 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 201 struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb); 202 203 dprintk(dev, 1, "%s\n", __func__); 204 205 spin_lock(&dev->slock); 206 list_add_tail(&buf->list, &dev->vid_cap_active); 207 spin_unlock(&dev->slock); 208 } 209 210 static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) 211 { 212 struct vivid_dev *dev = vb2_get_drv_priv(vq); 213 unsigned i; 214 int err; 215 216 if (vb2_is_streaming(&dev->vb_vid_out_q)) 217 dev->can_loop_video = vivid_vid_can_loop(dev); 218 219 dev->vid_cap_seq_count = 0; 220 dprintk(dev, 1, "%s\n", __func__); 221 for (i = 0; i < VIDEO_MAX_FRAME; i++) 222 dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100; 223 if (dev->start_streaming_error) { 224 dev->start_streaming_error = false; 225 err = -EINVAL; 226 } else { 227 err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming); 228 } 229 if (err) { 230 struct vivid_buffer *buf, *tmp; 231 232 list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) { 233 list_del(&buf->list); 234 vb2_buffer_done(&buf->vb.vb2_buf, 235 VB2_BUF_STATE_QUEUED); 236 } 237 } 238 return err; 239 } 240 241 /* abort streaming and wait for last buffer */ 242 static void vid_cap_stop_streaming(struct vb2_queue *vq) 243 { 244 struct vivid_dev *dev = vb2_get_drv_priv(vq); 245 246 dprintk(dev, 1, "%s\n", __func__); 247 vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming); 248 dev->can_loop_video = false; 249 } 250 251 static void vid_cap_buf_request_complete(struct vb2_buffer *vb) 252 { 253 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 254 255 v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap); 256 } 257 258 const struct vb2_ops vivid_vid_cap_qops = { 259 .queue_setup = vid_cap_queue_setup, 260 .buf_prepare = vid_cap_buf_prepare, 261 .buf_finish = vid_cap_buf_finish, 262 .buf_queue = vid_cap_buf_queue, 263 .start_streaming = vid_cap_start_streaming, 264 .stop_streaming = vid_cap_stop_streaming, 265 .buf_request_complete = vid_cap_buf_request_complete, 266 .wait_prepare = vb2_ops_wait_prepare, 267 .wait_finish = vb2_ops_wait_finish, 268 }; 269 270 /* 271 * Determine the 'picture' quality based on the current TV frequency: either 272 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off 273 * signal or NOISE for no signal. 274 */ 275 void vivid_update_quality(struct vivid_dev *dev) 276 { 277 unsigned freq_modulus; 278 279 if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) { 280 /* 281 * The 'noise' will only be replaced by the actual video 282 * if the output video matches the input video settings. 283 */ 284 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0); 285 return; 286 } 287 if (vivid_is_hdmi_cap(dev) && 288 VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) { 289 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0); 290 return; 291 } 292 if (vivid_is_sdtv_cap(dev) && 293 VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) { 294 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0); 295 return; 296 } 297 if (!vivid_is_tv_cap(dev)) { 298 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0); 299 return; 300 } 301 302 /* 303 * There is a fake channel every 6 MHz at 49.25, 55.25, etc. 304 * From +/- 0.25 MHz around the channel there is color, and from 305 * +/- 1 MHz there is grayscale (chroma is lost). 306 * Everywhere else it is just noise. 307 */ 308 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16); 309 if (freq_modulus > 2 * 16) { 310 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 311 next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f); 312 return; 313 } 314 if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/) 315 tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0); 316 else 317 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0); 318 } 319 320 /* 321 * Get the current picture quality and the associated afc value. 322 */ 323 static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc) 324 { 325 unsigned freq_modulus; 326 327 if (afc) 328 *afc = 0; 329 if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR || 330 tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) 331 return tpg_g_quality(&dev->tpg); 332 333 /* 334 * There is a fake channel every 6 MHz at 49.25, 55.25, etc. 335 * From +/- 0.25 MHz around the channel there is color, and from 336 * +/- 1 MHz there is grayscale (chroma is lost). 337 * Everywhere else it is just gray. 338 */ 339 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16); 340 if (afc) 341 *afc = freq_modulus - 1 * 16; 342 return TPG_QUAL_GRAY; 343 } 344 345 enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev) 346 { 347 if (vivid_is_sdtv_cap(dev)) 348 return dev->std_aspect_ratio[dev->input]; 349 350 if (vivid_is_hdmi_cap(dev)) 351 return dev->dv_timings_aspect_ratio[dev->input]; 352 353 return TPG_VIDEO_ASPECT_IMAGE; 354 } 355 356 static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev) 357 { 358 if (vivid_is_sdtv_cap(dev)) 359 return (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 360 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL; 361 362 if (vivid_is_hdmi_cap(dev) && 363 dev->src_rect.width == 720 && dev->src_rect.height <= 576) 364 return dev->src_rect.height == 480 ? 365 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL; 366 367 return TPG_PIXEL_ASPECT_SQUARE; 368 } 369 370 /* 371 * Called whenever the format has to be reset which can occur when 372 * changing inputs, standard, timings, etc. 373 */ 374 void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) 375 { 376 struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt; 377 u32 dims[V4L2_CTRL_MAX_DIMS] = {}; 378 unsigned size; 379 u64 pixelclock; 380 381 switch (dev->input_type[dev->input]) { 382 case WEBCAM: 383 default: 384 dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width; 385 dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height; 386 dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx]; 387 dev->field_cap = V4L2_FIELD_NONE; 388 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO); 389 break; 390 case TV: 391 case SVID: 392 dev->field_cap = dev->tv_field_cap; 393 dev->src_rect.width = 720; 394 if (dev->std_cap[dev->input] & V4L2_STD_525_60) { 395 dev->src_rect.height = 480; 396 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 }; 397 dev->service_set_cap = V4L2_SLICED_CAPTION_525; 398 } else { 399 dev->src_rect.height = 576; 400 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 }; 401 dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B; 402 } 403 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO); 404 break; 405 case HDMI: 406 dev->src_rect.width = bt->width; 407 dev->src_rect.height = bt->height; 408 size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt); 409 if (dev->reduced_fps && can_reduce_fps(bt)) { 410 pixelclock = div_u64(bt->pixelclock * 1000, 1001); 411 bt->flags |= V4L2_DV_FL_REDUCED_FPS; 412 } else { 413 pixelclock = bt->pixelclock; 414 bt->flags &= ~V4L2_DV_FL_REDUCED_FPS; 415 } 416 dev->timeperframe_vid_cap = (struct v4l2_fract) { 417 size / 100, (u32)pixelclock / 100 418 }; 419 if (bt->interlaced) 420 dev->field_cap = V4L2_FIELD_ALTERNATE; 421 else 422 dev->field_cap = V4L2_FIELD_NONE; 423 424 /* 425 * We can be called from within s_ctrl, in that case we can't 426 * set/get controls. Luckily we don't need to in that case. 427 */ 428 if (keep_controls || !dev->colorspace) 429 break; 430 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) { 431 if (bt->width == 720 && bt->height <= 576) 432 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M); 433 else 434 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709); 435 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1); 436 } else { 437 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB); 438 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0); 439 } 440 tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap)); 441 break; 442 } 443 vivid_update_quality(dev); 444 tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap); 445 dev->crop_cap = dev->src_rect; 446 dev->crop_bounds_cap = dev->src_rect; 447 dev->compose_cap = dev->crop_cap; 448 if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap)) 449 dev->compose_cap.height /= 2; 450 dev->fmt_cap_rect = dev->compose_cap; 451 tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev)); 452 tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev)); 453 tpg_update_mv_step(&dev->tpg); 454 455 /* 456 * We can be called from within s_ctrl, in that case we can't 457 * modify controls. Luckily we don't need to in that case. 458 */ 459 if (keep_controls) 460 return; 461 462 dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV); 463 dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV); 464 v4l2_ctrl_modify_dimensions(dev->pixel_array, dims); 465 } 466 467 /* Map the field to something that is valid for the current input */ 468 static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field) 469 { 470 if (vivid_is_sdtv_cap(dev)) { 471 switch (field) { 472 case V4L2_FIELD_INTERLACED_TB: 473 case V4L2_FIELD_INTERLACED_BT: 474 case V4L2_FIELD_SEQ_TB: 475 case V4L2_FIELD_SEQ_BT: 476 case V4L2_FIELD_TOP: 477 case V4L2_FIELD_BOTTOM: 478 case V4L2_FIELD_ALTERNATE: 479 return field; 480 case V4L2_FIELD_INTERLACED: 481 default: 482 return V4L2_FIELD_INTERLACED; 483 } 484 } 485 if (vivid_is_hdmi_cap(dev)) 486 return dev->dv_timings_cap[dev->input].bt.interlaced ? 487 V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE; 488 return V4L2_FIELD_NONE; 489 } 490 491 static unsigned vivid_colorspace_cap(struct vivid_dev *dev) 492 { 493 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 494 return tpg_g_colorspace(&dev->tpg); 495 return dev->colorspace_out; 496 } 497 498 static unsigned vivid_xfer_func_cap(struct vivid_dev *dev) 499 { 500 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 501 return tpg_g_xfer_func(&dev->tpg); 502 return dev->xfer_func_out; 503 } 504 505 static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev) 506 { 507 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 508 return tpg_g_ycbcr_enc(&dev->tpg); 509 return dev->ycbcr_enc_out; 510 } 511 512 static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev) 513 { 514 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 515 return tpg_g_hsv_enc(&dev->tpg); 516 return dev->hsv_enc_out; 517 } 518 519 static unsigned vivid_quantization_cap(struct vivid_dev *dev) 520 { 521 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 522 return tpg_g_quantization(&dev->tpg); 523 return dev->quantization_out; 524 } 525 526 int vivid_g_fmt_vid_cap(struct file *file, void *priv, 527 struct v4l2_format *f) 528 { 529 struct vivid_dev *dev = video_drvdata(file); 530 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; 531 unsigned p; 532 533 mp->width = dev->fmt_cap_rect.width; 534 mp->height = dev->fmt_cap_rect.height; 535 mp->field = dev->field_cap; 536 mp->pixelformat = dev->fmt_cap->fourcc; 537 mp->colorspace = vivid_colorspace_cap(dev); 538 mp->xfer_func = vivid_xfer_func_cap(dev); 539 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV) 540 mp->hsv_enc = vivid_hsv_enc_cap(dev); 541 else 542 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev); 543 mp->quantization = vivid_quantization_cap(dev); 544 mp->num_planes = dev->fmt_cap->buffers; 545 for (p = 0; p < mp->num_planes; p++) { 546 mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p); 547 mp->plane_fmt[p].sizeimage = 548 (tpg_g_line_width(&dev->tpg, p) * mp->height) / 549 dev->fmt_cap->vdownsampling[p] + 550 dev->fmt_cap->data_offset[p]; 551 } 552 return 0; 553 } 554 555 int vivid_try_fmt_vid_cap(struct file *file, void *priv, 556 struct v4l2_format *f) 557 { 558 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; 559 struct v4l2_plane_pix_format *pfmt = mp->plane_fmt; 560 struct vivid_dev *dev = video_drvdata(file); 561 const struct vivid_fmt *fmt; 562 unsigned bytesperline, max_bpl; 563 unsigned factor = 1; 564 unsigned w, h; 565 unsigned p; 566 bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC); 567 568 fmt = vivid_get_format(dev, mp->pixelformat); 569 if (!fmt) { 570 dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n", 571 mp->pixelformat); 572 mp->pixelformat = V4L2_PIX_FMT_YUYV; 573 fmt = vivid_get_format(dev, mp->pixelformat); 574 } 575 576 mp->field = vivid_field_cap(dev, mp->field); 577 if (vivid_is_webcam(dev)) { 578 const struct v4l2_frmsize_discrete *sz = 579 v4l2_find_nearest_size(webcam_sizes, 580 ARRAY_SIZE(webcam_sizes), width, 581 height, mp->width, mp->height); 582 583 w = sz->width; 584 h = sz->height; 585 } else if (vivid_is_sdtv_cap(dev)) { 586 w = 720; 587 h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576; 588 } else { 589 w = dev->src_rect.width; 590 h = dev->src_rect.height; 591 } 592 if (V4L2_FIELD_HAS_T_OR_B(mp->field)) 593 factor = 2; 594 if (vivid_is_webcam(dev) || 595 (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) { 596 mp->width = w; 597 mp->height = h / factor; 598 } else { 599 struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor }; 600 601 v4l2_rect_set_min_size(&r, &vivid_min_rect); 602 v4l2_rect_set_max_size(&r, &vivid_max_rect); 603 if (dev->has_scaler_cap && !dev->has_compose_cap) { 604 struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h }; 605 606 v4l2_rect_set_max_size(&r, &max_r); 607 } else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) { 608 v4l2_rect_set_max_size(&r, &dev->src_rect); 609 } else if (!dev->has_scaler_cap && !dev->has_crop_cap) { 610 v4l2_rect_set_min_size(&r, &dev->src_rect); 611 } 612 mp->width = r.width; 613 mp->height = r.height / factor; 614 } 615 616 /* This driver supports custom bytesperline values */ 617 618 mp->num_planes = fmt->buffers; 619 for (p = 0; p < fmt->buffers; p++) { 620 /* Calculate the minimum supported bytesperline value */ 621 bytesperline = (mp->width * fmt->bit_depth[p]) >> 3; 622 /* Calculate the maximum supported bytesperline value */ 623 max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3; 624 625 if (pfmt[p].bytesperline > max_bpl) 626 pfmt[p].bytesperline = max_bpl; 627 if (pfmt[p].bytesperline < bytesperline) 628 pfmt[p].bytesperline = bytesperline; 629 630 pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) / 631 fmt->vdownsampling[p] + fmt->data_offset[p]; 632 633 memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved)); 634 } 635 for (p = fmt->buffers; p < fmt->planes; p++) 636 pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height * 637 (fmt->bit_depth[p] / fmt->vdownsampling[p])) / 638 (fmt->bit_depth[0] / fmt->vdownsampling[0]); 639 640 if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace)) 641 mp->colorspace = vivid_colorspace_cap(dev); 642 643 if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func)) 644 mp->xfer_func = vivid_xfer_func_cap(dev); 645 646 if (fmt->color_enc == TGP_COLOR_ENC_HSV) { 647 if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc)) 648 mp->hsv_enc = vivid_hsv_enc_cap(dev); 649 } else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) { 650 if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc)) 651 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev); 652 } else { 653 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev); 654 } 655 656 if (fmt->color_enc == TGP_COLOR_ENC_YCBCR || 657 fmt->color_enc == TGP_COLOR_ENC_RGB) { 658 if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization)) 659 mp->quantization = vivid_quantization_cap(dev); 660 } else { 661 mp->quantization = vivid_quantization_cap(dev); 662 } 663 664 memset(mp->reserved, 0, sizeof(mp->reserved)); 665 return 0; 666 } 667 668 int vivid_s_fmt_vid_cap(struct file *file, void *priv, 669 struct v4l2_format *f) 670 { 671 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; 672 struct vivid_dev *dev = video_drvdata(file); 673 struct v4l2_rect *crop = &dev->crop_cap; 674 struct v4l2_rect *compose = &dev->compose_cap; 675 struct vb2_queue *q = &dev->vb_vid_cap_q; 676 int ret = vivid_try_fmt_vid_cap(file, priv, f); 677 unsigned factor = 1; 678 unsigned p; 679 unsigned i; 680 681 if (ret < 0) 682 return ret; 683 684 if (vb2_is_busy(q)) { 685 dprintk(dev, 1, "%s device busy\n", __func__); 686 return -EBUSY; 687 } 688 689 dev->fmt_cap = vivid_get_format(dev, mp->pixelformat); 690 if (V4L2_FIELD_HAS_T_OR_B(mp->field)) 691 factor = 2; 692 693 /* Note: the webcam input doesn't support scaling, cropping or composing */ 694 695 if (!vivid_is_webcam(dev) && 696 (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) { 697 struct v4l2_rect r = { 0, 0, mp->width, mp->height }; 698 699 if (dev->has_scaler_cap) { 700 if (dev->has_compose_cap) 701 v4l2_rect_map_inside(compose, &r); 702 else 703 *compose = r; 704 if (dev->has_crop_cap && !dev->has_compose_cap) { 705 struct v4l2_rect min_r = { 706 0, 0, 707 r.width / MAX_ZOOM, 708 factor * r.height / MAX_ZOOM 709 }; 710 struct v4l2_rect max_r = { 711 0, 0, 712 r.width * MAX_ZOOM, 713 factor * r.height * MAX_ZOOM 714 }; 715 716 v4l2_rect_set_min_size(crop, &min_r); 717 v4l2_rect_set_max_size(crop, &max_r); 718 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 719 } else if (dev->has_crop_cap) { 720 struct v4l2_rect min_r = { 721 0, 0, 722 compose->width / MAX_ZOOM, 723 factor * compose->height / MAX_ZOOM 724 }; 725 struct v4l2_rect max_r = { 726 0, 0, 727 compose->width * MAX_ZOOM, 728 factor * compose->height * MAX_ZOOM 729 }; 730 731 v4l2_rect_set_min_size(crop, &min_r); 732 v4l2_rect_set_max_size(crop, &max_r); 733 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 734 } 735 } else if (dev->has_crop_cap && !dev->has_compose_cap) { 736 r.height *= factor; 737 v4l2_rect_set_size_to(crop, &r); 738 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 739 r = *crop; 740 r.height /= factor; 741 v4l2_rect_set_size_to(compose, &r); 742 } else if (!dev->has_crop_cap) { 743 v4l2_rect_map_inside(compose, &r); 744 } else { 745 r.height *= factor; 746 v4l2_rect_set_max_size(crop, &r); 747 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 748 compose->top *= factor; 749 compose->height *= factor; 750 v4l2_rect_set_size_to(compose, crop); 751 v4l2_rect_map_inside(compose, &r); 752 compose->top /= factor; 753 compose->height /= factor; 754 } 755 } else if (vivid_is_webcam(dev)) { 756 unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx); 757 758 /* Guaranteed to be a match */ 759 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++) 760 if (webcam_sizes[i].width == mp->width && 761 webcam_sizes[i].height == mp->height) 762 break; 763 dev->webcam_size_idx = i; 764 if (dev->webcam_ival_idx >= ival_sz) 765 dev->webcam_ival_idx = ival_sz - 1; 766 vivid_update_format_cap(dev, false); 767 } else { 768 struct v4l2_rect r = { 0, 0, mp->width, mp->height }; 769 770 v4l2_rect_set_size_to(compose, &r); 771 r.height *= factor; 772 v4l2_rect_set_size_to(crop, &r); 773 } 774 775 dev->fmt_cap_rect.width = mp->width; 776 dev->fmt_cap_rect.height = mp->height; 777 tpg_s_buf_height(&dev->tpg, mp->height); 778 tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc); 779 for (p = 0; p < tpg_g_buffers(&dev->tpg); p++) 780 tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline); 781 dev->field_cap = mp->field; 782 if (dev->field_cap == V4L2_FIELD_ALTERNATE) 783 tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true); 784 else 785 tpg_s_field(&dev->tpg, dev->field_cap, false); 786 tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap); 787 if (vivid_is_sdtv_cap(dev)) 788 dev->tv_field_cap = mp->field; 789 tpg_update_mv_step(&dev->tpg); 790 dev->tpg.colorspace = mp->colorspace; 791 dev->tpg.xfer_func = mp->xfer_func; 792 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR) 793 dev->tpg.ycbcr_enc = mp->ycbcr_enc; 794 else 795 dev->tpg.hsv_enc = mp->hsv_enc; 796 dev->tpg.quantization = mp->quantization; 797 798 return 0; 799 } 800 801 int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv, 802 struct v4l2_format *f) 803 { 804 struct vivid_dev *dev = video_drvdata(file); 805 806 if (!dev->multiplanar) 807 return -ENOTTY; 808 return vivid_g_fmt_vid_cap(file, priv, f); 809 } 810 811 int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv, 812 struct v4l2_format *f) 813 { 814 struct vivid_dev *dev = video_drvdata(file); 815 816 if (!dev->multiplanar) 817 return -ENOTTY; 818 return vivid_try_fmt_vid_cap(file, priv, f); 819 } 820 821 int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv, 822 struct v4l2_format *f) 823 { 824 struct vivid_dev *dev = video_drvdata(file); 825 826 if (!dev->multiplanar) 827 return -ENOTTY; 828 return vivid_s_fmt_vid_cap(file, priv, f); 829 } 830 831 int vidioc_g_fmt_vid_cap(struct file *file, void *priv, 832 struct v4l2_format *f) 833 { 834 struct vivid_dev *dev = video_drvdata(file); 835 836 if (dev->multiplanar) 837 return -ENOTTY; 838 return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap); 839 } 840 841 int vidioc_try_fmt_vid_cap(struct file *file, void *priv, 842 struct v4l2_format *f) 843 { 844 struct vivid_dev *dev = video_drvdata(file); 845 846 if (dev->multiplanar) 847 return -ENOTTY; 848 return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap); 849 } 850 851 int vidioc_s_fmt_vid_cap(struct file *file, void *priv, 852 struct v4l2_format *f) 853 { 854 struct vivid_dev *dev = video_drvdata(file); 855 856 if (dev->multiplanar) 857 return -ENOTTY; 858 return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap); 859 } 860 861 int vivid_vid_cap_g_selection(struct file *file, void *priv, 862 struct v4l2_selection *sel) 863 { 864 struct vivid_dev *dev = video_drvdata(file); 865 866 if (!dev->has_crop_cap && !dev->has_compose_cap) 867 return -ENOTTY; 868 if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 869 return -EINVAL; 870 if (vivid_is_webcam(dev)) 871 return -ENODATA; 872 873 sel->r.left = sel->r.top = 0; 874 switch (sel->target) { 875 case V4L2_SEL_TGT_CROP: 876 if (!dev->has_crop_cap) 877 return -EINVAL; 878 sel->r = dev->crop_cap; 879 break; 880 case V4L2_SEL_TGT_CROP_DEFAULT: 881 case V4L2_SEL_TGT_CROP_BOUNDS: 882 if (!dev->has_crop_cap) 883 return -EINVAL; 884 sel->r = dev->src_rect; 885 break; 886 case V4L2_SEL_TGT_COMPOSE_BOUNDS: 887 if (!dev->has_compose_cap) 888 return -EINVAL; 889 sel->r = vivid_max_rect; 890 break; 891 case V4L2_SEL_TGT_COMPOSE: 892 if (!dev->has_compose_cap) 893 return -EINVAL; 894 sel->r = dev->compose_cap; 895 break; 896 case V4L2_SEL_TGT_COMPOSE_DEFAULT: 897 if (!dev->has_compose_cap) 898 return -EINVAL; 899 sel->r = dev->fmt_cap_rect; 900 break; 901 default: 902 return -EINVAL; 903 } 904 return 0; 905 } 906 907 int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s) 908 { 909 struct vivid_dev *dev = video_drvdata(file); 910 struct v4l2_rect *crop = &dev->crop_cap; 911 struct v4l2_rect *compose = &dev->compose_cap; 912 unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1; 913 int ret; 914 915 if (!dev->has_crop_cap && !dev->has_compose_cap) 916 return -ENOTTY; 917 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 918 return -EINVAL; 919 if (vivid_is_webcam(dev)) 920 return -ENODATA; 921 922 switch (s->target) { 923 case V4L2_SEL_TGT_CROP: 924 if (!dev->has_crop_cap) 925 return -EINVAL; 926 ret = vivid_vid_adjust_sel(s->flags, &s->r); 927 if (ret) 928 return ret; 929 v4l2_rect_set_min_size(&s->r, &vivid_min_rect); 930 v4l2_rect_set_max_size(&s->r, &dev->src_rect); 931 v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap); 932 s->r.top /= factor; 933 s->r.height /= factor; 934 if (dev->has_scaler_cap) { 935 struct v4l2_rect fmt = dev->fmt_cap_rect; 936 struct v4l2_rect max_rect = { 937 0, 0, 938 s->r.width * MAX_ZOOM, 939 s->r.height * MAX_ZOOM 940 }; 941 struct v4l2_rect min_rect = { 942 0, 0, 943 s->r.width / MAX_ZOOM, 944 s->r.height / MAX_ZOOM 945 }; 946 947 v4l2_rect_set_min_size(&fmt, &min_rect); 948 if (!dev->has_compose_cap) 949 v4l2_rect_set_max_size(&fmt, &max_rect); 950 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) && 951 vb2_is_busy(&dev->vb_vid_cap_q)) 952 return -EBUSY; 953 if (dev->has_compose_cap) { 954 v4l2_rect_set_min_size(compose, &min_rect); 955 v4l2_rect_set_max_size(compose, &max_rect); 956 v4l2_rect_map_inside(compose, &fmt); 957 } 958 dev->fmt_cap_rect = fmt; 959 tpg_s_buf_height(&dev->tpg, fmt.height); 960 } else if (dev->has_compose_cap) { 961 struct v4l2_rect fmt = dev->fmt_cap_rect; 962 963 v4l2_rect_set_min_size(&fmt, &s->r); 964 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) && 965 vb2_is_busy(&dev->vb_vid_cap_q)) 966 return -EBUSY; 967 dev->fmt_cap_rect = fmt; 968 tpg_s_buf_height(&dev->tpg, fmt.height); 969 v4l2_rect_set_size_to(compose, &s->r); 970 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect); 971 } else { 972 if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) && 973 vb2_is_busy(&dev->vb_vid_cap_q)) 974 return -EBUSY; 975 v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r); 976 v4l2_rect_set_size_to(compose, &s->r); 977 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect); 978 tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height); 979 } 980 s->r.top *= factor; 981 s->r.height *= factor; 982 *crop = s->r; 983 break; 984 case V4L2_SEL_TGT_COMPOSE: 985 if (!dev->has_compose_cap) 986 return -EINVAL; 987 ret = vivid_vid_adjust_sel(s->flags, &s->r); 988 if (ret) 989 return ret; 990 v4l2_rect_set_min_size(&s->r, &vivid_min_rect); 991 v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect); 992 if (dev->has_scaler_cap) { 993 struct v4l2_rect max_rect = { 994 0, 0, 995 dev->src_rect.width * MAX_ZOOM, 996 (dev->src_rect.height / factor) * MAX_ZOOM 997 }; 998 999 v4l2_rect_set_max_size(&s->r, &max_rect); 1000 if (dev->has_crop_cap) { 1001 struct v4l2_rect min_rect = { 1002 0, 0, 1003 s->r.width / MAX_ZOOM, 1004 (s->r.height * factor) / MAX_ZOOM 1005 }; 1006 struct v4l2_rect max_rect = { 1007 0, 0, 1008 s->r.width * MAX_ZOOM, 1009 (s->r.height * factor) * MAX_ZOOM 1010 }; 1011 1012 v4l2_rect_set_min_size(crop, &min_rect); 1013 v4l2_rect_set_max_size(crop, &max_rect); 1014 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 1015 } 1016 } else if (dev->has_crop_cap) { 1017 s->r.top *= factor; 1018 s->r.height *= factor; 1019 v4l2_rect_set_max_size(&s->r, &dev->src_rect); 1020 v4l2_rect_set_size_to(crop, &s->r); 1021 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 1022 s->r.top /= factor; 1023 s->r.height /= factor; 1024 } else { 1025 v4l2_rect_set_size_to(&s->r, &dev->src_rect); 1026 s->r.height /= factor; 1027 } 1028 v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect); 1029 *compose = s->r; 1030 break; 1031 default: 1032 return -EINVAL; 1033 } 1034 1035 tpg_s_crop_compose(&dev->tpg, crop, compose); 1036 return 0; 1037 } 1038 1039 int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv, 1040 int type, struct v4l2_fract *f) 1041 { 1042 struct vivid_dev *dev = video_drvdata(file); 1043 1044 if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1045 return -EINVAL; 1046 1047 switch (vivid_get_pixel_aspect(dev)) { 1048 case TPG_PIXEL_ASPECT_NTSC: 1049 f->numerator = 11; 1050 f->denominator = 10; 1051 break; 1052 case TPG_PIXEL_ASPECT_PAL: 1053 f->numerator = 54; 1054 f->denominator = 59; 1055 break; 1056 default: 1057 break; 1058 } 1059 return 0; 1060 } 1061 1062 static const struct v4l2_audio vivid_audio_inputs[] = { 1063 { 0, "TV", V4L2_AUDCAP_STEREO }, 1064 { 1, "Line-In", V4L2_AUDCAP_STEREO }, 1065 }; 1066 1067 int vidioc_enum_input(struct file *file, void *priv, 1068 struct v4l2_input *inp) 1069 { 1070 struct vivid_dev *dev = video_drvdata(file); 1071 1072 if (inp->index >= dev->num_inputs) 1073 return -EINVAL; 1074 1075 inp->type = V4L2_INPUT_TYPE_CAMERA; 1076 switch (dev->input_type[inp->index]) { 1077 case WEBCAM: 1078 snprintf(inp->name, sizeof(inp->name), "Webcam %u", 1079 dev->input_name_counter[inp->index]); 1080 inp->capabilities = 0; 1081 break; 1082 case TV: 1083 snprintf(inp->name, sizeof(inp->name), "TV %u", 1084 dev->input_name_counter[inp->index]); 1085 inp->type = V4L2_INPUT_TYPE_TUNER; 1086 inp->std = V4L2_STD_ALL; 1087 if (dev->has_audio_inputs) 1088 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1; 1089 inp->capabilities = V4L2_IN_CAP_STD; 1090 break; 1091 case SVID: 1092 snprintf(inp->name, sizeof(inp->name), "S-Video %u", 1093 dev->input_name_counter[inp->index]); 1094 inp->std = V4L2_STD_ALL; 1095 if (dev->has_audio_inputs) 1096 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1; 1097 inp->capabilities = V4L2_IN_CAP_STD; 1098 break; 1099 case HDMI: 1100 snprintf(inp->name, sizeof(inp->name), "HDMI %u", 1101 dev->input_name_counter[inp->index]); 1102 inp->capabilities = V4L2_IN_CAP_DV_TIMINGS; 1103 if (dev->edid_blocks == 0 || 1104 dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL) 1105 inp->status |= V4L2_IN_ST_NO_SIGNAL; 1106 else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK || 1107 dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE) 1108 inp->status |= V4L2_IN_ST_NO_H_LOCK; 1109 break; 1110 } 1111 if (dev->sensor_hflip) 1112 inp->status |= V4L2_IN_ST_HFLIP; 1113 if (dev->sensor_vflip) 1114 inp->status |= V4L2_IN_ST_VFLIP; 1115 if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) { 1116 if (dev->std_signal_mode[dev->input] == NO_SIGNAL) { 1117 inp->status |= V4L2_IN_ST_NO_SIGNAL; 1118 } else if (dev->std_signal_mode[dev->input] == NO_LOCK) { 1119 inp->status |= V4L2_IN_ST_NO_H_LOCK; 1120 } else if (vivid_is_tv_cap(dev)) { 1121 switch (tpg_g_quality(&dev->tpg)) { 1122 case TPG_QUAL_GRAY: 1123 inp->status |= V4L2_IN_ST_COLOR_KILL; 1124 break; 1125 case TPG_QUAL_NOISE: 1126 inp->status |= V4L2_IN_ST_NO_H_LOCK; 1127 break; 1128 default: 1129 break; 1130 } 1131 } 1132 } 1133 return 0; 1134 } 1135 1136 int vidioc_g_input(struct file *file, void *priv, unsigned *i) 1137 { 1138 struct vivid_dev *dev = video_drvdata(file); 1139 1140 *i = dev->input; 1141 return 0; 1142 } 1143 1144 int vidioc_s_input(struct file *file, void *priv, unsigned i) 1145 { 1146 struct vivid_dev *dev = video_drvdata(file); 1147 struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt; 1148 unsigned brightness; 1149 1150 if (i >= dev->num_inputs) 1151 return -EINVAL; 1152 1153 if (i == dev->input) 1154 return 0; 1155 1156 if (vb2_is_busy(&dev->vb_vid_cap_q) || 1157 vb2_is_busy(&dev->vb_vbi_cap_q) || 1158 vb2_is_busy(&dev->vb_meta_cap_q)) 1159 return -EBUSY; 1160 1161 dev->input = i; 1162 dev->vid_cap_dev.tvnorms = 0; 1163 if (dev->input_type[i] == TV || dev->input_type[i] == SVID) { 1164 dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1; 1165 dev->vid_cap_dev.tvnorms = V4L2_STD_ALL; 1166 } 1167 dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms; 1168 dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms; 1169 vivid_update_format_cap(dev, false); 1170 1171 if (dev->colorspace) { 1172 switch (dev->input_type[i]) { 1173 case WEBCAM: 1174 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB); 1175 break; 1176 case TV: 1177 case SVID: 1178 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M); 1179 break; 1180 case HDMI: 1181 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) { 1182 if (dev->src_rect.width == 720 && dev->src_rect.height <= 576) 1183 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M); 1184 else 1185 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709); 1186 } else { 1187 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB); 1188 } 1189 break; 1190 } 1191 } 1192 1193 /* 1194 * Modify the brightness range depending on the input. 1195 * This makes it easy to use vivid to test if applications can 1196 * handle control range modifications and is also how this is 1197 * typically used in practice as different inputs may be hooked 1198 * up to different receivers with different control ranges. 1199 */ 1200 brightness = 128 * i + dev->input_brightness[i]; 1201 v4l2_ctrl_modify_range(dev->brightness, 1202 128 * i, 255 + 128 * i, 1, 128 + 128 * i); 1203 v4l2_ctrl_s_ctrl(dev->brightness, brightness); 1204 1205 /* Restore per-input states. */ 1206 v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode, 1207 vivid_is_hdmi_cap(dev)); 1208 v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) && 1209 dev->dv_timings_signal_mode[dev->input] == 1210 SELECTED_DV_TIMINGS); 1211 v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev)); 1212 v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) && 1213 dev->std_signal_mode[dev->input]); 1214 1215 if (vivid_is_hdmi_cap(dev)) { 1216 v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode, 1217 dev->dv_timings_signal_mode[dev->input]); 1218 v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings, 1219 dev->query_dv_timings[dev->input]); 1220 } else if (vivid_is_sdtv_cap(dev)) { 1221 v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode, 1222 dev->std_signal_mode[dev->input]); 1223 v4l2_ctrl_s_ctrl(dev->ctrl_standard, 1224 dev->std_signal_mode[dev->input]); 1225 } 1226 1227 return 0; 1228 } 1229 1230 int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin) 1231 { 1232 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs)) 1233 return -EINVAL; 1234 *vin = vivid_audio_inputs[vin->index]; 1235 return 0; 1236 } 1237 1238 int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin) 1239 { 1240 struct vivid_dev *dev = video_drvdata(file); 1241 1242 if (!vivid_is_sdtv_cap(dev)) 1243 return -EINVAL; 1244 *vin = vivid_audio_inputs[dev->tv_audio_input]; 1245 return 0; 1246 } 1247 1248 int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin) 1249 { 1250 struct vivid_dev *dev = video_drvdata(file); 1251 1252 if (!vivid_is_sdtv_cap(dev)) 1253 return -EINVAL; 1254 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs)) 1255 return -EINVAL; 1256 dev->tv_audio_input = vin->index; 1257 return 0; 1258 } 1259 1260 int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf) 1261 { 1262 struct vivid_dev *dev = video_drvdata(file); 1263 1264 if (vf->tuner != 0) 1265 return -EINVAL; 1266 vf->frequency = dev->tv_freq; 1267 return 0; 1268 } 1269 1270 int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf) 1271 { 1272 struct vivid_dev *dev = video_drvdata(file); 1273 1274 if (vf->tuner != 0) 1275 return -EINVAL; 1276 dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ); 1277 if (vivid_is_tv_cap(dev)) 1278 vivid_update_quality(dev); 1279 return 0; 1280 } 1281 1282 int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt) 1283 { 1284 struct vivid_dev *dev = video_drvdata(file); 1285 1286 if (vt->index != 0) 1287 return -EINVAL; 1288 if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2) 1289 return -EINVAL; 1290 dev->tv_audmode = vt->audmode; 1291 return 0; 1292 } 1293 1294 int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt) 1295 { 1296 struct vivid_dev *dev = video_drvdata(file); 1297 enum tpg_quality qual; 1298 1299 if (vt->index != 0) 1300 return -EINVAL; 1301 1302 vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO | 1303 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; 1304 vt->audmode = dev->tv_audmode; 1305 vt->rangelow = MIN_TV_FREQ; 1306 vt->rangehigh = MAX_TV_FREQ; 1307 qual = vivid_get_quality(dev, &vt->afc); 1308 if (qual == TPG_QUAL_COLOR) 1309 vt->signal = 0xffff; 1310 else if (qual == TPG_QUAL_GRAY) 1311 vt->signal = 0x8000; 1312 else 1313 vt->signal = 0; 1314 if (qual == TPG_QUAL_NOISE) { 1315 vt->rxsubchans = 0; 1316 } else if (qual == TPG_QUAL_GRAY) { 1317 vt->rxsubchans = V4L2_TUNER_SUB_MONO; 1318 } else { 1319 unsigned int channel_nr = dev->tv_freq / (6 * 16); 1320 unsigned int options = 1321 (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3; 1322 1323 switch (channel_nr % options) { 1324 case 0: 1325 vt->rxsubchans = V4L2_TUNER_SUB_MONO; 1326 break; 1327 case 1: 1328 vt->rxsubchans = V4L2_TUNER_SUB_STEREO; 1329 break; 1330 case 2: 1331 if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) 1332 vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP; 1333 else 1334 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; 1335 break; 1336 case 3: 1337 vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP; 1338 break; 1339 } 1340 } 1341 strscpy(vt->name, "TV Tuner", sizeof(vt->name)); 1342 return 0; 1343 } 1344 1345 /* Must remain in sync with the vivid_ctrl_standard_strings array */ 1346 const v4l2_std_id vivid_standard[] = { 1347 V4L2_STD_NTSC_M, 1348 V4L2_STD_NTSC_M_JP, 1349 V4L2_STD_NTSC_M_KR, 1350 V4L2_STD_NTSC_443, 1351 V4L2_STD_PAL_BG | V4L2_STD_PAL_H, 1352 V4L2_STD_PAL_I, 1353 V4L2_STD_PAL_DK, 1354 V4L2_STD_PAL_M, 1355 V4L2_STD_PAL_N, 1356 V4L2_STD_PAL_Nc, 1357 V4L2_STD_PAL_60, 1358 V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H, 1359 V4L2_STD_SECAM_DK, 1360 V4L2_STD_SECAM_L, 1361 V4L2_STD_SECAM_LC, 1362 V4L2_STD_UNKNOWN 1363 }; 1364 1365 /* Must remain in sync with the vivid_standard array */ 1366 const char * const vivid_ctrl_standard_strings[] = { 1367 "NTSC-M", 1368 "NTSC-M-JP", 1369 "NTSC-M-KR", 1370 "NTSC-443", 1371 "PAL-BGH", 1372 "PAL-I", 1373 "PAL-DK", 1374 "PAL-M", 1375 "PAL-N", 1376 "PAL-Nc", 1377 "PAL-60", 1378 "SECAM-BGH", 1379 "SECAM-DK", 1380 "SECAM-L", 1381 "SECAM-Lc", 1382 NULL, 1383 }; 1384 1385 int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id) 1386 { 1387 struct vivid_dev *dev = video_drvdata(file); 1388 unsigned int last = dev->query_std_last[dev->input]; 1389 1390 if (!vivid_is_sdtv_cap(dev)) 1391 return -ENODATA; 1392 if (dev->std_signal_mode[dev->input] == NO_SIGNAL || 1393 dev->std_signal_mode[dev->input] == NO_LOCK) { 1394 *id = V4L2_STD_UNKNOWN; 1395 return 0; 1396 } 1397 if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) { 1398 *id = V4L2_STD_UNKNOWN; 1399 } else if (dev->std_signal_mode[dev->input] == CURRENT_STD) { 1400 *id = dev->std_cap[dev->input]; 1401 } else if (dev->std_signal_mode[dev->input] == SELECTED_STD) { 1402 *id = dev->query_std[dev->input]; 1403 } else { 1404 *id = vivid_standard[last]; 1405 dev->query_std_last[dev->input] = 1406 (last + 1) % ARRAY_SIZE(vivid_standard); 1407 } 1408 1409 return 0; 1410 } 1411 1412 int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id) 1413 { 1414 struct vivid_dev *dev = video_drvdata(file); 1415 1416 if (!vivid_is_sdtv_cap(dev)) 1417 return -ENODATA; 1418 if (dev->std_cap[dev->input] == id) 1419 return 0; 1420 if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q)) 1421 return -EBUSY; 1422 dev->std_cap[dev->input] = id; 1423 vivid_update_format_cap(dev, false); 1424 return 0; 1425 } 1426 1427 static void find_aspect_ratio(u32 width, u32 height, 1428 u32 *num, u32 *denom) 1429 { 1430 if (!(height % 3) && ((height * 4 / 3) == width)) { 1431 *num = 4; 1432 *denom = 3; 1433 } else if (!(height % 9) && ((height * 16 / 9) == width)) { 1434 *num = 16; 1435 *denom = 9; 1436 } else if (!(height % 10) && ((height * 16 / 10) == width)) { 1437 *num = 16; 1438 *denom = 10; 1439 } else if (!(height % 4) && ((height * 5 / 4) == width)) { 1440 *num = 5; 1441 *denom = 4; 1442 } else if (!(height % 9) && ((height * 15 / 9) == width)) { 1443 *num = 15; 1444 *denom = 9; 1445 } else { /* default to 16:9 */ 1446 *num = 16; 1447 *denom = 9; 1448 } 1449 } 1450 1451 static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings) 1452 { 1453 struct v4l2_bt_timings *bt = &timings->bt; 1454 u32 total_h_pixel; 1455 u32 total_v_lines; 1456 u32 h_freq; 1457 1458 if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap, 1459 NULL, NULL)) 1460 return false; 1461 1462 total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt); 1463 total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt); 1464 1465 h_freq = (u32)bt->pixelclock / total_h_pixel; 1466 1467 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) { 1468 if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width, 1469 bt->polarities, bt->interlaced, timings)) 1470 return true; 1471 } 1472 1473 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) { 1474 struct v4l2_fract aspect_ratio; 1475 1476 find_aspect_ratio(bt->width, bt->height, 1477 &aspect_ratio.numerator, 1478 &aspect_ratio.denominator); 1479 if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync, 1480 bt->polarities, bt->interlaced, 1481 aspect_ratio, timings)) 1482 return true; 1483 } 1484 return false; 1485 } 1486 1487 int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh, 1488 struct v4l2_dv_timings *timings) 1489 { 1490 struct vivid_dev *dev = video_drvdata(file); 1491 1492 if (!vivid_is_hdmi_cap(dev)) 1493 return -ENODATA; 1494 if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap, 1495 0, NULL, NULL) && 1496 !valid_cvt_gtf_timings(timings)) 1497 return -EINVAL; 1498 1499 if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input], 1500 0, false)) 1501 return 0; 1502 if (vb2_is_busy(&dev->vb_vid_cap_q)) 1503 return -EBUSY; 1504 1505 dev->dv_timings_cap[dev->input] = *timings; 1506 vivid_update_format_cap(dev, false); 1507 return 0; 1508 } 1509 1510 int vidioc_query_dv_timings(struct file *file, void *_fh, 1511 struct v4l2_dv_timings *timings) 1512 { 1513 struct vivid_dev *dev = video_drvdata(file); 1514 unsigned int input = dev->input; 1515 unsigned int last = dev->query_dv_timings_last[input]; 1516 1517 if (!vivid_is_hdmi_cap(dev)) 1518 return -ENODATA; 1519 if (dev->dv_timings_signal_mode[input] == NO_SIGNAL || 1520 dev->edid_blocks == 0) 1521 return -ENOLINK; 1522 if (dev->dv_timings_signal_mode[input] == NO_LOCK) 1523 return -ENOLCK; 1524 if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) { 1525 timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2; 1526 return -ERANGE; 1527 } 1528 if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) { 1529 *timings = dev->dv_timings_cap[input]; 1530 } else if (dev->dv_timings_signal_mode[input] == 1531 SELECTED_DV_TIMINGS) { 1532 *timings = 1533 v4l2_dv_timings_presets[dev->query_dv_timings[input]]; 1534 } else { 1535 *timings = 1536 v4l2_dv_timings_presets[last]; 1537 dev->query_dv_timings_last[input] = 1538 (last + 1) % dev->query_dv_timings_size; 1539 } 1540 return 0; 1541 } 1542 1543 int vidioc_s_edid(struct file *file, void *_fh, 1544 struct v4l2_edid *edid) 1545 { 1546 struct vivid_dev *dev = video_drvdata(file); 1547 u16 phys_addr; 1548 u32 display_present = 0; 1549 unsigned int i, j; 1550 int ret; 1551 1552 memset(edid->reserved, 0, sizeof(edid->reserved)); 1553 if (edid->pad >= dev->num_inputs) 1554 return -EINVAL; 1555 if (dev->input_type[edid->pad] != HDMI || edid->start_block) 1556 return -EINVAL; 1557 if (edid->blocks == 0) { 1558 dev->edid_blocks = 0; 1559 v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0); 1560 v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0); 1561 phys_addr = CEC_PHYS_ADDR_INVALID; 1562 goto set_phys_addr; 1563 } 1564 if (edid->blocks > dev->edid_max_blocks) { 1565 edid->blocks = dev->edid_max_blocks; 1566 return -E2BIG; 1567 } 1568 phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL); 1569 ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL); 1570 if (ret) 1571 return ret; 1572 1573 if (vb2_is_busy(&dev->vb_vid_cap_q)) 1574 return -EBUSY; 1575 1576 dev->edid_blocks = edid->blocks; 1577 memcpy(dev->edid, edid->edid, edid->blocks * 128); 1578 1579 for (i = 0, j = 0; i < dev->num_outputs; i++) 1580 if (dev->output_type[i] == HDMI) 1581 display_present |= 1582 dev->display_present[i] << j++; 1583 1584 v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present); 1585 v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present); 1586 1587 set_phys_addr: 1588 /* TODO: a proper hotplug detect cycle should be emulated here */ 1589 cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false); 1590 1591 for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++) 1592 cec_s_phys_addr(dev->cec_tx_adap[i], 1593 dev->display_present[i] ? 1594 v4l2_phys_addr_for_input(phys_addr, i + 1) : 1595 CEC_PHYS_ADDR_INVALID, 1596 false); 1597 return 0; 1598 } 1599 1600 int vidioc_enum_framesizes(struct file *file, void *fh, 1601 struct v4l2_frmsizeenum *fsize) 1602 { 1603 struct vivid_dev *dev = video_drvdata(file); 1604 1605 if (!vivid_is_webcam(dev) && !dev->has_scaler_cap) 1606 return -EINVAL; 1607 if (vivid_get_format(dev, fsize->pixel_format) == NULL) 1608 return -EINVAL; 1609 if (vivid_is_webcam(dev)) { 1610 if (fsize->index >= ARRAY_SIZE(webcam_sizes)) 1611 return -EINVAL; 1612 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; 1613 fsize->discrete = webcam_sizes[fsize->index]; 1614 return 0; 1615 } 1616 if (fsize->index) 1617 return -EINVAL; 1618 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; 1619 fsize->stepwise.min_width = MIN_WIDTH; 1620 fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM; 1621 fsize->stepwise.step_width = 2; 1622 fsize->stepwise.min_height = MIN_HEIGHT; 1623 fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM; 1624 fsize->stepwise.step_height = 2; 1625 return 0; 1626 } 1627 1628 /* timeperframe is arbitrary and continuous */ 1629 int vidioc_enum_frameintervals(struct file *file, void *priv, 1630 struct v4l2_frmivalenum *fival) 1631 { 1632 struct vivid_dev *dev = video_drvdata(file); 1633 const struct vivid_fmt *fmt; 1634 int i; 1635 1636 fmt = vivid_get_format(dev, fival->pixel_format); 1637 if (!fmt) 1638 return -EINVAL; 1639 1640 if (!vivid_is_webcam(dev)) { 1641 if (fival->index) 1642 return -EINVAL; 1643 if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM) 1644 return -EINVAL; 1645 if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM) 1646 return -EINVAL; 1647 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; 1648 fival->discrete = dev->timeperframe_vid_cap; 1649 return 0; 1650 } 1651 1652 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++) 1653 if (fival->width == webcam_sizes[i].width && 1654 fival->height == webcam_sizes[i].height) 1655 break; 1656 if (i == ARRAY_SIZE(webcam_sizes)) 1657 return -EINVAL; 1658 if (fival->index >= webcam_ival_count(dev, i)) 1659 return -EINVAL; 1660 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; 1661 fival->discrete = webcam_intervals[fival->index]; 1662 return 0; 1663 } 1664 1665 int vivid_vid_cap_g_parm(struct file *file, void *priv, 1666 struct v4l2_streamparm *parm) 1667 { 1668 struct vivid_dev *dev = video_drvdata(file); 1669 1670 if (parm->type != (dev->multiplanar ? 1671 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : 1672 V4L2_BUF_TYPE_VIDEO_CAPTURE)) 1673 return -EINVAL; 1674 1675 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; 1676 parm->parm.capture.timeperframe = dev->timeperframe_vid_cap; 1677 parm->parm.capture.readbuffers = 1; 1678 return 0; 1679 } 1680 1681 int vivid_vid_cap_s_parm(struct file *file, void *priv, 1682 struct v4l2_streamparm *parm) 1683 { 1684 struct vivid_dev *dev = video_drvdata(file); 1685 unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx); 1686 struct v4l2_fract tpf; 1687 unsigned i; 1688 1689 if (parm->type != (dev->multiplanar ? 1690 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : 1691 V4L2_BUF_TYPE_VIDEO_CAPTURE)) 1692 return -EINVAL; 1693 if (!vivid_is_webcam(dev)) 1694 return vivid_vid_cap_g_parm(file, priv, parm); 1695 1696 tpf = parm->parm.capture.timeperframe; 1697 1698 if (tpf.denominator == 0) 1699 tpf = webcam_intervals[ival_sz - 1]; 1700 for (i = 0; i < ival_sz; i++) 1701 if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i])) 1702 break; 1703 if (i == ival_sz) 1704 i = ival_sz - 1; 1705 dev->webcam_ival_idx = i; 1706 tpf = webcam_intervals[dev->webcam_ival_idx]; 1707 1708 /* resync the thread's timings */ 1709 dev->cap_seq_resync = true; 1710 dev->timeperframe_vid_cap = tpf; 1711 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; 1712 parm->parm.capture.timeperframe = tpf; 1713 parm->parm.capture.readbuffers = 1; 1714 return 0; 1715 } 1716