// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-vid-cap.c - video capture support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-rect.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-kthread-cap.h"
#include "vivid-vid-cap.h"

/* Sizes must be in increasing order */
static const struct v4l2_frmsize_discrete webcam_sizes[] = {
	{ 320, 180 },
	{ 640, 360 },
	{ 640, 480 },
	{ 1280, 720 },
	{ 1920, 1080 },
	{ 3840, 2160 },
};

/*
 * Intervals must be in increasing order.
 */
static const struct v4l2_fract webcam_intervals[] = {
	{ 1, 1 },
	{ 1, 2 },
	{ 1, 4 },
	{ 1, 5 },
	{ 1, 10 },
	{ 2, 25 },
	{ 1, 15 }, /* 7 - maximum for 2160p */
	{ 1, 25 },
	{ 1, 30 }, /* 9 - maximum for 1080p */
	{ 1, 40 },
	{ 1, 50 },
	{ 1, 60 }, /* 12 - maximum for 720p */
	{ 1, 120 },
};

/* Limit maximum FPS rates for high resolutions */
#define IVAL_COUNT_720P 12 /* 720p and up is limited to 60 fps */
#define IVAL_COUNT_1080P 9 /* 1080p and up is limited to 30 fps */
#define IVAL_COUNT_2160P 7 /* 2160p and up is limited to 15 fps */

static inline unsigned int webcam_ival_count(const struct vivid_dev *dev,
					     unsigned int frmsize_idx)
{
	if (webcam_sizes[frmsize_idx].height >= 2160)
		return IVAL_COUNT_2160P;

	if (webcam_sizes[frmsize_idx].height >= 1080)
		return IVAL_COUNT_1080P;

	if (webcam_sizes[frmsize_idx].height >= 720)
		return IVAL_COUNT_720P;

	/* For low resolutions, allow all FPS rates */
	return ARRAY_SIZE(webcam_intervals);
}
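/*
 * Worked example (editor's note, not part of the original source):
 * webcam_ival_count() limits the exposed frame intervals by resolution.
 * For the 1920x1080 entry it returns IVAL_COUNT_1080P (9), so only
 * webcam_intervals[0..8] are reported, i.e. everything up to { 1, 30 }
 * (30 fps). The 320x180 and 640x360/480 entries get the full array,
 * including { 1, 120 }.
 */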
static int vid_cap_queue_setup(struct vb2_queue *vq,
			       unsigned *nbuffers, unsigned *nplanes,
			       unsigned sizes[], struct device *alloc_devs[])
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned h = dev->fmt_cap_rect.height;
	unsigned p;

	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * You cannot use read() with FIELD_ALTERNATE since the field
		 * information (TOP/BOTTOM) cannot be passed back to the user.
		 */
		if (vb2_fileio_is_active(vq))
			return -EINVAL;
	}

	if (dev->queue_setup_error) {
		/*
		 * Error injection: test what happens if queue_setup() returns
		 * an error.
		 */
		dev->queue_setup_error = false;
		return -EINVAL;
	}
	if (*nplanes) {
		/*
		 * Check if the number of requested planes matches the number
		 * of buffers in the current format. You can't mix that.
		 */
		if (*nplanes != buffers)
			return -EINVAL;
		for (p = 0; p < buffers; p++) {
			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h /
					dev->fmt_cap->vdownsampling[p] +
					dev->fmt_cap->data_offset[p])
				return -EINVAL;
		}
	} else {
		for (p = 0; p < buffers; p++)
			sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
					dev->fmt_cap->vdownsampling[p] +
					dev->fmt_cap->data_offset[p];
	}

	if (vq->num_buffers + *nbuffers < 2)
		*nbuffers = 2 - vq->num_buffers;

	*nplanes = buffers;

	dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
	for (p = 0; p < buffers; p++)
		dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);

	return 0;
}

static int vid_cap_buf_prepare(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size;
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned p;

	dprintk(dev, 1, "%s\n", __func__);

	if (WARN_ON(NULL == dev->fmt_cap))
		return -EINVAL;

	if (dev->buf_prepare_error) {
		/*
		 * Error injection: test what happens if buf_prepare() returns
		 * an error.
		 */
		dev->buf_prepare_error = false;
		return -EINVAL;
	}
	for (p = 0; p < buffers; p++) {
		size = (tpg_g_line_width(&dev->tpg, p) *
			dev->fmt_cap_rect.height) /
			dev->fmt_cap->vdownsampling[p] +
			dev->fmt_cap->data_offset[p];

		if (vb2_plane_size(vb, p) < size) {
			dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
					__func__, p, vb2_plane_size(vb, p), size);
			return -EINVAL;
		}

		vb2_set_plane_payload(vb, p, size);
		vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
	}

	return 0;
}

static void vid_cap_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_timecode *tc = &vbuf->timecode;
	unsigned fps = 25;
	unsigned seq = vbuf->sequence;

	if (!vivid_is_sdtv_cap(dev))
		return;

	/*
	 * Set the timecode. Rarely used, so it is interesting to
	 * test this.
	 */
	vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
	if (dev->std_cap[dev->input] & V4L2_STD_525_60)
		fps = 30;
	tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
	tc->flags = 0;
	tc->frames = seq % fps;
	tc->seconds = (seq / fps) % 60;
	tc->minutes = (seq / (60 * fps)) % 60;
	tc->hours = (seq / (60 * 60 * fps)) % 24;
}
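/*
 * Worked example (editor's note, not part of the original source): with a
 * 60 Hz SDTV standard fps is 30, so buffer sequence number 1800 yields the
 * timecode 00:01:00:00 (frames = 1800 % 30 = 0, seconds = 60 % 60 = 0,
 * minutes = 1800 / 1800 = 1, hours = 0).
 */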
static void vid_cap_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);

	dprintk(dev, 1, "%s\n", __func__);

	spin_lock(&dev->slock);
	list_add_tail(&buf->list, &dev->vid_cap_active);
	spin_unlock(&dev->slock);
}

static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned i;
	int err;

	if (vb2_is_streaming(&dev->vb_vid_out_q))
		dev->can_loop_video = vivid_vid_can_loop(dev);

	dev->vid_cap_seq_count = 0;
	dprintk(dev, 1, "%s\n", __func__);
	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
	if (dev->start_streaming_error) {
		dev->start_streaming_error = false;
		err = -EINVAL;
	} else {
		err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
	}
	if (err) {
		struct vivid_buffer *buf, *tmp;

		list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf,
					VB2_BUF_STATE_QUEUED);
		}
	}
	return err;
}

/* abort streaming and wait for last buffer */
static void vid_cap_stop_streaming(struct vb2_queue *vq)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);

	dprintk(dev, 1, "%s\n", __func__);
	vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
	dev->can_loop_video = false;
}

static void vid_cap_buf_request_complete(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
}

const struct vb2_ops vivid_vid_cap_qops = {
	.queue_setup = vid_cap_queue_setup,
	.buf_prepare = vid_cap_buf_prepare,
	.buf_finish = vid_cap_buf_finish,
	.buf_queue = vid_cap_buf_queue,
	.start_streaming = vid_cap_start_streaming,
	.stop_streaming = vid_cap_stop_streaming,
	.buf_request_complete = vid_cap_buf_request_complete,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};
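/*
 * Editor's note (not part of the original source): vb2 calls these ops
 * roughly in this order: queue_setup() when the queue is (re)configured,
 * buf_prepare() and buf_queue() for each buffer queued by userspace,
 * start_streaming() once streaming starts and enough buffers are queued,
 * buf_finish() before a buffer is dequeued, and stop_streaming() at
 * stream off.
 */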
/*
 * Determine the 'picture' quality based on the current TV frequency: either
 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
 * signal or NOISE for no signal.
 */
void vivid_update_quality(struct vivid_dev *dev)
{
	unsigned freq_modulus;

	if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
		/*
		 * The 'noise' will only be replaced by the actual video
		 * if the output video matches the input video settings.
		 */
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_hdmi_cap(dev) &&
	    VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_sdtv_cap(dev) &&
	    VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (!vivid_is_tv_cap(dev)) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
		return;
	}

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just noise.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (freq_modulus > 2 * 16) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
			next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
		return;
	}
	if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
		tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
	else
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
}

/*
 * Get the current picture quality and the associated afc value.
 */
static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
{
	unsigned freq_modulus;

	if (afc)
		*afc = 0;
	if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
	    tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
		return tpg_g_quality(&dev->tpg);

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just gray.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (afc)
		*afc = freq_modulus - 1 * 16;
	return TPG_QUAL_GRAY;
}
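/*
 * Worked example (editor's note, not part of the original source): tv_freq
 * is in units of 1/16 MHz. Tuning to 55.25 MHz gives tv_freq = 884, so
 * freq_modulus = (884 - 676) % 96 = 16, which falls in the 12..20 window
 * and selects TPG_QUAL_COLOR. At 55.75 MHz freq_modulus is 24, i.e. GRAY,
 * and at 58.00 MHz it is 60, i.e. NOISE.
 */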
enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return dev->std_aspect_ratio[dev->input];

	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_aspect_ratio[dev->input];

	return TPG_VIDEO_ASPECT_IMAGE;
}

static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return (dev->std_cap[dev->input] & V4L2_STD_525_60) ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	if (vivid_is_hdmi_cap(dev) &&
	    dev->src_rect.width == 720 && dev->src_rect.height <= 576)
		return dev->src_rect.height == 480 ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	return TPG_PIXEL_ASPECT_SQUARE;
}

/*
 * Called whenever the format has to be reset which can occur when
 * changing inputs, standard, timings, etc.
 */
void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
{
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
	u32 dims[V4L2_CTRL_MAX_DIMS] = {};
	unsigned size;
	u64 pixelclock;

	switch (dev->input_type[dev->input]) {
	case WEBCAM:
	default:
		dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
		dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
		dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
		dev->field_cap = V4L2_FIELD_NONE;
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case TV:
	case SVID:
		dev->field_cap = dev->tv_field_cap;
		dev->src_rect.width = 720;
		if (dev->std_cap[dev->input] & V4L2_STD_525_60) {
			dev->src_rect.height = 480;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
			dev->service_set_cap = V4L2_SLICED_CAPTION_525;
		} else {
			dev->src_rect.height = 576;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
			dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
		}
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case HDMI:
		dev->src_rect.width = bt->width;
		dev->src_rect.height = bt->height;
		size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
		if (dev->reduced_fps && can_reduce_fps(bt)) {
			pixelclock = div_u64(bt->pixelclock * 1000, 1001);
			bt->flags |= V4L2_DV_FL_REDUCED_FPS;
		} else {
			pixelclock = bt->pixelclock;
			bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
		}
		dev->timeperframe_vid_cap = (struct v4l2_fract) {
			size / 100, (u32)pixelclock / 100
		};
		if (bt->interlaced)
			dev->field_cap = V4L2_FIELD_ALTERNATE;
		else
			dev->field_cap = V4L2_FIELD_NONE;

		/*
		 * We can be called from within s_ctrl, in that case we can't
		 * set/get controls. Luckily we don't need to in that case.
		 */
		if (keep_controls || !dev->colorspace)
			break;
		if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
			if (bt->width == 720 && bt->height <= 576)
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			else
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
		} else {
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
		}
		tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
		break;
	}
	vivid_update_quality(dev);
	tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
	dev->crop_cap = dev->src_rect;
	dev->crop_bounds_cap = dev->src_rect;
	dev->compose_cap = dev->crop_cap;
	if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
		dev->compose_cap.height /= 2;
	dev->fmt_cap_rect = dev->compose_cap;
	tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
	tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
	tpg_update_mv_step(&dev->tpg);

	/*
	 * We can be called from within s_ctrl, in that case we can't
	 * modify controls. Luckily we don't need to in that case.
	 */
	if (keep_controls)
		return;

	dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV);
	dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV);
	v4l2_ctrl_modify_dimensions(dev->pixel_array, dims);
}
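/*
 * Worked example (editor's note, not part of the original source): for a
 * standard CEA-861 1080p60 timing the full frame is 2200x1125 pixels and
 * the pixel clock is 148.5 MHz, so the HDMI branch above sets
 * timeperframe_vid_cap to { 2475000 / 100, 148500000 / 100 } =
 * { 24750, 1485000 }, i.e. 1/60 of a second per frame.
 */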
/* Map the field to something that is valid for the current input */
static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
{
	if (vivid_is_sdtv_cap(dev)) {
		switch (field) {
		case V4L2_FIELD_INTERLACED_TB:
		case V4L2_FIELD_INTERLACED_BT:
		case V4L2_FIELD_SEQ_TB:
		case V4L2_FIELD_SEQ_BT:
		case V4L2_FIELD_TOP:
		case V4L2_FIELD_BOTTOM:
		case V4L2_FIELD_ALTERNATE:
			return field;
		case V4L2_FIELD_INTERLACED:
		default:
			return V4L2_FIELD_INTERLACED;
		}
	}
	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_cap[dev->input].bt.interlaced ?
			V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE;
	return V4L2_FIELD_NONE;
}

static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_colorspace(&dev->tpg);
	return dev->colorspace_out;
}

static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_xfer_func(&dev->tpg);
	return dev->xfer_func_out;
}

static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_ycbcr_enc(&dev->tpg);
	return dev->ycbcr_enc_out;
}

static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_hsv_enc(&dev->tpg);
	return dev->hsv_enc_out;
}

static unsigned vivid_quantization_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_quantization(&dev->tpg);
	return dev->quantization_out;
}

int vivid_g_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	unsigned p;

	mp->width = dev->fmt_cap_rect.width;
	mp->height = dev->fmt_cap_rect.height;
	mp->field = dev->field_cap;
	mp->pixelformat = dev->fmt_cap->fourcc;
	mp->colorspace = vivid_colorspace_cap(dev);
	mp->xfer_func = vivid_xfer_func_cap(dev);
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
		mp->hsv_enc = vivid_hsv_enc_cap(dev);
	else
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	mp->quantization = vivid_quantization_cap(dev);
	mp->num_planes = dev->fmt_cap->buffers;
	for (p = 0; p < mp->num_planes; p++) {
		mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
		mp->plane_fmt[p].sizeimage =
			(tpg_g_line_width(&dev->tpg, p) * mp->height) /
			dev->fmt_cap->vdownsampling[p] +
			dev->fmt_cap->data_offset[p];
	}
	return 0;
}
int vivid_try_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	unsigned bytesperline, max_bpl;
	unsigned factor = 1;
	unsigned w, h;
	unsigned p;
	bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC);

	fmt = vivid_get_format(dev, mp->pixelformat);
	if (!fmt) {
		dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
			mp->pixelformat);
		mp->pixelformat = V4L2_PIX_FMT_YUYV;
		fmt = vivid_get_format(dev, mp->pixelformat);
	}

	mp->field = vivid_field_cap(dev, mp->field);
	if (vivid_is_webcam(dev)) {
		const struct v4l2_frmsize_discrete *sz =
			v4l2_find_nearest_size(webcam_sizes,
					       ARRAY_SIZE(webcam_sizes), width,
					       height, mp->width, mp->height);

		w = sz->width;
		h = sz->height;
	} else if (vivid_is_sdtv_cap(dev)) {
		w = 720;
		h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576;
	} else {
		w = dev->src_rect.width;
		h = dev->src_rect.height;
	}
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;
	if (vivid_is_webcam(dev) ||
	    (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
		mp->width = w;
		mp->height = h / factor;
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };

		v4l2_rect_set_min_size(&r, &vivid_min_rect);
		v4l2_rect_set_max_size(&r, &vivid_max_rect);
		if (dev->has_scaler_cap && !dev->has_compose_cap) {
			struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };

			v4l2_rect_set_max_size(&r, &max_r);
		} else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
			v4l2_rect_set_max_size(&r, &dev->src_rect);
		} else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
			v4l2_rect_set_min_size(&r, &dev->src_rect);
		}
		mp->width = r.width;
		mp->height = r.height / factor;
	}

	/* This driver supports custom bytesperline values */

	mp->num_planes = fmt->buffers;
	for (p = 0; p < fmt->buffers; p++) {
		/* Calculate the minimum supported bytesperline value */
		bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
		/* Calculate the maximum supported bytesperline value */
		max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;

		if (pfmt[p].bytesperline > max_bpl)
			pfmt[p].bytesperline = max_bpl;
		if (pfmt[p].bytesperline < bytesperline)
			pfmt[p].bytesperline = bytesperline;

		pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
				fmt->vdownsampling[p] + fmt->data_offset[p];

		memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
	}
	for (p = fmt->buffers; p < fmt->planes; p++)
		pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
			(fmt->bit_depth[p] / fmt->vdownsampling[p])) /
			(fmt->bit_depth[0] / fmt->vdownsampling[0]);

	if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace))
		mp->colorspace = vivid_colorspace_cap(dev);

	if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func))
		mp->xfer_func = vivid_xfer_func_cap(dev);

	if (fmt->color_enc == TGP_COLOR_ENC_HSV) {
		if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc))
			mp->hsv_enc = vivid_hsv_enc_cap(dev);
	} else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) {
		if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc))
			mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	} else {
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	}

	if (fmt->color_enc == TGP_COLOR_ENC_YCBCR ||
	    fmt->color_enc == TGP_COLOR_ENC_RGB) {
		if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization))
			mp->quantization = vivid_quantization_cap(dev);
	} else {
		mp->quantization = vivid_quantization_cap(dev);
	}

	memset(mp->reserved, 0, sizeof(mp->reserved));
	return 0;
}
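/*
 * Worked example (editor's note, not part of the original source; the format
 * values below are hypothetical, not taken from vivid's format table): for a
 * two-plane format carried in a single buffer with bit_depth = { 8, 8 } and
 * vdownsampling = { 1, 2 } at 640x480, the first loop above sets sizeimage
 * to 640 * 480 = 307200 bytes for plane 0, and the second loop adds
 * 640 * 480 * (8 / 2) / (8 / 1) = 153600 bytes for the second plane,
 * giving 460800 bytes in total.
 */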
int vivid_s_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	struct vb2_queue *q = &dev->vb_vid_cap_q;
	int ret = vivid_try_fmt_vid_cap(file, priv, f);
	unsigned factor = 1;
	unsigned p;
	unsigned i;

	if (ret < 0)
		return ret;

	if (vb2_is_busy(q)) {
		dprintk(dev, 1, "%s device busy\n", __func__);
		return -EBUSY;
	}

	dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;

	/* Note: the webcam input doesn't support scaling, cropping or composing */

	if (!vivid_is_webcam(dev) &&
	    (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		if (dev->has_scaler_cap) {
			if (dev->has_compose_cap)
				v4l2_rect_map_inside(compose, &r);
			else
				*compose = r;
			if (dev->has_crop_cap && !dev->has_compose_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					r.width / MAX_ZOOM,
					factor * r.height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					r.width * MAX_ZOOM,
					factor * r.height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			} else if (dev->has_crop_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					compose->width / MAX_ZOOM,
					factor * compose->height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					compose->width * MAX_ZOOM,
					factor * compose->height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap && !dev->has_compose_cap) {
			r.height *= factor;
			v4l2_rect_set_size_to(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			r = *crop;
			r.height /= factor;
			v4l2_rect_set_size_to(compose, &r);
		} else if (!dev->has_crop_cap) {
			v4l2_rect_map_inside(compose, &r);
		} else {
			r.height *= factor;
			v4l2_rect_set_max_size(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			compose->top *= factor;
			compose->height *= factor;
			v4l2_rect_set_size_to(compose, crop);
			v4l2_rect_map_inside(compose, &r);
			compose->top /= factor;
			compose->height /= factor;
		}
	} else if (vivid_is_webcam(dev)) {
		unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx);

		/* Guaranteed to be a match */
		for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
			if (webcam_sizes[i].width == mp->width &&
			    webcam_sizes[i].height == mp->height)
				break;
		dev->webcam_size_idx = i;
		if (dev->webcam_ival_idx >= ival_sz)
			dev->webcam_ival_idx = ival_sz - 1;
		vivid_update_format_cap(dev, false);
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		v4l2_rect_set_size_to(compose, &r);
		r.height *= factor;
		v4l2_rect_set_size_to(crop, &r);
	}

	dev->fmt_cap_rect.width = mp->width;
	dev->fmt_cap_rect.height = mp->height;
	tpg_s_buf_height(&dev->tpg, mp->height);
	tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
	for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
		tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
	dev->field_cap = mp->field;
	if (dev->field_cap == V4L2_FIELD_ALTERNATE)
		tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
	else
		tpg_s_field(&dev->tpg, dev->field_cap, false);
	tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
	if (vivid_is_sdtv_cap(dev))
		dev->tv_field_cap = mp->field;
	tpg_update_mv_step(&dev->tpg);
	dev->tpg.colorspace = mp->colorspace;
	dev->tpg.xfer_func = mp->xfer_func;
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR)
		dev->tpg.ycbcr_enc = mp->ycbcr_enc;
	else
		dev->tpg.hsv_enc = mp->hsv_enc;
	dev->tpg.quantization = mp->quantization;

	return 0;
}
int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_g_fmt_vid_cap(file, priv, f);
}

int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_try_fmt_vid_cap(file, priv, f);
}

int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_s_fmt_vid_cap(file, priv, f);
}

int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
}

int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
}

int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
}

int vivid_vid_cap_g_selection(struct file *file, void *priv,
			      struct v4l2_selection *sel)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -ENODATA;

	sel->r.left = sel->r.top = 0;
	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->crop_cap;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->src_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = vivid_max_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->compose_cap;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->fmt_cap_rect;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
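/*
 * Worked example (editor's note, not part of the original source): the
 * selection handler below scales tops and heights by 'factor' (2 when
 * field_cap is a single-field setting such as V4L2_FIELD_ALTERNATE) because
 * crop rectangles are expressed in frame lines while compose rectangles are
 * expressed in field lines. With a 720x576 PAL source in ALTERNATE mode a
 * full-source crop of 720x576 therefore corresponds to a 720x288 compose
 * rectangle.
 */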
int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
	int ret;

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -ENODATA;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
		v4l2_rect_set_max_size(&s->r, &dev->src_rect);
		v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
		s->r.top /= factor;
		s->r.height /= factor;
		if (dev->has_scaler_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;
			struct v4l2_rect max_rect = {
				0, 0,
				s->r.width * MAX_ZOOM,
				s->r.height * MAX_ZOOM
			};
			struct v4l2_rect min_rect = {
				0, 0,
				s->r.width / MAX_ZOOM,
				s->r.height / MAX_ZOOM
			};

			v4l2_rect_set_min_size(&fmt, &min_rect);
			if (!dev->has_compose_cap)
				v4l2_rect_set_max_size(&fmt, &max_rect);
			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			if (dev->has_compose_cap) {
				v4l2_rect_set_min_size(compose, &min_rect);
				v4l2_rect_set_max_size(compose, &max_rect);
				v4l2_rect_map_inside(compose, &fmt);
			}
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
		} else if (dev->has_compose_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;

			v4l2_rect_set_min_size(&fmt, &s->r);
			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
			v4l2_rect_set_size_to(compose, &s->r);
			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
		} else {
			if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
			v4l2_rect_set_size_to(compose, &s->r);
			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
			tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
		}
		s->r.top *= factor;
		s->r.height *= factor;
		*crop = s->r;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
		v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
		if (dev->has_scaler_cap) {
			struct v4l2_rect max_rect = {
				0, 0,
				dev->src_rect.width * MAX_ZOOM,
				(dev->src_rect.height / factor) * MAX_ZOOM
			};

			v4l2_rect_set_max_size(&s->r, &max_rect);
			if (dev->has_crop_cap) {
				struct v4l2_rect min_rect = {
					0, 0,
					s->r.width / MAX_ZOOM,
					(s->r.height * factor) / MAX_ZOOM
				};
				struct v4l2_rect max_rect = {
					0, 0,
					s->r.width * MAX_ZOOM,
					(s->r.height * factor) * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_rect);
				v4l2_rect_set_max_size(crop, &max_rect);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap) {
			s->r.top *= factor;
			s->r.height *= factor;
			v4l2_rect_set_max_size(&s->r, &dev->src_rect);
			v4l2_rect_set_size_to(crop, &s->r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			s->r.top /= factor;
			s->r.height /= factor;
		} else {
			v4l2_rect_set_size_to(&s->r, &dev->src_rect);
			s->r.height /= factor;
		}
		v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
		*compose = s->r;
		break;
	default:
		return -EINVAL;
	}

	tpg_s_crop_compose(&dev->tpg, crop, compose);
	return 0;
}
int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv,
				int type, struct v4l2_fract *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (vivid_get_pixel_aspect(dev)) {
	case TPG_PIXEL_ASPECT_NTSC:
		f->numerator = 11;
		f->denominator = 10;
		break;
	case TPG_PIXEL_ASPECT_PAL:
		f->numerator = 54;
		f->denominator = 59;
		break;
	default:
		break;
	}
	return 0;
}

static const struct v4l2_audio vivid_audio_inputs[] = {
	{ 0, "TV", V4L2_AUDCAP_STEREO },
	{ 1, "Line-In", V4L2_AUDCAP_STEREO },
};

int vidioc_enum_input(struct file *file, void *priv,
		      struct v4l2_input *inp)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (inp->index >= dev->num_inputs)
		return -EINVAL;

	inp->type = V4L2_INPUT_TYPE_CAMERA;
	switch (dev->input_type[inp->index]) {
	case WEBCAM:
		snprintf(inp->name, sizeof(inp->name), "Webcam %u",
			 dev->input_name_counter[inp->index]);
		inp->capabilities = 0;
		break;
	case TV:
		snprintf(inp->name, sizeof(inp->name), "TV %u",
			 dev->input_name_counter[inp->index]);
		inp->type = V4L2_INPUT_TYPE_TUNER;
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case SVID:
		snprintf(inp->name, sizeof(inp->name), "S-Video %u",
			 dev->input_name_counter[inp->index]);
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case HDMI:
		snprintf(inp->name, sizeof(inp->name), "HDMI %u",
			 dev->input_name_counter[inp->index]);
		inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
		if (dev->edid_blocks == 0 ||
		    dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL)
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK ||
			 dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE)
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		break;
	}
	if (dev->sensor_hflip)
		inp->status |= V4L2_IN_ST_HFLIP;
	if (dev->sensor_vflip)
		inp->status |= V4L2_IN_ST_VFLIP;
	if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
		if (dev->std_signal_mode[dev->input] == NO_SIGNAL) {
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		} else if (dev->std_signal_mode[dev->input] == NO_LOCK) {
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		} else if (vivid_is_tv_cap(dev)) {
			switch (tpg_g_quality(&dev->tpg)) {
			case TPG_QUAL_GRAY:
				inp->status |= V4L2_IN_ST_COLOR_KILL;
				break;
			case TPG_QUAL_NOISE:
				inp->status |= V4L2_IN_ST_NO_H_LOCK;
				break;
			default:
				break;
			}
		}
	}
	return 0;
}

int vidioc_g_input(struct file *file, void *priv, unsigned *i)
{
	struct vivid_dev *dev = video_drvdata(file);

	*i = dev->input;
	return 0;
}
int vidioc_s_input(struct file *file, void *priv, unsigned i)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
	unsigned brightness;

	if (i >= dev->num_inputs)
		return -EINVAL;

	if (i == dev->input)
		return 0;

	if (vb2_is_busy(&dev->vb_vid_cap_q) ||
	    vb2_is_busy(&dev->vb_vbi_cap_q) ||
	    vb2_is_busy(&dev->vb_meta_cap_q))
		return -EBUSY;

	dev->input = i;
	dev->vid_cap_dev.tvnorms = 0;
	if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
		dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
		dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
	}
	dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	vivid_update_format_cap(dev, false);

	if (dev->colorspace) {
		switch (dev->input_type[i]) {
		case WEBCAM:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			break;
		case TV:
		case SVID:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			break;
		case HDMI:
			if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
				if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
				else
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			} else {
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			}
			break;
		}
	}

	/*
	 * Modify the brightness range depending on the input.
	 * This makes it easy to use vivid to test if applications can
	 * handle control range modifications and is also how this is
	 * typically used in practice as different inputs may be hooked
	 * up to different receivers with different control ranges.
	 */
	brightness = 128 * i + dev->input_brightness[i];
	v4l2_ctrl_modify_range(dev->brightness,
			       128 * i, 255 + 128 * i, 1, 128 + 128 * i);
	v4l2_ctrl_s_ctrl(dev->brightness, brightness);

	/* Restore per-input states. */
	v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode,
			   vivid_is_hdmi_cap(dev));
	v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) &&
			   dev->dv_timings_signal_mode[dev->input] ==
			   SELECTED_DV_TIMINGS);
	v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev));
	v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) &&
			   dev->std_signal_mode[dev->input]);

	if (vivid_is_hdmi_cap(dev)) {
		v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode,
				 dev->dv_timings_signal_mode[dev->input]);
		v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings,
				 dev->query_dv_timings[dev->input]);
	} else if (vivid_is_sdtv_cap(dev)) {
		v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode,
				 dev->std_signal_mode[dev->input]);
		v4l2_ctrl_s_ctrl(dev->ctrl_standard,
				 dev->std_signal_mode[dev->input]);
	}

	return 0;
}
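/*
 * Worked example (editor's note, not part of the original source): switching
 * to input 2 moves the brightness control range to 256..511 with a default
 * of 384, and the current value is restored as 256 + input_brightness[2],
 * so the per-input brightness offset survives input changes even though the
 * absolute control value shifts.
 */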
int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	*vin = vivid_audio_inputs[vin->index];
	return 0;
}

int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	*vin = vivid_audio_inputs[dev->tv_audio_input];
	return 0;
}

int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	dev->tv_audio_input = vin->index;
	return 0;
}

int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	vf->frequency = dev->tv_freq;
	return 0;
}

int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
	if (vivid_is_tv_cap(dev))
		vivid_update_quality(dev);
	return 0;
}

int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vt->index != 0)
		return -EINVAL;
	if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
		return -EINVAL;
	dev->tv_audmode = vt->audmode;
	return 0;
}

int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);
	enum tpg_quality qual;

	if (vt->index != 0)
		return -EINVAL;

	vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
			 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
	vt->audmode = dev->tv_audmode;
	vt->rangelow = MIN_TV_FREQ;
	vt->rangehigh = MAX_TV_FREQ;
	qual = vivid_get_quality(dev, &vt->afc);
	if (qual == TPG_QUAL_COLOR)
		vt->signal = 0xffff;
	else if (qual == TPG_QUAL_GRAY)
		vt->signal = 0x8000;
	else
		vt->signal = 0;
	if (qual == TPG_QUAL_NOISE) {
		vt->rxsubchans = 0;
	} else if (qual == TPG_QUAL_GRAY) {
		vt->rxsubchans = V4L2_TUNER_SUB_MONO;
	} else {
		unsigned int channel_nr = dev->tv_freq / (6 * 16);
		unsigned int options =
			(dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3;

		switch (channel_nr % options) {
		case 0:
			vt->rxsubchans = V4L2_TUNER_SUB_MONO;
			break;
		case 1:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
			break;
		case 2:
			if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M)
				vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
			else
				vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
			break;
		case 3:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
			break;
		}
	}
	strscpy(vt->name, "TV Tuner", sizeof(vt->name));
	return 0;
}
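/*
 * Worked example (editor's note, not part of the original source): with good
 * signal quality the reported audio subchannels cycle with the channel
 * number (tv_freq / 96). For NTSC-M there are four options, so successive
 * channels report MONO, STEREO, MONO|SAP and STEREO|SAP; other standards
 * cycle through MONO, STEREO and LANG1|LANG2.
 */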
/* Must remain in sync with the vivid_ctrl_standard_strings array */
const v4l2_std_id vivid_standard[] = {
	V4L2_STD_NTSC_M,
	V4L2_STD_NTSC_M_JP,
	V4L2_STD_NTSC_M_KR,
	V4L2_STD_NTSC_443,
	V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
	V4L2_STD_PAL_I,
	V4L2_STD_PAL_DK,
	V4L2_STD_PAL_M,
	V4L2_STD_PAL_N,
	V4L2_STD_PAL_Nc,
	V4L2_STD_PAL_60,
	V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
	V4L2_STD_SECAM_DK,
	V4L2_STD_SECAM_L,
	V4L2_STD_SECAM_LC,
	V4L2_STD_UNKNOWN
};

/* Must remain in sync with the vivid_standard array */
const char * const vivid_ctrl_standard_strings[] = {
	"NTSC-M",
	"NTSC-M-JP",
	"NTSC-M-KR",
	"NTSC-443",
	"PAL-BGH",
	"PAL-I",
	"PAL-DK",
	"PAL-M",
	"PAL-N",
	"PAL-Nc",
	"PAL-60",
	"SECAM-BGH",
	"SECAM-DK",
	"SECAM-L",
	"SECAM-Lc",
	NULL,
};

int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int last = dev->query_std_last[dev->input];

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_signal_mode[dev->input] == NO_SIGNAL ||
	    dev->std_signal_mode[dev->input] == NO_LOCK) {
		*id = V4L2_STD_UNKNOWN;
		return 0;
	}
	if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
		*id = V4L2_STD_UNKNOWN;
	} else if (dev->std_signal_mode[dev->input] == CURRENT_STD) {
		*id = dev->std_cap[dev->input];
	} else if (dev->std_signal_mode[dev->input] == SELECTED_STD) {
		*id = dev->query_std[dev->input];
	} else {
		*id = vivid_standard[last];
		dev->query_std_last[dev->input] =
			(last + 1) % ARRAY_SIZE(vivid_standard);
	}

	return 0;
}

int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_cap[dev->input] == id)
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
		return -EBUSY;
	dev->std_cap[dev->input] = id;
	vivid_update_format_cap(dev, false);
	return 0;
}

static void find_aspect_ratio(u32 width, u32 height,
			      u32 *num, u32 *denom)
{
	if (!(height % 3) && ((height * 4 / 3) == width)) {
		*num = 4;
		*denom = 3;
	} else if (!(height % 9) && ((height * 16 / 9) == width)) {
		*num = 16;
		*denom = 9;
	} else if (!(height % 10) && ((height * 16 / 10) == width)) {
		*num = 16;
		*denom = 10;
	} else if (!(height % 4) && ((height * 5 / 4) == width)) {
		*num = 5;
		*denom = 4;
	} else if (!(height % 9) && ((height * 15 / 9) == width)) {
		*num = 15;
		*denom = 9;
	} else { /* default to 16:9 */
		*num = 16;
		*denom = 9;
	}
}
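/*
 * Worked example (editor's note, not part of the original source):
 * find_aspect_ratio(1280, 720) returns 16:9 (720 is divisible by 9 and
 * 720 * 16 / 9 == 1280), find_aspect_ratio(1280, 1024) returns 5:4, and an
 * unrecognized size such as 1366x768 falls through to the 16:9 default.
 */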
static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
{
	struct v4l2_bt_timings *bt = &timings->bt;
	u32 total_h_pixel;
	u32 total_v_lines;
	u32 h_freq;

	if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
				   NULL, NULL))
		return false;

	total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
	total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);

	h_freq = (u32)bt->pixelclock / total_h_pixel;

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
		if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
				    bt->polarities, bt->interlaced, timings))
			return true;
	}

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
		struct v4l2_fract aspect_ratio;

		find_aspect_ratio(bt->width, bt->height,
				  &aspect_ratio.numerator,
				  &aspect_ratio.denominator);
		if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
				    bt->polarities, bt->interlaced,
				    aspect_ratio, timings))
			return true;
	}
	return false;
}

int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
			       struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
				      0, NULL, NULL) &&
	    !valid_cvt_gtf_timings(timings))
		return -EINVAL;

	if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input],
				  0, false))
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->dv_timings_cap[dev->input] = *timings;
	vivid_update_format_cap(dev, false);
	return 0;
}

int vidioc_query_dv_timings(struct file *file, void *_fh,
			    struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int input = dev->input;
	unsigned int last = dev->query_dv_timings_last[input];

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (dev->dv_timings_signal_mode[input] == NO_SIGNAL ||
	    dev->edid_blocks == 0)
		return -ENOLINK;
	if (dev->dv_timings_signal_mode[input] == NO_LOCK)
		return -ENOLCK;
	if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) {
		timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
		return -ERANGE;
	}
	if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) {
		*timings = dev->dv_timings_cap[input];
	} else if (dev->dv_timings_signal_mode[input] ==
		   SELECTED_DV_TIMINGS) {
		*timings =
			v4l2_dv_timings_presets[dev->query_dv_timings[input]];
	} else {
		*timings =
			v4l2_dv_timings_presets[last];
		dev->query_dv_timings_last[input] =
			(last + 1) % dev->query_dv_timings_size;
	}
	return 0;
}
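/*
 * Worked example (editor's note, not part of the original source): in the
 * s_edid handler below, an EDID carrying the physical address 1.0.0.0 makes
 * the CEC receiver adapter take that address, while each enabled HDMI output
 * i is assigned the address of input port i + 1 behind it via
 * v4l2_phys_addr_for_input(), e.g. 1.1.0.0 for the first output. Clearing
 * the EDID (blocks == 0) invalidates the addresses again.
 */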
int vidioc_s_edid(struct file *file, void *_fh,
			 struct v4l2_edid *edid)
{
	struct vivid_dev *dev = video_drvdata(file);
	u16 phys_addr;
	u32 display_present = 0;
	unsigned int i, j;
	int ret;

	memset(edid->reserved, 0, sizeof(edid->reserved));
	if (edid->pad >= dev->num_inputs)
		return -EINVAL;
	if (dev->input_type[edid->pad] != HDMI || edid->start_block)
		return -EINVAL;
	if (edid->blocks == 0) {
		dev->edid_blocks = 0;
		if (dev->num_outputs) {
			v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
			v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
		}
		phys_addr = CEC_PHYS_ADDR_INVALID;
		goto set_phys_addr;
	}
	if (edid->blocks > dev->edid_max_blocks) {
		edid->blocks = dev->edid_max_blocks;
		return -E2BIG;
	}
	phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
	ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
	if (ret)
		return ret;

	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->edid_blocks = edid->blocks;
	memcpy(dev->edid, edid->edid, edid->blocks * 128);

	for (i = 0, j = 0; i < dev->num_outputs; i++)
		if (dev->output_type[i] == HDMI)
			display_present |=
				dev->display_present[i] << j++;

	if (dev->num_outputs) {
		v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
		v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
	}

set_phys_addr:
	/* TODO: a proper hotplug detect cycle should be emulated here */
	cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false);

	for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
		cec_s_phys_addr(dev->cec_tx_adap[i],
				dev->display_present[i] ?
				v4l2_phys_addr_for_input(phys_addr, i + 1) :
				CEC_PHYS_ADDR_INVALID,
				false);
	return 0;
}

int vidioc_enum_framesizes(struct file *file, void *fh,
			   struct v4l2_frmsizeenum *fsize)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
		return -EINVAL;
	if (vivid_get_format(dev, fsize->pixel_format) == NULL)
		return -EINVAL;
	if (vivid_is_webcam(dev)) {
		if (fsize->index >= ARRAY_SIZE(webcam_sizes))
			return -EINVAL;
		fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
		fsize->discrete = webcam_sizes[fsize->index];
		return 0;
	}
	if (fsize->index)
		return -EINVAL;
	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = MIN_WIDTH;
	fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
	fsize->stepwise.step_width = 2;
	fsize->stepwise.min_height = MIN_HEIGHT;
	fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
	fsize->stepwise.step_height = 2;
	return 0;
}

/* timeperframe is arbitrary and continuous */
int vidioc_enum_frameintervals(struct file *file, void *priv,
			       struct v4l2_frmivalenum *fival)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	int i;

	fmt = vivid_get_format(dev, fival->pixel_format);
	if (!fmt)
		return -EINVAL;

	if (!vivid_is_webcam(dev)) {
		if (fival->index)
			return -EINVAL;
		if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
			return -EINVAL;
		if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
			return -EINVAL;
		fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
		fival->discrete = dev->timeperframe_vid_cap;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
		if (fival->width == webcam_sizes[i].width &&
		    fival->height == webcam_sizes[i].height)
			break;
	if (i == ARRAY_SIZE(webcam_sizes))
		return -EINVAL;
	if (fival->index >= webcam_ival_count(dev, i))
		return -EINVAL;
	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = webcam_intervals[fival->index];
	return 0;
}
int vivid_vid_cap_g_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;

	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
	parm->parm.capture.readbuffers = 1;
	return 0;
}

int vivid_vid_cap_s_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx);
	struct v4l2_fract tpf;
	unsigned i;

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;
	if (!vivid_is_webcam(dev))
		return vivid_vid_cap_g_parm(file, priv, parm);

	tpf = parm->parm.capture.timeperframe;

	if (tpf.denominator == 0)
		tpf = webcam_intervals[ival_sz - 1];
	for (i = 0; i < ival_sz; i++)
		if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i]))
			break;
	if (i == ival_sz)
		i = ival_sz - 1;
	dev->webcam_ival_idx = i;
	tpf = webcam_intervals[dev->webcam_ival_idx];

	/* resync the thread's timings */
	dev->cap_seq_resync = true;
	dev->timeperframe_vid_cap = tpf;
	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = tpf;
	parm->parm.capture.readbuffers = 1;
	return 0;
}
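/*
 * Worked example (editor's note, not part of the original source): with the
 * webcam input at 1920x1080 only the first IVAL_COUNT_1080P (9) intervals
 * are considered by vivid_vid_cap_s_parm(), so a request for 1/100 s per
 * frame matches none of them, i is clamped to ival_sz - 1 and the interval
 * snaps to { 1, 30 }, i.e. 30 frames per second.
 */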