// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-vid-cap.c - video capture support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-rect.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-kthread-cap.h"
#include "vivid-vid-cap.h"

/* The number of discrete webcam framesizes */
#define VIVID_WEBCAM_SIZES 6
/* The number of discrete webcam frameintervals */
#define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)

/* Sizes must be in increasing order */
static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
	{  320, 180 },
	{  640, 360 },
	{  640, 480 },
	{ 1280, 720 },
	{ 1920, 1080 },
	{ 3840, 2160 },
};

/*
 * Intervals must be in increasing order and there must be twice as many
 * elements in this array as there are in webcam_sizes.
 */
static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
	{  1,  1 },
	{  1,  2 },
	{  1,  4 },
	{  1,  5 },
	{  1, 10 },
	{  2, 25 },
	{  1, 15 },
	{  1, 25 },
	{  1, 30 },
	{  1, 40 },
	{  1, 50 },
	{  1, 60 },
};

static int vid_cap_queue_setup(struct vb2_queue *vq,
		       unsigned *nbuffers, unsigned *nplanes,
		       unsigned sizes[], struct device *alloc_devs[])
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned h = dev->fmt_cap_rect.height;
	unsigned p;

	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * You cannot use read() with FIELD_ALTERNATE since the field
		 * information (TOP/BOTTOM) cannot be passed back to the user.
		 */
		if (vb2_fileio_is_active(vq))
			return -EINVAL;
	}

	if (dev->queue_setup_error) {
		/*
		 * Error injection: test what happens if queue_setup() returns
		 * an error.
		 */
		dev->queue_setup_error = false;
		return -EINVAL;
	}
	if (*nplanes) {
		/*
		 * Check if the number of requested planes matches
		 * the number of buffers in the current format. You can't mix that.
		 */
		if (*nplanes != buffers)
			return -EINVAL;
		for (p = 0; p < buffers; p++) {
			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
						dev->fmt_cap->data_offset[p])
				return -EINVAL;
		}
	} else {
		for (p = 0; p < buffers; p++)
			sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
					dev->fmt_cap->vdownsampling[p] +
					dev->fmt_cap->data_offset[p];
	}

	if (vq->num_buffers + *nbuffers < 2)
		*nbuffers = 2 - vq->num_buffers;

	*nplanes = buffers;

	dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
	for (p = 0; p < buffers; p++)
		dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);

	return 0;
}

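/*
 * Worked example of the sizes[] computation in vid_cap_queue_setup() above
 * (for illustration only): with a 1920x1080 single-plane YUYV format and the
 * default 3840-byte line width, vdownsampling is 1 and data_offset is 0, so
 * sizes[0] = 3840 * 1080 = 4147200 bytes.
 */
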
static int vid_cap_buf_prepare(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size;
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned p;

	dprintk(dev, 1, "%s\n", __func__);

	if (WARN_ON(NULL == dev->fmt_cap))
		return -EINVAL;

	if (dev->buf_prepare_error) {
		/*
		 * Error injection: test what happens if buf_prepare() returns
		 * an error.
		 */
		dev->buf_prepare_error = false;
		return -EINVAL;
	}
	for (p = 0; p < buffers; p++) {
		size = (tpg_g_line_width(&dev->tpg, p) *
			dev->fmt_cap_rect.height) /
			dev->fmt_cap->vdownsampling[p] +
			dev->fmt_cap->data_offset[p];

		if (vb2_plane_size(vb, p) < size) {
			dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
					__func__, p, vb2_plane_size(vb, p), size);
			return -EINVAL;
		}

		vb2_set_plane_payload(vb, p, size);
		vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
	}

	return 0;
}

static void vid_cap_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_timecode *tc = &vbuf->timecode;
	unsigned fps = 25;
	unsigned seq = vbuf->sequence;

	if (!vivid_is_sdtv_cap(dev))
		return;

	/*
	 * Set the timecode. Rarely used, so it is interesting to
	 * test this.
	 */
	vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
	if (dev->std_cap[dev->input] & V4L2_STD_525_60)
		fps = 30;
	tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
	tc->flags = 0;
	tc->frames = seq % fps;
	tc->seconds = (seq / fps) % 60;
	tc->minutes = (seq / (60 * fps)) % 60;
	tc->hours = (seq / (60 * 60 * fps)) % 24;
}

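/*
 * Example of the timecode math in vid_cap_buf_finish() above (illustration
 * only): with a 60 Hz standard fps is 30, so sequence number 5400 yields
 * frames 0, seconds 0, minutes 3, hours 0, i.e. timecode 00:03:00:00.
 */
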
static void vid_cap_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);

	dprintk(dev, 1, "%s\n", __func__);

	spin_lock(&dev->slock);
	list_add_tail(&buf->list, &dev->vid_cap_active);
	spin_unlock(&dev->slock);
}

static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned i;
	int err;

	if (vb2_is_streaming(&dev->vb_vid_out_q))
		dev->can_loop_video = vivid_vid_can_loop(dev);

	dev->vid_cap_seq_count = 0;
	dprintk(dev, 1, "%s\n", __func__);
	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
	if (dev->start_streaming_error) {
		dev->start_streaming_error = false;
		err = -EINVAL;
	} else {
		err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
	}
	if (err) {
		struct vivid_buffer *buf, *tmp;

		list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf,
					VB2_BUF_STATE_QUEUED);
		}
	}
	return err;
}

/* abort streaming and wait for last buffer */
static void vid_cap_stop_streaming(struct vb2_queue *vq)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);

	dprintk(dev, 1, "%s\n", __func__);
	vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
	dev->can_loop_video = false;
}

static void vid_cap_buf_request_complete(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
}

const struct vb2_ops vivid_vid_cap_qops = {
	.queue_setup		= vid_cap_queue_setup,
	.buf_prepare		= vid_cap_buf_prepare,
	.buf_finish		= vid_cap_buf_finish,
	.buf_queue		= vid_cap_buf_queue,
	.start_streaming	= vid_cap_start_streaming,
	.stop_streaming		= vid_cap_stop_streaming,
	.buf_request_complete	= vid_cap_buf_request_complete,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};

/*
 * Determine the 'picture' quality based on the current TV frequency: either
 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
 * signal or NOISE for no signal.
 */
void vivid_update_quality(struct vivid_dev *dev)
{
	unsigned freq_modulus;

	if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
		/*
		 * The 'noise' will only be replaced by the actual video
		 * if the output video matches the input video settings.
		 */
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_hdmi_cap(dev) &&
	    VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_sdtv_cap(dev) &&
	    VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (!vivid_is_tv_cap(dev)) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
		return;
	}

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just noise.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (freq_modulus > 2 * 16) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
			next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
		return;
	}
	if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
		tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
	else
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
}

/*
 * Get the current picture quality and the associated afc value.
 */
static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
{
	unsigned freq_modulus;

	if (afc)
		*afc = 0;
	if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
	    tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
		return tpg_g_quality(&dev->tpg);

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just gray.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (afc)
		*afc = freq_modulus - 1 * 16;
	return TPG_QUAL_GRAY;
}

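/*
 * Illustration of the frequency units used above: dev->tv_freq is in units
 * of 1/16 MHz, so one 6 MHz channel spans 96 units and 676 corresponds to
 * 42.25 MHz. Tuning to 49.25 MHz (tv_freq == 788) gives freq_modulus == 16,
 * i.e. TPG_QUAL_COLOR with an afc of 0, while 49.75 MHz (tv_freq == 796)
 * gives freq_modulus == 24, i.e. a grayscale picture with an afc of 8.
 */
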
enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return dev->std_aspect_ratio[dev->input];

	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_aspect_ratio[dev->input];

	return TPG_VIDEO_ASPECT_IMAGE;
}

static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return (dev->std_cap[dev->input] & V4L2_STD_525_60) ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	if (vivid_is_hdmi_cap(dev) &&
	    dev->src_rect.width == 720 && dev->src_rect.height <= 576)
		return dev->src_rect.height == 480 ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	return TPG_PIXEL_ASPECT_SQUARE;
}

/*
 * Called whenever the format has to be reset which can occur when
 * changing inputs, standard, timings, etc.
 */
void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
{
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
	u32 dims[V4L2_CTRL_MAX_DIMS] = {};
	unsigned size;
	u64 pixelclock;

	switch (dev->input_type[dev->input]) {
	case WEBCAM:
	default:
		dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
		dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
		dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
		dev->field_cap = V4L2_FIELD_NONE;
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case TV:
	case SVID:
		dev->field_cap = dev->tv_field_cap;
		dev->src_rect.width = 720;
		if (dev->std_cap[dev->input] & V4L2_STD_525_60) {
			dev->src_rect.height = 480;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
			dev->service_set_cap = V4L2_SLICED_CAPTION_525;
		} else {
			dev->src_rect.height = 576;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
			dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
		}
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case HDMI:
		dev->src_rect.width = bt->width;
		dev->src_rect.height = bt->height;
		size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
		if (dev->reduced_fps && can_reduce_fps(bt)) {
			pixelclock = div_u64(bt->pixelclock * 1000, 1001);
			bt->flags |= V4L2_DV_FL_REDUCED_FPS;
		} else {
			pixelclock = bt->pixelclock;
			bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
		}
		dev->timeperframe_vid_cap = (struct v4l2_fract) {
			size / 100, (u32)pixelclock / 100
		};
		if (bt->interlaced)
			dev->field_cap = V4L2_FIELD_ALTERNATE;
		else
			dev->field_cap = V4L2_FIELD_NONE;

		/*
		 * We can be called from within s_ctrl, in that case we can't
		 * set/get controls. Luckily we don't need to in that case.
		 */
		if (keep_controls || !dev->colorspace)
			break;
		if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
			if (bt->width == 720 && bt->height <= 576)
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			else
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
		} else {
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
		}
		tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
		break;
	}
	vivid_update_quality(dev);
	tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
	dev->crop_cap = dev->src_rect;
	dev->crop_bounds_cap = dev->src_rect;
	dev->compose_cap = dev->crop_cap;
	if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
		dev->compose_cap.height /= 2;
	dev->fmt_cap_rect = dev->compose_cap;
	tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
	tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
	tpg_update_mv_step(&dev->tpg);

	/*
	 * We can be called from within s_ctrl, in that case we can't
	 * modify controls. Luckily we don't need to in that case.
	 */
	if (keep_controls)
		return;

	dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV);
	dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV);
	v4l2_ctrl_modify_dimensions(dev->pixel_array, dims);
}

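/*
 * Example of the HDMI timeperframe calculation above (illustration only,
 * using the standard CEA-861 1080p60 timings): the total frame is
 * 2200 x 1125 pixels and the pixelclock is 148500000 Hz, so
 * timeperframe_vid_cap becomes { 24750, 1485000 }, i.e. exactly 1/60 s.
 * With reduced_fps the pixelclock is first scaled by 1000/1001, which
 * results in the roughly 59.94 Hz rate.
 */
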
/* Map the field to something that is valid for the current input */
static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
{
	if (vivid_is_sdtv_cap(dev)) {
		switch (field) {
		case V4L2_FIELD_INTERLACED_TB:
		case V4L2_FIELD_INTERLACED_BT:
		case V4L2_FIELD_SEQ_TB:
		case V4L2_FIELD_SEQ_BT:
		case V4L2_FIELD_TOP:
		case V4L2_FIELD_BOTTOM:
		case V4L2_FIELD_ALTERNATE:
			return field;
		case V4L2_FIELD_INTERLACED:
		default:
			return V4L2_FIELD_INTERLACED;
		}
	}
	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_cap[dev->input].bt.interlaced ?
			V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE;
	return V4L2_FIELD_NONE;
}

static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_colorspace(&dev->tpg);
	return dev->colorspace_out;
}

static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_xfer_func(&dev->tpg);
	return dev->xfer_func_out;
}

static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_ycbcr_enc(&dev->tpg);
	return dev->ycbcr_enc_out;
}

static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_hsv_enc(&dev->tpg);
	return dev->hsv_enc_out;
}

static unsigned vivid_quantization_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_quantization(&dev->tpg);
	return dev->quantization_out;
}

int vivid_g_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	unsigned p;

	mp->width = dev->fmt_cap_rect.width;
	mp->height = dev->fmt_cap_rect.height;
	mp->field = dev->field_cap;
	mp->pixelformat = dev->fmt_cap->fourcc;
	mp->colorspace = vivid_colorspace_cap(dev);
	mp->xfer_func = vivid_xfer_func_cap(dev);
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
		mp->hsv_enc = vivid_hsv_enc_cap(dev);
	else
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	mp->quantization = vivid_quantization_cap(dev);
	mp->num_planes = dev->fmt_cap->buffers;
	for (p = 0; p < mp->num_planes; p++) {
		mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
		mp->plane_fmt[p].sizeimage =
			(tpg_g_line_width(&dev->tpg, p) * mp->height) /
			dev->fmt_cap->vdownsampling[p] +
			dev->fmt_cap->data_offset[p];
	}
	return 0;
}

int vivid_try_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	unsigned bytesperline, max_bpl;
	unsigned factor = 1;
	unsigned w, h;
	unsigned p;
	bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC);

	fmt = vivid_get_format(dev, mp->pixelformat);
	if (!fmt) {
		dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
			mp->pixelformat);
		mp->pixelformat = V4L2_PIX_FMT_YUYV;
		fmt = vivid_get_format(dev, mp->pixelformat);
	}

	mp->field = vivid_field_cap(dev, mp->field);
	if (vivid_is_webcam(dev)) {
		const struct v4l2_frmsize_discrete *sz =
			v4l2_find_nearest_size(webcam_sizes,
					       VIVID_WEBCAM_SIZES, width,
					       height, mp->width, mp->height);

		w = sz->width;
		h = sz->height;
	} else if (vivid_is_sdtv_cap(dev)) {
		w = 720;
		h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576;
	} else {
		w = dev->src_rect.width;
		h = dev->src_rect.height;
	}
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;
	if (vivid_is_webcam(dev) ||
	    (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
		mp->width = w;
		mp->height = h / factor;
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };

		v4l2_rect_set_min_size(&r, &vivid_min_rect);
		v4l2_rect_set_max_size(&r, &vivid_max_rect);
		if (dev->has_scaler_cap && !dev->has_compose_cap) {
			struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };

			v4l2_rect_set_max_size(&r, &max_r);
		} else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
			v4l2_rect_set_max_size(&r, &dev->src_rect);
		} else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
			v4l2_rect_set_min_size(&r, &dev->src_rect);
		}
		mp->width = r.width;
		mp->height = r.height / factor;
	}

	/* This driver supports custom bytesperline values */

	mp->num_planes = fmt->buffers;
	for (p = 0; p < fmt->buffers; p++) {
		/* Calculate the minimum supported bytesperline value */
		bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
		/* Calculate the maximum supported bytesperline value */
		max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;

		if (pfmt[p].bytesperline > max_bpl)
			pfmt[p].bytesperline = max_bpl;
		if (pfmt[p].bytesperline < bytesperline)
			pfmt[p].bytesperline = bytesperline;

		pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
				fmt->vdownsampling[p] + fmt->data_offset[p];

		memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
	}
	for (p = fmt->buffers; p < fmt->planes; p++)
		pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
			(fmt->bit_depth[p] / fmt->vdownsampling[p])) /
			(fmt->bit_depth[0] / fmt->vdownsampling[0]);

	if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace))
		mp->colorspace = vivid_colorspace_cap(dev);

	if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func))
		mp->xfer_func = vivid_xfer_func_cap(dev);

	if (fmt->color_enc == TGP_COLOR_ENC_HSV) {
		if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc))
			mp->hsv_enc = vivid_hsv_enc_cap(dev);
	} else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) {
		if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc))
			mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	} else {
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	}

	if (fmt->color_enc == TGP_COLOR_ENC_YCBCR ||
	    fmt->color_enc == TGP_COLOR_ENC_RGB) {
		if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization))
			mp->quantization = vivid_quantization_cap(dev);
	} else {
		mp->quantization = vivid_quantization_cap(dev);
	}

	memset(mp->reserved, 0, sizeof(mp->reserved));
	return 0;
}

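/*
 * Illustration of the bytesperline handling in vivid_try_fmt_vid_cap()
 * above: for a 1920-wide single-plane YUYV request the minimum bytesperline
 * is (1920 * 16) >> 3 = 3840. A larger caller-supplied value such as 4096 is
 * kept as line padding and sizeimage then becomes 4096 * height, while
 * values above (MAX_ZOOM * MAX_WIDTH * 16) >> 3 are clamped to that maximum.
 */
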
int vivid_s_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	struct vb2_queue *q = &dev->vb_vid_cap_q;
	int ret = vivid_try_fmt_vid_cap(file, priv, f);
	unsigned factor = 1;
	unsigned p;
	unsigned i;

	if (ret < 0)
		return ret;

	if (vb2_is_busy(q)) {
		dprintk(dev, 1, "%s device busy\n", __func__);
		return -EBUSY;
	}

	dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;

	/* Note: the webcam input doesn't support scaling, cropping or composing */

	if (!vivid_is_webcam(dev) &&
	    (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		if (dev->has_scaler_cap) {
			if (dev->has_compose_cap)
				v4l2_rect_map_inside(compose, &r);
			else
				*compose = r;
			if (dev->has_crop_cap && !dev->has_compose_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					r.width / MAX_ZOOM,
					factor * r.height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					r.width * MAX_ZOOM,
					factor * r.height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			} else if (dev->has_crop_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					compose->width / MAX_ZOOM,
					factor * compose->height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					compose->width * MAX_ZOOM,
					factor * compose->height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap && !dev->has_compose_cap) {
			r.height *= factor;
			v4l2_rect_set_size_to(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			r = *crop;
			r.height /= factor;
			v4l2_rect_set_size_to(compose, &r);
		} else if (!dev->has_crop_cap) {
			v4l2_rect_map_inside(compose, &r);
		} else {
			r.height *= factor;
			v4l2_rect_set_max_size(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			compose->top *= factor;
			compose->height *= factor;
			v4l2_rect_set_size_to(compose, crop);
			v4l2_rect_map_inside(compose, &r);
			compose->top /= factor;
			compose->height /= factor;
		}
	} else if (vivid_is_webcam(dev)) {
		/* Guaranteed to be a match */
		for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
			if (webcam_sizes[i].width == mp->width &&
			    webcam_sizes[i].height == mp->height)
				break;
		dev->webcam_size_idx = i;
		if (dev->webcam_ival_idx >= 2 * (VIVID_WEBCAM_SIZES - i))
			dev->webcam_ival_idx = 2 * (VIVID_WEBCAM_SIZES - i) - 1;
		vivid_update_format_cap(dev, false);
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		v4l2_rect_set_size_to(compose, &r);
		r.height *= factor;
		v4l2_rect_set_size_to(crop, &r);
	}

	dev->fmt_cap_rect.width = mp->width;
	dev->fmt_cap_rect.height = mp->height;
	tpg_s_buf_height(&dev->tpg, mp->height);
	tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
	for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
		tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
	dev->field_cap = mp->field;
	if (dev->field_cap == V4L2_FIELD_ALTERNATE)
		tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
	else
		tpg_s_field(&dev->tpg, dev->field_cap, false);
	tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
	if (vivid_is_sdtv_cap(dev))
		dev->tv_field_cap = mp->field;
	tpg_update_mv_step(&dev->tpg);
	dev->tpg.colorspace = mp->colorspace;
	dev->tpg.xfer_func = mp->xfer_func;
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR)
		dev->tpg.ycbcr_enc = mp->ycbcr_enc;
	else
		dev->tpg.hsv_enc = mp->hsv_enc;
	dev->tpg.quantization = mp->quantization;

	return 0;
}

int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_g_fmt_vid_cap(file, priv, f);
}

int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_try_fmt_vid_cap(file, priv, f);
}

int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_s_fmt_vid_cap(file, priv, f);
}

int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
}

int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
}

int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
}

int vivid_vid_cap_g_selection(struct file *file, void *priv,
			      struct v4l2_selection *sel)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -ENODATA;

	sel->r.left = sel->r.top = 0;
	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->crop_cap;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->src_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = vivid_max_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->compose_cap;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->fmt_cap_rect;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
	int ret;

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -ENODATA;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
		v4l2_rect_set_max_size(&s->r, &dev->src_rect);
		v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
		s->r.top /= factor;
		s->r.height /= factor;
		if (dev->has_scaler_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;
			struct v4l2_rect max_rect = {
				0, 0,
				s->r.width * MAX_ZOOM,
				s->r.height * MAX_ZOOM
			};
			struct v4l2_rect min_rect = {
				0, 0,
				s->r.width / MAX_ZOOM,
				s->r.height / MAX_ZOOM
			};

			v4l2_rect_set_min_size(&fmt, &min_rect);
			if (!dev->has_compose_cap)
				v4l2_rect_set_max_size(&fmt, &max_rect);
			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			if (dev->has_compose_cap) {
				v4l2_rect_set_min_size(compose, &min_rect);
				v4l2_rect_set_max_size(compose, &max_rect);
				v4l2_rect_map_inside(compose, &fmt);
			}
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
		} else if (dev->has_compose_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;

			v4l2_rect_set_min_size(&fmt, &s->r);
			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
			v4l2_rect_set_size_to(compose, &s->r);
			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
		} else {
			if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
			v4l2_rect_set_size_to(compose, &s->r);
			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
			tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
		}
		s->r.top *= factor;
		s->r.height *= factor;
		*crop = s->r;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
		v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
		if (dev->has_scaler_cap) {
			struct v4l2_rect max_rect = {
				0, 0,
				dev->src_rect.width * MAX_ZOOM,
				(dev->src_rect.height / factor) * MAX_ZOOM
			};

			v4l2_rect_set_max_size(&s->r, &max_rect);
			if (dev->has_crop_cap) {
				struct v4l2_rect min_rect = {
					0, 0,
					s->r.width / MAX_ZOOM,
					(s->r.height * factor) / MAX_ZOOM
				};
				struct v4l2_rect max_rect = {
					0, 0,
					s->r.width * MAX_ZOOM,
					(s->r.height * factor) * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_rect);
				v4l2_rect_set_max_size(crop, &max_rect);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap) {
			s->r.top *= factor;
			s->r.height *= factor;
			v4l2_rect_set_max_size(&s->r, &dev->src_rect);
			v4l2_rect_set_size_to(crop, &s->r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			s->r.top /= factor;
			s->r.height /= factor;
		} else {
			v4l2_rect_set_size_to(&s->r, &dev->src_rect);
			s->r.height /= factor;
		}
		v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
		*compose = s->r;
		break;
	default:
		return -EINVAL;
	}

	tpg_s_crop_compose(&dev->tpg, crop, compose);
	return 0;
}

int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv,
				int type, struct v4l2_fract *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (vivid_get_pixel_aspect(dev)) {
	case TPG_PIXEL_ASPECT_NTSC:
		f->numerator = 11;
		f->denominator = 10;
		break;
	case TPG_PIXEL_ASPECT_PAL:
		f->numerator = 54;
		f->denominator = 59;
		break;
	default:
		break;
	}
	return 0;
}

static const struct v4l2_audio vivid_audio_inputs[] = {
	{ 0, "TV", V4L2_AUDCAP_STEREO },
	{ 1, "Line-In", V4L2_AUDCAP_STEREO },
};

int vidioc_enum_input(struct file *file, void *priv,
				struct v4l2_input *inp)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (inp->index >= dev->num_inputs)
		return -EINVAL;

	inp->type = V4L2_INPUT_TYPE_CAMERA;
	switch (dev->input_type[inp->index]) {
	case WEBCAM:
		snprintf(inp->name, sizeof(inp->name), "Webcam %u",
				dev->input_name_counter[inp->index]);
		inp->capabilities = 0;
		break;
	case TV:
		snprintf(inp->name, sizeof(inp->name), "TV %u",
				dev->input_name_counter[inp->index]);
		inp->type = V4L2_INPUT_TYPE_TUNER;
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case SVID:
		snprintf(inp->name, sizeof(inp->name), "S-Video %u",
				dev->input_name_counter[inp->index]);
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case HDMI:
		snprintf(inp->name, sizeof(inp->name), "HDMI %u",
				dev->input_name_counter[inp->index]);
		inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
		if (dev->edid_blocks == 0 ||
		    dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL)
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK ||
			 dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE)
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		break;
	}
	if (dev->sensor_hflip)
		inp->status |= V4L2_IN_ST_HFLIP;
	if (dev->sensor_vflip)
		inp->status |= V4L2_IN_ST_VFLIP;
	if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
		if (dev->std_signal_mode[dev->input] == NO_SIGNAL) {
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		} else if (dev->std_signal_mode[dev->input] == NO_LOCK) {
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		} else if (vivid_is_tv_cap(dev)) {
			switch (tpg_g_quality(&dev->tpg)) {
			case TPG_QUAL_GRAY:
				inp->status |= V4L2_IN_ST_COLOR_KILL;
				break;
			case TPG_QUAL_NOISE:
				inp->status |= V4L2_IN_ST_NO_H_LOCK;
				break;
			default:
				break;
			}
		}
	}
	return 0;
}

int vidioc_g_input(struct file *file, void *priv, unsigned *i)
{
	struct vivid_dev *dev = video_drvdata(file);

	*i = dev->input;
	return 0;
}

int vidioc_s_input(struct file *file, void *priv, unsigned i)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
	unsigned brightness;

	if (i >= dev->num_inputs)
		return -EINVAL;

	if (i == dev->input)
		return 0;

	if (vb2_is_busy(&dev->vb_vid_cap_q) ||
	    vb2_is_busy(&dev->vb_vbi_cap_q) ||
	    vb2_is_busy(&dev->vb_meta_cap_q))
		return -EBUSY;

	dev->input = i;
	dev->vid_cap_dev.tvnorms = 0;
	if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
		dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
		dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
	}
	dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	vivid_update_format_cap(dev, false);

	if (dev->colorspace) {
		switch (dev->input_type[i]) {
		case WEBCAM:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			break;
		case TV:
		case SVID:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			break;
		case HDMI:
			if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
				if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
				else
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			} else {
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			}
			break;
		}
	}

	/*
	 * Modify the brightness range depending on the input.
	 * This makes it easy to use vivid to test if applications can
	 * handle control range modifications and is also how this is
	 * typically used in practice as different inputs may be hooked
	 * up to different receivers with different control ranges.
	 */
	brightness = 128 * i + dev->input_brightness[i];
	v4l2_ctrl_modify_range(dev->brightness,
			128 * i, 255 + 128 * i, 1, 128 + 128 * i);
	v4l2_ctrl_s_ctrl(dev->brightness, brightness);

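	/*
	 * Example of the brightness remapping above: for input 1 the control
	 * range becomes 128..383 with a default of 256, and the restored
	 * value is 128 + dev->input_brightness[1].
	 */
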
	/* Restore per-input states. */
	v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode,
			   vivid_is_hdmi_cap(dev));
	v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) &&
			   dev->dv_timings_signal_mode[dev->input] ==
			   SELECTED_DV_TIMINGS);
	v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev));
	v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) &&
			   dev->std_signal_mode[dev->input]);

	if (vivid_is_hdmi_cap(dev)) {
		v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode,
				 dev->dv_timings_signal_mode[dev->input]);
		v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings,
				 dev->query_dv_timings[dev->input]);
	} else if (vivid_is_sdtv_cap(dev)) {
		v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode,
				 dev->std_signal_mode[dev->input]);
		v4l2_ctrl_s_ctrl(dev->ctrl_standard,
				 dev->std_signal_mode[dev->input]);
	}

	return 0;
}

int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	*vin = vivid_audio_inputs[vin->index];
	return 0;
}

int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	*vin = vivid_audio_inputs[dev->tv_audio_input];
	return 0;
}

int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	dev->tv_audio_input = vin->index;
	return 0;
}

int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	vf->frequency = dev->tv_freq;
	return 0;
}

int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
	if (vivid_is_tv_cap(dev))
		vivid_update_quality(dev);
	return 0;
}

int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vt->index != 0)
		return -EINVAL;
	if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
		return -EINVAL;
	dev->tv_audmode = vt->audmode;
	return 0;
}

int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);
	enum tpg_quality qual;

	if (vt->index != 0)
		return -EINVAL;

	vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
			 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
	vt->audmode = dev->tv_audmode;
	vt->rangelow = MIN_TV_FREQ;
	vt->rangehigh = MAX_TV_FREQ;
	qual = vivid_get_quality(dev, &vt->afc);
	if (qual == TPG_QUAL_COLOR)
		vt->signal = 0xffff;
	else if (qual == TPG_QUAL_GRAY)
		vt->signal = 0x8000;
	else
		vt->signal = 0;
	if (qual == TPG_QUAL_NOISE) {
		vt->rxsubchans = 0;
	} else if (qual == TPG_QUAL_GRAY) {
		vt->rxsubchans = V4L2_TUNER_SUB_MONO;
	} else {
		unsigned int channel_nr = dev->tv_freq / (6 * 16);
		unsigned int options =
			(dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3;

		switch (channel_nr % options) {
		case 0:
			vt->rxsubchans = V4L2_TUNER_SUB_MONO;
			break;
		case 1:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
			break;
		case 2:
			if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M)
				vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
			else
				vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
			break;
		case 3:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
			break;
		}
	}
	strscpy(vt->name, "TV Tuner", sizeof(vt->name));
	return 0;
}

/* Must remain in sync with the vivid_ctrl_standard_strings array */
const v4l2_std_id vivid_standard[] = {
	V4L2_STD_NTSC_M,
	V4L2_STD_NTSC_M_JP,
	V4L2_STD_NTSC_M_KR,
	V4L2_STD_NTSC_443,
	V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
	V4L2_STD_PAL_I,
	V4L2_STD_PAL_DK,
	V4L2_STD_PAL_M,
	V4L2_STD_PAL_N,
	V4L2_STD_PAL_Nc,
	V4L2_STD_PAL_60,
	V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
	V4L2_STD_SECAM_DK,
	V4L2_STD_SECAM_L,
	V4L2_STD_SECAM_LC,
	V4L2_STD_UNKNOWN
};

/* Must remain in sync with the vivid_standard array */
const char * const vivid_ctrl_standard_strings[] = {
	"NTSC-M",
	"NTSC-M-JP",
	"NTSC-M-KR",
	"NTSC-443",
	"PAL-BGH",
	"PAL-I",
	"PAL-DK",
	"PAL-M",
	"PAL-N",
	"PAL-Nc",
	"PAL-60",
	"SECAM-BGH",
	"SECAM-DK",
	"SECAM-L",
	"SECAM-Lc",
	NULL,
};

int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int last = dev->query_std_last[dev->input];

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_signal_mode[dev->input] == NO_SIGNAL ||
	    dev->std_signal_mode[dev->input] == NO_LOCK) {
		*id = V4L2_STD_UNKNOWN;
		return 0;
	}
	if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
		*id = V4L2_STD_UNKNOWN;
	} else if (dev->std_signal_mode[dev->input] == CURRENT_STD) {
		*id = dev->std_cap[dev->input];
	} else if (dev->std_signal_mode[dev->input] == SELECTED_STD) {
		*id = dev->query_std[dev->input];
	} else {
		*id = vivid_standard[last];
		dev->query_std_last[dev->input] =
			(last + 1) % ARRAY_SIZE(vivid_standard);
	}

	return 0;
}

int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_cap[dev->input] == id)
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
		return -EBUSY;
	dev->std_cap[dev->input] = id;
	vivid_update_format_cap(dev, false);
	return 0;
}

static void find_aspect_ratio(u32 width, u32 height,
			      u32 *num, u32 *denom)
{
	if (!(height % 3) && ((height * 4 / 3) == width)) {
		*num = 4;
		*denom = 3;
	} else if (!(height % 9) && ((height * 16 / 9) == width)) {
		*num = 16;
		*denom = 9;
	} else if (!(height % 10) && ((height * 16 / 10) == width)) {
		*num = 16;
		*denom = 10;
	} else if (!(height % 4) && ((height * 5 / 4) == width)) {
		*num = 5;
		*denom = 4;
	} else if (!(height % 9) && ((height * 15 / 9) == width)) {
		*num = 15;
		*denom = 9;
	} else { /* default to 16:9 */
		*num = 16;
		*denom = 9;
	}
}

static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
{
	struct v4l2_bt_timings *bt = &timings->bt;
	u32 total_h_pixel;
	u32 total_v_lines;
	u32 h_freq;

	if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
				   NULL, NULL))
		return false;

	total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
	total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);

	h_freq = (u32)bt->pixelclock / total_h_pixel;

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
		if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
				    bt->polarities, bt->interlaced, timings))
			return true;
	}

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
		struct v4l2_fract aspect_ratio;

		find_aspect_ratio(bt->width, bt->height,
				  &aspect_ratio.numerator,
				  &aspect_ratio.denominator);
		if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
				    bt->polarities, bt->interlaced,
				    aspect_ratio, timings))
			return true;
	}
	return false;
}

int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
			       struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
				      0, NULL, NULL) &&
	    !valid_cvt_gtf_timings(timings))
		return -EINVAL;

	if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input],
				  0, false))
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->dv_timings_cap[dev->input] = *timings;
	vivid_update_format_cap(dev, false);
	return 0;
}

int vidioc_query_dv_timings(struct file *file, void *_fh,
			    struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int input = dev->input;
	unsigned int last = dev->query_dv_timings_last[input];

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (dev->dv_timings_signal_mode[input] == NO_SIGNAL ||
	    dev->edid_blocks == 0)
		return -ENOLINK;
	if (dev->dv_timings_signal_mode[input] == NO_LOCK)
		return -ENOLCK;
	if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) {
		timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
		return -ERANGE;
	}
	if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) {
		*timings = dev->dv_timings_cap[input];
	} else if (dev->dv_timings_signal_mode[input] ==
		   SELECTED_DV_TIMINGS) {
		*timings =
			v4l2_dv_timings_presets[dev->query_dv_timings[input]];
	} else {
		*timings =
			v4l2_dv_timings_presets[last];
		dev->query_dv_timings_last[input] =
			(last + 1) % dev->query_dv_timings_size;
	}
	return 0;
}

int vidioc_s_edid(struct file *file, void *_fh,
		  struct v4l2_edid *edid)
{
	struct vivid_dev *dev = video_drvdata(file);
	u16 phys_addr;
	u32 display_present = 0;
	unsigned int i, j;
	int ret;

	memset(edid->reserved, 0, sizeof(edid->reserved));
	if (edid->pad >= dev->num_inputs)
		return -EINVAL;
	if (dev->input_type[edid->pad] != HDMI || edid->start_block)
		return -EINVAL;
	if (edid->blocks == 0) {
		dev->edid_blocks = 0;
		v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
		v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
		phys_addr = CEC_PHYS_ADDR_INVALID;
		goto set_phys_addr;
	}
	if (edid->blocks > dev->edid_max_blocks) {
		edid->blocks = dev->edid_max_blocks;
		return -E2BIG;
	}
	phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
	ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
	if (ret)
		return ret;

	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->edid_blocks = edid->blocks;
	memcpy(dev->edid, edid->edid, edid->blocks * 128);

	for (i = 0, j = 0; i < dev->num_outputs; i++)
		if (dev->output_type[i] == HDMI)
			display_present |=
				dev->display_present[i] << j++;

	v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
	v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);

set_phys_addr:
	/* TODO: a proper hotplug detect cycle should be emulated here */
	cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false);

	for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
		cec_s_phys_addr(dev->cec_tx_adap[i],
				dev->display_present[i] ?
				v4l2_phys_addr_for_input(phys_addr, i + 1) :
				CEC_PHYS_ADDR_INVALID,
				false);
	return 0;
}

int vidioc_enum_framesizes(struct file *file, void *fh,
			   struct v4l2_frmsizeenum *fsize)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
		return -EINVAL;
	if (vivid_get_format(dev, fsize->pixel_format) == NULL)
		return -EINVAL;
	if (vivid_is_webcam(dev)) {
		if (fsize->index >= ARRAY_SIZE(webcam_sizes))
			return -EINVAL;
		fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
		fsize->discrete = webcam_sizes[fsize->index];
		return 0;
	}
	if (fsize->index)
		return -EINVAL;
	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = MIN_WIDTH;
	fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
	fsize->stepwise.step_width = 2;
	fsize->stepwise.min_height = MIN_HEIGHT;
	fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
	fsize->stepwise.step_height = 2;
	return 0;
}

/* timeperframe is arbitrary and continuous */
int vidioc_enum_frameintervals(struct file *file, void *priv,
			       struct v4l2_frmivalenum *fival)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	int i;

	fmt = vivid_get_format(dev, fival->pixel_format);
	if (!fmt)
		return -EINVAL;

	if (!vivid_is_webcam(dev)) {
		if (fival->index)
			return -EINVAL;
		if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
			return -EINVAL;
		if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
			return -EINVAL;
		fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
		fival->discrete = dev->timeperframe_vid_cap;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
		if (fival->width == webcam_sizes[i].width &&
		    fival->height == webcam_sizes[i].height)
			break;
	if (i == ARRAY_SIZE(webcam_sizes))
		return -EINVAL;
	if (fival->index >= 2 * (VIVID_WEBCAM_SIZES - i))
		return -EINVAL;
	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = webcam_intervals[fival->index];
	return 0;
}

int vivid_vid_cap_g_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;

	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
	parm->parm.capture.readbuffers = 1;
	return 0;
}

int vivid_vid_cap_s_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned ival_sz = 2 * (VIVID_WEBCAM_SIZES - dev->webcam_size_idx);
	struct v4l2_fract tpf;
	unsigned i;

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;
	if (!vivid_is_webcam(dev))
		return vivid_vid_cap_g_parm(file, priv, parm);

	tpf = parm->parm.capture.timeperframe;

	if (tpf.denominator == 0)
		tpf = webcam_intervals[ival_sz - 1];
	for (i = 0; i < ival_sz; i++)
		if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i]))
			break;
	if (i == ival_sz)
		i = ival_sz - 1;
	dev->webcam_ival_idx = i;
	tpf = webcam_intervals[dev->webcam_ival_idx];

	/* resync the thread's timings */
	dev->cap_seq_resync = true;
	dev->timeperframe_vid_cap = tpf;
	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = tpf;
	parm->parm.capture.readbuffers = 1;
	return 0;
}
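
/*
 * Note on the interval snapping in vivid_vid_cap_s_parm() above
 * (illustration only, under the default webcam_intervals table): the first
 * listed interval that is not longer than the requested one wins, so at
 * 320x180 a request for 1/33 s snaps to 1/40 s (40 fps), and anything
 * shorter than 1/60 s is clamped to 1/60 s.
 */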