1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * vivid-vid-cap.c - video capture support functions. 4 * 5 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 6 */ 7 8 #include <linux/errno.h> 9 #include <linux/kernel.h> 10 #include <linux/sched.h> 11 #include <linux/vmalloc.h> 12 #include <linux/videodev2.h> 13 #include <linux/v4l2-dv-timings.h> 14 #include <media/v4l2-common.h> 15 #include <media/v4l2-event.h> 16 #include <media/v4l2-dv-timings.h> 17 #include <media/v4l2-rect.h> 18 19 #include "vivid-core.h" 20 #include "vivid-vid-common.h" 21 #include "vivid-kthread-cap.h" 22 #include "vivid-vid-cap.h" 23 24 static const struct vivid_fmt formats_ovl[] = { 25 { 26 .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */ 27 .vdownsampling = { 1 }, 28 .bit_depth = { 16 }, 29 .planes = 1, 30 .buffers = 1, 31 }, 32 { 33 .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */ 34 .vdownsampling = { 1 }, 35 .bit_depth = { 16 }, 36 .planes = 1, 37 .buffers = 1, 38 }, 39 { 40 .fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */ 41 .vdownsampling = { 1 }, 42 .bit_depth = { 16 }, 43 .planes = 1, 44 .buffers = 1, 45 }, 46 }; 47 48 /* The number of discrete webcam framesizes */ 49 #define VIVID_WEBCAM_SIZES 6 50 /* The number of discrete webcam frameintervals */ 51 #define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2) 52 53 /* Sizes must be in increasing order */ 54 static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = { 55 { 320, 180 }, 56 { 640, 360 }, 57 { 640, 480 }, 58 { 1280, 720 }, 59 { 1920, 1080 }, 60 { 3840, 2160 }, 61 }; 62 63 /* 64 * Intervals must be in increasing order and there must be twice as many 65 * elements in this array as there are in webcam_sizes. 66 */ 67 static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = { 68 { 1, 1 }, 69 { 1, 2 }, 70 { 1, 4 }, 71 { 1, 5 }, 72 { 1, 10 }, 73 { 2, 25 }, 74 { 1, 15 }, 75 { 1, 25 }, 76 { 1, 30 }, 77 { 1, 40 }, 78 { 1, 50 }, 79 { 1, 60 }, 80 }; 81 82 static int vid_cap_queue_setup(struct vb2_queue *vq, 83 unsigned *nbuffers, unsigned *nplanes, 84 unsigned sizes[], struct device *alloc_devs[]) 85 { 86 struct vivid_dev *dev = vb2_get_drv_priv(vq); 87 unsigned buffers = tpg_g_buffers(&dev->tpg); 88 unsigned h = dev->fmt_cap_rect.height; 89 unsigned p; 90 91 if (dev->field_cap == V4L2_FIELD_ALTERNATE) { 92 /* 93 * You cannot use read() with FIELD_ALTERNATE since the field 94 * information (TOP/BOTTOM) cannot be passed back to the user. 95 */ 96 if (vb2_fileio_is_active(vq)) 97 return -EINVAL; 98 } 99 100 if (dev->queue_setup_error) { 101 /* 102 * Error injection: test what happens if queue_setup() returns 103 * an error. 104 */ 105 dev->queue_setup_error = false; 106 return -EINVAL; 107 } 108 if (*nplanes) { 109 /* 110 * Check if the number of requested planes match 111 * the number of buffers in the current format. You can't mix that. 
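 * If they do not match, or any of the supplied plane sizes is smaller
 * than what the current format requires, -EINVAL is returned.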
112 */ 113 if (*nplanes != buffers) 114 return -EINVAL; 115 for (p = 0; p < buffers; p++) { 116 if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h + 117 dev->fmt_cap->data_offset[p]) 118 return -EINVAL; 119 } 120 } else { 121 for (p = 0; p < buffers; p++) 122 sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) / 123 dev->fmt_cap->vdownsampling[p] + 124 dev->fmt_cap->data_offset[p]; 125 } 126 127 if (vq->num_buffers + *nbuffers < 2) 128 *nbuffers = 2 - vq->num_buffers; 129 130 *nplanes = buffers; 131 132 dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers); 133 for (p = 0; p < buffers; p++) 134 dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]); 135 136 return 0; 137 } 138 139 static int vid_cap_buf_prepare(struct vb2_buffer *vb) 140 { 141 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 142 unsigned long size; 143 unsigned buffers = tpg_g_buffers(&dev->tpg); 144 unsigned p; 145 146 dprintk(dev, 1, "%s\n", __func__); 147 148 if (WARN_ON(NULL == dev->fmt_cap)) 149 return -EINVAL; 150 151 if (dev->buf_prepare_error) { 152 /* 153 * Error injection: test what happens if buf_prepare() returns 154 * an error. 155 */ 156 dev->buf_prepare_error = false; 157 return -EINVAL; 158 } 159 for (p = 0; p < buffers; p++) { 160 size = (tpg_g_line_width(&dev->tpg, p) * 161 dev->fmt_cap_rect.height) / 162 dev->fmt_cap->vdownsampling[p] + 163 dev->fmt_cap->data_offset[p]; 164 165 if (vb2_plane_size(vb, p) < size) { 166 dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n", 167 __func__, p, vb2_plane_size(vb, p), size); 168 return -EINVAL; 169 } 170 171 vb2_set_plane_payload(vb, p, size); 172 vb->planes[p].data_offset = dev->fmt_cap->data_offset[p]; 173 } 174 175 return 0; 176 } 177 178 static void vid_cap_buf_finish(struct vb2_buffer *vb) 179 { 180 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); 181 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 182 struct v4l2_timecode *tc = &vbuf->timecode; 183 unsigned fps = 25; 184 unsigned seq = vbuf->sequence; 185 186 if (!vivid_is_sdtv_cap(dev)) 187 return; 188 189 /* 190 * Set the timecode. Rarely used, so it is interesting to 191 * test this. 192 */ 193 vbuf->flags |= V4L2_BUF_FLAG_TIMECODE; 194 if (dev->std_cap[dev->input] & V4L2_STD_525_60) 195 fps = 30; 196 tc->type = (fps == 30) ? 
V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS; 197 tc->flags = 0; 198 tc->frames = seq % fps; 199 tc->seconds = (seq / fps) % 60; 200 tc->minutes = (seq / (60 * fps)) % 60; 201 tc->hours = (seq / (60 * 60 * fps)) % 24; 202 } 203 204 static void vid_cap_buf_queue(struct vb2_buffer *vb) 205 { 206 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); 207 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 208 struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb); 209 210 dprintk(dev, 1, "%s\n", __func__); 211 212 spin_lock(&dev->slock); 213 list_add_tail(&buf->list, &dev->vid_cap_active); 214 spin_unlock(&dev->slock); 215 } 216 217 static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) 218 { 219 struct vivid_dev *dev = vb2_get_drv_priv(vq); 220 unsigned i; 221 int err; 222 223 if (vb2_is_streaming(&dev->vb_vid_out_q)) 224 dev->can_loop_video = vivid_vid_can_loop(dev); 225 226 dev->vid_cap_seq_count = 0; 227 dprintk(dev, 1, "%s\n", __func__); 228 for (i = 0; i < VIDEO_MAX_FRAME; i++) 229 dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100; 230 if (dev->start_streaming_error) { 231 dev->start_streaming_error = false; 232 err = -EINVAL; 233 } else { 234 err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming); 235 } 236 if (err) { 237 struct vivid_buffer *buf, *tmp; 238 239 list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) { 240 list_del(&buf->list); 241 vb2_buffer_done(&buf->vb.vb2_buf, 242 VB2_BUF_STATE_QUEUED); 243 } 244 } 245 return err; 246 } 247 248 /* abort streaming and wait for last buffer */ 249 static void vid_cap_stop_streaming(struct vb2_queue *vq) 250 { 251 struct vivid_dev *dev = vb2_get_drv_priv(vq); 252 253 dprintk(dev, 1, "%s\n", __func__); 254 vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming); 255 dev->can_loop_video = false; 256 } 257 258 static void vid_cap_buf_request_complete(struct vb2_buffer *vb) 259 { 260 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 261 262 v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap); 263 } 264 265 const struct vb2_ops vivid_vid_cap_qops = { 266 .queue_setup = vid_cap_queue_setup, 267 .buf_prepare = vid_cap_buf_prepare, 268 .buf_finish = vid_cap_buf_finish, 269 .buf_queue = vid_cap_buf_queue, 270 .start_streaming = vid_cap_start_streaming, 271 .stop_streaming = vid_cap_stop_streaming, 272 .buf_request_complete = vid_cap_buf_request_complete, 273 .wait_prepare = vb2_ops_wait_prepare, 274 .wait_finish = vb2_ops_wait_finish, 275 }; 276 277 /* 278 * Determine the 'picture' quality based on the current TV frequency: either 279 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off 280 * signal or NOISE for no signal. 281 */ 282 void vivid_update_quality(struct vivid_dev *dev) 283 { 284 unsigned freq_modulus; 285 286 if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) { 287 /* 288 * The 'noise' will only be replaced by the actual video 289 * if the output video matches the input video settings. 
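 * Whether looping really happens is decided when streaming starts
 * (see vivid_vid_can_loop() in vid_cap_start_streaming()); here the
 * TPG simply defaults to noise.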
290 */ 291 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0); 292 return; 293 } 294 if (vivid_is_hdmi_cap(dev) && 295 VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) { 296 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0); 297 return; 298 } 299 if (vivid_is_sdtv_cap(dev) && 300 VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) { 301 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0); 302 return; 303 } 304 if (!vivid_is_tv_cap(dev)) { 305 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0); 306 return; 307 } 308 309 /* 310 * There is a fake channel every 6 MHz at 49.25, 55.25, etc. 311 * From +/- 0.25 MHz around the channel there is color, and from 312 * +/- 1 MHz there is grayscale (chroma is lost). 313 * Everywhere else it is just noise. 314 */ 315 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16); 316 if (freq_modulus > 2 * 16) { 317 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 318 next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f); 319 return; 320 } 321 if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/) 322 tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0); 323 else 324 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0); 325 } 326 327 /* 328 * Get the current picture quality and the associated afc value. 329 */ 330 static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc) 331 { 332 unsigned freq_modulus; 333 334 if (afc) 335 *afc = 0; 336 if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR || 337 tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) 338 return tpg_g_quality(&dev->tpg); 339 340 /* 341 * There is a fake channel every 6 MHz at 49.25, 55.25, etc. 342 * From +/- 0.25 MHz around the channel there is color, and from 343 * +/- 1 MHz there is grayscale (chroma is lost). 344 * Everywhere else it is just gray. 345 */ 346 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16); 347 if (afc) 348 *afc = freq_modulus - 1 * 16; 349 return TPG_QUAL_GRAY; 350 } 351 352 enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev) 353 { 354 if (vivid_is_sdtv_cap(dev)) 355 return dev->std_aspect_ratio[dev->input]; 356 357 if (vivid_is_hdmi_cap(dev)) 358 return dev->dv_timings_aspect_ratio[dev->input]; 359 360 return TPG_VIDEO_ASPECT_IMAGE; 361 } 362 363 static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev) 364 { 365 if (vivid_is_sdtv_cap(dev)) 366 return (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 367 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL; 368 369 if (vivid_is_hdmi_cap(dev) && 370 dev->src_rect.width == 720 && dev->src_rect.height <= 576) 371 return dev->src_rect.height == 480 ? 372 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL; 373 374 return TPG_PIXEL_ASPECT_SQUARE; 375 } 376 377 /* 378 * Called whenever the format has to be reset which can occur when 379 * changing inputs, standard, timings, etc. 
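 * It re-derives the source rectangle, frame interval and field setting
 * for the selected input and resets the crop/compose rectangles to match.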
380 */ 381 void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) 382 { 383 struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt; 384 u32 dims[V4L2_CTRL_MAX_DIMS] = {}; 385 unsigned size; 386 u64 pixelclock; 387 388 switch (dev->input_type[dev->input]) { 389 case WEBCAM: 390 default: 391 dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width; 392 dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height; 393 dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx]; 394 dev->field_cap = V4L2_FIELD_NONE; 395 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO); 396 break; 397 case TV: 398 case SVID: 399 dev->field_cap = dev->tv_field_cap; 400 dev->src_rect.width = 720; 401 if (dev->std_cap[dev->input] & V4L2_STD_525_60) { 402 dev->src_rect.height = 480; 403 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 }; 404 dev->service_set_cap = V4L2_SLICED_CAPTION_525; 405 } else { 406 dev->src_rect.height = 576; 407 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 }; 408 dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B; 409 } 410 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO); 411 break; 412 case HDMI: 413 dev->src_rect.width = bt->width; 414 dev->src_rect.height = bt->height; 415 size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt); 416 if (dev->reduced_fps && can_reduce_fps(bt)) { 417 pixelclock = div_u64(bt->pixelclock * 1000, 1001); 418 bt->flags |= V4L2_DV_FL_REDUCED_FPS; 419 } else { 420 pixelclock = bt->pixelclock; 421 bt->flags &= ~V4L2_DV_FL_REDUCED_FPS; 422 } 423 dev->timeperframe_vid_cap = (struct v4l2_fract) { 424 size / 100, (u32)pixelclock / 100 425 }; 426 if (bt->interlaced) 427 dev->field_cap = V4L2_FIELD_ALTERNATE; 428 else 429 dev->field_cap = V4L2_FIELD_NONE; 430 431 /* 432 * We can be called from within s_ctrl, in that case we can't 433 * set/get controls. Luckily we don't need to in that case. 434 */ 435 if (keep_controls || !dev->colorspace) 436 break; 437 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) { 438 if (bt->width == 720 && bt->height <= 576) 439 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M); 440 else 441 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709); 442 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1); 443 } else { 444 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB); 445 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0); 446 } 447 tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap)); 448 break; 449 } 450 vfree(dev->bitmap_cap); 451 dev->bitmap_cap = NULL; 452 vivid_update_quality(dev); 453 tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap); 454 dev->crop_cap = dev->src_rect; 455 dev->crop_bounds_cap = dev->src_rect; 456 if (dev->bitmap_cap && 457 (dev->compose_cap.width != dev->crop_cap.width || 458 dev->compose_cap.height != dev->crop_cap.height)) { 459 vfree(dev->bitmap_cap); 460 dev->bitmap_cap = NULL; 461 } 462 dev->compose_cap = dev->crop_cap; 463 if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap)) 464 dev->compose_cap.height /= 2; 465 dev->fmt_cap_rect = dev->compose_cap; 466 tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev)); 467 tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev)); 468 tpg_update_mv_step(&dev->tpg); 469 470 /* 471 * We can be called from within s_ctrl, in that case we can't 472 * modify controls. Luckily we don't need to in that case. 
473 */ 474 if (keep_controls) 475 return; 476 477 dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV); 478 dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV); 479 v4l2_ctrl_modify_dimensions(dev->pixel_array, dims); 480 } 481 482 /* Map the field to something that is valid for the current input */ 483 static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field) 484 { 485 if (vivid_is_sdtv_cap(dev)) { 486 switch (field) { 487 case V4L2_FIELD_INTERLACED_TB: 488 case V4L2_FIELD_INTERLACED_BT: 489 case V4L2_FIELD_SEQ_TB: 490 case V4L2_FIELD_SEQ_BT: 491 case V4L2_FIELD_TOP: 492 case V4L2_FIELD_BOTTOM: 493 case V4L2_FIELD_ALTERNATE: 494 return field; 495 case V4L2_FIELD_INTERLACED: 496 default: 497 return V4L2_FIELD_INTERLACED; 498 } 499 } 500 if (vivid_is_hdmi_cap(dev)) 501 return dev->dv_timings_cap[dev->input].bt.interlaced ? 502 V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE; 503 return V4L2_FIELD_NONE; 504 } 505 506 static unsigned vivid_colorspace_cap(struct vivid_dev *dev) 507 { 508 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 509 return tpg_g_colorspace(&dev->tpg); 510 return dev->colorspace_out; 511 } 512 513 static unsigned vivid_xfer_func_cap(struct vivid_dev *dev) 514 { 515 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 516 return tpg_g_xfer_func(&dev->tpg); 517 return dev->xfer_func_out; 518 } 519 520 static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev) 521 { 522 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 523 return tpg_g_ycbcr_enc(&dev->tpg); 524 return dev->ycbcr_enc_out; 525 } 526 527 static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev) 528 { 529 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 530 return tpg_g_hsv_enc(&dev->tpg); 531 return dev->hsv_enc_out; 532 } 533 534 static unsigned vivid_quantization_cap(struct vivid_dev *dev) 535 { 536 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 537 return tpg_g_quantization(&dev->tpg); 538 return dev->quantization_out; 539 } 540 541 int vivid_g_fmt_vid_cap(struct file *file, void *priv, 542 struct v4l2_format *f) 543 { 544 struct vivid_dev *dev = video_drvdata(file); 545 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; 546 unsigned p; 547 548 mp->width = dev->fmt_cap_rect.width; 549 mp->height = dev->fmt_cap_rect.height; 550 mp->field = dev->field_cap; 551 mp->pixelformat = dev->fmt_cap->fourcc; 552 mp->colorspace = vivid_colorspace_cap(dev); 553 mp->xfer_func = vivid_xfer_func_cap(dev); 554 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV) 555 mp->hsv_enc = vivid_hsv_enc_cap(dev); 556 else 557 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev); 558 mp->quantization = vivid_quantization_cap(dev); 559 mp->num_planes = dev->fmt_cap->buffers; 560 for (p = 0; p < mp->num_planes; p++) { 561 mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p); 562 mp->plane_fmt[p].sizeimage = 563 (tpg_g_line_width(&dev->tpg, p) * mp->height) / 564 dev->fmt_cap->vdownsampling[p] + 565 dev->fmt_cap->data_offset[p]; 566 } 567 return 0; 568 } 569 570 int vivid_try_fmt_vid_cap(struct file *file, void *priv, 571 struct v4l2_format *f) 572 { 573 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; 574 struct v4l2_plane_pix_format *pfmt = mp->plane_fmt; 575 struct vivid_dev *dev = video_drvdata(file); 576 const struct vivid_fmt *fmt; 577 unsigned bytesperline, max_bpl; 578 unsigned factor = 1; 579 unsigned w, h; 580 unsigned p; 581 bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC); 582 583 
fmt = vivid_get_format(dev, mp->pixelformat); 584 if (!fmt) { 585 dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n", 586 mp->pixelformat); 587 mp->pixelformat = V4L2_PIX_FMT_YUYV; 588 fmt = vivid_get_format(dev, mp->pixelformat); 589 } 590 591 mp->field = vivid_field_cap(dev, mp->field); 592 if (vivid_is_webcam(dev)) { 593 const struct v4l2_frmsize_discrete *sz = 594 v4l2_find_nearest_size(webcam_sizes, 595 VIVID_WEBCAM_SIZES, width, 596 height, mp->width, mp->height); 597 598 w = sz->width; 599 h = sz->height; 600 } else if (vivid_is_sdtv_cap(dev)) { 601 w = 720; 602 h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576; 603 } else { 604 w = dev->src_rect.width; 605 h = dev->src_rect.height; 606 } 607 if (V4L2_FIELD_HAS_T_OR_B(mp->field)) 608 factor = 2; 609 if (vivid_is_webcam(dev) || 610 (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) { 611 mp->width = w; 612 mp->height = h / factor; 613 } else { 614 struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor }; 615 616 v4l2_rect_set_min_size(&r, &vivid_min_rect); 617 v4l2_rect_set_max_size(&r, &vivid_max_rect); 618 if (dev->has_scaler_cap && !dev->has_compose_cap) { 619 struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h }; 620 621 v4l2_rect_set_max_size(&r, &max_r); 622 } else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) { 623 v4l2_rect_set_max_size(&r, &dev->src_rect); 624 } else if (!dev->has_scaler_cap && !dev->has_crop_cap) { 625 v4l2_rect_set_min_size(&r, &dev->src_rect); 626 } 627 mp->width = r.width; 628 mp->height = r.height / factor; 629 } 630 631 /* This driver supports custom bytesperline values */ 632 633 mp->num_planes = fmt->buffers; 634 for (p = 0; p < fmt->buffers; p++) { 635 /* Calculate the minimum supported bytesperline value */ 636 bytesperline = (mp->width * fmt->bit_depth[p]) >> 3; 637 /* Calculate the maximum supported bytesperline value */ 638 max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3; 639 640 if (pfmt[p].bytesperline > max_bpl) 641 pfmt[p].bytesperline = max_bpl; 642 if (pfmt[p].bytesperline < bytesperline) 643 pfmt[p].bytesperline = bytesperline; 644 645 pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) / 646 fmt->vdownsampling[p] + fmt->data_offset[p]; 647 648 memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved)); 649 } 650 for (p = fmt->buffers; p < fmt->planes; p++) 651 pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height * 652 (fmt->bit_depth[p] / fmt->vdownsampling[p])) / 653 (fmt->bit_depth[0] / fmt->vdownsampling[0]); 654 655 if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace)) 656 mp->colorspace = vivid_colorspace_cap(dev); 657 658 if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func)) 659 mp->xfer_func = vivid_xfer_func_cap(dev); 660 661 if (fmt->color_enc == TGP_COLOR_ENC_HSV) { 662 if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc)) 663 mp->hsv_enc = vivid_hsv_enc_cap(dev); 664 } else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) { 665 if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc)) 666 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev); 667 } else { 668 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev); 669 } 670 671 if (fmt->color_enc == TGP_COLOR_ENC_YCBCR || 672 fmt->color_enc == TGP_COLOR_ENC_RGB) { 673 if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization)) 674 mp->quantization = vivid_quantization_cap(dev); 675 } else { 676 mp->quantization = vivid_quantization_cap(dev); 677 } 678 679 memset(mp->reserved, 0, sizeof(mp->reserved)); 680 return 0; 681 } 682 683 int 
vivid_s_fmt_vid_cap(struct file *file, void *priv, 684 struct v4l2_format *f) 685 { 686 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; 687 struct vivid_dev *dev = video_drvdata(file); 688 struct v4l2_rect *crop = &dev->crop_cap; 689 struct v4l2_rect *compose = &dev->compose_cap; 690 struct vb2_queue *q = &dev->vb_vid_cap_q; 691 int ret = vivid_try_fmt_vid_cap(file, priv, f); 692 unsigned factor = 1; 693 unsigned p; 694 unsigned i; 695 696 if (ret < 0) 697 return ret; 698 699 if (vb2_is_busy(q)) { 700 dprintk(dev, 1, "%s device busy\n", __func__); 701 return -EBUSY; 702 } 703 704 if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) { 705 dprintk(dev, 1, "overlay is active, can't change pixelformat\n"); 706 return -EBUSY; 707 } 708 709 dev->fmt_cap = vivid_get_format(dev, mp->pixelformat); 710 if (V4L2_FIELD_HAS_T_OR_B(mp->field)) 711 factor = 2; 712 713 /* Note: the webcam input doesn't support scaling, cropping or composing */ 714 715 if (!vivid_is_webcam(dev) && 716 (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) { 717 struct v4l2_rect r = { 0, 0, mp->width, mp->height }; 718 719 if (dev->has_scaler_cap) { 720 if (dev->has_compose_cap) 721 v4l2_rect_map_inside(compose, &r); 722 else 723 *compose = r; 724 if (dev->has_crop_cap && !dev->has_compose_cap) { 725 struct v4l2_rect min_r = { 726 0, 0, 727 r.width / MAX_ZOOM, 728 factor * r.height / MAX_ZOOM 729 }; 730 struct v4l2_rect max_r = { 731 0, 0, 732 r.width * MAX_ZOOM, 733 factor * r.height * MAX_ZOOM 734 }; 735 736 v4l2_rect_set_min_size(crop, &min_r); 737 v4l2_rect_set_max_size(crop, &max_r); 738 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 739 } else if (dev->has_crop_cap) { 740 struct v4l2_rect min_r = { 741 0, 0, 742 compose->width / MAX_ZOOM, 743 factor * compose->height / MAX_ZOOM 744 }; 745 struct v4l2_rect max_r = { 746 0, 0, 747 compose->width * MAX_ZOOM, 748 factor * compose->height * MAX_ZOOM 749 }; 750 751 v4l2_rect_set_min_size(crop, &min_r); 752 v4l2_rect_set_max_size(crop, &max_r); 753 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 754 } 755 } else if (dev->has_crop_cap && !dev->has_compose_cap) { 756 r.height *= factor; 757 v4l2_rect_set_size_to(crop, &r); 758 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 759 r = *crop; 760 r.height /= factor; 761 v4l2_rect_set_size_to(compose, &r); 762 } else if (!dev->has_crop_cap) { 763 v4l2_rect_map_inside(compose, &r); 764 } else { 765 r.height *= factor; 766 v4l2_rect_set_max_size(crop, &r); 767 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 768 compose->top *= factor; 769 compose->height *= factor; 770 v4l2_rect_set_size_to(compose, crop); 771 v4l2_rect_map_inside(compose, &r); 772 compose->top /= factor; 773 compose->height /= factor; 774 } 775 } else if (vivid_is_webcam(dev)) { 776 /* Guaranteed to be a match */ 777 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++) 778 if (webcam_sizes[i].width == mp->width && 779 webcam_sizes[i].height == mp->height) 780 break; 781 dev->webcam_size_idx = i; 782 if (dev->webcam_ival_idx >= 2 * (VIVID_WEBCAM_SIZES - i)) 783 dev->webcam_ival_idx = 2 * (VIVID_WEBCAM_SIZES - i) - 1; 784 vivid_update_format_cap(dev, false); 785 } else { 786 struct v4l2_rect r = { 0, 0, mp->width, mp->height }; 787 788 v4l2_rect_set_size_to(compose, &r); 789 r.height *= factor; 790 v4l2_rect_set_size_to(crop, &r); 791 } 792 793 dev->fmt_cap_rect.width = mp->width; 794 dev->fmt_cap_rect.height = mp->height; 795 tpg_s_buf_height(&dev->tpg, mp->height); 796 tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc); 
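	/*
	 * Propagate the new format to the test pattern generator: per-plane
	 * bytesperline, the field setting (FIELD_ALTERNATE starts with the
	 * top field) and the current crop/compose rectangles.
	 */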
797 for (p = 0; p < tpg_g_buffers(&dev->tpg); p++) 798 tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline); 799 dev->field_cap = mp->field; 800 if (dev->field_cap == V4L2_FIELD_ALTERNATE) 801 tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true); 802 else 803 tpg_s_field(&dev->tpg, dev->field_cap, false); 804 tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap); 805 if (vivid_is_sdtv_cap(dev)) 806 dev->tv_field_cap = mp->field; 807 tpg_update_mv_step(&dev->tpg); 808 dev->tpg.colorspace = mp->colorspace; 809 dev->tpg.xfer_func = mp->xfer_func; 810 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR) 811 dev->tpg.ycbcr_enc = mp->ycbcr_enc; 812 else 813 dev->tpg.hsv_enc = mp->hsv_enc; 814 dev->tpg.quantization = mp->quantization; 815 816 return 0; 817 } 818 819 int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv, 820 struct v4l2_format *f) 821 { 822 struct vivid_dev *dev = video_drvdata(file); 823 824 if (!dev->multiplanar) 825 return -ENOTTY; 826 return vivid_g_fmt_vid_cap(file, priv, f); 827 } 828 829 int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv, 830 struct v4l2_format *f) 831 { 832 struct vivid_dev *dev = video_drvdata(file); 833 834 if (!dev->multiplanar) 835 return -ENOTTY; 836 return vivid_try_fmt_vid_cap(file, priv, f); 837 } 838 839 int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv, 840 struct v4l2_format *f) 841 { 842 struct vivid_dev *dev = video_drvdata(file); 843 844 if (!dev->multiplanar) 845 return -ENOTTY; 846 return vivid_s_fmt_vid_cap(file, priv, f); 847 } 848 849 int vidioc_g_fmt_vid_cap(struct file *file, void *priv, 850 struct v4l2_format *f) 851 { 852 struct vivid_dev *dev = video_drvdata(file); 853 854 if (dev->multiplanar) 855 return -ENOTTY; 856 return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap); 857 } 858 859 int vidioc_try_fmt_vid_cap(struct file *file, void *priv, 860 struct v4l2_format *f) 861 { 862 struct vivid_dev *dev = video_drvdata(file); 863 864 if (dev->multiplanar) 865 return -ENOTTY; 866 return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap); 867 } 868 869 int vidioc_s_fmt_vid_cap(struct file *file, void *priv, 870 struct v4l2_format *f) 871 { 872 struct vivid_dev *dev = video_drvdata(file); 873 874 if (dev->multiplanar) 875 return -ENOTTY; 876 return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap); 877 } 878 879 int vivid_vid_cap_g_selection(struct file *file, void *priv, 880 struct v4l2_selection *sel) 881 { 882 struct vivid_dev *dev = video_drvdata(file); 883 884 if (!dev->has_crop_cap && !dev->has_compose_cap) 885 return -ENOTTY; 886 if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 887 return -EINVAL; 888 if (vivid_is_webcam(dev)) 889 return -ENODATA; 890 891 sel->r.left = sel->r.top = 0; 892 switch (sel->target) { 893 case V4L2_SEL_TGT_CROP: 894 if (!dev->has_crop_cap) 895 return -EINVAL; 896 sel->r = dev->crop_cap; 897 break; 898 case V4L2_SEL_TGT_CROP_DEFAULT: 899 case V4L2_SEL_TGT_CROP_BOUNDS: 900 if (!dev->has_crop_cap) 901 return -EINVAL; 902 sel->r = dev->src_rect; 903 break; 904 case V4L2_SEL_TGT_COMPOSE_BOUNDS: 905 if (!dev->has_compose_cap) 906 return -EINVAL; 907 sel->r = vivid_max_rect; 908 break; 909 case V4L2_SEL_TGT_COMPOSE: 910 if (!dev->has_compose_cap) 911 return -EINVAL; 912 sel->r = dev->compose_cap; 913 break; 914 case V4L2_SEL_TGT_COMPOSE_DEFAULT: 915 if (!dev->has_compose_cap) 916 return -EINVAL; 917 sel->r = dev->fmt_cap_rect; 918 break; 919 default: 920 return -EINVAL; 921 } 922 return 0; 923 } 924 925 int vivid_vid_cap_s_selection(struct file *file, void *fh, 
struct v4l2_selection *s) 926 { 927 struct vivid_dev *dev = video_drvdata(file); 928 struct v4l2_rect *crop = &dev->crop_cap; 929 struct v4l2_rect *compose = &dev->compose_cap; 930 unsigned orig_compose_w = compose->width; 931 unsigned orig_compose_h = compose->height; 932 unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1; 933 int ret; 934 935 if (!dev->has_crop_cap && !dev->has_compose_cap) 936 return -ENOTTY; 937 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 938 return -EINVAL; 939 if (vivid_is_webcam(dev)) 940 return -ENODATA; 941 942 switch (s->target) { 943 case V4L2_SEL_TGT_CROP: 944 if (!dev->has_crop_cap) 945 return -EINVAL; 946 ret = vivid_vid_adjust_sel(s->flags, &s->r); 947 if (ret) 948 return ret; 949 v4l2_rect_set_min_size(&s->r, &vivid_min_rect); 950 v4l2_rect_set_max_size(&s->r, &dev->src_rect); 951 v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap); 952 s->r.top /= factor; 953 s->r.height /= factor; 954 if (dev->has_scaler_cap) { 955 struct v4l2_rect fmt = dev->fmt_cap_rect; 956 struct v4l2_rect max_rect = { 957 0, 0, 958 s->r.width * MAX_ZOOM, 959 s->r.height * MAX_ZOOM 960 }; 961 struct v4l2_rect min_rect = { 962 0, 0, 963 s->r.width / MAX_ZOOM, 964 s->r.height / MAX_ZOOM 965 }; 966 967 v4l2_rect_set_min_size(&fmt, &min_rect); 968 if (!dev->has_compose_cap) 969 v4l2_rect_set_max_size(&fmt, &max_rect); 970 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) && 971 vb2_is_busy(&dev->vb_vid_cap_q)) 972 return -EBUSY; 973 if (dev->has_compose_cap) { 974 v4l2_rect_set_min_size(compose, &min_rect); 975 v4l2_rect_set_max_size(compose, &max_rect); 976 v4l2_rect_map_inside(compose, &fmt); 977 } 978 dev->fmt_cap_rect = fmt; 979 tpg_s_buf_height(&dev->tpg, fmt.height); 980 } else if (dev->has_compose_cap) { 981 struct v4l2_rect fmt = dev->fmt_cap_rect; 982 983 v4l2_rect_set_min_size(&fmt, &s->r); 984 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) && 985 vb2_is_busy(&dev->vb_vid_cap_q)) 986 return -EBUSY; 987 dev->fmt_cap_rect = fmt; 988 tpg_s_buf_height(&dev->tpg, fmt.height); 989 v4l2_rect_set_size_to(compose, &s->r); 990 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect); 991 } else { 992 if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) && 993 vb2_is_busy(&dev->vb_vid_cap_q)) 994 return -EBUSY; 995 v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r); 996 v4l2_rect_set_size_to(compose, &s->r); 997 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect); 998 tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height); 999 } 1000 s->r.top *= factor; 1001 s->r.height *= factor; 1002 *crop = s->r; 1003 break; 1004 case V4L2_SEL_TGT_COMPOSE: 1005 if (!dev->has_compose_cap) 1006 return -EINVAL; 1007 ret = vivid_vid_adjust_sel(s->flags, &s->r); 1008 if (ret) 1009 return ret; 1010 v4l2_rect_set_min_size(&s->r, &vivid_min_rect); 1011 v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect); 1012 if (dev->has_scaler_cap) { 1013 struct v4l2_rect max_rect = { 1014 0, 0, 1015 dev->src_rect.width * MAX_ZOOM, 1016 (dev->src_rect.height / factor) * MAX_ZOOM 1017 }; 1018 1019 v4l2_rect_set_max_size(&s->r, &max_rect); 1020 if (dev->has_crop_cap) { 1021 struct v4l2_rect min_rect = { 1022 0, 0, 1023 s->r.width / MAX_ZOOM, 1024 (s->r.height * factor) / MAX_ZOOM 1025 }; 1026 struct v4l2_rect max_rect = { 1027 0, 0, 1028 s->r.width * MAX_ZOOM, 1029 (s->r.height * factor) * MAX_ZOOM 1030 }; 1031 1032 v4l2_rect_set_min_size(crop, &min_rect); 1033 v4l2_rect_set_max_size(crop, &max_rect); 1034 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 1035 } 1036 } else if (dev->has_crop_cap) { 1037 s->r.top *= factor; 
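		/*
		 * Like s->r.top above, the height is scaled to frame lines
		 * before clamping against the full source rectangle; both are
		 * scaled back once the crop rectangle has been updated.
		 */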
1038 s->r.height *= factor; 1039 v4l2_rect_set_max_size(&s->r, &dev->src_rect); 1040 v4l2_rect_set_size_to(crop, &s->r); 1041 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 1042 s->r.top /= factor; 1043 s->r.height /= factor; 1044 } else { 1045 v4l2_rect_set_size_to(&s->r, &dev->src_rect); 1046 s->r.height /= factor; 1047 } 1048 v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect); 1049 *compose = s->r; 1050 break; 1051 default: 1052 return -EINVAL; 1053 } 1054 1055 if (dev->bitmap_cap && (compose->width != orig_compose_w || 1056 compose->height != orig_compose_h)) { 1057 vfree(dev->bitmap_cap); 1058 dev->bitmap_cap = NULL; 1059 } 1060 tpg_s_crop_compose(&dev->tpg, crop, compose); 1061 return 0; 1062 } 1063 1064 int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv, 1065 int type, struct v4l2_fract *f) 1066 { 1067 struct vivid_dev *dev = video_drvdata(file); 1068 1069 if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1070 return -EINVAL; 1071 1072 switch (vivid_get_pixel_aspect(dev)) { 1073 case TPG_PIXEL_ASPECT_NTSC: 1074 f->numerator = 11; 1075 f->denominator = 10; 1076 break; 1077 case TPG_PIXEL_ASPECT_PAL: 1078 f->numerator = 54; 1079 f->denominator = 59; 1080 break; 1081 default: 1082 break; 1083 } 1084 return 0; 1085 } 1086 1087 int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv, 1088 struct v4l2_fmtdesc *f) 1089 { 1090 struct vivid_dev *dev = video_drvdata(file); 1091 const struct vivid_fmt *fmt; 1092 1093 if (dev->multiplanar) 1094 return -ENOTTY; 1095 1096 if (f->index >= ARRAY_SIZE(formats_ovl)) 1097 return -EINVAL; 1098 1099 fmt = &formats_ovl[f->index]; 1100 1101 f->pixelformat = fmt->fourcc; 1102 return 0; 1103 } 1104 1105 int vidioc_g_fmt_vid_overlay(struct file *file, void *priv, 1106 struct v4l2_format *f) 1107 { 1108 struct vivid_dev *dev = video_drvdata(file); 1109 const struct v4l2_rect *compose = &dev->compose_cap; 1110 struct v4l2_window *win = &f->fmt.win; 1111 unsigned clipcount = win->clipcount; 1112 1113 if (dev->multiplanar) 1114 return -ENOTTY; 1115 1116 win->w.top = dev->overlay_cap_top; 1117 win->w.left = dev->overlay_cap_left; 1118 win->w.width = compose->width; 1119 win->w.height = compose->height; 1120 win->field = dev->overlay_cap_field; 1121 win->clipcount = dev->clipcount_cap; 1122 if (clipcount > dev->clipcount_cap) 1123 clipcount = dev->clipcount_cap; 1124 if (dev->bitmap_cap == NULL) 1125 win->bitmap = NULL; 1126 else if (win->bitmap) { 1127 if (copy_to_user(win->bitmap, dev->bitmap_cap, 1128 ((compose->width + 7) / 8) * compose->height)) 1129 return -EFAULT; 1130 } 1131 if (clipcount && win->clips) 1132 memcpy(win->clips, dev->clips_cap, 1133 clipcount * sizeof(dev->clips_cap[0])); 1134 return 0; 1135 } 1136 1137 int vidioc_try_fmt_vid_overlay(struct file *file, void *priv, 1138 struct v4l2_format *f) 1139 { 1140 struct vivid_dev *dev = video_drvdata(file); 1141 const struct v4l2_rect *compose = &dev->compose_cap; 1142 struct v4l2_window *win = &f->fmt.win; 1143 int i, j; 1144 1145 if (dev->multiplanar) 1146 return -ENOTTY; 1147 1148 win->w.left = clamp_t(int, win->w.left, 1149 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width); 1150 win->w.top = clamp_t(int, win->w.top, 1151 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height); 1152 win->w.width = compose->width; 1153 win->w.height = compose->height; 1154 if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP) 1155 win->field = V4L2_FIELD_ANY; 1156 win->chromakey = 0; 1157 win->global_alpha = 0; 1158 if (win->clipcount && !win->clips) 1159 win->clipcount = 0; 1160 if (win->clipcount > 
MAX_CLIPS) 1161 win->clipcount = MAX_CLIPS; 1162 if (win->clipcount) { 1163 memcpy(dev->try_clips_cap, win->clips, 1164 win->clipcount * sizeof(dev->clips_cap[0])); 1165 for (i = 0; i < win->clipcount; i++) { 1166 struct v4l2_rect *r = &dev->try_clips_cap[i].c; 1167 1168 r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1); 1169 r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top); 1170 r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1); 1171 r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left); 1172 } 1173 /* 1174 * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small 1175 * number and it's typically a one-time deal. 1176 */ 1177 for (i = 0; i < win->clipcount - 1; i++) { 1178 struct v4l2_rect *r1 = &dev->try_clips_cap[i].c; 1179 1180 for (j = i + 1; j < win->clipcount; j++) { 1181 struct v4l2_rect *r2 = &dev->try_clips_cap[j].c; 1182 1183 if (v4l2_rect_overlap(r1, r2)) 1184 return -EINVAL; 1185 } 1186 } 1187 memcpy(win->clips, dev->try_clips_cap, 1188 win->clipcount * sizeof(dev->clips_cap[0])); 1189 } 1190 return 0; 1191 } 1192 1193 int vidioc_s_fmt_vid_overlay(struct file *file, void *priv, 1194 struct v4l2_format *f) 1195 { 1196 struct vivid_dev *dev = video_drvdata(file); 1197 const struct v4l2_rect *compose = &dev->compose_cap; 1198 struct v4l2_window *win = &f->fmt.win; 1199 int ret = vidioc_try_fmt_vid_overlay(file, priv, f); 1200 unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height; 1201 unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]); 1202 void *new_bitmap = NULL; 1203 1204 if (ret) 1205 return ret; 1206 1207 if (win->bitmap) { 1208 new_bitmap = vzalloc(bitmap_size); 1209 1210 if (new_bitmap == NULL) 1211 return -ENOMEM; 1212 if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) { 1213 vfree(new_bitmap); 1214 return -EFAULT; 1215 } 1216 } 1217 1218 dev->overlay_cap_top = win->w.top; 1219 dev->overlay_cap_left = win->w.left; 1220 dev->overlay_cap_field = win->field; 1221 vfree(dev->bitmap_cap); 1222 dev->bitmap_cap = new_bitmap; 1223 dev->clipcount_cap = win->clipcount; 1224 if (dev->clipcount_cap) 1225 memcpy(dev->clips_cap, dev->try_clips_cap, clips_size); 1226 return 0; 1227 } 1228 1229 int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i) 1230 { 1231 struct vivid_dev *dev = video_drvdata(file); 1232 1233 if (dev->multiplanar) 1234 return -ENOTTY; 1235 1236 if (i && dev->fb_vbase_cap == NULL) 1237 return -EINVAL; 1238 1239 if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) { 1240 dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n"); 1241 return -EINVAL; 1242 } 1243 1244 if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh) 1245 return -EBUSY; 1246 dev->overlay_cap_owner = i ? 
fh : NULL; 1247 return 0; 1248 } 1249 1250 int vivid_vid_cap_g_fbuf(struct file *file, void *fh, 1251 struct v4l2_framebuffer *a) 1252 { 1253 struct vivid_dev *dev = video_drvdata(file); 1254 1255 if (dev->multiplanar) 1256 return -ENOTTY; 1257 1258 *a = dev->fb_cap; 1259 a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING | 1260 V4L2_FBUF_CAP_LIST_CLIPPING; 1261 a->flags = V4L2_FBUF_FLAG_PRIMARY; 1262 a->fmt.field = V4L2_FIELD_NONE; 1263 a->fmt.colorspace = V4L2_COLORSPACE_SRGB; 1264 a->fmt.priv = 0; 1265 return 0; 1266 } 1267 1268 int vivid_vid_cap_s_fbuf(struct file *file, void *fh, 1269 const struct v4l2_framebuffer *a) 1270 { 1271 struct vivid_dev *dev = video_drvdata(file); 1272 const struct vivid_fmt *fmt; 1273 1274 if (dev->multiplanar) 1275 return -ENOTTY; 1276 1277 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) 1278 return -EPERM; 1279 1280 if (dev->overlay_cap_owner) 1281 return -EBUSY; 1282 1283 if (a->base == NULL) { 1284 dev->fb_cap.base = NULL; 1285 dev->fb_vbase_cap = NULL; 1286 return 0; 1287 } 1288 1289 if (a->fmt.width < 48 || a->fmt.height < 32) 1290 return -EINVAL; 1291 fmt = vivid_get_format(dev, a->fmt.pixelformat); 1292 if (!fmt || !fmt->can_do_overlay) 1293 return -EINVAL; 1294 if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8) 1295 return -EINVAL; 1296 if (a->fmt.bytesperline > a->fmt.sizeimage / a->fmt.height) 1297 return -EINVAL; 1298 1299 /* 1300 * Only support the framebuffer of one of the vivid instances. 1301 * Anything else is rejected. 1302 */ 1303 if (!vivid_validate_fb(a)) 1304 return -EINVAL; 1305 1306 dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base); 1307 dev->fb_cap = *a; 1308 dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left, 1309 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width); 1310 dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top, 1311 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height); 1312 return 0; 1313 } 1314 1315 static const struct v4l2_audio vivid_audio_inputs[] = { 1316 { 0, "TV", V4L2_AUDCAP_STEREO }, 1317 { 1, "Line-In", V4L2_AUDCAP_STEREO }, 1318 }; 1319 1320 int vidioc_enum_input(struct file *file, void *priv, 1321 struct v4l2_input *inp) 1322 { 1323 struct vivid_dev *dev = video_drvdata(file); 1324 1325 if (inp->index >= dev->num_inputs) 1326 return -EINVAL; 1327 1328 inp->type = V4L2_INPUT_TYPE_CAMERA; 1329 switch (dev->input_type[inp->index]) { 1330 case WEBCAM: 1331 snprintf(inp->name, sizeof(inp->name), "Webcam %u", 1332 dev->input_name_counter[inp->index]); 1333 inp->capabilities = 0; 1334 break; 1335 case TV: 1336 snprintf(inp->name, sizeof(inp->name), "TV %u", 1337 dev->input_name_counter[inp->index]); 1338 inp->type = V4L2_INPUT_TYPE_TUNER; 1339 inp->std = V4L2_STD_ALL; 1340 if (dev->has_audio_inputs) 1341 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1; 1342 inp->capabilities = V4L2_IN_CAP_STD; 1343 break; 1344 case SVID: 1345 snprintf(inp->name, sizeof(inp->name), "S-Video %u", 1346 dev->input_name_counter[inp->index]); 1347 inp->std = V4L2_STD_ALL; 1348 if (dev->has_audio_inputs) 1349 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1; 1350 inp->capabilities = V4L2_IN_CAP_STD; 1351 break; 1352 case HDMI: 1353 snprintf(inp->name, sizeof(inp->name), "HDMI %u", 1354 dev->input_name_counter[inp->index]); 1355 inp->capabilities = V4L2_IN_CAP_DV_TIMINGS; 1356 if (dev->edid_blocks == 0 || 1357 dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL) 1358 inp->status |= V4L2_IN_ST_NO_SIGNAL; 1359 else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK || 1360 
dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE) 1361 inp->status |= V4L2_IN_ST_NO_H_LOCK; 1362 break; 1363 } 1364 if (dev->sensor_hflip) 1365 inp->status |= V4L2_IN_ST_HFLIP; 1366 if (dev->sensor_vflip) 1367 inp->status |= V4L2_IN_ST_VFLIP; 1368 if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) { 1369 if (dev->std_signal_mode[dev->input] == NO_SIGNAL) { 1370 inp->status |= V4L2_IN_ST_NO_SIGNAL; 1371 } else if (dev->std_signal_mode[dev->input] == NO_LOCK) { 1372 inp->status |= V4L2_IN_ST_NO_H_LOCK; 1373 } else if (vivid_is_tv_cap(dev)) { 1374 switch (tpg_g_quality(&dev->tpg)) { 1375 case TPG_QUAL_GRAY: 1376 inp->status |= V4L2_IN_ST_COLOR_KILL; 1377 break; 1378 case TPG_QUAL_NOISE: 1379 inp->status |= V4L2_IN_ST_NO_H_LOCK; 1380 break; 1381 default: 1382 break; 1383 } 1384 } 1385 } 1386 return 0; 1387 } 1388 1389 int vidioc_g_input(struct file *file, void *priv, unsigned *i) 1390 { 1391 struct vivid_dev *dev = video_drvdata(file); 1392 1393 *i = dev->input; 1394 return 0; 1395 } 1396 1397 int vidioc_s_input(struct file *file, void *priv, unsigned i) 1398 { 1399 struct vivid_dev *dev = video_drvdata(file); 1400 struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt; 1401 unsigned brightness; 1402 1403 if (i >= dev->num_inputs) 1404 return -EINVAL; 1405 1406 if (i == dev->input) 1407 return 0; 1408 1409 if (vb2_is_busy(&dev->vb_vid_cap_q) || 1410 vb2_is_busy(&dev->vb_vbi_cap_q) || 1411 vb2_is_busy(&dev->vb_meta_cap_q)) 1412 return -EBUSY; 1413 1414 dev->input = i; 1415 dev->vid_cap_dev.tvnorms = 0; 1416 if (dev->input_type[i] == TV || dev->input_type[i] == SVID) { 1417 dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1; 1418 dev->vid_cap_dev.tvnorms = V4L2_STD_ALL; 1419 } 1420 dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms; 1421 dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms; 1422 vivid_update_format_cap(dev, false); 1423 1424 if (dev->colorspace) { 1425 switch (dev->input_type[i]) { 1426 case WEBCAM: 1427 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB); 1428 break; 1429 case TV: 1430 case SVID: 1431 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M); 1432 break; 1433 case HDMI: 1434 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) { 1435 if (dev->src_rect.width == 720 && dev->src_rect.height <= 576) 1436 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M); 1437 else 1438 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709); 1439 } else { 1440 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB); 1441 } 1442 break; 1443 } 1444 } 1445 1446 /* 1447 * Modify the brightness range depending on the input. 1448 * This makes it easy to use vivid to test if applications can 1449 * handle control range modifications and is also how this is 1450 * typically used in practice as different inputs may be hooked 1451 * up to different receivers with different control ranges. 1452 */ 1453 brightness = 128 * i + dev->input_brightness[i]; 1454 v4l2_ctrl_modify_range(dev->brightness, 1455 128 * i, 255 + 128 * i, 1, 128 + 128 * i); 1456 v4l2_ctrl_s_ctrl(dev->brightness, brightness); 1457 1458 /* Restore per-input states. 
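 * The DV-timings and TV-standard related controls are activated or
 * deactivated depending on the input type and set to the values
 * remembered for this input.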
*/ 1459 v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode, 1460 vivid_is_hdmi_cap(dev)); 1461 v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) && 1462 dev->dv_timings_signal_mode[dev->input] == 1463 SELECTED_DV_TIMINGS); 1464 v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev)); 1465 v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) && 1466 dev->std_signal_mode[dev->input]); 1467 1468 if (vivid_is_hdmi_cap(dev)) { 1469 v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode, 1470 dev->dv_timings_signal_mode[dev->input]); 1471 v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings, 1472 dev->query_dv_timings[dev->input]); 1473 } else if (vivid_is_sdtv_cap(dev)) { 1474 v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode, 1475 dev->std_signal_mode[dev->input]); 1476 v4l2_ctrl_s_ctrl(dev->ctrl_standard, 1477 dev->std_signal_mode[dev->input]); 1478 } 1479 1480 return 0; 1481 } 1482 1483 int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin) 1484 { 1485 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs)) 1486 return -EINVAL; 1487 *vin = vivid_audio_inputs[vin->index]; 1488 return 0; 1489 } 1490 1491 int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin) 1492 { 1493 struct vivid_dev *dev = video_drvdata(file); 1494 1495 if (!vivid_is_sdtv_cap(dev)) 1496 return -EINVAL; 1497 *vin = vivid_audio_inputs[dev->tv_audio_input]; 1498 return 0; 1499 } 1500 1501 int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin) 1502 { 1503 struct vivid_dev *dev = video_drvdata(file); 1504 1505 if (!vivid_is_sdtv_cap(dev)) 1506 return -EINVAL; 1507 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs)) 1508 return -EINVAL; 1509 dev->tv_audio_input = vin->index; 1510 return 0; 1511 } 1512 1513 int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf) 1514 { 1515 struct vivid_dev *dev = video_drvdata(file); 1516 1517 if (vf->tuner != 0) 1518 return -EINVAL; 1519 vf->frequency = dev->tv_freq; 1520 return 0; 1521 } 1522 1523 int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf) 1524 { 1525 struct vivid_dev *dev = video_drvdata(file); 1526 1527 if (vf->tuner != 0) 1528 return -EINVAL; 1529 dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ); 1530 if (vivid_is_tv_cap(dev)) 1531 vivid_update_quality(dev); 1532 return 0; 1533 } 1534 1535 int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt) 1536 { 1537 struct vivid_dev *dev = video_drvdata(file); 1538 1539 if (vt->index != 0) 1540 return -EINVAL; 1541 if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2) 1542 return -EINVAL; 1543 dev->tv_audmode = vt->audmode; 1544 return 0; 1545 } 1546 1547 int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt) 1548 { 1549 struct vivid_dev *dev = video_drvdata(file); 1550 enum tpg_quality qual; 1551 1552 if (vt->index != 0) 1553 return -EINVAL; 1554 1555 vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO | 1556 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; 1557 vt->audmode = dev->tv_audmode; 1558 vt->rangelow = MIN_TV_FREQ; 1559 vt->rangehigh = MAX_TV_FREQ; 1560 qual = vivid_get_quality(dev, &vt->afc); 1561 if (qual == TPG_QUAL_COLOR) 1562 vt->signal = 0xffff; 1563 else if (qual == TPG_QUAL_GRAY) 1564 vt->signal = 0x8000; 1565 else 1566 vt->signal = 0; 1567 if (qual == TPG_QUAL_NOISE) { 1568 vt->rxsubchans = 0; 1569 } else if (qual == TPG_QUAL_GRAY) { 1570 vt->rxsubchans = V4L2_TUNER_SUB_MONO; 1571 } else { 1572 unsigned int channel_nr = 
dev->tv_freq / (6 * 16); 1573 unsigned int options = 1574 (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3; 1575 1576 switch (channel_nr % options) { 1577 case 0: 1578 vt->rxsubchans = V4L2_TUNER_SUB_MONO; 1579 break; 1580 case 1: 1581 vt->rxsubchans = V4L2_TUNER_SUB_STEREO; 1582 break; 1583 case 2: 1584 if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) 1585 vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP; 1586 else 1587 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; 1588 break; 1589 case 3: 1590 vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP; 1591 break; 1592 } 1593 } 1594 strscpy(vt->name, "TV Tuner", sizeof(vt->name)); 1595 return 0; 1596 } 1597 1598 /* Must remain in sync with the vivid_ctrl_standard_strings array */ 1599 const v4l2_std_id vivid_standard[] = { 1600 V4L2_STD_NTSC_M, 1601 V4L2_STD_NTSC_M_JP, 1602 V4L2_STD_NTSC_M_KR, 1603 V4L2_STD_NTSC_443, 1604 V4L2_STD_PAL_BG | V4L2_STD_PAL_H, 1605 V4L2_STD_PAL_I, 1606 V4L2_STD_PAL_DK, 1607 V4L2_STD_PAL_M, 1608 V4L2_STD_PAL_N, 1609 V4L2_STD_PAL_Nc, 1610 V4L2_STD_PAL_60, 1611 V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H, 1612 V4L2_STD_SECAM_DK, 1613 V4L2_STD_SECAM_L, 1614 V4L2_STD_SECAM_LC, 1615 V4L2_STD_UNKNOWN 1616 }; 1617 1618 /* Must remain in sync with the vivid_standard array */ 1619 const char * const vivid_ctrl_standard_strings[] = { 1620 "NTSC-M", 1621 "NTSC-M-JP", 1622 "NTSC-M-KR", 1623 "NTSC-443", 1624 "PAL-BGH", 1625 "PAL-I", 1626 "PAL-DK", 1627 "PAL-M", 1628 "PAL-N", 1629 "PAL-Nc", 1630 "PAL-60", 1631 "SECAM-BGH", 1632 "SECAM-DK", 1633 "SECAM-L", 1634 "SECAM-Lc", 1635 NULL, 1636 }; 1637 1638 int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id) 1639 { 1640 struct vivid_dev *dev = video_drvdata(file); 1641 unsigned int last = dev->query_std_last[dev->input]; 1642 1643 if (!vivid_is_sdtv_cap(dev)) 1644 return -ENODATA; 1645 if (dev->std_signal_mode[dev->input] == NO_SIGNAL || 1646 dev->std_signal_mode[dev->input] == NO_LOCK) { 1647 *id = V4L2_STD_UNKNOWN; 1648 return 0; 1649 } 1650 if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) { 1651 *id = V4L2_STD_UNKNOWN; 1652 } else if (dev->std_signal_mode[dev->input] == CURRENT_STD) { 1653 *id = dev->std_cap[dev->input]; 1654 } else if (dev->std_signal_mode[dev->input] == SELECTED_STD) { 1655 *id = dev->query_std[dev->input]; 1656 } else { 1657 *id = vivid_standard[last]; 1658 dev->query_std_last[dev->input] = 1659 (last + 1) % ARRAY_SIZE(vivid_standard); 1660 } 1661 1662 return 0; 1663 } 1664 1665 int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id) 1666 { 1667 struct vivid_dev *dev = video_drvdata(file); 1668 1669 if (!vivid_is_sdtv_cap(dev)) 1670 return -ENODATA; 1671 if (dev->std_cap[dev->input] == id) 1672 return 0; 1673 if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q)) 1674 return -EBUSY; 1675 dev->std_cap[dev->input] = id; 1676 vivid_update_format_cap(dev, false); 1677 return 0; 1678 } 1679 1680 static void find_aspect_ratio(u32 width, u32 height, 1681 u32 *num, u32 *denom) 1682 { 1683 if (!(height % 3) && ((height * 4 / 3) == width)) { 1684 *num = 4; 1685 *denom = 3; 1686 } else if (!(height % 9) && ((height * 16 / 9) == width)) { 1687 *num = 16; 1688 *denom = 9; 1689 } else if (!(height % 10) && ((height * 16 / 10) == width)) { 1690 *num = 16; 1691 *denom = 10; 1692 } else if (!(height % 4) && ((height * 5 / 4) == width)) { 1693 *num = 5; 1694 *denom = 4; 1695 } else if (!(height % 9) && ((height * 15 / 9) == width)) { 1696 *num = 15; 1697 *denom = 9; 
1698 } else { /* default to 16:9 */ 1699 *num = 16; 1700 *denom = 9; 1701 } 1702 } 1703 1704 static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings) 1705 { 1706 struct v4l2_bt_timings *bt = &timings->bt; 1707 u32 total_h_pixel; 1708 u32 total_v_lines; 1709 u32 h_freq; 1710 1711 if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap, 1712 NULL, NULL)) 1713 return false; 1714 1715 total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt); 1716 total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt); 1717 1718 h_freq = (u32)bt->pixelclock / total_h_pixel; 1719 1720 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) { 1721 if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width, 1722 bt->polarities, bt->interlaced, timings)) 1723 return true; 1724 } 1725 1726 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) { 1727 struct v4l2_fract aspect_ratio; 1728 1729 find_aspect_ratio(bt->width, bt->height, 1730 &aspect_ratio.numerator, 1731 &aspect_ratio.denominator); 1732 if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync, 1733 bt->polarities, bt->interlaced, 1734 aspect_ratio, timings)) 1735 return true; 1736 } 1737 return false; 1738 } 1739 1740 int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh, 1741 struct v4l2_dv_timings *timings) 1742 { 1743 struct vivid_dev *dev = video_drvdata(file); 1744 1745 if (!vivid_is_hdmi_cap(dev)) 1746 return -ENODATA; 1747 if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap, 1748 0, NULL, NULL) && 1749 !valid_cvt_gtf_timings(timings)) 1750 return -EINVAL; 1751 1752 if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input], 1753 0, false)) 1754 return 0; 1755 if (vb2_is_busy(&dev->vb_vid_cap_q)) 1756 return -EBUSY; 1757 1758 dev->dv_timings_cap[dev->input] = *timings; 1759 vivid_update_format_cap(dev, false); 1760 return 0; 1761 } 1762 1763 int vidioc_query_dv_timings(struct file *file, void *_fh, 1764 struct v4l2_dv_timings *timings) 1765 { 1766 struct vivid_dev *dev = video_drvdata(file); 1767 unsigned int input = dev->input; 1768 unsigned int last = dev->query_dv_timings_last[input]; 1769 1770 if (!vivid_is_hdmi_cap(dev)) 1771 return -ENODATA; 1772 if (dev->dv_timings_signal_mode[input] == NO_SIGNAL || 1773 dev->edid_blocks == 0) 1774 return -ENOLINK; 1775 if (dev->dv_timings_signal_mode[input] == NO_LOCK) 1776 return -ENOLCK; 1777 if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) { 1778 timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2; 1779 return -ERANGE; 1780 } 1781 if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) { 1782 *timings = dev->dv_timings_cap[input]; 1783 } else if (dev->dv_timings_signal_mode[input] == 1784 SELECTED_DV_TIMINGS) { 1785 *timings = 1786 v4l2_dv_timings_presets[dev->query_dv_timings[input]]; 1787 } else { 1788 *timings = 1789 v4l2_dv_timings_presets[last]; 1790 dev->query_dv_timings_last[input] = 1791 (last + 1) % dev->query_dv_timings_size; 1792 } 1793 return 0; 1794 } 1795 1796 int vidioc_s_edid(struct file *file, void *_fh, 1797 struct v4l2_edid *edid) 1798 { 1799 struct vivid_dev *dev = video_drvdata(file); 1800 u16 phys_addr; 1801 u32 display_present = 0; 1802 unsigned int i, j; 1803 int ret; 1804 1805 memset(edid->reserved, 0, sizeof(edid->reserved)); 1806 if (edid->pad >= dev->num_inputs) 1807 return -EINVAL; 1808 if (dev->input_type[edid->pad] != HDMI || edid->start_block) 1809 return -EINVAL; 1810 if (edid->blocks == 0) { 1811 dev->edid_blocks = 0; 1812 v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0); 1813 v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 
0); 1814 phys_addr = CEC_PHYS_ADDR_INVALID; 1815 goto set_phys_addr; 1816 } 1817 if (edid->blocks > dev->edid_max_blocks) { 1818 edid->blocks = dev->edid_max_blocks; 1819 return -E2BIG; 1820 } 1821 phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL); 1822 ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL); 1823 if (ret) 1824 return ret; 1825 1826 if (vb2_is_busy(&dev->vb_vid_cap_q)) 1827 return -EBUSY; 1828 1829 dev->edid_blocks = edid->blocks; 1830 memcpy(dev->edid, edid->edid, edid->blocks * 128); 1831 1832 for (i = 0, j = 0; i < dev->num_outputs; i++) 1833 if (dev->output_type[i] == HDMI) 1834 display_present |= 1835 dev->display_present[i] << j++; 1836 1837 v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present); 1838 v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present); 1839 1840 set_phys_addr: 1841 /* TODO: a proper hotplug detect cycle should be emulated here */ 1842 cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false); 1843 1844 for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++) 1845 cec_s_phys_addr(dev->cec_tx_adap[i], 1846 dev->display_present[i] ? 1847 v4l2_phys_addr_for_input(phys_addr, i + 1) : 1848 CEC_PHYS_ADDR_INVALID, 1849 false); 1850 return 0; 1851 } 1852 1853 int vidioc_enum_framesizes(struct file *file, void *fh, 1854 struct v4l2_frmsizeenum *fsize) 1855 { 1856 struct vivid_dev *dev = video_drvdata(file); 1857 1858 if (!vivid_is_webcam(dev) && !dev->has_scaler_cap) 1859 return -EINVAL; 1860 if (vivid_get_format(dev, fsize->pixel_format) == NULL) 1861 return -EINVAL; 1862 if (vivid_is_webcam(dev)) { 1863 if (fsize->index >= ARRAY_SIZE(webcam_sizes)) 1864 return -EINVAL; 1865 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; 1866 fsize->discrete = webcam_sizes[fsize->index]; 1867 return 0; 1868 } 1869 if (fsize->index) 1870 return -EINVAL; 1871 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; 1872 fsize->stepwise.min_width = MIN_WIDTH; 1873 fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM; 1874 fsize->stepwise.step_width = 2; 1875 fsize->stepwise.min_height = MIN_HEIGHT; 1876 fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM; 1877 fsize->stepwise.step_height = 2; 1878 return 0; 1879 } 1880 1881 /* timeperframe is arbitrary and continuous */ 1882 int vidioc_enum_frameintervals(struct file *file, void *priv, 1883 struct v4l2_frmivalenum *fival) 1884 { 1885 struct vivid_dev *dev = video_drvdata(file); 1886 const struct vivid_fmt *fmt; 1887 int i; 1888 1889 fmt = vivid_get_format(dev, fival->pixel_format); 1890 if (!fmt) 1891 return -EINVAL; 1892 1893 if (!vivid_is_webcam(dev)) { 1894 if (fival->index) 1895 return -EINVAL; 1896 if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM) 1897 return -EINVAL; 1898 if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM) 1899 return -EINVAL; 1900 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; 1901 fival->discrete = dev->timeperframe_vid_cap; 1902 return 0; 1903 } 1904 1905 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++) 1906 if (fival->width == webcam_sizes[i].width && 1907 fival->height == webcam_sizes[i].height) 1908 break; 1909 if (i == ARRAY_SIZE(webcam_sizes)) 1910 return -EINVAL; 1911 if (fival->index >= 2 * (VIVID_WEBCAM_SIZES - i)) 1912 return -EINVAL; 1913 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; 1914 fival->discrete = webcam_intervals[fival->index]; 1915 return 0; 1916 } 1917 1918 int vivid_vid_cap_g_parm(struct file *file, void *priv, 1919 struct v4l2_streamparm *parm) 1920 { 1921 struct vivid_dev *dev = video_drvdata(file); 1922 1923 if (parm->type != (dev->multiplanar 
? 1924 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : 1925 V4L2_BUF_TYPE_VIDEO_CAPTURE)) 1926 return -EINVAL; 1927 1928 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; 1929 parm->parm.capture.timeperframe = dev->timeperframe_vid_cap; 1930 parm->parm.capture.readbuffers = 1; 1931 return 0; 1932 } 1933 1934 int vivid_vid_cap_s_parm(struct file *file, void *priv, 1935 struct v4l2_streamparm *parm) 1936 { 1937 struct vivid_dev *dev = video_drvdata(file); 1938 unsigned ival_sz = 2 * (VIVID_WEBCAM_SIZES - dev->webcam_size_idx); 1939 struct v4l2_fract tpf; 1940 unsigned i; 1941 1942 if (parm->type != (dev->multiplanar ? 1943 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : 1944 V4L2_BUF_TYPE_VIDEO_CAPTURE)) 1945 return -EINVAL; 1946 if (!vivid_is_webcam(dev)) 1947 return vivid_vid_cap_g_parm(file, priv, parm); 1948 1949 tpf = parm->parm.capture.timeperframe; 1950 1951 if (tpf.denominator == 0) 1952 tpf = webcam_intervals[ival_sz - 1]; 1953 for (i = 0; i < ival_sz; i++) 1954 if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i])) 1955 break; 1956 if (i == ival_sz) 1957 i = ival_sz - 1; 1958 dev->webcam_ival_idx = i; 1959 tpf = webcam_intervals[dev->webcam_ival_idx]; 1960 1961 /* resync the thread's timings */ 1962 dev->cap_seq_resync = true; 1963 dev->timeperframe_vid_cap = tpf; 1964 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; 1965 parm->parm.capture.timeperframe = tpf; 1966 parm->parm.capture.readbuffers = 1; 1967 return 0; 1968 } 1969
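/*
 * Illustrative note: the webcam frame-interval bookkeeping used by
 * vidioc_enum_frameintervals() and vivid_vid_cap_s_parm() above exposes,
 * for size index i, the first 2 * (VIVID_WEBCAM_SIZES - i) entries of
 * webcam_intervals[]. For 1280x720 (index 3) that is six intervals, from
 * 1/1 s down to 2/25 s, so the fastest selectable rate at that size is
 * 12.5 fps; for 320x180 (index 0) all twelve intervals are available,
 * up to 60 fps.
 */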