1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * vivid-vid-cap.c - video capture support functions. 4 * 5 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 6 */ 7 8 #include <linux/errno.h> 9 #include <linux/kernel.h> 10 #include <linux/sched.h> 11 #include <linux/vmalloc.h> 12 #include <linux/videodev2.h> 13 #include <linux/v4l2-dv-timings.h> 14 #include <media/v4l2-common.h> 15 #include <media/v4l2-event.h> 16 #include <media/v4l2-dv-timings.h> 17 #include <media/v4l2-rect.h> 18 19 #include "vivid-core.h" 20 #include "vivid-vid-common.h" 21 #include "vivid-kthread-cap.h" 22 #include "vivid-vid-cap.h" 23 24 static const struct vivid_fmt formats_ovl[] = { 25 { 26 .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */ 27 .vdownsampling = { 1 }, 28 .bit_depth = { 16 }, 29 .planes = 1, 30 .buffers = 1, 31 }, 32 { 33 .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */ 34 .vdownsampling = { 1 }, 35 .bit_depth = { 16 }, 36 .planes = 1, 37 .buffers = 1, 38 }, 39 { 40 .fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */ 41 .vdownsampling = { 1 }, 42 .bit_depth = { 16 }, 43 .planes = 1, 44 .buffers = 1, 45 }, 46 }; 47 48 /* The number of discrete webcam framesizes */ 49 #define VIVID_WEBCAM_SIZES 6 50 /* The number of discrete webcam frameintervals */ 51 #define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2) 52 53 /* Sizes must be in increasing order */ 54 static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = { 55 { 320, 180 }, 56 { 640, 360 }, 57 { 640, 480 }, 58 { 1280, 720 }, 59 { 1920, 1080 }, 60 { 3840, 2160 }, 61 }; 62 63 /* 64 * Intervals must be in increasing order and there must be twice as many 65 * elements in this array as there are in webcam_sizes. 66 */ 67 static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = { 68 { 1, 1 }, 69 { 1, 2 }, 70 { 1, 4 }, 71 { 1, 5 }, 72 { 1, 10 }, 73 { 2, 25 }, 74 { 1, 15 }, 75 { 1, 25 }, 76 { 1, 30 }, 77 { 1, 40 }, 78 { 1, 50 }, 79 { 1, 60 }, 80 }; 81 82 static int vid_cap_queue_setup(struct vb2_queue *vq, 83 unsigned *nbuffers, unsigned *nplanes, 84 unsigned sizes[], struct device *alloc_devs[]) 85 { 86 struct vivid_dev *dev = vb2_get_drv_priv(vq); 87 unsigned buffers = tpg_g_buffers(&dev->tpg); 88 unsigned h = dev->fmt_cap_rect.height; 89 unsigned p; 90 91 if (dev->field_cap == V4L2_FIELD_ALTERNATE) { 92 /* 93 * You cannot use read() with FIELD_ALTERNATE since the field 94 * information (TOP/BOTTOM) cannot be passed back to the user. 95 */ 96 if (vb2_fileio_is_active(vq)) 97 return -EINVAL; 98 } 99 100 if (dev->queue_setup_error) { 101 /* 102 * Error injection: test what happens if queue_setup() returns 103 * an error. 104 */ 105 dev->queue_setup_error = false; 106 return -EINVAL; 107 } 108 if (*nplanes) { 109 /* 110 * Check if the number of requested planes match 111 * the number of buffers in the current format. You can't mix that. 112 */ 113 if (*nplanes != buffers) 114 return -EINVAL; 115 for (p = 0; p < buffers; p++) { 116 if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h + 117 dev->fmt_cap->data_offset[p]) 118 return -EINVAL; 119 } 120 } else { 121 for (p = 0; p < buffers; p++) 122 sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) / 123 dev->fmt_cap->vdownsampling[p] + 124 dev->fmt_cap->data_offset[p]; 125 } 126 127 if (vq->num_buffers + *nbuffers < 2) 128 *nbuffers = 2 - vq->num_buffers; 129 130 *nplanes = buffers; 131 132 dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers); 133 for (p = 0; p < buffers; p++) 134 dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]); 135 136 return 0; 137 } 138 139 static int vid_cap_buf_prepare(struct vb2_buffer *vb) 140 { 141 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 142 unsigned long size; 143 unsigned buffers = tpg_g_buffers(&dev->tpg); 144 unsigned p; 145 146 dprintk(dev, 1, "%s\n", __func__); 147 148 if (WARN_ON(NULL == dev->fmt_cap)) 149 return -EINVAL; 150 151 if (dev->buf_prepare_error) { 152 /* 153 * Error injection: test what happens if buf_prepare() returns 154 * an error. 155 */ 156 dev->buf_prepare_error = false; 157 return -EINVAL; 158 } 159 for (p = 0; p < buffers; p++) { 160 size = (tpg_g_line_width(&dev->tpg, p) * 161 dev->fmt_cap_rect.height) / 162 dev->fmt_cap->vdownsampling[p] + 163 dev->fmt_cap->data_offset[p]; 164 165 if (vb2_plane_size(vb, p) < size) { 166 dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n", 167 __func__, p, vb2_plane_size(vb, p), size); 168 return -EINVAL; 169 } 170 171 vb2_set_plane_payload(vb, p, size); 172 vb->planes[p].data_offset = dev->fmt_cap->data_offset[p]; 173 } 174 175 return 0; 176 } 177 178 static void vid_cap_buf_finish(struct vb2_buffer *vb) 179 { 180 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); 181 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 182 struct v4l2_timecode *tc = &vbuf->timecode; 183 unsigned fps = 25; 184 unsigned seq = vbuf->sequence; 185 186 if (!vivid_is_sdtv_cap(dev)) 187 return; 188 189 /* 190 * Set the timecode. Rarely used, so it is interesting to 191 * test this. 192 */ 193 vbuf->flags |= V4L2_BUF_FLAG_TIMECODE; 194 if (dev->std_cap[dev->input] & V4L2_STD_525_60) 195 fps = 30; 196 tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS; 197 tc->flags = 0; 198 tc->frames = seq % fps; 199 tc->seconds = (seq / fps) % 60; 200 tc->minutes = (seq / (60 * fps)) % 60; 201 tc->hours = (seq / (60 * 60 * fps)) % 24; 202 } 203 204 static void vid_cap_buf_queue(struct vb2_buffer *vb) 205 { 206 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); 207 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 208 struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb); 209 210 dprintk(dev, 1, "%s\n", __func__); 211 212 spin_lock(&dev->slock); 213 list_add_tail(&buf->list, &dev->vid_cap_active); 214 spin_unlock(&dev->slock); 215 } 216 217 static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) 218 { 219 struct vivid_dev *dev = vb2_get_drv_priv(vq); 220 unsigned i; 221 int err; 222 223 if (vb2_is_streaming(&dev->vb_vid_out_q)) 224 dev->can_loop_video = vivid_vid_can_loop(dev); 225 226 dev->vid_cap_seq_count = 0; 227 dprintk(dev, 1, "%s\n", __func__); 228 for (i = 0; i < VIDEO_MAX_FRAME; i++) 229 dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100; 230 if (dev->start_streaming_error) { 231 dev->start_streaming_error = false; 232 err = -EINVAL; 233 } else { 234 err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming); 235 } 236 if (err) { 237 struct vivid_buffer *buf, *tmp; 238 239 list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) { 240 list_del(&buf->list); 241 vb2_buffer_done(&buf->vb.vb2_buf, 242 VB2_BUF_STATE_QUEUED); 243 } 244 } 245 return err; 246 } 247 248 /* abort streaming and wait for last buffer */ 249 static void vid_cap_stop_streaming(struct vb2_queue *vq) 250 { 251 struct vivid_dev *dev = vb2_get_drv_priv(vq); 252 253 dprintk(dev, 1, "%s\n", __func__); 254 vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming); 255 dev->can_loop_video = false; 256 } 257 258 static void vid_cap_buf_request_complete(struct vb2_buffer *vb) 259 { 260 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 261 262 v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap); 263 } 264 265 const struct vb2_ops vivid_vid_cap_qops = { 266 .queue_setup = vid_cap_queue_setup, 267 .buf_prepare = vid_cap_buf_prepare, 268 .buf_finish = vid_cap_buf_finish, 269 .buf_queue = vid_cap_buf_queue, 270 .start_streaming = vid_cap_start_streaming, 271 .stop_streaming = vid_cap_stop_streaming, 272 .buf_request_complete = vid_cap_buf_request_complete, 273 .wait_prepare = vb2_ops_wait_prepare, 274 .wait_finish = vb2_ops_wait_finish, 275 }; 276 277 /* 278 * Determine the 'picture' quality based on the current TV frequency: either 279 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off 280 * signal or NOISE for no signal. 281 */ 282 void vivid_update_quality(struct vivid_dev *dev) 283 { 284 unsigned freq_modulus; 285 286 if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) { 287 /* 288 * The 'noise' will only be replaced by the actual video 289 * if the output video matches the input video settings. 290 */ 291 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0); 292 return; 293 } 294 if (vivid_is_hdmi_cap(dev) && 295 VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) { 296 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0); 297 return; 298 } 299 if (vivid_is_sdtv_cap(dev) && 300 VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) { 301 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0); 302 return; 303 } 304 if (!vivid_is_tv_cap(dev)) { 305 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0); 306 return; 307 } 308 309 /* 310 * There is a fake channel every 6 MHz at 49.25, 55.25, etc. 311 * From +/- 0.25 MHz around the channel there is color, and from 312 * +/- 1 MHz there is grayscale (chroma is lost). 313 * Everywhere else it is just noise. 314 */ 315 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16); 316 if (freq_modulus > 2 * 16) { 317 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 318 next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f); 319 return; 320 } 321 if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/) 322 tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0); 323 else 324 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0); 325 } 326 327 /* 328 * Get the current picture quality and the associated afc value. 329 */ 330 static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc) 331 { 332 unsigned freq_modulus; 333 334 if (afc) 335 *afc = 0; 336 if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR || 337 tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) 338 return tpg_g_quality(&dev->tpg); 339 340 /* 341 * There is a fake channel every 6 MHz at 49.25, 55.25, etc. 342 * From +/- 0.25 MHz around the channel there is color, and from 343 * +/- 1 MHz there is grayscale (chroma is lost). 344 * Everywhere else it is just gray. 345 */ 346 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16); 347 if (afc) 348 *afc = freq_modulus - 1 * 16; 349 return TPG_QUAL_GRAY; 350 } 351 352 enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev) 353 { 354 if (vivid_is_sdtv_cap(dev)) 355 return dev->std_aspect_ratio[dev->input]; 356 357 if (vivid_is_hdmi_cap(dev)) 358 return dev->dv_timings_aspect_ratio[dev->input]; 359 360 return TPG_VIDEO_ASPECT_IMAGE; 361 } 362 363 static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev) 364 { 365 if (vivid_is_sdtv_cap(dev)) 366 return (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 367 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL; 368 369 if (vivid_is_hdmi_cap(dev) && 370 dev->src_rect.width == 720 && dev->src_rect.height <= 576) 371 return dev->src_rect.height == 480 ? 372 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL; 373 374 return TPG_PIXEL_ASPECT_SQUARE; 375 } 376 377 /* 378 * Called whenever the format has to be reset which can occur when 379 * changing inputs, standard, timings, etc. 380 */ 381 void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) 382 { 383 struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt; 384 u32 dims[V4L2_CTRL_MAX_DIMS] = {}; 385 unsigned size; 386 u64 pixelclock; 387 388 switch (dev->input_type[dev->input]) { 389 case WEBCAM: 390 default: 391 dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width; 392 dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height; 393 dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx]; 394 dev->field_cap = V4L2_FIELD_NONE; 395 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO); 396 break; 397 case TV: 398 case SVID: 399 dev->field_cap = dev->tv_field_cap; 400 dev->src_rect.width = 720; 401 if (dev->std_cap[dev->input] & V4L2_STD_525_60) { 402 dev->src_rect.height = 480; 403 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 }; 404 dev->service_set_cap = V4L2_SLICED_CAPTION_525; 405 } else { 406 dev->src_rect.height = 576; 407 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 }; 408 dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B; 409 } 410 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO); 411 break; 412 case HDMI: 413 dev->src_rect.width = bt->width; 414 dev->src_rect.height = bt->height; 415 size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt); 416 if (dev->reduced_fps && can_reduce_fps(bt)) { 417 pixelclock = div_u64(bt->pixelclock * 1000, 1001); 418 bt->flags |= V4L2_DV_FL_REDUCED_FPS; 419 } else { 420 pixelclock = bt->pixelclock; 421 bt->flags &= ~V4L2_DV_FL_REDUCED_FPS; 422 } 423 dev->timeperframe_vid_cap = (struct v4l2_fract) { 424 size / 100, (u32)pixelclock / 100 425 }; 426 if (bt->interlaced) 427 dev->field_cap = V4L2_FIELD_ALTERNATE; 428 else 429 dev->field_cap = V4L2_FIELD_NONE; 430 431 /* 432 * We can be called from within s_ctrl, in that case we can't 433 * set/get controls. Luckily we don't need to in that case. 434 */ 435 if (keep_controls || !dev->colorspace) 436 break; 437 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) { 438 if (bt->width == 720 && bt->height <= 576) 439 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M); 440 else 441 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709); 442 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1); 443 } else { 444 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB); 445 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0); 446 } 447 tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap)); 448 break; 449 } 450 vfree(dev->bitmap_cap); 451 dev->bitmap_cap = NULL; 452 vivid_update_quality(dev); 453 tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap); 454 dev->crop_cap = dev->src_rect; 455 dev->crop_bounds_cap = dev->src_rect; 456 if (dev->bitmap_cap && 457 (dev->compose_cap.width != dev->crop_cap.width || 458 dev->compose_cap.height != dev->crop_cap.height)) { 459 vfree(dev->bitmap_cap); 460 dev->bitmap_cap = NULL; 461 } 462 dev->compose_cap = dev->crop_cap; 463 if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap)) 464 dev->compose_cap.height /= 2; 465 dev->fmt_cap_rect = dev->compose_cap; 466 tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev)); 467 tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev)); 468 tpg_update_mv_step(&dev->tpg); 469 470 /* 471 * We can be called from within s_ctrl, in that case we can't 472 * modify controls. Luckily we don't need to in that case. 473 */ 474 if (keep_controls) 475 return; 476 477 dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV); 478 dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV); 479 v4l2_ctrl_modify_dimensions(dev->pixel_array, dims); 480 } 481 482 /* Map the field to something that is valid for the current input */ 483 static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field) 484 { 485 if (vivid_is_sdtv_cap(dev)) { 486 switch (field) { 487 case V4L2_FIELD_INTERLACED_TB: 488 case V4L2_FIELD_INTERLACED_BT: 489 case V4L2_FIELD_SEQ_TB: 490 case V4L2_FIELD_SEQ_BT: 491 case V4L2_FIELD_TOP: 492 case V4L2_FIELD_BOTTOM: 493 case V4L2_FIELD_ALTERNATE: 494 return field; 495 case V4L2_FIELD_INTERLACED: 496 default: 497 return V4L2_FIELD_INTERLACED; 498 } 499 } 500 if (vivid_is_hdmi_cap(dev)) 501 return dev->dv_timings_cap[dev->input].bt.interlaced ? 502 V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE; 503 return V4L2_FIELD_NONE; 504 } 505 506 static unsigned vivid_colorspace_cap(struct vivid_dev *dev) 507 { 508 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 509 return tpg_g_colorspace(&dev->tpg); 510 return dev->colorspace_out; 511 } 512 513 static unsigned vivid_xfer_func_cap(struct vivid_dev *dev) 514 { 515 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 516 return tpg_g_xfer_func(&dev->tpg); 517 return dev->xfer_func_out; 518 } 519 520 static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev) 521 { 522 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 523 return tpg_g_ycbcr_enc(&dev->tpg); 524 return dev->ycbcr_enc_out; 525 } 526 527 static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev) 528 { 529 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 530 return tpg_g_hsv_enc(&dev->tpg); 531 return dev->hsv_enc_out; 532 } 533 534 static unsigned vivid_quantization_cap(struct vivid_dev *dev) 535 { 536 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev)) 537 return tpg_g_quantization(&dev->tpg); 538 return dev->quantization_out; 539 } 540 541 int vivid_g_fmt_vid_cap(struct file *file, void *priv, 542 struct v4l2_format *f) 543 { 544 struct vivid_dev *dev = video_drvdata(file); 545 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; 546 unsigned p; 547 548 mp->width = dev->fmt_cap_rect.width; 549 mp->height = dev->fmt_cap_rect.height; 550 mp->field = dev->field_cap; 551 mp->pixelformat = dev->fmt_cap->fourcc; 552 mp->colorspace = vivid_colorspace_cap(dev); 553 mp->xfer_func = vivid_xfer_func_cap(dev); 554 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV) 555 mp->hsv_enc = vivid_hsv_enc_cap(dev); 556 else 557 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev); 558 mp->quantization = vivid_quantization_cap(dev); 559 mp->num_planes = dev->fmt_cap->buffers; 560 for (p = 0; p < mp->num_planes; p++) { 561 mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p); 562 mp->plane_fmt[p].sizeimage = 563 (tpg_g_line_width(&dev->tpg, p) * mp->height) / 564 dev->fmt_cap->vdownsampling[p] + 565 dev->fmt_cap->data_offset[p]; 566 } 567 return 0; 568 } 569 570 int vivid_try_fmt_vid_cap(struct file *file, void *priv, 571 struct v4l2_format *f) 572 { 573 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; 574 struct v4l2_plane_pix_format *pfmt = mp->plane_fmt; 575 struct vivid_dev *dev = video_drvdata(file); 576 const struct vivid_fmt *fmt; 577 unsigned bytesperline, max_bpl; 578 unsigned factor = 1; 579 unsigned w, h; 580 unsigned p; 581 bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC); 582 583 fmt = vivid_get_format(dev, mp->pixelformat); 584 if (!fmt) { 585 dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n", 586 mp->pixelformat); 587 mp->pixelformat = V4L2_PIX_FMT_YUYV; 588 fmt = vivid_get_format(dev, mp->pixelformat); 589 } 590 591 mp->field = vivid_field_cap(dev, mp->field); 592 if (vivid_is_webcam(dev)) { 593 const struct v4l2_frmsize_discrete *sz = 594 v4l2_find_nearest_size(webcam_sizes, 595 VIVID_WEBCAM_SIZES, width, 596 height, mp->width, mp->height); 597 598 w = sz->width; 599 h = sz->height; 600 } else if (vivid_is_sdtv_cap(dev)) { 601 w = 720; 602 h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576; 603 } else { 604 w = dev->src_rect.width; 605 h = dev->src_rect.height; 606 } 607 if (V4L2_FIELD_HAS_T_OR_B(mp->field)) 608 factor = 2; 609 if (vivid_is_webcam(dev) || 610 (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) { 611 mp->width = w; 612 mp->height = h / factor; 613 } else { 614 struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor }; 615 616 v4l2_rect_set_min_size(&r, &vivid_min_rect); 617 v4l2_rect_set_max_size(&r, &vivid_max_rect); 618 if (dev->has_scaler_cap && !dev->has_compose_cap) { 619 struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h }; 620 621 v4l2_rect_set_max_size(&r, &max_r); 622 } else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) { 623 v4l2_rect_set_max_size(&r, &dev->src_rect); 624 } else if (!dev->has_scaler_cap && !dev->has_crop_cap) { 625 v4l2_rect_set_min_size(&r, &dev->src_rect); 626 } 627 mp->width = r.width; 628 mp->height = r.height / factor; 629 } 630 631 /* This driver supports custom bytesperline values */ 632 633 mp->num_planes = fmt->buffers; 634 for (p = 0; p < fmt->buffers; p++) { 635 /* Calculate the minimum supported bytesperline value */ 636 bytesperline = (mp->width * fmt->bit_depth[p]) >> 3; 637 /* Calculate the maximum supported bytesperline value */ 638 max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3; 639 640 if (pfmt[p].bytesperline > max_bpl) 641 pfmt[p].bytesperline = max_bpl; 642 if (pfmt[p].bytesperline < bytesperline) 643 pfmt[p].bytesperline = bytesperline; 644 645 pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) / 646 fmt->vdownsampling[p] + fmt->data_offset[p]; 647 648 memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved)); 649 } 650 for (p = fmt->buffers; p < fmt->planes; p++) 651 pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height * 652 (fmt->bit_depth[p] / fmt->vdownsampling[p])) / 653 (fmt->bit_depth[0] / fmt->vdownsampling[0]); 654 655 if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace)) 656 mp->colorspace = vivid_colorspace_cap(dev); 657 658 if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func)) 659 mp->xfer_func = vivid_xfer_func_cap(dev); 660 661 if (fmt->color_enc == TGP_COLOR_ENC_HSV) { 662 if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc)) 663 mp->hsv_enc = vivid_hsv_enc_cap(dev); 664 } else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) { 665 if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc)) 666 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev); 667 } else { 668 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev); 669 } 670 671 if (fmt->color_enc == TGP_COLOR_ENC_YCBCR || 672 fmt->color_enc == TGP_COLOR_ENC_RGB) { 673 if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization)) 674 mp->quantization = vivid_quantization_cap(dev); 675 } else { 676 mp->quantization = vivid_quantization_cap(dev); 677 } 678 679 memset(mp->reserved, 0, sizeof(mp->reserved)); 680 return 0; 681 } 682 683 int vivid_s_fmt_vid_cap(struct file *file, void *priv, 684 struct v4l2_format *f) 685 { 686 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; 687 struct vivid_dev *dev = video_drvdata(file); 688 struct v4l2_rect *crop = &dev->crop_cap; 689 struct v4l2_rect *compose = &dev->compose_cap; 690 struct vb2_queue *q = &dev->vb_vid_cap_q; 691 int ret = vivid_try_fmt_vid_cap(file, priv, f); 692 unsigned factor = 1; 693 unsigned p; 694 unsigned i; 695 696 if (ret < 0) 697 return ret; 698 699 if (vb2_is_busy(q)) { 700 dprintk(dev, 1, "%s device busy\n", __func__); 701 return -EBUSY; 702 } 703 704 if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) { 705 dprintk(dev, 1, "overlay is active, can't change pixelformat\n"); 706 return -EBUSY; 707 } 708 709 dev->fmt_cap = vivid_get_format(dev, mp->pixelformat); 710 if (V4L2_FIELD_HAS_T_OR_B(mp->field)) 711 factor = 2; 712 713 /* Note: the webcam input doesn't support scaling, cropping or composing */ 714 715 if (!vivid_is_webcam(dev) && 716 (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) { 717 struct v4l2_rect r = { 0, 0, mp->width, mp->height }; 718 719 if (dev->has_scaler_cap) { 720 if (dev->has_compose_cap) 721 v4l2_rect_map_inside(compose, &r); 722 else 723 *compose = r; 724 if (dev->has_crop_cap && !dev->has_compose_cap) { 725 struct v4l2_rect min_r = { 726 0, 0, 727 r.width / MAX_ZOOM, 728 factor * r.height / MAX_ZOOM 729 }; 730 struct v4l2_rect max_r = { 731 0, 0, 732 r.width * MAX_ZOOM, 733 factor * r.height * MAX_ZOOM 734 }; 735 736 v4l2_rect_set_min_size(crop, &min_r); 737 v4l2_rect_set_max_size(crop, &max_r); 738 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 739 } else if (dev->has_crop_cap) { 740 struct v4l2_rect min_r = { 741 0, 0, 742 compose->width / MAX_ZOOM, 743 factor * compose->height / MAX_ZOOM 744 }; 745 struct v4l2_rect max_r = { 746 0, 0, 747 compose->width * MAX_ZOOM, 748 factor * compose->height * MAX_ZOOM 749 }; 750 751 v4l2_rect_set_min_size(crop, &min_r); 752 v4l2_rect_set_max_size(crop, &max_r); 753 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 754 } 755 } else if (dev->has_crop_cap && !dev->has_compose_cap) { 756 r.height *= factor; 757 v4l2_rect_set_size_to(crop, &r); 758 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 759 r = *crop; 760 r.height /= factor; 761 v4l2_rect_set_size_to(compose, &r); 762 } else if (!dev->has_crop_cap) { 763 v4l2_rect_map_inside(compose, &r); 764 } else { 765 r.height *= factor; 766 v4l2_rect_set_max_size(crop, &r); 767 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 768 compose->top *= factor; 769 compose->height *= factor; 770 v4l2_rect_set_size_to(compose, crop); 771 v4l2_rect_map_inside(compose, &r); 772 compose->top /= factor; 773 compose->height /= factor; 774 } 775 } else if (vivid_is_webcam(dev)) { 776 /* Guaranteed to be a match */ 777 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++) 778 if (webcam_sizes[i].width == mp->width && 779 webcam_sizes[i].height == mp->height) 780 break; 781 dev->webcam_size_idx = i; 782 if (dev->webcam_ival_idx >= 2 * (VIVID_WEBCAM_SIZES - i)) 783 dev->webcam_ival_idx = 2 * (VIVID_WEBCAM_SIZES - i) - 1; 784 vivid_update_format_cap(dev, false); 785 } else { 786 struct v4l2_rect r = { 0, 0, mp->width, mp->height }; 787 788 v4l2_rect_set_size_to(compose, &r); 789 r.height *= factor; 790 v4l2_rect_set_size_to(crop, &r); 791 } 792 793 dev->fmt_cap_rect.width = mp->width; 794 dev->fmt_cap_rect.height = mp->height; 795 tpg_s_buf_height(&dev->tpg, mp->height); 796 tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc); 797 for (p = 0; p < tpg_g_buffers(&dev->tpg); p++) 798 tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline); 799 dev->field_cap = mp->field; 800 if (dev->field_cap == V4L2_FIELD_ALTERNATE) 801 tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true); 802 else 803 tpg_s_field(&dev->tpg, dev->field_cap, false); 804 tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap); 805 if (vivid_is_sdtv_cap(dev)) 806 dev->tv_field_cap = mp->field; 807 tpg_update_mv_step(&dev->tpg); 808 dev->tpg.colorspace = mp->colorspace; 809 dev->tpg.xfer_func = mp->xfer_func; 810 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR) 811 dev->tpg.ycbcr_enc = mp->ycbcr_enc; 812 else 813 dev->tpg.hsv_enc = mp->hsv_enc; 814 dev->tpg.quantization = mp->quantization; 815 816 return 0; 817 } 818 819 int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv, 820 struct v4l2_format *f) 821 { 822 struct vivid_dev *dev = video_drvdata(file); 823 824 if (!dev->multiplanar) 825 return -ENOTTY; 826 return vivid_g_fmt_vid_cap(file, priv, f); 827 } 828 829 int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv, 830 struct v4l2_format *f) 831 { 832 struct vivid_dev *dev = video_drvdata(file); 833 834 if (!dev->multiplanar) 835 return -ENOTTY; 836 return vivid_try_fmt_vid_cap(file, priv, f); 837 } 838 839 int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv, 840 struct v4l2_format *f) 841 { 842 struct vivid_dev *dev = video_drvdata(file); 843 844 if (!dev->multiplanar) 845 return -ENOTTY; 846 return vivid_s_fmt_vid_cap(file, priv, f); 847 } 848 849 int vidioc_g_fmt_vid_cap(struct file *file, void *priv, 850 struct v4l2_format *f) 851 { 852 struct vivid_dev *dev = video_drvdata(file); 853 854 if (dev->multiplanar) 855 return -ENOTTY; 856 return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap); 857 } 858 859 int vidioc_try_fmt_vid_cap(struct file *file, void *priv, 860 struct v4l2_format *f) 861 { 862 struct vivid_dev *dev = video_drvdata(file); 863 864 if (dev->multiplanar) 865 return -ENOTTY; 866 return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap); 867 } 868 869 int vidioc_s_fmt_vid_cap(struct file *file, void *priv, 870 struct v4l2_format *f) 871 { 872 struct vivid_dev *dev = video_drvdata(file); 873 874 if (dev->multiplanar) 875 return -ENOTTY; 876 return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap); 877 } 878 879 int vivid_vid_cap_g_selection(struct file *file, void *priv, 880 struct v4l2_selection *sel) 881 { 882 struct vivid_dev *dev = video_drvdata(file); 883 884 if (!dev->has_crop_cap && !dev->has_compose_cap) 885 return -ENOTTY; 886 if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 887 return -EINVAL; 888 if (vivid_is_webcam(dev)) 889 return -ENODATA; 890 891 sel->r.left = sel->r.top = 0; 892 switch (sel->target) { 893 case V4L2_SEL_TGT_CROP: 894 if (!dev->has_crop_cap) 895 return -EINVAL; 896 sel->r = dev->crop_cap; 897 break; 898 case V4L2_SEL_TGT_CROP_DEFAULT: 899 case V4L2_SEL_TGT_CROP_BOUNDS: 900 if (!dev->has_crop_cap) 901 return -EINVAL; 902 sel->r = dev->src_rect; 903 break; 904 case V4L2_SEL_TGT_COMPOSE_BOUNDS: 905 if (!dev->has_compose_cap) 906 return -EINVAL; 907 sel->r = vivid_max_rect; 908 break; 909 case V4L2_SEL_TGT_COMPOSE: 910 if (!dev->has_compose_cap) 911 return -EINVAL; 912 sel->r = dev->compose_cap; 913 break; 914 case V4L2_SEL_TGT_COMPOSE_DEFAULT: 915 if (!dev->has_compose_cap) 916 return -EINVAL; 917 sel->r = dev->fmt_cap_rect; 918 break; 919 default: 920 return -EINVAL; 921 } 922 return 0; 923 } 924 925 int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s) 926 { 927 struct vivid_dev *dev = video_drvdata(file); 928 struct v4l2_rect *crop = &dev->crop_cap; 929 struct v4l2_rect *compose = &dev->compose_cap; 930 unsigned orig_compose_w = compose->width; 931 unsigned orig_compose_h = compose->height; 932 unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1; 933 int ret; 934 935 if (!dev->has_crop_cap && !dev->has_compose_cap) 936 return -ENOTTY; 937 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 938 return -EINVAL; 939 if (vivid_is_webcam(dev)) 940 return -ENODATA; 941 942 switch (s->target) { 943 case V4L2_SEL_TGT_CROP: 944 if (!dev->has_crop_cap) 945 return -EINVAL; 946 ret = vivid_vid_adjust_sel(s->flags, &s->r); 947 if (ret) 948 return ret; 949 v4l2_rect_set_min_size(&s->r, &vivid_min_rect); 950 v4l2_rect_set_max_size(&s->r, &dev->src_rect); 951 v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap); 952 s->r.top /= factor; 953 s->r.height /= factor; 954 if (dev->has_scaler_cap) { 955 struct v4l2_rect fmt = dev->fmt_cap_rect; 956 struct v4l2_rect max_rect = { 957 0, 0, 958 s->r.width * MAX_ZOOM, 959 s->r.height * MAX_ZOOM 960 }; 961 struct v4l2_rect min_rect = { 962 0, 0, 963 s->r.width / MAX_ZOOM, 964 s->r.height / MAX_ZOOM 965 }; 966 967 v4l2_rect_set_min_size(&fmt, &min_rect); 968 if (!dev->has_compose_cap) 969 v4l2_rect_set_max_size(&fmt, &max_rect); 970 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) && 971 vb2_is_busy(&dev->vb_vid_cap_q)) 972 return -EBUSY; 973 if (dev->has_compose_cap) { 974 v4l2_rect_set_min_size(compose, &min_rect); 975 v4l2_rect_set_max_size(compose, &max_rect); 976 } 977 dev->fmt_cap_rect = fmt; 978 tpg_s_buf_height(&dev->tpg, fmt.height); 979 } else if (dev->has_compose_cap) { 980 struct v4l2_rect fmt = dev->fmt_cap_rect; 981 982 v4l2_rect_set_min_size(&fmt, &s->r); 983 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) && 984 vb2_is_busy(&dev->vb_vid_cap_q)) 985 return -EBUSY; 986 dev->fmt_cap_rect = fmt; 987 tpg_s_buf_height(&dev->tpg, fmt.height); 988 v4l2_rect_set_size_to(compose, &s->r); 989 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect); 990 } else { 991 if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) && 992 vb2_is_busy(&dev->vb_vid_cap_q)) 993 return -EBUSY; 994 v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r); 995 v4l2_rect_set_size_to(compose, &s->r); 996 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect); 997 tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height); 998 } 999 s->r.top *= factor; 1000 s->r.height *= factor; 1001 *crop = s->r; 1002 break; 1003 case V4L2_SEL_TGT_COMPOSE: 1004 if (!dev->has_compose_cap) 1005 return -EINVAL; 1006 ret = vivid_vid_adjust_sel(s->flags, &s->r); 1007 if (ret) 1008 return ret; 1009 v4l2_rect_set_min_size(&s->r, &vivid_min_rect); 1010 v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect); 1011 if (dev->has_scaler_cap) { 1012 struct v4l2_rect max_rect = { 1013 0, 0, 1014 dev->src_rect.width * MAX_ZOOM, 1015 (dev->src_rect.height / factor) * MAX_ZOOM 1016 }; 1017 1018 v4l2_rect_set_max_size(&s->r, &max_rect); 1019 if (dev->has_crop_cap) { 1020 struct v4l2_rect min_rect = { 1021 0, 0, 1022 s->r.width / MAX_ZOOM, 1023 (s->r.height * factor) / MAX_ZOOM 1024 }; 1025 struct v4l2_rect max_rect = { 1026 0, 0, 1027 s->r.width * MAX_ZOOM, 1028 (s->r.height * factor) * MAX_ZOOM 1029 }; 1030 1031 v4l2_rect_set_min_size(crop, &min_rect); 1032 v4l2_rect_set_max_size(crop, &max_rect); 1033 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 1034 } 1035 } else if (dev->has_crop_cap) { 1036 s->r.top *= factor; 1037 s->r.height *= factor; 1038 v4l2_rect_set_max_size(&s->r, &dev->src_rect); 1039 v4l2_rect_set_size_to(crop, &s->r); 1040 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); 1041 s->r.top /= factor; 1042 s->r.height /= factor; 1043 } else { 1044 v4l2_rect_set_size_to(&s->r, &dev->src_rect); 1045 s->r.height /= factor; 1046 } 1047 v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect); 1048 *compose = s->r; 1049 break; 1050 default: 1051 return -EINVAL; 1052 } 1053 1054 if (dev->bitmap_cap && (compose->width != orig_compose_w || 1055 compose->height != orig_compose_h)) { 1056 vfree(dev->bitmap_cap); 1057 dev->bitmap_cap = NULL; 1058 } 1059 tpg_s_crop_compose(&dev->tpg, crop, compose); 1060 return 0; 1061 } 1062 1063 int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv, 1064 int type, struct v4l2_fract *f) 1065 { 1066 struct vivid_dev *dev = video_drvdata(file); 1067 1068 if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1069 return -EINVAL; 1070 1071 switch (vivid_get_pixel_aspect(dev)) { 1072 case TPG_PIXEL_ASPECT_NTSC: 1073 f->numerator = 11; 1074 f->denominator = 10; 1075 break; 1076 case TPG_PIXEL_ASPECT_PAL: 1077 f->numerator = 54; 1078 f->denominator = 59; 1079 break; 1080 default: 1081 break; 1082 } 1083 return 0; 1084 } 1085 1086 int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv, 1087 struct v4l2_fmtdesc *f) 1088 { 1089 struct vivid_dev *dev = video_drvdata(file); 1090 const struct vivid_fmt *fmt; 1091 1092 if (dev->multiplanar) 1093 return -ENOTTY; 1094 1095 if (f->index >= ARRAY_SIZE(formats_ovl)) 1096 return -EINVAL; 1097 1098 fmt = &formats_ovl[f->index]; 1099 1100 f->pixelformat = fmt->fourcc; 1101 return 0; 1102 } 1103 1104 int vidioc_g_fmt_vid_overlay(struct file *file, void *priv, 1105 struct v4l2_format *f) 1106 { 1107 struct vivid_dev *dev = video_drvdata(file); 1108 const struct v4l2_rect *compose = &dev->compose_cap; 1109 struct v4l2_window *win = &f->fmt.win; 1110 unsigned clipcount = win->clipcount; 1111 1112 if (dev->multiplanar) 1113 return -ENOTTY; 1114 1115 win->w.top = dev->overlay_cap_top; 1116 win->w.left = dev->overlay_cap_left; 1117 win->w.width = compose->width; 1118 win->w.height = compose->height; 1119 win->field = dev->overlay_cap_field; 1120 win->clipcount = dev->clipcount_cap; 1121 if (clipcount > dev->clipcount_cap) 1122 clipcount = dev->clipcount_cap; 1123 if (dev->bitmap_cap == NULL) 1124 win->bitmap = NULL; 1125 else if (win->bitmap) { 1126 if (copy_to_user(win->bitmap, dev->bitmap_cap, 1127 ((compose->width + 7) / 8) * compose->height)) 1128 return -EFAULT; 1129 } 1130 if (clipcount && win->clips) 1131 memcpy(win->clips, dev->clips_cap, 1132 clipcount * sizeof(dev->clips_cap[0])); 1133 return 0; 1134 } 1135 1136 int vidioc_try_fmt_vid_overlay(struct file *file, void *priv, 1137 struct v4l2_format *f) 1138 { 1139 struct vivid_dev *dev = video_drvdata(file); 1140 const struct v4l2_rect *compose = &dev->compose_cap; 1141 struct v4l2_window *win = &f->fmt.win; 1142 int i, j; 1143 1144 if (dev->multiplanar) 1145 return -ENOTTY; 1146 1147 win->w.left = clamp_t(int, win->w.left, 1148 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width); 1149 win->w.top = clamp_t(int, win->w.top, 1150 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height); 1151 win->w.width = compose->width; 1152 win->w.height = compose->height; 1153 if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP) 1154 win->field = V4L2_FIELD_ANY; 1155 win->chromakey = 0; 1156 win->global_alpha = 0; 1157 if (win->clipcount && !win->clips) 1158 win->clipcount = 0; 1159 if (win->clipcount > MAX_CLIPS) 1160 win->clipcount = MAX_CLIPS; 1161 if (win->clipcount) { 1162 memcpy(dev->try_clips_cap, win->clips, 1163 win->clipcount * sizeof(dev->clips_cap[0])); 1164 for (i = 0; i < win->clipcount; i++) { 1165 struct v4l2_rect *r = &dev->try_clips_cap[i].c; 1166 1167 r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1); 1168 r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top); 1169 r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1); 1170 r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left); 1171 } 1172 /* 1173 * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small 1174 * number and it's typically a one-time deal. 1175 */ 1176 for (i = 0; i < win->clipcount - 1; i++) { 1177 struct v4l2_rect *r1 = &dev->try_clips_cap[i].c; 1178 1179 for (j = i + 1; j < win->clipcount; j++) { 1180 struct v4l2_rect *r2 = &dev->try_clips_cap[j].c; 1181 1182 if (v4l2_rect_overlap(r1, r2)) 1183 return -EINVAL; 1184 } 1185 } 1186 memcpy(win->clips, dev->try_clips_cap, 1187 win->clipcount * sizeof(dev->clips_cap[0])); 1188 } 1189 return 0; 1190 } 1191 1192 int vidioc_s_fmt_vid_overlay(struct file *file, void *priv, 1193 struct v4l2_format *f) 1194 { 1195 struct vivid_dev *dev = video_drvdata(file); 1196 const struct v4l2_rect *compose = &dev->compose_cap; 1197 struct v4l2_window *win = &f->fmt.win; 1198 int ret = vidioc_try_fmt_vid_overlay(file, priv, f); 1199 unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height; 1200 unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]); 1201 void *new_bitmap = NULL; 1202 1203 if (ret) 1204 return ret; 1205 1206 if (win->bitmap) { 1207 new_bitmap = vzalloc(bitmap_size); 1208 1209 if (new_bitmap == NULL) 1210 return -ENOMEM; 1211 if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) { 1212 vfree(new_bitmap); 1213 return -EFAULT; 1214 } 1215 } 1216 1217 dev->overlay_cap_top = win->w.top; 1218 dev->overlay_cap_left = win->w.left; 1219 dev->overlay_cap_field = win->field; 1220 vfree(dev->bitmap_cap); 1221 dev->bitmap_cap = new_bitmap; 1222 dev->clipcount_cap = win->clipcount; 1223 if (dev->clipcount_cap) 1224 memcpy(dev->clips_cap, dev->try_clips_cap, clips_size); 1225 return 0; 1226 } 1227 1228 int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i) 1229 { 1230 struct vivid_dev *dev = video_drvdata(file); 1231 1232 if (dev->multiplanar) 1233 return -ENOTTY; 1234 1235 if (i && dev->fb_vbase_cap == NULL) 1236 return -EINVAL; 1237 1238 if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) { 1239 dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n"); 1240 return -EINVAL; 1241 } 1242 1243 if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh) 1244 return -EBUSY; 1245 dev->overlay_cap_owner = i ? fh : NULL; 1246 return 0; 1247 } 1248 1249 int vivid_vid_cap_g_fbuf(struct file *file, void *fh, 1250 struct v4l2_framebuffer *a) 1251 { 1252 struct vivid_dev *dev = video_drvdata(file); 1253 1254 if (dev->multiplanar) 1255 return -ENOTTY; 1256 1257 *a = dev->fb_cap; 1258 a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING | 1259 V4L2_FBUF_CAP_LIST_CLIPPING; 1260 a->flags = V4L2_FBUF_FLAG_PRIMARY; 1261 a->fmt.field = V4L2_FIELD_NONE; 1262 a->fmt.colorspace = V4L2_COLORSPACE_SRGB; 1263 a->fmt.priv = 0; 1264 return 0; 1265 } 1266 1267 int vivid_vid_cap_s_fbuf(struct file *file, void *fh, 1268 const struct v4l2_framebuffer *a) 1269 { 1270 struct vivid_dev *dev = video_drvdata(file); 1271 const struct vivid_fmt *fmt; 1272 1273 if (dev->multiplanar) 1274 return -ENOTTY; 1275 1276 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) 1277 return -EPERM; 1278 1279 if (dev->overlay_cap_owner) 1280 return -EBUSY; 1281 1282 if (a->base == NULL) { 1283 dev->fb_cap.base = NULL; 1284 dev->fb_vbase_cap = NULL; 1285 return 0; 1286 } 1287 1288 if (a->fmt.width < 48 || a->fmt.height < 32) 1289 return -EINVAL; 1290 fmt = vivid_get_format(dev, a->fmt.pixelformat); 1291 if (!fmt || !fmt->can_do_overlay) 1292 return -EINVAL; 1293 if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8) 1294 return -EINVAL; 1295 if (a->fmt.bytesperline > a->fmt.sizeimage / a->fmt.height) 1296 return -EINVAL; 1297 1298 /* 1299 * Only support the framebuffer of one of the vivid instances. 1300 * Anything else is rejected. 1301 */ 1302 if (!vivid_validate_fb(a)) 1303 return -EINVAL; 1304 1305 dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base); 1306 dev->fb_cap = *a; 1307 dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left, 1308 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width); 1309 dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top, 1310 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height); 1311 return 0; 1312 } 1313 1314 static const struct v4l2_audio vivid_audio_inputs[] = { 1315 { 0, "TV", V4L2_AUDCAP_STEREO }, 1316 { 1, "Line-In", V4L2_AUDCAP_STEREO }, 1317 }; 1318 1319 int vidioc_enum_input(struct file *file, void *priv, 1320 struct v4l2_input *inp) 1321 { 1322 struct vivid_dev *dev = video_drvdata(file); 1323 1324 if (inp->index >= dev->num_inputs) 1325 return -EINVAL; 1326 1327 inp->type = V4L2_INPUT_TYPE_CAMERA; 1328 switch (dev->input_type[inp->index]) { 1329 case WEBCAM: 1330 snprintf(inp->name, sizeof(inp->name), "Webcam %u", 1331 dev->input_name_counter[inp->index]); 1332 inp->capabilities = 0; 1333 break; 1334 case TV: 1335 snprintf(inp->name, sizeof(inp->name), "TV %u", 1336 dev->input_name_counter[inp->index]); 1337 inp->type = V4L2_INPUT_TYPE_TUNER; 1338 inp->std = V4L2_STD_ALL; 1339 if (dev->has_audio_inputs) 1340 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1; 1341 inp->capabilities = V4L2_IN_CAP_STD; 1342 break; 1343 case SVID: 1344 snprintf(inp->name, sizeof(inp->name), "S-Video %u", 1345 dev->input_name_counter[inp->index]); 1346 inp->std = V4L2_STD_ALL; 1347 if (dev->has_audio_inputs) 1348 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1; 1349 inp->capabilities = V4L2_IN_CAP_STD; 1350 break; 1351 case HDMI: 1352 snprintf(inp->name, sizeof(inp->name), "HDMI %u", 1353 dev->input_name_counter[inp->index]); 1354 inp->capabilities = V4L2_IN_CAP_DV_TIMINGS; 1355 if (dev->edid_blocks == 0 || 1356 dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL) 1357 inp->status |= V4L2_IN_ST_NO_SIGNAL; 1358 else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK || 1359 dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE) 1360 inp->status |= V4L2_IN_ST_NO_H_LOCK; 1361 break; 1362 } 1363 if (dev->sensor_hflip) 1364 inp->status |= V4L2_IN_ST_HFLIP; 1365 if (dev->sensor_vflip) 1366 inp->status |= V4L2_IN_ST_VFLIP; 1367 if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) { 1368 if (dev->std_signal_mode[dev->input] == NO_SIGNAL) { 1369 inp->status |= V4L2_IN_ST_NO_SIGNAL; 1370 } else if (dev->std_signal_mode[dev->input] == NO_LOCK) { 1371 inp->status |= V4L2_IN_ST_NO_H_LOCK; 1372 } else if (vivid_is_tv_cap(dev)) { 1373 switch (tpg_g_quality(&dev->tpg)) { 1374 case TPG_QUAL_GRAY: 1375 inp->status |= V4L2_IN_ST_COLOR_KILL; 1376 break; 1377 case TPG_QUAL_NOISE: 1378 inp->status |= V4L2_IN_ST_NO_H_LOCK; 1379 break; 1380 default: 1381 break; 1382 } 1383 } 1384 } 1385 return 0; 1386 } 1387 1388 int vidioc_g_input(struct file *file, void *priv, unsigned *i) 1389 { 1390 struct vivid_dev *dev = video_drvdata(file); 1391 1392 *i = dev->input; 1393 return 0; 1394 } 1395 1396 int vidioc_s_input(struct file *file, void *priv, unsigned i) 1397 { 1398 struct vivid_dev *dev = video_drvdata(file); 1399 struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt; 1400 unsigned brightness; 1401 1402 if (i >= dev->num_inputs) 1403 return -EINVAL; 1404 1405 if (i == dev->input) 1406 return 0; 1407 1408 if (vb2_is_busy(&dev->vb_vid_cap_q) || 1409 vb2_is_busy(&dev->vb_vbi_cap_q) || 1410 vb2_is_busy(&dev->vb_meta_cap_q)) 1411 return -EBUSY; 1412 1413 dev->input = i; 1414 dev->vid_cap_dev.tvnorms = 0; 1415 if (dev->input_type[i] == TV || dev->input_type[i] == SVID) { 1416 dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1; 1417 dev->vid_cap_dev.tvnorms = V4L2_STD_ALL; 1418 } 1419 dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms; 1420 dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms; 1421 vivid_update_format_cap(dev, false); 1422 1423 if (dev->colorspace) { 1424 switch (dev->input_type[i]) { 1425 case WEBCAM: 1426 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB); 1427 break; 1428 case TV: 1429 case SVID: 1430 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M); 1431 break; 1432 case HDMI: 1433 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) { 1434 if (dev->src_rect.width == 720 && dev->src_rect.height <= 576) 1435 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M); 1436 else 1437 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709); 1438 } else { 1439 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB); 1440 } 1441 break; 1442 } 1443 } 1444 1445 /* 1446 * Modify the brightness range depending on the input. 1447 * This makes it easy to use vivid to test if applications can 1448 * handle control range modifications and is also how this is 1449 * typically used in practice as different inputs may be hooked 1450 * up to different receivers with different control ranges. 1451 */ 1452 brightness = 128 * i + dev->input_brightness[i]; 1453 v4l2_ctrl_modify_range(dev->brightness, 1454 128 * i, 255 + 128 * i, 1, 128 + 128 * i); 1455 v4l2_ctrl_s_ctrl(dev->brightness, brightness); 1456 1457 /* Restore per-input states. */ 1458 v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode, 1459 vivid_is_hdmi_cap(dev)); 1460 v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) && 1461 dev->dv_timings_signal_mode[dev->input] == 1462 SELECTED_DV_TIMINGS); 1463 v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev)); 1464 v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) && 1465 dev->std_signal_mode[dev->input]); 1466 1467 if (vivid_is_hdmi_cap(dev)) { 1468 v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode, 1469 dev->dv_timings_signal_mode[dev->input]); 1470 v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings, 1471 dev->query_dv_timings[dev->input]); 1472 } else if (vivid_is_sdtv_cap(dev)) { 1473 v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode, 1474 dev->std_signal_mode[dev->input]); 1475 v4l2_ctrl_s_ctrl(dev->ctrl_standard, 1476 dev->std_signal_mode[dev->input]); 1477 } 1478 1479 return 0; 1480 } 1481 1482 int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin) 1483 { 1484 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs)) 1485 return -EINVAL; 1486 *vin = vivid_audio_inputs[vin->index]; 1487 return 0; 1488 } 1489 1490 int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin) 1491 { 1492 struct vivid_dev *dev = video_drvdata(file); 1493 1494 if (!vivid_is_sdtv_cap(dev)) 1495 return -EINVAL; 1496 *vin = vivid_audio_inputs[dev->tv_audio_input]; 1497 return 0; 1498 } 1499 1500 int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin) 1501 { 1502 struct vivid_dev *dev = video_drvdata(file); 1503 1504 if (!vivid_is_sdtv_cap(dev)) 1505 return -EINVAL; 1506 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs)) 1507 return -EINVAL; 1508 dev->tv_audio_input = vin->index; 1509 return 0; 1510 } 1511 1512 int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf) 1513 { 1514 struct vivid_dev *dev = video_drvdata(file); 1515 1516 if (vf->tuner != 0) 1517 return -EINVAL; 1518 vf->frequency = dev->tv_freq; 1519 return 0; 1520 } 1521 1522 int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf) 1523 { 1524 struct vivid_dev *dev = video_drvdata(file); 1525 1526 if (vf->tuner != 0) 1527 return -EINVAL; 1528 dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ); 1529 if (vivid_is_tv_cap(dev)) 1530 vivid_update_quality(dev); 1531 return 0; 1532 } 1533 1534 int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt) 1535 { 1536 struct vivid_dev *dev = video_drvdata(file); 1537 1538 if (vt->index != 0) 1539 return -EINVAL; 1540 if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2) 1541 return -EINVAL; 1542 dev->tv_audmode = vt->audmode; 1543 return 0; 1544 } 1545 1546 int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt) 1547 { 1548 struct vivid_dev *dev = video_drvdata(file); 1549 enum tpg_quality qual; 1550 1551 if (vt->index != 0) 1552 return -EINVAL; 1553 1554 vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO | 1555 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; 1556 vt->audmode = dev->tv_audmode; 1557 vt->rangelow = MIN_TV_FREQ; 1558 vt->rangehigh = MAX_TV_FREQ; 1559 qual = vivid_get_quality(dev, &vt->afc); 1560 if (qual == TPG_QUAL_COLOR) 1561 vt->signal = 0xffff; 1562 else if (qual == TPG_QUAL_GRAY) 1563 vt->signal = 0x8000; 1564 else 1565 vt->signal = 0; 1566 if (qual == TPG_QUAL_NOISE) { 1567 vt->rxsubchans = 0; 1568 } else if (qual == TPG_QUAL_GRAY) { 1569 vt->rxsubchans = V4L2_TUNER_SUB_MONO; 1570 } else { 1571 unsigned int channel_nr = dev->tv_freq / (6 * 16); 1572 unsigned int options = 1573 (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3; 1574 1575 switch (channel_nr % options) { 1576 case 0: 1577 vt->rxsubchans = V4L2_TUNER_SUB_MONO; 1578 break; 1579 case 1: 1580 vt->rxsubchans = V4L2_TUNER_SUB_STEREO; 1581 break; 1582 case 2: 1583 if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) 1584 vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP; 1585 else 1586 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; 1587 break; 1588 case 3: 1589 vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP; 1590 break; 1591 } 1592 } 1593 strscpy(vt->name, "TV Tuner", sizeof(vt->name)); 1594 return 0; 1595 } 1596 1597 /* Must remain in sync with the vivid_ctrl_standard_strings array */ 1598 const v4l2_std_id vivid_standard[] = { 1599 V4L2_STD_NTSC_M, 1600 V4L2_STD_NTSC_M_JP, 1601 V4L2_STD_NTSC_M_KR, 1602 V4L2_STD_NTSC_443, 1603 V4L2_STD_PAL_BG | V4L2_STD_PAL_H, 1604 V4L2_STD_PAL_I, 1605 V4L2_STD_PAL_DK, 1606 V4L2_STD_PAL_M, 1607 V4L2_STD_PAL_N, 1608 V4L2_STD_PAL_Nc, 1609 V4L2_STD_PAL_60, 1610 V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H, 1611 V4L2_STD_SECAM_DK, 1612 V4L2_STD_SECAM_L, 1613 V4L2_STD_SECAM_LC, 1614 V4L2_STD_UNKNOWN 1615 }; 1616 1617 /* Must remain in sync with the vivid_standard array */ 1618 const char * const vivid_ctrl_standard_strings[] = { 1619 "NTSC-M", 1620 "NTSC-M-JP", 1621 "NTSC-M-KR", 1622 "NTSC-443", 1623 "PAL-BGH", 1624 "PAL-I", 1625 "PAL-DK", 1626 "PAL-M", 1627 "PAL-N", 1628 "PAL-Nc", 1629 "PAL-60", 1630 "SECAM-BGH", 1631 "SECAM-DK", 1632 "SECAM-L", 1633 "SECAM-Lc", 1634 NULL, 1635 }; 1636 1637 int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id) 1638 { 1639 struct vivid_dev *dev = video_drvdata(file); 1640 unsigned int last = dev->query_std_last[dev->input]; 1641 1642 if (!vivid_is_sdtv_cap(dev)) 1643 return -ENODATA; 1644 if (dev->std_signal_mode[dev->input] == NO_SIGNAL || 1645 dev->std_signal_mode[dev->input] == NO_LOCK) { 1646 *id = V4L2_STD_UNKNOWN; 1647 return 0; 1648 } 1649 if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) { 1650 *id = V4L2_STD_UNKNOWN; 1651 } else if (dev->std_signal_mode[dev->input] == CURRENT_STD) { 1652 *id = dev->std_cap[dev->input]; 1653 } else if (dev->std_signal_mode[dev->input] == SELECTED_STD) { 1654 *id = dev->query_std[dev->input]; 1655 } else { 1656 *id = vivid_standard[last]; 1657 dev->query_std_last[dev->input] = 1658 (last + 1) % ARRAY_SIZE(vivid_standard); 1659 } 1660 1661 return 0; 1662 } 1663 1664 int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id) 1665 { 1666 struct vivid_dev *dev = video_drvdata(file); 1667 1668 if (!vivid_is_sdtv_cap(dev)) 1669 return -ENODATA; 1670 if (dev->std_cap[dev->input] == id) 1671 return 0; 1672 if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q)) 1673 return -EBUSY; 1674 dev->std_cap[dev->input] = id; 1675 vivid_update_format_cap(dev, false); 1676 return 0; 1677 } 1678 1679 static void find_aspect_ratio(u32 width, u32 height, 1680 u32 *num, u32 *denom) 1681 { 1682 if (!(height % 3) && ((height * 4 / 3) == width)) { 1683 *num = 4; 1684 *denom = 3; 1685 } else if (!(height % 9) && ((height * 16 / 9) == width)) { 1686 *num = 16; 1687 *denom = 9; 1688 } else if (!(height % 10) && ((height * 16 / 10) == width)) { 1689 *num = 16; 1690 *denom = 10; 1691 } else if (!(height % 4) && ((height * 5 / 4) == width)) { 1692 *num = 5; 1693 *denom = 4; 1694 } else if (!(height % 9) && ((height * 15 / 9) == width)) { 1695 *num = 15; 1696 *denom = 9; 1697 } else { /* default to 16:9 */ 1698 *num = 16; 1699 *denom = 9; 1700 } 1701 } 1702 1703 static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings) 1704 { 1705 struct v4l2_bt_timings *bt = &timings->bt; 1706 u32 total_h_pixel; 1707 u32 total_v_lines; 1708 u32 h_freq; 1709 1710 if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap, 1711 NULL, NULL)) 1712 return false; 1713 1714 total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt); 1715 total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt); 1716 1717 h_freq = (u32)bt->pixelclock / total_h_pixel; 1718 1719 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) { 1720 if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width, 1721 bt->polarities, bt->interlaced, timings)) 1722 return true; 1723 } 1724 1725 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) { 1726 struct v4l2_fract aspect_ratio; 1727 1728 find_aspect_ratio(bt->width, bt->height, 1729 &aspect_ratio.numerator, 1730 &aspect_ratio.denominator); 1731 if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync, 1732 bt->polarities, bt->interlaced, 1733 aspect_ratio, timings)) 1734 return true; 1735 } 1736 return false; 1737 } 1738 1739 int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh, 1740 struct v4l2_dv_timings *timings) 1741 { 1742 struct vivid_dev *dev = video_drvdata(file); 1743 1744 if (!vivid_is_hdmi_cap(dev)) 1745 return -ENODATA; 1746 if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap, 1747 0, NULL, NULL) && 1748 !valid_cvt_gtf_timings(timings)) 1749 return -EINVAL; 1750 1751 if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input], 1752 0, false)) 1753 return 0; 1754 if (vb2_is_busy(&dev->vb_vid_cap_q)) 1755 return -EBUSY; 1756 1757 dev->dv_timings_cap[dev->input] = *timings; 1758 vivid_update_format_cap(dev, false); 1759 return 0; 1760 } 1761 1762 int vidioc_query_dv_timings(struct file *file, void *_fh, 1763 struct v4l2_dv_timings *timings) 1764 { 1765 struct vivid_dev *dev = video_drvdata(file); 1766 unsigned int input = dev->input; 1767 unsigned int last = dev->query_dv_timings_last[input]; 1768 1769 if (!vivid_is_hdmi_cap(dev)) 1770 return -ENODATA; 1771 if (dev->dv_timings_signal_mode[input] == NO_SIGNAL || 1772 dev->edid_blocks == 0) 1773 return -ENOLINK; 1774 if (dev->dv_timings_signal_mode[input] == NO_LOCK) 1775 return -ENOLCK; 1776 if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) { 1777 timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2; 1778 return -ERANGE; 1779 } 1780 if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) { 1781 *timings = dev->dv_timings_cap[input]; 1782 } else if (dev->dv_timings_signal_mode[input] == 1783 SELECTED_DV_TIMINGS) { 1784 *timings = 1785 v4l2_dv_timings_presets[dev->query_dv_timings[input]]; 1786 } else { 1787 *timings = 1788 v4l2_dv_timings_presets[last]; 1789 dev->query_dv_timings_last[input] = 1790 (last + 1) % dev->query_dv_timings_size; 1791 } 1792 return 0; 1793 } 1794 1795 int vidioc_s_edid(struct file *file, void *_fh, 1796 struct v4l2_edid *edid) 1797 { 1798 struct vivid_dev *dev = video_drvdata(file); 1799 u16 phys_addr; 1800 u32 display_present = 0; 1801 unsigned int i, j; 1802 int ret; 1803 1804 memset(edid->reserved, 0, sizeof(edid->reserved)); 1805 if (edid->pad >= dev->num_inputs) 1806 return -EINVAL; 1807 if (dev->input_type[edid->pad] != HDMI || edid->start_block) 1808 return -EINVAL; 1809 if (edid->blocks == 0) { 1810 dev->edid_blocks = 0; 1811 v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0); 1812 v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0); 1813 phys_addr = CEC_PHYS_ADDR_INVALID; 1814 goto set_phys_addr; 1815 } 1816 if (edid->blocks > dev->edid_max_blocks) { 1817 edid->blocks = dev->edid_max_blocks; 1818 return -E2BIG; 1819 } 1820 phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL); 1821 ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL); 1822 if (ret) 1823 return ret; 1824 1825 if (vb2_is_busy(&dev->vb_vid_cap_q)) 1826 return -EBUSY; 1827 1828 dev->edid_blocks = edid->blocks; 1829 memcpy(dev->edid, edid->edid, edid->blocks * 128); 1830 1831 for (i = 0, j = 0; i < dev->num_outputs; i++) 1832 if (dev->output_type[i] == HDMI) 1833 display_present |= 1834 dev->display_present[i] << j++; 1835 1836 v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present); 1837 v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present); 1838 1839 set_phys_addr: 1840 /* TODO: a proper hotplug detect cycle should be emulated here */ 1841 cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false); 1842 1843 for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++) 1844 cec_s_phys_addr(dev->cec_tx_adap[i], 1845 dev->display_present[i] ? 1846 v4l2_phys_addr_for_input(phys_addr, i + 1) : 1847 CEC_PHYS_ADDR_INVALID, 1848 false); 1849 return 0; 1850 } 1851 1852 int vidioc_enum_framesizes(struct file *file, void *fh, 1853 struct v4l2_frmsizeenum *fsize) 1854 { 1855 struct vivid_dev *dev = video_drvdata(file); 1856 1857 if (!vivid_is_webcam(dev) && !dev->has_scaler_cap) 1858 return -EINVAL; 1859 if (vivid_get_format(dev, fsize->pixel_format) == NULL) 1860 return -EINVAL; 1861 if (vivid_is_webcam(dev)) { 1862 if (fsize->index >= ARRAY_SIZE(webcam_sizes)) 1863 return -EINVAL; 1864 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; 1865 fsize->discrete = webcam_sizes[fsize->index]; 1866 return 0; 1867 } 1868 if (fsize->index) 1869 return -EINVAL; 1870 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; 1871 fsize->stepwise.min_width = MIN_WIDTH; 1872 fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM; 1873 fsize->stepwise.step_width = 2; 1874 fsize->stepwise.min_height = MIN_HEIGHT; 1875 fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM; 1876 fsize->stepwise.step_height = 2; 1877 return 0; 1878 } 1879 1880 /* timeperframe is arbitrary and continuous */ 1881 int vidioc_enum_frameintervals(struct file *file, void *priv, 1882 struct v4l2_frmivalenum *fival) 1883 { 1884 struct vivid_dev *dev = video_drvdata(file); 1885 const struct vivid_fmt *fmt; 1886 int i; 1887 1888 fmt = vivid_get_format(dev, fival->pixel_format); 1889 if (!fmt) 1890 return -EINVAL; 1891 1892 if (!vivid_is_webcam(dev)) { 1893 if (fival->index) 1894 return -EINVAL; 1895 if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM) 1896 return -EINVAL; 1897 if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM) 1898 return -EINVAL; 1899 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; 1900 fival->discrete = dev->timeperframe_vid_cap; 1901 return 0; 1902 } 1903 1904 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++) 1905 if (fival->width == webcam_sizes[i].width && 1906 fival->height == webcam_sizes[i].height) 1907 break; 1908 if (i == ARRAY_SIZE(webcam_sizes)) 1909 return -EINVAL; 1910 if (fival->index >= 2 * (VIVID_WEBCAM_SIZES - i)) 1911 return -EINVAL; 1912 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; 1913 fival->discrete = webcam_intervals[fival->index]; 1914 return 0; 1915 } 1916 1917 int vivid_vid_cap_g_parm(struct file *file, void *priv, 1918 struct v4l2_streamparm *parm) 1919 { 1920 struct vivid_dev *dev = video_drvdata(file); 1921 1922 if (parm->type != (dev->multiplanar ? 1923 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : 1924 V4L2_BUF_TYPE_VIDEO_CAPTURE)) 1925 return -EINVAL; 1926 1927 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; 1928 parm->parm.capture.timeperframe = dev->timeperframe_vid_cap; 1929 parm->parm.capture.readbuffers = 1; 1930 return 0; 1931 } 1932 1933 int vivid_vid_cap_s_parm(struct file *file, void *priv, 1934 struct v4l2_streamparm *parm) 1935 { 1936 struct vivid_dev *dev = video_drvdata(file); 1937 unsigned ival_sz = 2 * (VIVID_WEBCAM_SIZES - dev->webcam_size_idx); 1938 struct v4l2_fract tpf; 1939 unsigned i; 1940 1941 if (parm->type != (dev->multiplanar ? 1942 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : 1943 V4L2_BUF_TYPE_VIDEO_CAPTURE)) 1944 return -EINVAL; 1945 if (!vivid_is_webcam(dev)) 1946 return vivid_vid_cap_g_parm(file, priv, parm); 1947 1948 tpf = parm->parm.capture.timeperframe; 1949 1950 if (tpf.denominator == 0) 1951 tpf = webcam_intervals[ival_sz - 1]; 1952 for (i = 0; i < ival_sz; i++) 1953 if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i])) 1954 break; 1955 if (i == ival_sz) 1956 i = ival_sz - 1; 1957 dev->webcam_ival_idx = i; 1958 tpf = webcam_intervals[dev->webcam_ival_idx]; 1959 1960 /* resync the thread's timings */ 1961 dev->cap_seq_resync = true; 1962 dev->timeperframe_vid_cap = tpf; 1963 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; 1964 parm->parm.capture.timeperframe = tpf; 1965 parm->parm.capture.readbuffers = 1; 1966 return 0; 1967 } 1968