// SPDX-License-Identifier: GPL-2.0-only
/*
 * ispvideo.c
 *
 * TI OMAP3 ISP - Generic video node
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/videobuf2-dma-contig.h>

#include "ispvideo.h"
#include "isp.h"


/* -----------------------------------------------------------------------------
 * Helper functions
 */

/*
 * NOTE: When adding new media bus codes, always remember to add
 * corresponding in-memory formats to the table below!!!
 */
static struct isp_format_info formats[] = {
	{ MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_GREY, 8, 1, },
	{ MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y10, 10, 2, },
	{ MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y12, 12, 2, },
	{ MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR8, 8, 1, },
	{ MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG8, 8, 1, },
	{ MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG8, 8, 1, },
	{ MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB8, 8, 1, },
	{ MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SBGGR10_1X10, 0,
	  V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGBRG10_1X10, 0,
	  V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGRBG10_1X10, 0,
	  V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SRGGB10_1X10, 0,
	  V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR10, 10, 2, },
	{ MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG10, 10, 2, },
	{ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG10, 10, 2, },
	{ MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB10, 10, 2, },
	{ MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR12, 12, 2, },
	{ MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG12, 12, 2, },
	{ MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG12, 12, 2, },
	{ MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB12, 12, 2, },
	{ MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
	  MEDIA_BUS_FMT_UYVY8_1X16, 0,
	  V4L2_PIX_FMT_UYVY, 16, 2, },
	{ MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
	  MEDIA_BUS_FMT_YUYV8_1X16, 0,
	  V4L2_PIX_FMT_YUYV, 16, 2, },
	{ MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_2X8,
	  MEDIA_BUS_FMT_UYVY8_2X8, 0,
	  V4L2_PIX_FMT_UYVY, 8, 2, },
	{ MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YUYV8_2X8,
	  MEDIA_BUS_FMT_YUYV8_2X8, 0,
	  V4L2_PIX_FMT_YUYV, 8, 2, },
	/* Empty entry to catch the unsupported pixel code (0) used by the CCDC
	 * module and avoid NULL pointer dereferences.
	 */
	{ 0, }
};

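/*
 * omap3isp_video_format_info - Look up in-memory format information
 * @code: media bus format code
 *
 * Return a pointer to the matching entry in the formats table, or NULL if the
 * media bus code isn't supported.
 */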
const struct isp_format_info *omap3isp_video_format_info(u32 code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == code)
			return &formats[i];
	}

	return NULL;
}

/*
 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
 * @video: ISP video instance
 * @mbus: v4l2_mbus_framefmt format (input)
 * @pix: v4l2_pix_format format (output)
 *
 * Fill the output pix structure with information from the input mbus format.
 * The bytesperline and sizeimage fields are computed from the requested bytes
 * per line value in the pix format and information from the video instance.
 *
 * Return the number of padding bytes at end of line.
 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	unsigned int bpl = pix->bytesperline;
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return 0;

	min_bpl = pix->width * formats[i].bpp;

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable
	 * line sizes. Override the requested value with the minimum in that
	 * case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	return bpl - min_bpl;
}

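/*
 * isp_video_pix_to_mbus - Convert v4l2_pix_format to v4l2_mbus_framefmt
 * @pix: v4l2_pix_format format (input)
 * @mbus: v4l2_mbus_framefmt format (output)
 *
 * Fill the output mbus structure with the width, height, colorspace and field
 * from the input pix format. The media bus code is looked up from the pixel
 * format; if no match is found the empty catch-all entry at the end of the
 * formats table is used.
 */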
static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
				  struct v4l2_mbus_framefmt *mbus)
{
	unsigned int i;

	memset(mbus, 0, sizeof(*mbus));
	mbus->width = pix->width;
	mbus->height = pix->height;

	/* Skip the last format in the loop so that it will be selected if no
	 * match is found.
	 */
	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
		if (formats[i].pixelformat == pix->pixelformat)
			break;
	}

	mbus->code = formats[i].code;
	mbus->colorspace = pix->colorspace;
	mbus->field = pix->field;
}

static struct v4l2_subdev *
isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
	struct media_pad *remote;

	remote = media_pad_remote_pad_first(&video->pad);

	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/* Return a pointer to the ISP video instance at the far end of the pipeline. */
static int isp_video_get_graph_data(struct isp_video *video,
				    struct isp_pipeline *pipe)
{
	struct media_pipeline_entity_iter iter;
	struct media_entity *entity;
	struct isp_video *far_end = NULL;
	int ret;

	ret = media_pipeline_entity_iter_init(&pipe->pipe, &iter);
	if (ret)
		return ret;

	media_pipeline_for_each_entity(&pipe->pipe, &iter, entity) {
		struct isp_video *__video;

		media_entity_enum_set(&pipe->ent_enum, entity);

		if (far_end != NULL)
			continue;

		if (entity == &video->video.entity)
			continue;

		if (!is_media_entity_v4l2_video_device(entity))
			continue;

		__video = to_isp_video(media_entity_to_video_device(entity));
		if (__video->type != video->type)
			far_end = __video;
	}

	media_pipeline_entity_iter_cleanup(&iter);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		pipe->input = far_end;
		pipe->output = video;
	} else {
		if (far_end == NULL)
			return -EPIPE;

		pipe->input = video;
		pipe->output = far_end;
	}

	return 0;
}

static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	fmt.pad = pad;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	mutex_unlock(&video->mutex);

	if (ret)
		return ret;

	format->type = video->type;
	return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
}

static int
isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
{
	struct v4l2_format format;
	int ret;

	memcpy(&format, &vfh->format, sizeof(format));
	ret = __isp_video_get_format(video, &format);
	if (ret < 0)
		return ret;

	if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
	    vfh->format.fmt.pix.height != format.fmt.pix.height ||
	    vfh->format.fmt.pix.width != format.fmt.pix.width ||
	    vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
	    vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage ||
	    vfh->format.fmt.pix.field != format.fmt.pix.field)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Video queue operations
 */

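/*
 * isp_video_queue_setup - videobuf2 queue setup operation
 *
 * Report a single plane sized to the currently configured image size, and
 * limit the buffer count so that the total allocation doesn't exceed the
 * capture memory available to the video node.
 */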
static int isp_video_queue_setup(struct vb2_queue *queue,
				 unsigned int *count, unsigned int *num_planes,
				 unsigned int sizes[], struct device *alloc_devs[])
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
	struct isp_video *video = vfh->video;

	*num_planes = 1;

	sizes[0] = vfh->format.fmt.pix.sizeimage;
	if (sizes[0] == 0)
		return -EINVAL;

	*count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));

	return 0;
}

static int isp_video_buffer_prepare(struct vb2_buffer *buf)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(vbuf);
	struct isp_video *video = vfh->video;
	dma_addr_t addr;

	/* Refuse to prepare the buffer if the video node has registered an
	 * error. We don't need to take any lock here as the operation is
	 * inherently racy. The authoritative check will be performed in the
	 * queue handler, which can't return an error; this check is just a
	 * best effort to notify userspace as early as possible.
	 */
	if (unlikely(video->error))
		return -EIO;

	addr = vb2_dma_contig_plane_dma_addr(buf, 0);
	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->isp->dev,
			"Buffer address must be aligned to 32 bytes boundary.\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(&buffer->vb.vb2_buf, 0,
			      vfh->format.fmt.pix.sizeimage);
	buffer->dma = addr;

	return 0;
}

/*
 * isp_video_buffer_queue - Add buffer to streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output, if the pipeline isn't already busy.
 * If the pipeline is busy, it will be restarted in the output module interrupt
 * handler.
 */
static void isp_video_buffer_queue(struct vb2_buffer *buf)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(vbuf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	spin_lock_irqsave(&video->irqlock, flags);

	if (unlikely(video->error)) {
		vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		spin_unlock_irqrestore(&video->irqlock, flags);
		return;
	}

	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->irqlist, &video->dmaqueue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	if (empty) {
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap3isp_pipeline_set_stream(pipe,
						     ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}

/*
 * omap3isp_video_return_buffers - Return all queued buffers to videobuf2
 * @video: ISP video object
 * @state: new state for the returned buffers
 *
 * Return all buffers queued on the video node to videobuf2 in the given state.
 * The buffer state should be VB2_BUF_STATE_QUEUED if called due to an error
 * when starting the stream, or VB2_BUF_STATE_ERROR otherwise.
 *
 * The function must be called with the video irqlock held.
 */
static void omap3isp_video_return_buffers(struct isp_video *video,
					  enum vb2_buffer_state state)
{
	while (!list_empty(&video->dmaqueue)) {
		struct isp_buffer *buf;

		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, irqlist);
		list_del(&buf->irqlist);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
}

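/*
 * isp_video_start_streaming - videobuf2 start_streaming operation
 *
 * Start the pipeline in continuous mode for sensor-to-memory pipelines. If the
 * start fails, the queued buffers are returned to videobuf2 in the QUEUED
 * state. Memory-to-memory pipelines are started from the buffer queue handler
 * instead.
 */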
static int isp_video_start_streaming(struct vb2_queue *queue,
				     unsigned int count)
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	unsigned long flags;
	int ret;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * to the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input)
		return 0;

	ret = omap3isp_pipeline_set_stream(pipe,
					   ISP_PIPELINE_STREAM_CONTINUOUS);
	if (ret < 0) {
		spin_lock_irqsave(&video->irqlock, flags);
		omap3isp_video_return_buffers(video, VB2_BUF_STATE_QUEUED);
		spin_unlock_irqrestore(&video->irqlock, flags);
		return ret;
	}

	spin_lock_irqsave(&video->irqlock, flags);
	if (list_empty(&video->dmaqueue))
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	spin_unlock_irqrestore(&video->irqlock, flags);

	return 0;
}

static const struct vb2_ops isp_video_queue_ops = {
	.queue_setup = isp_video_queue_setup,
	.buf_prepare = isp_video_buffer_prepare,
	.buf_queue = isp_video_buffer_queue,
	.start_streaming = isp_video_start_streaming,
};

/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp and
 * field count before handing it back to videobuf2.
 *
 * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
 * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum vb2_buffer_state vb_state;
	struct isp_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&video->irqlock, flags);

	buf->vb.vb2_buf.timestamp = ktime_get_ns();

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vb.sequence =
			atomic_inc_return(&pipe->frame_number);
	else
		buf->vb.sequence = atomic_read(&pipe->frame_number);

	if (pipe->field != V4L2_FIELD_NONE)
		buf->vb.sequence /= 2;

	buf->vb.field = pipe->field;

	/* Report pipeline errors to userspace on the capture device side. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
		vb_state = VB2_BUF_STATE_ERROR;
		pipe->error = false;
	} else {
		vb_state = VB2_BUF_STATE_DONE;
	}

	vb2_buffer_done(&buf->vb.vb2_buf, vb_state);

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->dmaqueue)) {
		enum isp_pipeline_state state;

		spin_unlock_irqrestore(&video->irqlock, flags);

		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock(&pipe->lock);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock(&pipe->lock);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);

	spin_unlock_irqrestore(&video->irqlock, flags);

	return buf;
}

/*
 * omap3isp_video_cancel_stream - Cancel stream on a video node
 * @video: ISP video object
 *
 * Cancelling a stream returns all buffers queued on the video node to videobuf2
 * in the erroneous state and makes sure no new buffer can be queued.
 */
void omap3isp_video_cancel_stream(struct isp_video *video)
{
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);
	omap3isp_video_return_buffers(video, VB2_BUF_STATE_ERROR);
	video->error = true;
	spin_unlock_irqrestore(&video->irqlock, flags);
}

/*
 * omap3isp_video_resume - Perform resume operation on the buffers
 * @video: ISP video object
 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
 *
 * This function is intended to be used in suspend/resume scenarios. It
 * requests the video queue layer to discard buffers marked as DONE if the
 * pipeline is in continuous mode, and requests the ISP modules to queue the
 * ACTIVE buffer again if there is one.
 */
void omap3isp_video_resume(struct isp_video *video, int continuous)
{
	struct isp_buffer *buf = NULL;

	if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		mutex_lock(&video->queue_lock);
		vb2_discard_done(video->queue);
		mutex_unlock(&video->queue_lock);
	}

	if (!list_empty(&video->dmaqueue)) {
		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, irqlist);
		video->ops->queue(video, buf);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
	} else {
		if (continuous)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	}
}

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

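/*
 * isp_video_querycap - Query device capabilities
 *
 * Report the driver name, the video node name and the combined capture,
 * output and streaming capabilities of the ISP video nodes.
 */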
static int
isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct isp_video *video = video_drvdata(file);

	strscpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
	strscpy(cap->card, video->video.name, sizeof(cap->card));
	strscpy(cap->bus_info, "media", sizeof(cap->bus_info));

	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
		| V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;

	return 0;
}

static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);
	*format = vfh->format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt fmt;

	if (format->type != video->type)
		return -EINVAL;

	/* Replace unsupported field orders with sane defaults. */
	switch (format->fmt.pix.field) {
	case V4L2_FIELD_NONE:
		/* Progressive is supported everywhere. */
		break;
	case V4L2_FIELD_ALTERNATE:
		/* ALTERNATE is not supported on output nodes. */
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	case V4L2_FIELD_INTERLACED:
		/* The ISP has no concept of video standard, select the
		 * top-bottom order when the unqualified interlaced order is
		 * requested.
		 */
		format->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
		fallthrough;
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
		/* Interlaced orders are only supported at the CCDC output. */
		if (video != &video->isp->isp_ccdc.video_out)
			format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	case V4L2_FIELD_TOP:
	case V4L2_FIELD_BOTTOM:
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_SEQ_BT:
	default:
		/* All other field orders are currently unsupported, default
		 * to progressive.
		 */
		format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	}

	/* Fill the bytesperline and sizeimage fields by converting to media
	 * bus format and back to pixel format.
	 */
	isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
	isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);

	mutex_lock(&video->mutex);
	vfh->format = *format;
	mutex_unlock(&video->mutex);

	return 0;
}

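/*
 * isp_video_try_format - Try a format on a video node
 *
 * Query the active format on the remote subdev pad and convert it back to a
 * pixel format, filling the bytesperline and sizeimage fields.
 */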
static int
isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	if (format->type != video->type)
		return -EINVAL;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);

	fmt.pad = pad;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
	return 0;
}

static int
isp_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *subdev;
	struct v4l2_subdev_selection sdsel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = sel->target,
	};
	u32 pad;
	int ret;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	/* Try the get selection operation first and fallback to get format if
	 * not implemented.
	 */
	sdsel.pad = pad;
	ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
	if (!ret)
		sel->r = sdsel.r;
	if (ret != -ENOIOCTLCMD)
		return ret;

	format.pad = pad;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	sel->r.left = 0;
	sel->r.top = 0;
	sel->r.width = format.format.width;
	sel->r.height = format.format.height;

	return 0;
}

static int
isp_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	struct v4l2_subdev_selection sdsel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = sel->target,
		.flags = sel->flags,
		.r = sel->r,
	};
	u32 pad;
	int ret;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	sdsel.pad = pad;
	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
	mutex_unlock(&video->mutex);
	if (!ret)
		sel->r = sdsel.r;

	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
}

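/*
 * isp_video_get_param - Get streaming parameters
 *
 * The time-per-frame value is only tracked per file handle on the output
 * video node; requests on other node types are rejected.
 */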
static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	memset(a, 0, sizeof(*a));
	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.output.timeperframe = vfh->timeperframe;

	return 0;
}

static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	if (a->parm.output.timeperframe.denominator == 0)
		a->parm.output.timeperframe.denominator = 1;

	vfh->timeperframe = a->parm.output.timeperframe;

	return 0;
}

static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_reqbufs(&vfh->queue, rb);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_querybuf(&vfh->queue, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
	mutex_unlock(&video->queue_lock);

	return ret;
}

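/*
 * isp_video_check_external_subdevs - Validate the external source of a pipeline
 * @video: ISP video node
 * @pipe: ISP pipeline
 *
 * Find the external subdev feeding the pipeline through the CSI2, CCP2 or CCDC
 * input, record it in the pipeline along with its format width and pixel rate,
 * and check that the rate doesn't exceed the maximum supported by the CCDC.
 * Memory-to-memory pipelines have no external subdev and are accepted as-is.
 */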
static int isp_video_check_external_subdevs(struct isp_video *video,
					    struct isp_pipeline *pipe)
{
	struct isp_device *isp = video->isp;
	struct media_entity *ents[] = {
		&isp->isp_csi2a.subdev.entity,
		&isp->isp_csi2c.subdev.entity,
		&isp->isp_ccp2.subdev.entity,
		&isp->isp_ccdc.subdev.entity
	};
	struct media_pad *source_pad;
	struct media_entity *source = NULL;
	struct media_entity *sink;
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_ext_controls ctrls;
	struct v4l2_ext_control ctrl;
	unsigned int i;
	int ret;

	/* Memory-to-memory pipelines have no external subdev. */
	if (pipe->input != NULL)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ents); i++) {
		/* Is the entity part of the pipeline? */
		if (!media_entity_enum_test(&pipe->ent_enum, ents[i]))
			continue;

		/* ISP entities always have sink pad == 0. Find the source. */
		source_pad = media_pad_remote_pad_first(&ents[i]->pads[0]);
		if (source_pad == NULL)
			continue;

		source = source_pad->entity;
		sink = ents[i];
		break;
	}

	if (!source) {
		dev_warn(isp->dev, "can't find source, failing now\n");
		return -EINVAL;
	}

	if (!is_media_entity_v4l2_subdev(source))
		return 0;

	pipe->external = media_entity_to_v4l2_subdev(source);

	fmt.pad = source_pad->index;
	ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
			       pad, get_fmt, NULL, &fmt);
	if (unlikely(ret < 0)) {
		dev_warn(isp->dev, "get_fmt returned null!\n");
		return ret;
	}

	pipe->external_width =
		omap3isp_video_format_info(fmt.format.code)->width;

	memset(&ctrls, 0, sizeof(ctrls));
	memset(&ctrl, 0, sizeof(ctrl));

	ctrl.id = V4L2_CID_PIXEL_RATE;

	ctrls.count = 1;
	ctrls.controls = &ctrl;
	ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &video->video,
			       NULL, &ctrls);
	if (ret < 0) {
		dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
			 pipe->external->name);
		return ret;
	}

	pipe->external_rate = ctrl.value64;

	if (media_entity_enum_test(&pipe->ent_enum,
				   &isp->isp_ccdc.subdev.entity)) {
		unsigned int rate = UINT_MAX;
		/*
		 * Check that the maximum allowed CCDC pixel rate isn't
		 * exceeded by the pixel rate.
		 */
		omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
		if (pipe->external_rate > rate)
			return -ENOSPC;
	}

	return 0;
}

/*
 * Stream management
 *
 * Every ISP pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISP video object at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * to the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISP modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the vb2 queue callback with
 * the buffers queue spinlock held. The modules' subdev set stream operation
 * must not sleep.
 */

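/*
 * isp_video_streamon - Start streaming on a video node
 *
 * Build and validate the pipeline (format, graph data and external subdevs),
 * update the pipeline state and start the videobuf2 queue. The stream itself
 * is started as described in the stream management comments above.
 */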
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = to_isp_pipeline(&video->video.entity) ? : &video->pipe;

	ret = media_entity_enum_init(&pipe->ent_enum, &video->isp->media_dev);
	if (ret)
		goto err_enum_init;

	/* TODO: Implement PM QoS */
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
	pipe->max_rate = pipe->l3_ick;

	ret = video_device_pipeline_start(&video->video, &pipe->pipe);
	if (ret < 0)
		goto err_pipeline_start;

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto err_check_format;

	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	ret = isp_video_get_graph_data(video, pipe);
	if (ret < 0)
		goto err_check_format;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;

	ret = isp_video_check_external_subdevs(video, pipe);
	if (ret < 0)
		goto err_check_format;

	pipe->error = false;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the requested limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	atomic_set(&pipe->frame_number, -1);
	pipe->field = vfh->format.fmt.pix.field;

	mutex_lock(&video->queue_lock);
	ret = vb2_streamon(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	if (ret < 0)
		goto err_check_format;

	mutex_unlock(&video->stream_lock);

	return 0;

err_check_format:
	video_device_pipeline_stop(&video->video);
err_pipeline_start:
	/* TODO: Implement PM QoS */
	/* The DMA queue must be emptied here, otherwise CCDC interrupts that
	 * will get triggered the next time the CCDC is powered up will try to
	 * access buffers that might have been freed but are still present in
	 * the DMA queue. This can easily get triggered if the above
	 * omap3isp_pipeline_set_stream() call fails on a system with a
	 * free-running sensor.
	 */
	INIT_LIST_HEAD(&video->dmaqueue);
	video->queue = NULL;

	media_entity_enum_cleanup(&pipe->ent_enum);

err_enum_init:
	mutex_unlock(&video->stream_lock);

	return ret;
}

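/*
 * isp_video_streamoff - Stop streaming on a video node
 *
 * Update the pipeline state, stop the stream, return all queued buffers to
 * videobuf2 in the error state and release the media pipeline. Calling this
 * on a node that isn't streaming is a no-op.
 */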
static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned int streaming;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Make sure the video node is actually streaming. */
	mutex_lock(&video->queue_lock);
	streaming = vb2_is_streaming(&vfh->queue);
	mutex_unlock(&video->queue_lock);

	if (!streaming)
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT
		      | ISP_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT
		      | ISP_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
	omap3isp_video_cancel_stream(video);

	mutex_lock(&video->queue_lock);
	vb2_streamoff(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	video->queue = NULL;
	video->error = false;

	/* TODO: Implement PM QoS */
	video_device_pipeline_stop(&video->video);

	media_entity_enum_cleanup(&pipe->ent_enum);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}

static int
isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strscpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
isp_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
isp_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap = isp_video_querycap,
	.vidioc_g_fmt_vid_cap = isp_video_get_format,
	.vidioc_s_fmt_vid_cap = isp_video_set_format,
	.vidioc_try_fmt_vid_cap = isp_video_try_format,
	.vidioc_g_fmt_vid_out = isp_video_get_format,
	.vidioc_s_fmt_vid_out = isp_video_set_format,
	.vidioc_try_fmt_vid_out = isp_video_try_format,
	.vidioc_g_selection = isp_video_get_selection,
	.vidioc_s_selection = isp_video_set_selection,
	.vidioc_g_parm = isp_video_get_param,
	.vidioc_s_parm = isp_video_set_param,
	.vidioc_reqbufs = isp_video_reqbufs,
	.vidioc_querybuf = isp_video_querybuf,
	.vidioc_qbuf = isp_video_qbuf,
	.vidioc_dqbuf = isp_video_dqbuf,
	.vidioc_streamon = isp_video_streamon,
	.vidioc_streamoff = isp_video_streamoff,
	.vidioc_enum_input = isp_video_enum_input,
	.vidioc_g_input = isp_video_g_input,
	.vidioc_s_input = isp_video_s_input,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

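/*
 * isp_video_open - Open a video node
 *
 * Allocate and initialise a file handle, power up the ISP and the pipeline
 * entities, and initialise the videobuf2 queue and the default format.
 */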
static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct isp_video_fh *handle;
	struct vb2_queue *queue;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline. */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	ret = v4l2_pipeline_pm_get(&video->video.entity);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	queue = &handle->queue;
	queue->type = video->type;
	queue->io_modes = VB2_MMAP | VB2_USERPTR;
	queue->drv_priv = handle;
	queue->ops = &isp_video_queue_ops;
	queue->mem_ops = &vb2_dma_contig_memops;
	queue->buf_struct_size = sizeof(struct isp_buffer);
	queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	queue->dev = video->isp->dev;

	ret = vb2_queue_init(&handle->queue);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh);
		v4l2_fh_exit(&handle->vfh);
		kfree(handle);
	}

	return ret;
}

static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct isp_video_fh *handle = to_isp_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources. */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&video->queue_lock);
	vb2_queue_release(&handle->queue);
	mutex_unlock(&video->queue_lock);

	v4l2_pipeline_pm_put(&video->video.entity);

	/* Release the file handle. */
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	kfree(handle);
	file->private_data = NULL;

	omap3isp_put(video->isp);

	return 0;
}

static __poll_t isp_video_poll(struct file *file, poll_table *wait)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
	struct isp_video *video = video_drvdata(file);
	__poll_t ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_poll(&vfh->queue, file, wait);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);

	return vb2_mmap(&vfh->queue, vma);
}

static const struct v4l2_file_operations isp_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = isp_video_open,
	.release = isp_video_release,
	.poll = isp_video_poll,
	.mmap = isp_video_mmap,
};

/* -----------------------------------------------------------------------------
 * ISP video core
 */

static const struct isp_video_operations isp_video_dummy_ops = {
};

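/*
 * omap3isp_video_init - Initialise an ISP video node
 * @video: ISP video object
 * @name: video node name suffix
 *
 * Initialise the media pad, locks and video_device fields based on the node
 * type. The caller must set the buffer type beforehand; if no operations are
 * provided the dummy operations are used.
 */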
int omap3isp_video_init(struct isp_video *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK
				   | MEDIA_PAD_FL_MUST_CONNECT;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE
				   | MEDIA_PAD_FL_MUST_CONNECT;
		video->video.vfl_dir = VFL_DIR_TX;
		break;

	default:
		return -EINVAL;
	}

	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
	if (ret < 0)
		return ret;

	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);
	mutex_init(&video->queue_lock);
	spin_lock_init(&video->irqlock);

	/* Initialize the video device. */
	if (video->ops == NULL)
		video->ops = &isp_video_dummy_ops;

	video->video.fops = &isp_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP3 ISP %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_VIDEO;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &isp_video_ioctl_ops;
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE
					 | V4L2_CAP_STREAMING;
	else
		video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT
					 | V4L2_CAP_STREAMING;

	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}

void omap3isp_video_cleanup(struct isp_video *video)
{
	media_entity_cleanup(&video->video.entity);
	mutex_destroy(&video->queue_lock);
	mutex_destroy(&video->stream_lock);
	mutex_destroy(&video->mutex);
}

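/*
 * omap3isp_video_register - Register an ISP video node with the V4L2 core
 * @video: ISP video object
 * @vdev: V4L2 device
 *
 * Register the video device; a registration failure is logged and the error
 * code returned to the caller.
 */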
int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;

	ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1);
	if (ret < 0)
		dev_err(video->isp->dev,
			"%s: could not register video device (%d)\n",
			__func__, ret);

	return ret;
}

void omap3isp_video_unregister(struct isp_video *video)
{
	video_unregister_device(&video->video);
}