// SPDX-License-Identifier: GPL-2.0-only
/*
 * ispvideo.c
 *
 * TI OMAP3 ISP - Generic video node
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/videobuf2-dma-contig.h>

#include "ispvideo.h"
#include "isp.h"


/* -----------------------------------------------------------------------------
 * Helper functions
 */

/*
 * NOTE: When adding new media bus codes, always remember to add
 * corresponding in-memory formats to the table below!!!
 */
static struct isp_format_info formats[] = {
	{ MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_GREY, 8, 1, },
	{ MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y10, 10, 2, },
	{ MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y12, 12, 2, },
	{ MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR8, 8, 1, },
	{ MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG8, 8, 1, },
	{ MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG8, 8, 1, },
	{ MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB8, 8, 1, },
	{ MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SBGGR10_1X10, 0,
	  V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGBRG10_1X10, 0,
	  V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGRBG10_1X10, 0,
	  V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SRGGB10_1X10, 0,
	  V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR10, 10, 2, },
	{ MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG10, 10, 2, },
	{ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG10, 10, 2, },
	{ MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB10, 10, 2, },
	{ MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR12, 12, 2, },
	{ MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG12, 12, 2, },
	{ MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG12, 12, 2, },
	{ MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB12, 12, 2, },
	{ MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
	  MEDIA_BUS_FMT_UYVY8_1X16, 0,
	  V4L2_PIX_FMT_UYVY, 16, 2, },
	{ MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
	  MEDIA_BUS_FMT_YUYV8_1X16, 0,
	  V4L2_PIX_FMT_YUYV, 16, 2, },
	{ MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_2X8,
	  MEDIA_BUS_FMT_UYVY8_2X8, 0,
	  V4L2_PIX_FMT_UYVY, 8, 2, },
	{ MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YUYV8_2X8,
	  MEDIA_BUS_FMT_YUYV8_2X8, 0,
	  V4L2_PIX_FMT_YUYV, 8, 2, },
	/* Empty entry to catch the unsupported pixel code (0) used by the CCDC
	 * module and avoid NULL pointer dereferences.
	 */
	{ 0, }
};

const struct isp_format_info *omap3isp_video_format_info(u32 code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == code)
			return &formats[i];
	}

	return NULL;
}

/*
 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
 * @video: ISP video instance
 * @mbus: v4l2_mbus_framefmt format (input)
 * @pix: v4l2_pix_format format (output)
 *
 * Fill the output pix structure with information from the input mbus format.
 * The bytesperline and sizeimage fields are computed from the requested bytes
 * per line value in the pix format and information from the video instance.
 *
 * Return the number of padding bytes at end of line.
 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	unsigned int bpl = pix->bytesperline;
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return 0;

	min_bpl = pix->width * formats[i].bpp;

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable
	 * line sizes. Override the requested value with the minimum in that
	 * case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	return bpl - min_bpl;
}
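
/*
 * Illustrative worked example (not part of the driver): assuming a video node
 * with hypothetical limits bpl_alignment = 32, bpl_max = 0x10000 and
 * bpl_zero_padding = 1, converting a 320x240 SGRBG10 media bus format
 * (2 bytes per pixel in memory) with a requested bytesperline of 650 gives:
 *
 *	min_bpl = 320 * 2 = 640
 *	bpl     = clamp(650, 640, 0x10000) = 650, then ALIGN(650, 32) = 672
 *
 * so bytesperline = 672, sizeimage = 672 * 240 = 161280, and the function
 * returns 672 - 640 = 32 bytes of end-of-line padding. The bpl_* values above
 * are made up for the example; each ISP module sets its own limits.
 */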

static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
				  struct v4l2_mbus_framefmt *mbus)
{
	unsigned int i;

	memset(mbus, 0, sizeof(*mbus));
	mbus->width = pix->width;
	mbus->height = pix->height;

	/* Skip the last format in the loop so that it will be selected if no
	 * match is found.
	 */
	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
		if (formats[i].pixelformat == pix->pixelformat)
			break;
	}

	mbus->code = formats[i].code;
	mbus->colorspace = pix->colorspace;
	mbus->field = pix->field;
}

static struct v4l2_subdev *
isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
	struct media_pad *remote;

	remote = media_pad_remote_pad_first(&video->pad);

	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/* Return a pointer to the ISP video instance at the far end of the pipeline. */
static int isp_video_get_graph_data(struct isp_video *video,
				    struct isp_pipeline *pipe)
{
	struct media_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	struct isp_video *far_end = NULL;
	int ret;

	mutex_lock(&mdev->graph_mutex);
	ret = media_graph_walk_init(&graph, mdev);
	if (ret) {
		mutex_unlock(&mdev->graph_mutex);
		return ret;
	}

	media_graph_walk_start(&graph, entity);

	while ((entity = media_graph_walk_next(&graph))) {
		struct isp_video *__video;

		media_entity_enum_set(&pipe->ent_enum, entity);

		if (far_end != NULL)
			continue;

		if (entity == &video->video.entity)
			continue;

		if (!is_media_entity_v4l2_video_device(entity))
			continue;

		__video = to_isp_video(media_entity_to_video_device(entity));
		if (__video->type != video->type)
			far_end = __video;
	}

	mutex_unlock(&mdev->graph_mutex);

	media_graph_walk_cleanup(&graph);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		pipe->input = far_end;
		pipe->output = video;
	} else {
		if (far_end == NULL)
			return -EPIPE;

		pipe->input = video;
		pipe->output = far_end;
	}

	return 0;
}

static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	mutex_unlock(&video->mutex);

	if (ret)
		return ret;

	format->type = video->type;
	return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
}

static int
isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
{
	struct v4l2_format format;
	int ret;

	memcpy(&format, &vfh->format, sizeof(format));
	ret = __isp_video_get_format(video, &format);
	if (ret < 0)
		return ret;

	if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
	    vfh->format.fmt.pix.height != format.fmt.pix.height ||
	    vfh->format.fmt.pix.width != format.fmt.pix.width ||
	    vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
	    vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage ||
	    vfh->format.fmt.pix.field != format.fmt.pix.field)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Video queue operations
 */

static int isp_video_queue_setup(struct vb2_queue *queue,
				 unsigned int *count, unsigned int *num_planes,
				 unsigned int sizes[], struct device *alloc_devs[])
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
	struct isp_video *video = vfh->video;

	*num_planes = 1;

	sizes[0] = vfh->format.fmt.pix.sizeimage;
	if (sizes[0] == 0)
		return -EINVAL;

	*count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));

	return 0;
}

static int isp_video_buffer_prepare(struct vb2_buffer *buf)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(vbuf);
	struct isp_video *video = vfh->video;
	dma_addr_t addr;

	/* Refuse to prepare the buffer if the video node has registered an
	 * error. We don't need to take any lock here as the operation is
	 * inherently racy. The authoritative check will be performed in the
	 * queue handler, which can't return an error; this check is just a
	 * best effort to notify userspace as early as possible.
	 */
	if (unlikely(video->error))
		return -EIO;

	addr = vb2_dma_contig_plane_dma_addr(buf, 0);
	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->isp->dev,
			"Buffer address must be aligned to 32 bytes boundary.\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(&buffer->vb.vb2_buf, 0,
			      vfh->format.fmt.pix.sizeimage);
	buffer->dma = addr;

	return 0;
}

/*
 * isp_video_buffer_queue - Add buffer to streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output, if the pipeline isn't already busy.
 * If the pipeline is busy, it will be restarted in the output module interrupt
 * handler.
 */
static void isp_video_buffer_queue(struct vb2_buffer *buf)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(vbuf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	spin_lock_irqsave(&video->irqlock, flags);

	if (unlikely(video->error)) {
		vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		spin_unlock_irqrestore(&video->irqlock, flags);
		return;
	}

	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->irqlist, &video->dmaqueue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	if (empty) {
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap3isp_pipeline_set_stream(pipe,
						ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}
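
/*
 * Illustrative note (not part of the driver): isp_pipeline_ready() is defined
 * elsewhere in the driver. Conceptually, in memory-to-memory mode the call
 * above only succeeds once buffers have been queued on both video nodes and
 * both nodes have been started with STREAMON, i.e. once the relevant
 * ISP_PIPELINE_QUEUE_* and ISP_PIPELINE_STREAM_* bits are all set in
 * pipe->state. A possible ordering, assuming the capture side is prepared
 * first:
 *
 *	STREAMON(capture), QBUF(capture) -> output half of the state is ready
 *	STREAMON(output),  QBUF(output)  -> isp_pipeline_ready() returns true
 *	                                    and the pipeline is started in
 *	                                    single-shot mode
 */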

/*
 * omap3isp_video_return_buffers - Return all queued buffers to videobuf2
 * @video: ISP video object
 * @state: new state for the returned buffers
 *
 * Return all buffers queued on the video node to videobuf2 in the given state.
 * The buffer state should be VB2_BUF_STATE_QUEUED if called due to an error
 * when starting the stream, or VB2_BUF_STATE_ERROR otherwise.
 *
 * The function must be called with the video irqlock held.
 */
static void omap3isp_video_return_buffers(struct isp_video *video,
					  enum vb2_buffer_state state)
{
	while (!list_empty(&video->dmaqueue)) {
		struct isp_buffer *buf;

		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, irqlist);
		list_del(&buf->irqlist);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
}

static int isp_video_start_streaming(struct vb2_queue *queue,
				     unsigned int count)
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	unsigned long flags;
	int ret;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * to the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input)
		return 0;

	ret = omap3isp_pipeline_set_stream(pipe,
					   ISP_PIPELINE_STREAM_CONTINUOUS);
	if (ret < 0) {
		spin_lock_irqsave(&video->irqlock, flags);
		omap3isp_video_return_buffers(video, VB2_BUF_STATE_QUEUED);
		spin_unlock_irqrestore(&video->irqlock, flags);
		return ret;
	}

	spin_lock_irqsave(&video->irqlock, flags);
	if (list_empty(&video->dmaqueue))
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	spin_unlock_irqrestore(&video->irqlock, flags);

	return 0;
}

static const struct vb2_ops isp_video_queue_ops = {
	.queue_setup = isp_video_queue_setup,
	.buf_prepare = isp_video_buffer_prepare,
	.buf_queue = isp_video_buffer_queue,
	.start_streaming = isp_video_start_streaming,
};

/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp and
 * field count before handing it back to videobuf2.
 *
 * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
 * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum vb2_buffer_state vb_state;
	struct isp_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&video->irqlock, flags);

	buf->vb.vb2_buf.timestamp = ktime_get_ns();

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vb.sequence =
			atomic_inc_return(&pipe->frame_number);
	else
		buf->vb.sequence = atomic_read(&pipe->frame_number);

	if (pipe->field != V4L2_FIELD_NONE)
		buf->vb.sequence /= 2;

	buf->vb.field = pipe->field;

	/* Report pipeline errors to userspace on the capture device side. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
		vb_state = VB2_BUF_STATE_ERROR;
		pipe->error = false;
	} else {
		vb_state = VB2_BUF_STATE_DONE;
	}

	vb2_buffer_done(&buf->vb.vb2_buf, vb_state);

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->dmaqueue)) {
		enum isp_pipeline_state state;

		spin_unlock_irqrestore(&video->irqlock, flags);

		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock(&pipe->lock);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock(&pipe->lock);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);

	spin_unlock_irqrestore(&video->irqlock, flags);

	return buf;
}

/*
 * omap3isp_video_cancel_stream - Cancel stream on a video node
 * @video: ISP video object
 *
 * Cancelling a stream returns all buffers queued on the video node to videobuf2
 * in the erroneous state and makes sure no new buffer can be queued.
 */
void omap3isp_video_cancel_stream(struct isp_video *video)
{
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);
	omap3isp_video_return_buffers(video, VB2_BUF_STATE_ERROR);
	video->error = true;
	spin_unlock_irqrestore(&video->irqlock, flags);
}

/*
 * omap3isp_video_resume - Perform resume operation on the buffers
 * @video: ISP video object
 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
 *
 * This function is intended to be used in suspend/resume scenarios. It
 * requests the video queue layer to discard buffers marked as DONE if it's in
 * continuous mode, and requests the ISP modules to queue the ACTIVE buffer
 * again, if there is one.
 */
void omap3isp_video_resume(struct isp_video *video, int continuous)
{
	struct isp_buffer *buf = NULL;

	if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		mutex_lock(&video->queue_lock);
		vb2_discard_done(video->queue);
		mutex_unlock(&video->queue_lock);
	}

	if (!list_empty(&video->dmaqueue)) {
		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, irqlist);
		video->ops->queue(video, buf);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
	} else {
		if (continuous)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	}
}

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct isp_video *video = video_drvdata(file);

	strscpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
	strscpy(cap->card, video->video.name, sizeof(cap->card));
	strscpy(cap->bus_info, "media", sizeof(cap->bus_info));

	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
			  | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;

	return 0;
}

static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);
	*format = vfh->format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt fmt;

	if (format->type != video->type)
		return -EINVAL;

	/* Replace unsupported field orders with sane defaults. */
	switch (format->fmt.pix.field) {
	case V4L2_FIELD_NONE:
		/* Progressive is supported everywhere. */
		break;
	case V4L2_FIELD_ALTERNATE:
		/* ALTERNATE is not supported on output nodes. */
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	case V4L2_FIELD_INTERLACED:
		/* The ISP has no concept of video standard, select the
		 * top-bottom order when the unqualified interlaced order is
		 * requested.
		 */
		format->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
		fallthrough;
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
		/* Interlaced orders are only supported at the CCDC output. */
		if (video != &video->isp->isp_ccdc.video_out)
			format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	case V4L2_FIELD_TOP:
	case V4L2_FIELD_BOTTOM:
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_SEQ_BT:
	default:
		/* All other field orders are currently unsupported, default to
		 * progressive.
		 */
		format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	}

	/* Fill the bytesperline and sizeimage fields by converting to media bus
	 * format and back to pixel format.
	 */
	isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
	isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);

	mutex_lock(&video->mutex);
	vfh->format = *format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	if (format->type != video->type)
		return -EINVAL;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
	return 0;
}

static int
isp_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format format;
	struct v4l2_subdev *subdev;
	struct v4l2_subdev_selection sdsel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = sel->target,
	};
	u32 pad;
	int ret;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	/* Try the get selection operation first and fall back to get format
	 * if not implemented.
	 */
	sdsel.pad = pad;
	ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
	if (!ret)
		sel->r = sdsel.r;
	if (ret != -ENOIOCTLCMD)
		return ret;

	format.pad = pad;
	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	sel->r.left = 0;
	sel->r.top = 0;
	sel->r.width = format.format.width;
	sel->r.height = format.format.height;

	return 0;
}

static int
isp_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	struct v4l2_subdev_selection sdsel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = sel->target,
		.flags = sel->flags,
		.r = sel->r,
	};
	u32 pad;
	int ret;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	sdsel.pad = pad;
	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
	mutex_unlock(&video->mutex);
	if (!ret)
		sel->r = sdsel.r;

	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
}

static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	memset(a, 0, sizeof(*a));
	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.output.timeperframe = vfh->timeperframe;

	return 0;
}

static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	if (a->parm.output.timeperframe.denominator == 0)
		a->parm.output.timeperframe.denominator = 1;

	vfh->timeperframe = a->parm.output.timeperframe;

	return 0;
}

static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_reqbufs(&vfh->queue, rb);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_querybuf(&vfh->queue, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int isp_video_check_external_subdevs(struct isp_video *video,
					    struct isp_pipeline *pipe)
{
	struct isp_device *isp = video->isp;
	struct media_entity *ents[] = {
		&isp->isp_csi2a.subdev.entity,
		&isp->isp_csi2c.subdev.entity,
		&isp->isp_ccp2.subdev.entity,
		&isp->isp_ccdc.subdev.entity
	};
	struct media_pad *source_pad;
	struct media_entity *source = NULL;
	struct media_entity *sink;
	struct v4l2_subdev_format fmt;
	struct v4l2_ext_controls ctrls;
	struct v4l2_ext_control ctrl;
	unsigned int i;
	int ret;

	/* Memory-to-memory pipelines have no external subdev. */
	if (pipe->input != NULL)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ents); i++) {
		/* Is the entity part of the pipeline? */
		if (!media_entity_enum_test(&pipe->ent_enum, ents[i]))
			continue;

		/* ISP entities always have their sink pad at index 0. Find the
		 * remote source pad.
		 */
		source_pad = media_pad_remote_pad_first(&ents[i]->pads[0]);
		if (source_pad == NULL)
			continue;

		source = source_pad->entity;
		sink = ents[i];
		break;
	}

	if (!source) {
		dev_warn(isp->dev, "can't find source, failing now\n");
		return -EINVAL;
	}

	if (!is_media_entity_v4l2_subdev(source))
		return 0;

	pipe->external = media_entity_to_v4l2_subdev(source);

	fmt.pad = source_pad->index;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
			       pad, get_fmt, NULL, &fmt);
	if (unlikely(ret < 0)) {
		dev_warn(isp->dev, "get_fmt returned null!\n");
		return ret;
	}

	pipe->external_width =
		omap3isp_video_format_info(fmt.format.code)->width;

	memset(&ctrls, 0, sizeof(ctrls));
	memset(&ctrl, 0, sizeof(ctrl));

	ctrl.id = V4L2_CID_PIXEL_RATE;

	ctrls.count = 1;
	ctrls.controls = &ctrl;
	ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &video->video,
			       NULL, &ctrls);
	if (ret < 0) {
		dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
			 pipe->external->name);
		return ret;
	}

	pipe->external_rate = ctrl.value64;

	if (media_entity_enum_test(&pipe->ent_enum,
				   &isp->isp_ccdc.subdev.entity)) {
		unsigned int rate = UINT_MAX;
		/*
		 * Check that the maximum allowed CCDC pixel rate isn't
		 * exceeded by the pixel rate.
		 */
		omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
		if (pipe->external_rate > rate)
			return -ENOSPC;
	}

	return 0;
}

/*
 * Stream management
 *
 * Every ISP pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISP video object at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * to the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISP modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the vb2 queue callback with
 * the buffers queue spinlock held. The module's subdev set stream operation
 * must not sleep.
 */
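
/*
 * Illustrative userspace sketch (not part of the driver): a minimal capture
 * sequence against one of these video nodes, assuming the media graph links
 * and subdev formats have already been configured (for instance with
 * media-ctl), would look roughly like this:
 *
 *	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
 *	int fd = open("/dev/video2", O_RDWR);	// node number is hypothetical
 *
 *	ioctl(fd, VIDIOC_G_FMT, &fmt);		// handled by isp_video_get_format()
 *	ioctl(fd, VIDIOC_S_FMT, &fmt);		// handled by isp_video_set_format()
 *
 * VIDIOC_REQBUFS, VIDIOC_QBUF and VIDIOC_STREAMON then follow the standard
 * V4L2 streaming I/O flow, served by the ioctl handlers below.
 */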
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = video->video.entity.pipe
	     ? to_isp_pipeline(&video->video.entity) : &video->pipe;

	ret = media_entity_enum_init(&pipe->ent_enum, &video->isp->media_dev);
	if (ret)
		goto err_enum_init;

	/* TODO: Implement PM QoS */
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
	pipe->max_rate = pipe->l3_ick;

	ret = media_pipeline_start(&video->video.entity, &pipe->pipe);
	if (ret < 0)
		goto err_pipeline_start;

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto err_check_format;

	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	ret = isp_video_get_graph_data(video, pipe);
	if (ret < 0)
		goto err_check_format;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;

	ret = isp_video_check_external_subdevs(video, pipe);
	if (ret < 0)
		goto err_check_format;

	pipe->error = false;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the requested limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	atomic_set(&pipe->frame_number, -1);
	pipe->field = vfh->format.fmt.pix.field;

	mutex_lock(&video->queue_lock);
	ret = vb2_streamon(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	if (ret < 0)
		goto err_check_format;

	mutex_unlock(&video->stream_lock);

	return 0;

err_check_format:
	media_pipeline_stop(&video->video.entity);
err_pipeline_start:
	/* TODO: Implement PM QoS */
	/* The DMA queue must be emptied here, otherwise CCDC interrupts that
	 * will get triggered the next time the CCDC is powered up will try to
	 * access buffers that might have been freed but are still present in
	 * the DMA queue. This can easily get triggered if the above
	 * omap3isp_pipeline_set_stream() call fails on a system with a
	 * free-running sensor.
1173 */ 1174 INIT_LIST_HEAD(&video->dmaqueue); 1175 video->queue = NULL; 1176 1177 media_entity_enum_cleanup(&pipe->ent_enum); 1178 1179 err_enum_init: 1180 mutex_unlock(&video->stream_lock); 1181 1182 return ret; 1183 } 1184 1185 static int 1186 isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) 1187 { 1188 struct isp_video_fh *vfh = to_isp_video_fh(fh); 1189 struct isp_video *video = video_drvdata(file); 1190 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); 1191 enum isp_pipeline_state state; 1192 unsigned int streaming; 1193 unsigned long flags; 1194 1195 if (type != video->type) 1196 return -EINVAL; 1197 1198 mutex_lock(&video->stream_lock); 1199 1200 /* Make sure we're not streaming yet. */ 1201 mutex_lock(&video->queue_lock); 1202 streaming = vb2_is_streaming(&vfh->queue); 1203 mutex_unlock(&video->queue_lock); 1204 1205 if (!streaming) 1206 goto done; 1207 1208 /* Update the pipeline state. */ 1209 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1210 state = ISP_PIPELINE_STREAM_OUTPUT 1211 | ISP_PIPELINE_QUEUE_OUTPUT; 1212 else 1213 state = ISP_PIPELINE_STREAM_INPUT 1214 | ISP_PIPELINE_QUEUE_INPUT; 1215 1216 spin_lock_irqsave(&pipe->lock, flags); 1217 pipe->state &= ~state; 1218 spin_unlock_irqrestore(&pipe->lock, flags); 1219 1220 /* Stop the stream. */ 1221 omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED); 1222 omap3isp_video_cancel_stream(video); 1223 1224 mutex_lock(&video->queue_lock); 1225 vb2_streamoff(&vfh->queue, type); 1226 mutex_unlock(&video->queue_lock); 1227 video->queue = NULL; 1228 video->error = false; 1229 1230 /* TODO: Implement PM QoS */ 1231 media_pipeline_stop(&video->video.entity); 1232 1233 media_entity_enum_cleanup(&pipe->ent_enum); 1234 1235 done: 1236 mutex_unlock(&video->stream_lock); 1237 return 0; 1238 } 1239 1240 static int 1241 isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input) 1242 { 1243 if (input->index > 0) 1244 return -EINVAL; 1245 1246 strscpy(input->name, "camera", sizeof(input->name)); 1247 input->type = V4L2_INPUT_TYPE_CAMERA; 1248 1249 return 0; 1250 } 1251 1252 static int 1253 isp_video_g_input(struct file *file, void *fh, unsigned int *input) 1254 { 1255 *input = 0; 1256 1257 return 0; 1258 } 1259 1260 static int 1261 isp_video_s_input(struct file *file, void *fh, unsigned int input) 1262 { 1263 return input == 0 ? 
}

static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap = isp_video_querycap,
	.vidioc_g_fmt_vid_cap = isp_video_get_format,
	.vidioc_s_fmt_vid_cap = isp_video_set_format,
	.vidioc_try_fmt_vid_cap = isp_video_try_format,
	.vidioc_g_fmt_vid_out = isp_video_get_format,
	.vidioc_s_fmt_vid_out = isp_video_set_format,
	.vidioc_try_fmt_vid_out = isp_video_try_format,
	.vidioc_g_selection = isp_video_get_selection,
	.vidioc_s_selection = isp_video_set_selection,
	.vidioc_g_parm = isp_video_get_param,
	.vidioc_s_parm = isp_video_set_param,
	.vidioc_reqbufs = isp_video_reqbufs,
	.vidioc_querybuf = isp_video_querybuf,
	.vidioc_qbuf = isp_video_qbuf,
	.vidioc_dqbuf = isp_video_dqbuf,
	.vidioc_streamon = isp_video_streamon,
	.vidioc_streamoff = isp_video_streamoff,
	.vidioc_enum_input = isp_video_enum_input,
	.vidioc_g_input = isp_video_g_input,
	.vidioc_s_input = isp_video_s_input,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct isp_video_fh *handle;
	struct vb2_queue *queue;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline. */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	ret = v4l2_pipeline_pm_get(&video->video.entity);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	queue = &handle->queue;
	queue->type = video->type;
	queue->io_modes = VB2_MMAP | VB2_USERPTR;
	queue->drv_priv = handle;
	queue->ops = &isp_video_queue_ops;
	queue->mem_ops = &vb2_dma_contig_memops;
	queue->buf_struct_size = sizeof(struct isp_buffer);
	queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	queue->dev = video->isp->dev;

	ret = vb2_queue_init(&handle->queue);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh);
		v4l2_fh_exit(&handle->vfh);
		kfree(handle);
	}

	return ret;
}

static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct isp_video_fh *handle = to_isp_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources. */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&video->queue_lock);
	vb2_queue_release(&handle->queue);
	mutex_unlock(&video->queue_lock);

	v4l2_pipeline_pm_put(&video->video.entity);

	/* Release the file handle. */
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	kfree(handle);
	file->private_data = NULL;

	omap3isp_put(video->isp);

	return 0;
}

static __poll_t isp_video_poll(struct file *file, poll_table *wait)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
	struct isp_video *video = video_drvdata(file);
	__poll_t ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_poll(&vfh->queue, file, wait);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);

	return vb2_mmap(&vfh->queue, vma);
}

static const struct v4l2_file_operations isp_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = isp_video_open,
	.release = isp_video_release,
	.poll = isp_video_poll,
	.mmap = isp_video_mmap,
};

/* -----------------------------------------------------------------------------
 * ISP video core
 */

static const struct isp_video_operations isp_video_dummy_ops = {
};

int omap3isp_video_init(struct isp_video *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK
				   | MEDIA_PAD_FL_MUST_CONNECT;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE
				   | MEDIA_PAD_FL_MUST_CONNECT;
		video->video.vfl_dir = VFL_DIR_TX;
		break;

	default:
		return -EINVAL;
	}

	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
	if (ret < 0)
		return ret;

	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);
	mutex_init(&video->queue_lock);
	spin_lock_init(&video->irqlock);

	/* Initialize the video device. */
	if (video->ops == NULL)
		video->ops = &isp_video_dummy_ops;

	video->video.fops = &isp_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP3 ISP %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_VIDEO;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &isp_video_ioctl_ops;
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE
					 | V4L2_CAP_STREAMING;
	else
		video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT
					 | V4L2_CAP_STREAMING;

	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}

void omap3isp_video_cleanup(struct isp_video *video)
{
	media_entity_cleanup(&video->video.entity);
	mutex_destroy(&video->queue_lock);
	mutex_destroy(&video->stream_lock);
	mutex_destroy(&video->mutex);
}

int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;

	ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1);
	if (ret < 0)
		dev_err(video->isp->dev,
			"%s: could not register video device (%d)\n",
			__func__, ret);

	return ret;
}

void omap3isp_video_unregister(struct isp_video *video)
{
	video_unregister_device(&video->video);
}
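
/*
 * Illustrative sketch (not part of the driver): an ISP submodule typically
 * embeds a struct isp_video, fills in type, ops, isp and the bpl_* limits,
 * then calls omap3isp_video_init() at probe time and omap3isp_video_register()
 * when the V4L2 device is registered. Roughly, and with hypothetical values:
 *
 *	video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	video->ops = &module_video_ops;		// module-specific queue op
 *	video->isp = isp;
 *	video->capture_mem = PAGE_ALIGN(4096 * 4096) * 2;
 *	video->bpl_alignment = 32;
 *	ret = omap3isp_video_init(video, "MODULE");
 *	...
 *	ret = omap3isp_video_register(video, &isp->v4l2_dev);
 *
 * omap3isp_video_unregister() and omap3isp_video_cleanup() undo the two steps
 * in reverse order at removal time.
 */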