/*
 * Xilinx Video DMA
 *
 * Copyright (C) 2013-2015 Ideas on Board
 * Copyright (C) 2013-2015 Xilinx, Inc.
 *
 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma/xilinx_dma.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "xilinx-dma.h"
#include "xilinx-vip.h"
#include "xilinx-vipp.h"

#define XVIP_DMA_DEF_FORMAT     V4L2_PIX_FMT_YUYV
#define XVIP_DMA_DEF_WIDTH      1920
#define XVIP_DMA_DEF_HEIGHT     1080

/* Minimum and maximum widths are expressed in bytes */
#define XVIP_DMA_MIN_WIDTH      1U
#define XVIP_DMA_MAX_WIDTH      65535U
#define XVIP_DMA_MIN_HEIGHT     1U
#define XVIP_DMA_MAX_HEIGHT     8191U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

static struct v4l2_subdev *
xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
{
        struct media_pad *remote;

        remote = media_entity_remote_pad(local);
        if (remote == NULL ||
            media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
                return NULL;

        if (pad)
                *pad = remote->index;

        return media_entity_to_v4l2_subdev(remote->entity);
}

static int xvip_dma_verify_format(struct xvip_dma *dma)
{
        struct v4l2_subdev_format fmt;
        struct v4l2_subdev *subdev;
        int ret;

        subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
        if (subdev == NULL)
                return -EPIPE;

        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
        if (ret < 0)
                return ret == -ENOIOCTLCMD ? -EINVAL : ret;

        if (dma->fmtinfo->code != fmt.format.code ||
            dma->format.height != fmt.format.height ||
            dma->format.width != fmt.format.width ||
            dma->format.colorspace != fmt.format.colorspace)
                return -EINVAL;

        return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Stream Management
 */

/**
 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
 * @pipe: The pipeline
 * @start: Start (when true) or stop (when false) the pipeline
 *
 * Walk the entities chain starting at the pipeline output video node and start
 * or stop all of them.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
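 *
 * Errors are only propagated when starting the pipeline; when stopping,
 * ->s_stream() failures are ignored so that every entity in the chain is
 * still walked.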
 */
static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
{
        struct xvip_dma *dma = pipe->output;
        struct media_entity *entity;
        struct media_pad *pad;
        struct v4l2_subdev *subdev;
        int ret;

        entity = &dma->video.entity;
        while (1) {
                pad = &entity->pads[0];
                if (!(pad->flags & MEDIA_PAD_FL_SINK))
                        break;

                pad = media_entity_remote_pad(pad);
                if (pad == NULL ||
                    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
                        break;

                entity = pad->entity;
                subdev = media_entity_to_v4l2_subdev(entity);

                ret = v4l2_subdev_call(subdev, video, s_stream, start);
                if (start && ret < 0 && ret != -ENOIOCTLCMD)
                        return ret;
        }

        return 0;
}

/**
 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
 * @pipe: The pipeline
 * @on: Turn the stream on when true or off when false
 *
 * The pipeline is shared between all DMA engines connected to its input and
 * output. While the stream state of DMA engines can be controlled
 * independently, pipelines have a shared stream state that enables or disables
 * all entities in the pipeline. For this reason the pipeline uses a streaming
 * counter that tracks the number of DMA engines that have requested the stream
 * to be enabled.
 *
 * When called with the @on argument set to true, this function will increment
 * the pipeline streaming count. If the streaming count reaches the number of
 * DMA engines in the pipeline it will enable all entities that belong to the
 * pipeline.
 *
 * Similarly, when called with the @on argument set to false, this function will
 * decrement the pipeline streaming count and disable all entities in the
 * pipeline when the streaming count reaches zero.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 * not updated when the operation fails.
 */
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
        int ret = 0;

        mutex_lock(&pipe->lock);

        if (on) {
                if (pipe->stream_count == pipe->num_dmas - 1) {
                        ret = xvip_pipeline_start_stop(pipe, true);
                        if (ret < 0)
                                goto done;
                }
                pipe->stream_count++;
        } else {
                if (--pipe->stream_count == 0)
                        xvip_pipeline_start_stop(pipe, false);
        }

done:
        mutex_unlock(&pipe->lock);
        return ret;
}

static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
                                  struct xvip_dma *start)
{
        struct media_entity_graph graph;
        struct media_entity *entity = &start->video.entity;
        struct media_device *mdev = entity->parent;
        unsigned int num_inputs = 0;
        unsigned int num_outputs = 0;

        mutex_lock(&mdev->graph_mutex);

        /* Walk the graph to locate the video nodes. */
        media_entity_graph_walk_start(&graph, entity);

        while ((entity = media_entity_graph_walk_next(&graph))) {
                struct xvip_dma *dma;

                if (entity->type != MEDIA_ENT_T_DEVNODE_V4L)
                        continue;

                dma = to_xvip_dma(media_entity_to_video_device(entity));

                if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
                        pipe->output = dma;
                        num_outputs++;
                } else {
                        num_inputs++;
                }
        }

        mutex_unlock(&mdev->graph_mutex);

        /* We need exactly one output and zero or one input.
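         * The output is the capture DMA engine that receives data from the
         * pipeline, while the optional input is a memory-to-device DMA engine
         * feeding the pipeline.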
         */
        if (num_outputs != 1 || num_inputs > 1)
                return -EPIPE;

        pipe->num_dmas = num_inputs + num_outputs;

        return 0;
}

static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
        pipe->num_dmas = 0;
        pipe->output = NULL;
}

/**
 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 * @pipe: the pipeline
 *
 * Decrease the pipeline use count and clean it up if we were the last user.
 */
static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
        mutex_lock(&pipe->lock);

        /* If we're the last user clean up the pipeline. */
        if (--pipe->use_count == 0)
                __xvip_pipeline_cleanup(pipe);

        mutex_unlock(&pipe->lock);
}

/**
 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 * @pipe: the pipeline
 * @dma: DMA engine at one end of the pipeline
 *
 * Validate the pipeline if no user exists yet, otherwise just increase the use
 * count.
 *
 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 */
static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
                                 struct xvip_dma *dma)
{
        int ret;

        mutex_lock(&pipe->lock);

        /* If we're the first user validate and initialize the pipeline. */
        if (pipe->use_count == 0) {
                ret = xvip_pipeline_validate(pipe, dma);
                if (ret < 0) {
                        __xvip_pipeline_cleanup(pipe);
                        goto done;
                }
        }

        pipe->use_count++;
        ret = 0;

done:
        mutex_unlock(&pipe->lock);
        return ret;
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 */
struct xvip_dma_buffer {
        struct vb2_v4l2_buffer buf;
        struct list_head queue;
        struct xvip_dma *dma;
};

#define to_xvip_dma_buffer(vb)  container_of(vb, struct xvip_dma_buffer, buf)

static void xvip_dma_complete(void *param)
{
        struct xvip_dma_buffer *buf = param;
        struct xvip_dma *dma = buf->dma;

        spin_lock(&dma->queued_lock);
        list_del(&buf->queue);
        spin_unlock(&dma->queued_lock);

        buf->buf.field = V4L2_FIELD_NONE;
        buf->buf.sequence = dma->sequence++;
        buf->buf.vb2_buf.timestamp = ktime_get_ns();
        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
        vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

static int
xvip_dma_queue_setup(struct vb2_queue *vq,
                     unsigned int *nbuffers, unsigned int *nplanes,
                     unsigned int sizes[], void *alloc_ctxs[])
{
        struct xvip_dma *dma = vb2_get_drv_priv(vq);

        alloc_ctxs[0] = dma->alloc_ctx;
        /* Make sure the image size is large enough. */
        if (*nplanes)
                return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;
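
        /*
         * First invocation: report a single plane of sizeimage bytes so that
         * videobuf2 allocates buffers large enough for the current format.
         */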
        *nplanes = 1;
        sizes[0] = dma->format.sizeimage;

        return 0;
}

static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
        struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);

        buf->dma = dma;

        return 0;
}

static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
        struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
        struct dma_async_tx_descriptor *desc;
        dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
        u32 flags;

        if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
                flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
                dma->xt.dir = DMA_DEV_TO_MEM;
                dma->xt.src_sgl = false;
                dma->xt.dst_sgl = true;
                dma->xt.dst_start = addr;
        } else {
                flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
                dma->xt.dir = DMA_MEM_TO_DEV;
                dma->xt.src_sgl = true;
                dma->xt.dst_sgl = false;
                dma->xt.src_start = addr;
        }

        dma->xt.frame_size = 1;
        dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
        dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
        dma->xt.numf = dma->format.height;

        desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
        if (!desc) {
                dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
                return;
        }
        desc->callback = xvip_dma_complete;
        desc->callback_param = buf;

        spin_lock_irq(&dma->queued_lock);
        list_add_tail(&buf->queue, &dma->queued_bufs);
        spin_unlock_irq(&dma->queued_lock);

        dmaengine_submit(desc);

        if (vb2_is_streaming(&dma->queue))
                dma_async_issue_pending(dma->dma);
}

static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct xvip_dma *dma = vb2_get_drv_priv(vq);
        struct xvip_dma_buffer *buf, *nbuf;
        struct xvip_pipeline *pipe;
        int ret;

        dma->sequence = 0;

        /*
         * Start streaming on the pipeline. No link touching an entity in the
         * pipeline can be activated or deactivated once streaming is started.
         *
         * Use the pipeline object embedded in the first DMA object that starts
         * streaming.
         */
        pipe = dma->video.entity.pipe
             ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

        ret = media_entity_pipeline_start(&dma->video.entity, &pipe->pipe);
        if (ret < 0)
                goto error;

        /* Verify that the configured format matches the output of the
         * connected subdev.
         */
        ret = xvip_dma_verify_format(dma);
        if (ret < 0)
                goto error_stop;

        ret = xvip_pipeline_prepare(pipe, dma);
        if (ret < 0)
                goto error_stop;

        /* Start the DMA engine. This must be done before starting the blocks
         * in the pipeline to avoid DMA synchronization issues.
         */
        dma_async_issue_pending(dma->dma);

        /* Start the pipeline. */
        xvip_pipeline_set_stream(pipe, true);

        return 0;

error_stop:
        media_entity_pipeline_stop(&dma->video.entity);

error:
        /* Give back all queued buffers to videobuf2.
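         * videobuf2 expects them back in the QUEUED state when
         * start_streaming() fails, so that they can be handed to the driver
         * again on the next streaming attempt.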
         */
        spin_lock_irq(&dma->queued_lock);
        list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
                list_del(&buf->queue);
        }
        spin_unlock_irq(&dma->queued_lock);

        return ret;
}

static void xvip_dma_stop_streaming(struct vb2_queue *vq)
{
        struct xvip_dma *dma = vb2_get_drv_priv(vq);
        struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
        struct xvip_dma_buffer *buf, *nbuf;

        /* Stop the pipeline. */
        xvip_pipeline_set_stream(pipe, false);

        /* Stop and reset the DMA engine. */
        dmaengine_terminate_all(dma->dma);

        /* Cleanup the pipeline and mark it as being stopped. */
        xvip_pipeline_cleanup(pipe);
        media_entity_pipeline_stop(&dma->video.entity);

        /* Give back all queued buffers to videobuf2. */
        spin_lock_irq(&dma->queued_lock);
        list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
                list_del(&buf->queue);
        }
        spin_unlock_irq(&dma->queued_lock);
}

static struct vb2_ops xvip_dma_queue_qops = {
        .queue_setup = xvip_dma_queue_setup,
        .buf_prepare = xvip_dma_buffer_prepare,
        .buf_queue = xvip_dma_buffer_queue,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .start_streaming = xvip_dma_start_streaming,
        .stop_streaming = xvip_dma_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

        cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
                          | dma->xdev->v4l2_caps;

        if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
        else
                cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;

        strlcpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
        strlcpy(cap->card, dma->video.name, sizeof(cap->card));
        snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u",
                 dma->xdev->dev->of_node->name, dma->port);

        return 0;
}

/* FIXME: Without this callback function, some applications are not configured
 * with the correct formats, resulting in frames in the wrong format. Whether
 * this callback is required is not clearly defined, so it should be clarified
 * through the mailing list.
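 *
 * Note that only the currently configured format is enumerated (index 0);
 * other formats are selected through VIDIOC_S_FMT.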
 */
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

        if (f->index > 0)
                return -EINVAL;

        f->pixelformat = dma->format.pixelformat;
        strlcpy(f->description, dma->fmtinfo->description,
                sizeof(f->description));

        return 0;
}

static int
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

        format->fmt.pix = dma->format;

        return 0;
}

static void
__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
                      const struct xvip_video_format **fmtinfo)
{
        const struct xvip_video_format *info;
        unsigned int min_width;
        unsigned int max_width;
        unsigned int min_bpl;
        unsigned int max_bpl;
        unsigned int width;
        unsigned int align;
        unsigned int bpl;

        /* Retrieve format information and select the default format if the
         * requested format isn't supported.
         */
        info = xvip_get_format_by_fourcc(pix->pixelformat);
        if (IS_ERR(info))
                info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);

        pix->pixelformat = info->fourcc;
        pix->field = V4L2_FIELD_NONE;

        /* The transfer alignment requirements are expressed in bytes. Compute
         * the minimum and maximum values, clamp the requested width and convert
         * it back to pixels.
         */
        align = lcm(dma->align, info->bpp);
        min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
        max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
        width = rounddown(pix->width * info->bpp, align);

        pix->width = clamp(width, min_width, max_width) / info->bpp;
        pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
                            XVIP_DMA_MAX_HEIGHT);

        /* Clamp the requested bytes per line value. If the maximum bytes per
         * line value is zero, the module doesn't support user configurable line
         * sizes. Override the requested value with the minimum in that case.
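         *
         * For example, assuming a hypothetical 8-byte DMA alignment and the
         * default 2-byte-per-pixel YUYV format at 1920 pixels per line,
         * min_bpl is 3840 bytes: a requested bytesperline of 3900 is rounded
         * down to 3896 and kept, while any value below 3840 is raised to the
         * 3840 byte minimum.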
         */
        min_bpl = pix->width * info->bpp;
        max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
        bpl = rounddown(pix->bytesperline, dma->align);

        pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
        pix->sizeimage = pix->bytesperline * pix->height;

        if (fmtinfo)
                *fmtinfo = info;
}

static int
xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

        __xvip_dma_try_format(dma, &format->fmt.pix, NULL);
        return 0;
}

static int
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
        const struct xvip_video_format *info;

        __xvip_dma_try_format(dma, &format->fmt.pix, &info);

        if (vb2_is_busy(&dma->queue))
                return -EBUSY;

        dma->format = format->fmt.pix;
        dma->fmtinfo = info;

        return 0;
}

static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
        .vidioc_querycap = xvip_dma_querycap,
        .vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
        .vidioc_g_fmt_vid_cap = xvip_dma_get_format,
        .vidioc_g_fmt_vid_out = xvip_dma_get_format,
        .vidioc_s_fmt_vid_cap = xvip_dma_set_format,
        .vidioc_s_fmt_vid_out = xvip_dma_set_format,
        .vidioc_try_fmt_vid_cap = xvip_dma_try_format,
        .vidioc_try_fmt_vid_out = xvip_dma_try_format,
        .vidioc_reqbufs = vb2_ioctl_reqbufs,
        .vidioc_querybuf = vb2_ioctl_querybuf,
        .vidioc_qbuf = vb2_ioctl_qbuf,
        .vidioc_dqbuf = vb2_ioctl_dqbuf,
        .vidioc_create_bufs = vb2_ioctl_create_bufs,
        .vidioc_expbuf = vb2_ioctl_expbuf,
        .vidioc_streamon = vb2_ioctl_streamon,
        .vidioc_streamoff = vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static const struct v4l2_file_operations xvip_dma_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = video_ioctl2,
        .open = v4l2_fh_open,
        .release = vb2_fop_release,
        .poll = vb2_fop_poll,
        .mmap = vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Xilinx Video DMA Core
 */

int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
                  enum v4l2_buf_type type, unsigned int port)
{
        char name[16];
        int ret;

        dma->xdev = xdev;
        dma->port = port;
        mutex_init(&dma->lock);
        mutex_init(&dma->pipe.lock);
        INIT_LIST_HEAD(&dma->queued_bufs);
        spin_lock_init(&dma->queued_lock);

        dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
        dma->format.pixelformat = dma->fmtinfo->fourcc;
        dma->format.colorspace = V4L2_COLORSPACE_SRGB;
        dma->format.field = V4L2_FIELD_NONE;
        dma->format.width = XVIP_DMA_DEF_WIDTH;
        dma->format.height = XVIP_DMA_DEF_HEIGHT;
        dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
        dma->format.sizeimage = dma->format.bytesperline * dma->format.height;

        /* Initialize the media entity... */
        dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
                       ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

        ret = media_entity_init(&dma->video.entity, 1, &dma->pad, 0);
        if (ret < 0)
                goto error;

        /* ... and the video node... */
        dma->video.fops = &xvip_dma_fops;
        dma->video.v4l2_dev = &xdev->v4l2_dev;
        dma->video.queue = &dma->queue;
        snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
                 xdev->dev->of_node->name,
                 type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
                 port);
        dma->video.vfl_type = VFL_TYPE_GRABBER;
        dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
                           ? VFL_DIR_RX : VFL_DIR_TX;
        dma->video.release = video_device_release_empty;
        dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
        dma->video.lock = &dma->lock;

        video_set_drvdata(&dma->video, dma);

        /* ... and the buffers queue... */
        dma->alloc_ctx = vb2_dma_contig_init_ctx(dma->xdev->dev);
        if (IS_ERR(dma->alloc_ctx)) {
                ret = PTR_ERR(dma->alloc_ctx);
                goto error;
        }

        /* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
         * V4L2 APIs would be inefficient. Testing on the command line with a
         * 'cat /dev/video?' thus won't be possible, but given that the driver
         * anyway requires a test tool to set up the pipeline before any video
         * stream can be started, requiring a specific V4L2 test tool as well
         * instead of 'cat' isn't really a drawback.
         */
        dma->queue.type = type;
        dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        dma->queue.lock = &dma->lock;
        dma->queue.drv_priv = dma;
        dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
        dma->queue.ops = &xvip_dma_queue_qops;
        dma->queue.mem_ops = &vb2_dma_contig_memops;
        dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
                                   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
        ret = vb2_queue_init(&dma->queue);
        if (ret < 0) {
                dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
                goto error;
        }

        /* ... and the DMA channel. */
        snprintf(name, sizeof(name), "port%u", port);
        dma->dma = dma_request_slave_channel(dma->xdev->dev, name);
        if (dma->dma == NULL) {
                dev_err(dma->xdev->dev, "no VDMA channel found\n");
                ret = -ENODEV;
                goto error;
        }

        dma->align = 1 << dma->dma->device->copy_align;

        ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
        if (ret < 0) {
                dev_err(dma->xdev->dev, "failed to register video device\n");
                goto error;
        }

        return 0;

error:
        xvip_dma_cleanup(dma);
        return ret;
}

void xvip_dma_cleanup(struct xvip_dma *dma)
{
        if (video_is_registered(&dma->video))
                video_unregister_device(&dma->video);

        if (dma->dma)
                dma_release_channel(dma->dma);

        if (!IS_ERR_OR_NULL(dma->alloc_ctx))
                vb2_dma_contig_cleanup_ctx(dma->alloc_ctx);

        media_entity_cleanup(&dma->video.entity);

        mutex_destroy(&dma->lock);
        mutex_destroy(&dma->pipe.lock);
}