// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Video DMA
 *
 * Copyright (C) 2013-2015 Ideas on Board
 * Copyright (C) 2013-2015 Xilinx, Inc.
 *
 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <linux/dma/xilinx_dma.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "xilinx-dma.h"
#include "xilinx-vip.h"
#include "xilinx-vipp.h"

/* Default format and frame size used until userspace sets a format. */
#define XVIP_DMA_DEF_FORMAT		V4L2_PIX_FMT_YUYV
#define XVIP_DMA_DEF_WIDTH		1920
#define XVIP_DMA_DEF_HEIGHT		1080

/* Minimum and maximum widths are expressed in bytes */
#define XVIP_DMA_MIN_WIDTH		1U
#define XVIP_DMA_MAX_WIDTH		65535U
#define XVIP_DMA_MIN_HEIGHT		1U
#define XVIP_DMA_MAX_HEIGHT		8191U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

/*
 * Return the V4L2 subdev at the remote end of the link connected to @local,
 * or NULL if the remote entity is not a V4L2 subdev (or no link exists).
 * When @pad is non-NULL it receives the index of the remote pad.
 */
static struct v4l2_subdev *
xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(local);
	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/*
 * Verify that the active format of the connected subdev pad matches the
 * format configured on the DMA video node (media bus code, width, height
 * and colorspace). Called before starting the stream.
 */
static int xvip_dma_verify_format(struct xvip_dma *dma)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	int ret;

	subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
	if (subdev == NULL)
		return -EPIPE;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (dma->fmtinfo->code != fmt.format.code ||
	    dma->format.height != fmt.format.height ||
	    dma->format.width != fmt.format.width ||
	    dma->format.colorspace != fmt.format.colorspace)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Stream Management
 */

/**
 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
 * @pipe: The pipeline
 * @start: Start (when true) or stop (when false) the pipeline
 *
 * Walk the entities chain starting at the pipeline output video node and start
 * or stop all of them.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
 */
static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
{
	struct xvip_dma *dma = pipe->output;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	entity = &dma->video.entity;
	while (1) {
		/* Follow the chain upstream through sink pad 0 of each
		 * entity until a non-subdev (or unconnected pad) is found.
		 */
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_entity_remote_pad(pad);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			break;

		entity = pad->entity;
		subdev = media_entity_to_v4l2_subdev(entity);

		/* Errors are only fatal when starting; keep stopping the
		 * remaining entities on the way down regardless.
		 */
		ret = v4l2_subdev_call(subdev, video, s_stream, start);
		if (start && ret < 0 && ret != -ENOIOCTLCMD)
			return ret;
	}

	return 0;
}

/**
 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
 * @pipe: The pipeline
 * @on: Turn the stream on when true or off when false
 *
 * The pipeline is shared between all DMA engines connected at its input and
 * output. While the stream state of DMA engines can be controlled
 * independently, pipelines have a shared stream state that enables or disables
 * all entities in the pipeline. For this reason the pipeline uses a streaming
 * counter that tracks the number of DMA engines that have requested the stream
 * to be enabled.
 *
 * When called with the @on argument set to true, this function will increment
 * the pipeline streaming count. If the streaming count reaches the number of
 * DMA engines in the pipeline it will enable all entities that belong to the
 * pipeline.
 *
 * Similarly, when called with the @on argument set to false, this function will
 * decrement the pipeline streaming count and disable all entities in the
 * pipeline when the streaming count reaches zero.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 * not updated when the operation fails.
 */
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
	int ret = 0;

	mutex_lock(&pipe->lock);

	if (on) {
		if (pipe->stream_count == pipe->num_dmas - 1) {
			ret = xvip_pipeline_start_stop(pipe, true);
			if (ret < 0)
				goto done;
		}
		pipe->stream_count++;
	} else {
		if (--pipe->stream_count == 0)
			xvip_pipeline_start_stop(pipe, false);
	}

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

/*
 * Walk the media graph starting at @start and count the V4L2 I/O video
 * nodes, recording the output (sink pad) DMA engine in pipe->output.
 * The pipeline is valid only with exactly one output and at most one input;
 * pipe->num_dmas is set to the total on success.
 */
static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
				  struct xvip_dma *start)
{
	struct media_graph graph;
	struct media_entity *entity = &start->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	unsigned int num_inputs = 0;
	unsigned int num_outputs = 0;
	int ret;

	mutex_lock(&mdev->graph_mutex);

	/* Walk the graph to locate the video nodes. */
	ret = media_graph_walk_init(&graph, mdev);
	if (ret) {
		mutex_unlock(&mdev->graph_mutex);
		return ret;
	}

	media_graph_walk_start(&graph, entity);

	while ((entity = media_graph_walk_next(&graph))) {
		struct xvip_dma *dma;

		if (entity->function != MEDIA_ENT_F_IO_V4L)
			continue;

		dma = to_xvip_dma(media_entity_to_video_device(entity));

		if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
			pipe->output = dma;
			num_outputs++;
		} else {
			num_inputs++;
		}
	}

	mutex_unlock(&mdev->graph_mutex);

	media_graph_walk_cleanup(&graph);

	/* We need exactly one output and zero or one input. */
	if (num_outputs != 1 || num_inputs > 1)
		return -EPIPE;

	pipe->num_dmas = num_inputs + num_outputs;

	return 0;
}

/* Reset the pipeline state. Caller must hold pipe->lock. */
static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	pipe->num_dmas = 0;
	pipe->output = NULL;
}

/**
 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 * @pipe: the pipeline
 *
 * Decrease the pipeline use count and clean it up if we were the last user.
 */
static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	mutex_lock(&pipe->lock);

	/* If we're the last user clean up the pipeline. */
	if (--pipe->use_count == 0)
		__xvip_pipeline_cleanup(pipe);

	mutex_unlock(&pipe->lock);
}

/**
 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 * @pipe: the pipeline
 * @dma: DMA engine at one end of the pipeline
 *
 * Validate the pipeline if no user exists yet, otherwise just increase the use
 * count.
 *
 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 */
static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
				 struct xvip_dma *dma)
{
	int ret;

	mutex_lock(&pipe->lock);

	/* If we're the first user validate and initialize the pipeline. */
	if (pipe->use_count == 0) {
		ret = xvip_pipeline_validate(pipe, dma);
		if (ret < 0) {
			__xvip_pipeline_cleanup(pipe);
			goto done;
		}
	}

	pipe->use_count++;
	ret = 0;

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 */
struct xvip_dma_buffer {
	struct vb2_v4l2_buffer buf;
	struct list_head queue;
	struct xvip_dma *dma;
};

#define to_xvip_dma_buffer(vb)	container_of(vb, struct xvip_dma_buffer, buf)

/*
 * DMA transfer completion callback (dmaengine context): remove the buffer
 * from the queued list, fill in the vb2 metadata (field, sequence number,
 * timestamp, payload) and hand the buffer back to videobuf2 as DONE.
 */
static void xvip_dma_complete(void *param)
{
	struct xvip_dma_buffer *buf = param;
	struct xvip_dma *dma = buf->dma;

	spin_lock(&dma->queued_lock);
	list_del(&buf->queue);
	spin_unlock(&dma->queued_lock);

	buf->buf.field = V4L2_FIELD_NONE;
	buf->buf.sequence = dma->sequence++;
	buf->buf.vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

/*
 * vb2 queue_setup: report a single plane sized to the configured image.
 * When the caller proposes plane sizes, only check they are large enough.
 */
static int
xvip_dma_queue_setup(struct vb2_queue *vq,
		     unsigned int *nbuffers, unsigned int *nplanes,
		     unsigned int sizes[], struct device *alloc_devs[])
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);

	/* Make sure the image size is large enough. */
	if (*nplanes)
		return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = dma->format.sizeimage;

	return 0;
}

/* vb2 buf_prepare: record the owning DMA channel in the buffer. */
static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);

	buf->dma = dma;

	return 0;
}

/*
 * vb2 buf_queue: build an interleaved DMA transfer template for the buffer
 * (direction depends on capture vs. output), submit it to the dmaengine
 * channel and add the buffer to the queued list. Transfers are only issued
 * immediately when the queue is already streaming; otherwise
 * start_streaming() issues the pending descriptors.
 */
static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
	struct dma_async_tx_descriptor *desc;
	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	u32 flags;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_DEV_TO_MEM;
		dma->xt.src_sgl = false;
		dma->xt.dst_sgl = true;
		dma->xt.dst_start = addr;
	} else {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_MEM_TO_DEV;
		dma->xt.src_sgl = true;
		dma->xt.dst_sgl = false;
		dma->xt.src_start = addr;
	}

	/* One frame per transfer: line size in bytes, inter-line gap (icg)
	 * covers the bytesperline padding, numf is the number of lines.
	 */
	dma->xt.frame_size = 1;
	dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
	dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
	dma->xt.numf = dma->format.height;

	desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
	if (!desc) {
		dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		return;
	}
	desc->callback = xvip_dma_complete;
	desc->callback_param = buf;

	spin_lock_irq(&dma->queued_lock);
	list_add_tail(&buf->queue, &dma->queued_bufs);
	spin_unlock_irq(&dma->queued_lock);

	dmaengine_submit(desc);

	if (vb2_is_streaming(&dma->queue))
		dma_async_issue_pending(dma->dma);
}

static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_dma_buffer *buf, *nbuf;
	struct xvip_pipeline *pipe;
	int ret;

	dma->sequence = 0;

	/*
	 * Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 *
	 * Use the pipeline object embedded in the first DMA object that starts
	 * streaming.
	 */
	pipe = dma->video.entity.pipe
	     ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

	ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
	if (ret < 0)
		goto error;

	/* Verify that the configured format matches the output of the
	 * connected subdev.
	 */
	ret = xvip_dma_verify_format(dma);
	if (ret < 0)
		goto error_stop;

	ret = xvip_pipeline_prepare(pipe, dma);
	if (ret < 0)
		goto error_stop;

	/* Start the DMA engine. This must be done before starting the blocks
	 * in the pipeline to avoid DMA synchronization issues.
	 */
	dma_async_issue_pending(dma->dma);

	/* Start the pipeline. */
	xvip_pipeline_set_stream(pipe, true);

	return 0;

error_stop:
	media_pipeline_stop(&dma->video.entity);

error:
	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);

	return ret;
}

static void xvip_dma_stop_streaming(struct vb2_queue *vq)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
	struct xvip_dma_buffer *buf, *nbuf;

	/* Stop the pipeline. */
	xvip_pipeline_set_stream(pipe, false);

	/* Stop and reset the DMA engine. */
	dmaengine_terminate_all(dma->dma);

	/* Cleanup the pipeline and mark it as being stopped. */
	xvip_pipeline_cleanup(pipe);
	media_pipeline_stop(&dma->video.entity);

	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);
}

static const struct vb2_ops xvip_dma_queue_qops = {
	.queue_setup = xvip_dma_queue_setup,
	.buf_prepare = xvip_dma_buffer_prepare,
	.buf_queue = xvip_dma_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = xvip_dma_start_streaming,
	.stop_streaming = xvip_dma_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	cap->device_caps = V4L2_CAP_STREAMING;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	else
		cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT;

	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS
			  | dma->xdev->v4l2_caps;

	strscpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
	strscpy(cap->card, dma->video.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%pOFn:%u",
		 dma->xdev->dev->of_node, dma->port);

	return 0;
}

/* FIXME: without this callback function, some applications are not configured
 * with correct formats, and it results in frames in wrong format. Whether this
 * callback needs to be required is not clearly defined, so it should be
 * clarified through the mailing list.
 */
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	/* Only the currently configured format is enumerated. */
	if (f->index > 0)
		return -EINVAL;

	f->pixelformat = dma->format.pixelformat;
	strscpy(f->description, dma->fmtinfo->description,
		sizeof(f->description));

	return 0;
}

static int
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	format->fmt.pix = dma->format;

	return 0;
}

/*
 * Adjust the requested pixel format to the hardware constraints: supported
 * fourcc, width/height limits and byte alignment. When @fmtinfo is non-NULL
 * it receives the selected format information.
 */
static void
__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
		      const struct xvip_video_format **fmtinfo)
{
	const struct xvip_video_format *info;
	unsigned int min_width;
	unsigned int max_width;
	unsigned int min_bpl;
	unsigned int max_bpl;
	unsigned int width;
	unsigned int align;
	unsigned int bpl;

	/* Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = xvip_get_format_by_fourcc(pix->pixelformat);
	if (IS_ERR(info))
		info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);

	pix->pixelformat = info->fourcc;
	pix->field = V4L2_FIELD_NONE;

	/* The transfer alignment requirements are expressed in bytes. Compute
	 * the minimum and maximum values, clamp the requested width and convert
	 * it back to pixels.
	 */
	align = lcm(dma->align, info->bpp);
	min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
	max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
	width = rounddown(pix->width * info->bpp, align);

	pix->width = clamp(width, min_width, max_width) / info->bpp;
	pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
			    XVIP_DMA_MAX_HEIGHT);

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable line
	 * sizes. Override the requested value with the minimum in that case.
	 */
	min_bpl = pix->width * info->bpp;
	max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
	bpl = rounddown(pix->bytesperline, dma->align);

	pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
	pix->sizeimage = pix->bytesperline * pix->height;

	if (fmtinfo)
		*fmtinfo = info;
}

static int
xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	__xvip_dma_try_format(dma, &format->fmt.pix, NULL);
	return 0;
}

static int
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
	const struct xvip_video_format *info;

	__xvip_dma_try_format(dma, &format->fmt.pix, &info);

	/* The format can't change while buffers are allocated. */
	if (vb2_is_busy(&dma->queue))
		return -EBUSY;

	dma->format = format->fmt.pix;
	dma->fmtinfo = info;

	return 0;
}

static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
	.vidioc_querycap		= xvip_dma_querycap,
	.vidioc_enum_fmt_vid_cap	= xvip_dma_enum_format,
	.vidioc_g_fmt_vid_cap		= xvip_dma_get_format,
	.vidioc_g_fmt_vid_out		= xvip_dma_get_format,
	.vidioc_s_fmt_vid_cap		= xvip_dma_set_format,
	.vidioc_s_fmt_vid_out		= xvip_dma_set_format,
	.vidioc_try_fmt_vid_cap		= xvip_dma_try_format,
	.vidioc_try_fmt_vid_out		= xvip_dma_try_format,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static const struct v4l2_file_operations xvip_dma_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Xilinx Video DMA Core
 */

/*
 * Initialize one DMA engine video node: default format, media pad, video
 * device, vb2 queue and dmaengine channel. On any failure the partially
 * initialized state is released through xvip_dma_cleanup().
 */
int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
		  enum v4l2_buf_type type, unsigned int port)
{
	char name[16];
	int ret;

	dma->xdev = xdev;
	dma->port = port;
	mutex_init(&dma->lock);
	mutex_init(&dma->pipe.lock);
	INIT_LIST_HEAD(&dma->queued_bufs);
	spin_lock_init(&dma->queued_lock);

	dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
	dma->format.pixelformat = dma->fmtinfo->fourcc;
	dma->format.colorspace = V4L2_COLORSPACE_SRGB;
	dma->format.field = V4L2_FIELD_NONE;
	dma->format.width = XVIP_DMA_DEF_WIDTH;
	dma->format.height = XVIP_DMA_DEF_HEIGHT;
	dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
	dma->format.sizeimage = dma->format.bytesperline * dma->format.height;

	/* Initialize the media entity... */
	dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
		       ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
	if (ret < 0)
		goto error;

	/* ... and the video node... */
	dma->video.fops = &xvip_dma_fops;
	dma->video.v4l2_dev = &xdev->v4l2_dev;
	dma->video.queue = &dma->queue;
	snprintf(dma->video.name, sizeof(dma->video.name), "%pOFn %s %u",
		 xdev->dev->of_node,
		 type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
		 port);
	dma->video.vfl_type = VFL_TYPE_GRABBER;
	dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			   ? VFL_DIR_RX : VFL_DIR_TX;
	dma->video.release = video_device_release_empty;
	dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
	dma->video.lock = &dma->lock;

	video_set_drvdata(&dma->video, dma);

	/* ... and the buffers queue... */
	/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
	 * V4L2 APIs would be inefficient. Testing on the command line with a
	 * 'cat /dev/video?' thus won't be possible, but given that the driver
	 * anyway requires a test tool to setup the pipeline before any video
	 * stream can be started, requiring a specific V4L2 test tool as well
	 * instead of 'cat' isn't really a drawback.
	 */
	dma->queue.type = type;
	dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dma->queue.lock = &dma->lock;
	dma->queue.drv_priv = dma;
	dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
	dma->queue.ops = &xvip_dma_queue_qops;
	dma->queue.mem_ops = &vb2_dma_contig_memops;
	dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
				   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
	dma->queue.dev = dma->xdev->dev;
	ret = vb2_queue_init(&dma->queue);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
		goto error;
	}

	/* ... and the DMA channel. */
	snprintf(name, sizeof(name), "port%u", port);
	dma->dma = dma_request_slave_channel(dma->xdev->dev, name);
	if (dma->dma == NULL) {
		dev_err(dma->xdev->dev, "no VDMA channel found\n");
		ret = -ENODEV;
		goto error;
	}

	dma->align = 1 << dma->dma->device->copy_align;

	ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to register video device\n");
		goto error;
	}

	return 0;

error:
	xvip_dma_cleanup(dma);
	return ret;
}

/*
 * Release the resources acquired by xvip_dma_init(). Safe to call on a
 * partially initialized DMA object (used on the init error path).
 */
void xvip_dma_cleanup(struct xvip_dma *dma)
{
	if (video_is_registered(&dma->video))
		video_unregister_device(&dma->video);

	if (dma->dma)
		dma_release_channel(dma->dma);

	media_entity_cleanup(&dma->video.entity);

	mutex_destroy(&dma->lock);
	mutex_destroy(&dma->pipe.lock);
}