// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation

#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>

#include "ipu3.h"
#include "ipu3-dmamap.h"

/******************** v4l2_subdev_ops ********************/

/* Values of the V4L2_CID_INTEL_IPU3_MODE menu control (see ctrl config) */
#define IPU3_RUNNING_MODE_VIDEO 0
#define IPU3_RUNNING_MODE_STILL 1

/*
 * Subdev open: seed the file handle's "try" state. Each pad's try format
 * is sized to the input node's currently active format, and the input
 * pad's try crop/compose rectangles default to the full frame.
 */
static int imgu_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
							struct imgu_v4l2_subdev,
							subdev);
	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[imgu_sd->pipe];
	struct v4l2_rect try_crop = {
		.top = 0,
		.left = 0,
	};
	unsigned int i;

	/* Default crop/compose rectangle covers the whole input frame */
	try_crop.width =
		imgu_pipe->nodes[IMGU_NODE_IN].vdev_fmt.fmt.pix_mp.width;
	try_crop.height =
		imgu_pipe->nodes[IMGU_NODE_IN].vdev_fmt.fmt.pix_mp.height;

	/* Initialize try_fmt */
	for (i = 0; i < IMGU_NODE_NUM; i++) {
		struct v4l2_mbus_framefmt *try_fmt =
			v4l2_subdev_get_try_format(sd, fh->state, i);

		try_fmt->width = try_crop.width;
		try_fmt->height = try_crop.height;
		try_fmt->code = imgu_pipe->nodes[i].pad_fmt.code;
		try_fmt->field = V4L2_FIELD_NONE;
	}

	*v4l2_subdev_get_try_crop(sd, fh->state, IMGU_NODE_IN) = try_crop;
	*v4l2_subdev_get_try_compose(sd, fh->state, IMGU_NODE_IN) = try_crop;

	return 0;
}

/*
 * Subdev s_stream: on enable, collect the per-node formats and the
 * effective/BDS/GDC rectangles and push the whole configuration to CSS.
 * On disable only the control grab is released and the subdev is marked
 * inactive; the CSS side is torn down elsewhere.
 */
static int imgu_subdev_s_stream(struct v4l2_subdev *sd, int enable)
{
	int i;
	unsigned int node;
	int r = 0;
	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
							struct imgu_v4l2_subdev,
							subdev);
	unsigned int pipe = imgu_sd->pipe;
	struct device *dev = &imgu->pci_dev->dev;
	struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
	struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
	struct imgu_css_pipe *css_pipe = &imgu->css.pipes[pipe];
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	dev_dbg(dev, "%s %d for pipe %u", __func__, enable, pipe);
	/* grab ctrl after streamon and return after off */
	v4l2_ctrl_grab(imgu_sd->ctrl, enable);

	if (!enable) {
		imgu_sd->active = false;
		return 0;
	}

	/* Snapshot which queues are enabled from the node link state */
	for (i = 0; i < IMGU_NODE_NUM; i++)
		imgu_pipe->queue_enabled[i] = imgu_pipe->nodes[i].enabled;

	/* This is handled specially */
	imgu_pipe->queue_enabled[IPU3_CSS_QUEUE_PARAMS] = false;

	/* Initialize CSS formats */
	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
		node = imgu_map_node(imgu, i);
		/* No need to reconfig meta nodes */
		if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
			continue;
		fmts[i] = imgu_pipe->queue_enabled[node] ?
			&imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp : NULL;
	}

	/* Enable VF output only when VF queue requested by user */
	css_pipe->vf_output_en = false;
	if (imgu_pipe->nodes[IMGU_NODE_VF].enabled)
		css_pipe->vf_output_en = true;

	/* Pipe mode follows the user-selected running mode control */
	if (atomic_read(&imgu_sd->running_mode) == IPU3_RUNNING_MODE_VIDEO)
		css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
	else
		css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;

	dev_dbg(dev, "IPU3 pipe %u pipe_id %u", pipe, css_pipe->pipe_id);

	rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff;
	rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
	rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc;

	r = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe);
	if (r) {
		dev_err(dev, "failed to set initial formats pipe %u with (%d)",
			pipe, r);
		return r;
	}

	imgu_sd->active = true;

	return 0;
}

/*
 * Subdev get_fmt: report the active pad format, or the file handle's
 * try format for V4L2_SUBDEV_FORMAT_TRY.
 */
static int imgu_subdev_get_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *fmt)
{
	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *mf;
	struct imgu_media_pipe *imgu_pipe;
	u32 pad = fmt->pad;
	struct
imgu_v4l2_subdev *imgu_sd = container_of(sd,
							struct imgu_v4l2_subdev,
							subdev);
	unsigned int pipe = imgu_sd->pipe;

	imgu_pipe = &imgu->imgu_pipe[pipe];
	if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
		fmt->format = imgu_pipe->nodes[pad].pad_fmt;
	} else {
		mf = v4l2_subdev_get_try_format(sd, sd_state, pad);
		fmt->format = *mf;
	}

	return 0;
}

/*
 * Subdev set_fmt: store the requested pad format (try or active) after
 * clamping width/height to the hardware limits for the pad direction.
 * The mbus code is not negotiable and is forced to the stored one.
 */
static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *fmt)
{
	struct imgu_media_pipe *imgu_pipe;
	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
							struct imgu_v4l2_subdev,
							subdev);
	struct v4l2_mbus_framefmt *mf;
	u32 pad = fmt->pad;
	unsigned int pipe = imgu_sd->pipe;

	dev_dbg(&imgu->pci_dev->dev, "set subdev %u pad %u fmt to [%ux%u]",
		pipe, pad, fmt->format.width, fmt->format.height);

	imgu_pipe = &imgu->imgu_pipe[pipe];
	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
		mf = v4l2_subdev_get_try_format(sd, sd_state, pad);
	else
		mf = &imgu_pipe->nodes[pad].pad_fmt;

	fmt->format.code = mf->code;
	/* Clamp the w and h based on the hardware capabilities */
	if (imgu_sd->subdev_pads[pad].flags & MEDIA_PAD_FL_SOURCE) {
		fmt->format.width = clamp(fmt->format.width,
					  IPU3_OUTPUT_MIN_WIDTH,
					  IPU3_OUTPUT_MAX_WIDTH);
		fmt->format.height = clamp(fmt->format.height,
					   IPU3_OUTPUT_MIN_HEIGHT,
					   IPU3_OUTPUT_MAX_HEIGHT);
	} else {
		fmt->format.width = clamp(fmt->format.width,
					  IPU3_INPUT_MIN_WIDTH,
					  IPU3_INPUT_MAX_WIDTH);
		fmt->format.height = clamp(fmt->format.height,
					   IPU3_INPUT_MIN_HEIGHT,
					   IPU3_INPUT_MAX_HEIGHT);
	}

	*mf = fmt->format;

	return 0;
}

/*
 * Subdev get_selection: crop maps to the effective rectangle, compose
 * to the BDS rectangle. Only the input pad supports selections.
 */
static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
				     struct v4l2_subdev_state *sd_state,
				     struct v4l2_subdev_selection *sel)
{
	struct v4l2_rect *try_sel, *r;
	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
							struct imgu_v4l2_subdev,
							subdev);

	if (sel->pad != IMGU_NODE_IN)
		return -EINVAL;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
		r = &imgu_sd->rect.eff;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
		r = &imgu_sd->rect.bds;
		break;
	default:
		return -EINVAL;
	}

	if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
		sel->r = *try_sel;
	else
		sel->r = *r;

	return 0;
}

/*
 * Subdev set_selection: store the requested rectangle; the active
 * effective/BDS rectangles are consumed later by imgu_css_fmt_set().
 */
static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
				     struct v4l2_subdev_state *sd_state,
				     struct v4l2_subdev_selection *sel)
{
	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
							struct imgu_v4l2_subdev,
							subdev);
	struct v4l2_rect *rect, *try_sel;

	dev_dbg(&imgu->pci_dev->dev,
		"set subdev %u sel which %u target 0x%4x rect [%ux%u]",
		imgu_sd->pipe, sel->which, sel->target,
		sel->r.width, sel->r.height);

	if (sel->pad != IMGU_NODE_IN)
		return -EINVAL;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
		rect = &imgu_sd->rect.eff;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
		rect = &imgu_sd->rect.bds;
		break;
	default:
		return -EINVAL;
	}

	if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
		*try_sel = sel->r;
	else
		*rect = sel->r;

	return 0;
}

/******************** media_entity_operations ********************/

/*
 * Link setup: record per-node enable state; enabling/disabling the
 * input node link additionally marks the whole pipe (dis)abled in CSS.
 */
static int imgu_link_setup(struct media_entity *entity,
			   const struct media_pad *local,
			   const struct media_pad *remote, u32 flags)
{
	struct imgu_media_pipe *imgu_pipe;
	struct v4l2_subdev *sd = container_of(entity, struct
v4l2_subdev,
					      entity);
	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
							struct imgu_v4l2_subdev,
							subdev);
	unsigned int pipe = imgu_sd->pipe;
	u32 pad = local->index;

	WARN_ON(pad >= IMGU_NODE_NUM);

	dev_dbg(&imgu->pci_dev->dev, "pipe %u pad %u is %s", pipe, pad,
		flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");

	imgu_pipe = &imgu->imgu_pipe[pipe];
	imgu_pipe->nodes[pad].enabled = flags & MEDIA_LNK_FL_ENABLED;

	/* enable input node to enable the pipe */
	if (pad != IMGU_NODE_IN)
		return 0;

	if (flags & MEDIA_LNK_FL_ENABLED)
		__set_bit(pipe, imgu->css.enabled_pipes);
	else
		__clear_bit(pipe, imgu->css.enabled_pipes);

	dev_dbg(&imgu->pci_dev->dev, "pipe %u is %s", pipe,
		flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");

	return 0;
}

/******************** vb2_ops ********************/

/*
 * Buffer init: map the buffer's scatterlist into IMGU IOVA space.
 * Parameter buffers are not DMA-mapped here (handled separately).
 */
static int imgu_vb2_buf_init(struct vb2_buffer *vb)
{
	struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0);
	struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
	struct imgu_buffer *buf = container_of(vb,
		struct imgu_buffer, vid_buf.vbb.vb2_buf);
	struct imgu_video_device *node =
		container_of(vb->vb2_queue, struct imgu_video_device, vbq);
	unsigned int queue = imgu_node_to_queue(node->id);

	if (queue == IPU3_CSS_QUEUE_PARAMS)
		return 0;

	return imgu_dmamap_map_sg(imgu, sg->sgl, sg->nents, &buf->map);
}

/* Called when each buffer is freed */
static void imgu_vb2_buf_cleanup(struct vb2_buffer *vb)
{
	struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
	struct imgu_buffer *buf = container_of(vb,
		struct imgu_buffer, vid_buf.vbb.vb2_buf);
	struct imgu_video_device *node =
		container_of(vb->vb2_queue, struct imgu_video_device, vbq);
	unsigned int queue = imgu_node_to_queue(node->id);

	/* Params buffers were never mapped in buf_init; nothing to undo */
	if (queue == IPU3_CSS_QUEUE_PARAMS)
		return;

	imgu_dmamap_unmap(imgu, &buf->map);
}

/* Transfer buffer ownership to me */
static void imgu_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
	struct imgu_video_device *node =
		container_of(vb->vb2_queue, struct imgu_video_device, vbq);
	unsigned int queue = imgu_node_to_queue(node->id);
	struct imgu_buffer *buf = container_of(vb, struct imgu_buffer,
					       vid_buf.vbb.vb2_buf);
	unsigned long need_bytes;
	unsigned long payload = vb2_get_plane_payload(vb, 0);

	if (vb->vb2_queue->type == V4L2_BUF_TYPE_META_CAPTURE ||
	    vb->vb2_queue->type == V4L2_BUF_TYPE_META_OUTPUT)
		need_bytes = node->vdev_fmt.fmt.meta.buffersize;
	else
		need_bytes = node->vdev_fmt.fmt.pix_mp.plane_fmt[0].sizeimage;

	/* Reject short parameter buffers before they reach the firmware */
	if (queue == IPU3_CSS_QUEUE_PARAMS && payload && payload < need_bytes) {
		dev_err(&imgu->pci_dev->dev, "invalid data size for params.");
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
		return;
	}

	mutex_lock(&imgu->lock);
	if (queue != IPU3_CSS_QUEUE_PARAMS)
		imgu_css_buf_init(&buf->css_buf, queue, buf->map.daddr);

	list_add_tail(&buf->vid_buf.list, &node->buffers);
	mutex_unlock(&imgu->lock);

	vb2_set_plane_payload(vb, 0, need_bytes);

	/* Feed queued buffers to CSS only while streaming is active */
	mutex_lock(&imgu->streaming_lock);
	if (imgu->streaming)
		imgu_queue_buffers(imgu, false, node->pipe);
	mutex_unlock(&imgu->streaming_lock);

	dev_dbg(&imgu->pci_dev->dev, "%s for pipe %u node %u", __func__,
		node->pipe, node->id);
}

/*
 * Queue setup: report one plane sized from the node's current format
 * (meta buffersize or sizeimage), honoring larger user-provided sizes.
 */
static int imgu_vb2_queue_setup(struct vb2_queue *vq,
				unsigned int *num_buffers,
				unsigned int *num_planes,
				unsigned int sizes[],
				struct device *alloc_devs[])
{
	struct imgu_device *imgu = vb2_get_drv_priv(vq);
	struct imgu_video_device *node =
		container_of(vq, struct imgu_video_device, vbq);
	const struct v4l2_format *fmt = &node->vdev_fmt;
	unsigned int
size; 389 390 *num_buffers = clamp_val(*num_buffers, 1, VB2_MAX_FRAME); 391 alloc_devs[0] = &imgu->pci_dev->dev; 392 393 if (vq->type == V4L2_BUF_TYPE_META_CAPTURE || 394 vq->type == V4L2_BUF_TYPE_META_OUTPUT) 395 size = fmt->fmt.meta.buffersize; 396 else 397 size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage; 398 399 if (*num_planes) { 400 if (sizes[0] < size) 401 return -EINVAL; 402 size = sizes[0]; 403 } 404 405 *num_planes = 1; 406 sizes[0] = size; 407 408 /* Initialize buffer queue */ 409 INIT_LIST_HEAD(&node->buffers); 410 411 return 0; 412 } 413 414 /* Check if all enabled video nodes are streaming, exception ignored */ 415 static bool imgu_all_nodes_streaming(struct imgu_device *imgu, 416 struct imgu_video_device *except) 417 { 418 unsigned int i, pipe, p; 419 struct imgu_video_device *node; 420 struct device *dev = &imgu->pci_dev->dev; 421 422 pipe = except->pipe; 423 if (!test_bit(pipe, imgu->css.enabled_pipes)) { 424 dev_warn(&imgu->pci_dev->dev, 425 "pipe %u link is not ready yet", pipe); 426 return false; 427 } 428 429 for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) { 430 for (i = 0; i < IMGU_NODE_NUM; i++) { 431 node = &imgu->imgu_pipe[p].nodes[i]; 432 dev_dbg(dev, "%s pipe %u queue %u name %s enabled = %u", 433 __func__, p, i, node->name, node->enabled); 434 if (node == except) 435 continue; 436 if (node->enabled && !vb2_start_streaming_called(&node->vbq)) 437 return false; 438 } 439 } 440 441 return true; 442 } 443 444 static void imgu_return_all_buffers(struct imgu_device *imgu, 445 struct imgu_video_device *node, 446 enum vb2_buffer_state state) 447 { 448 struct imgu_vb2_buffer *b, *b0; 449 450 /* Return all buffers */ 451 mutex_lock(&imgu->lock); 452 list_for_each_entry_safe(b, b0, &node->buffers, list) { 453 list_del(&b->list); 454 vb2_buffer_done(&b->vbb.vb2_buf, state); 455 } 456 mutex_unlock(&imgu->lock); 457 } 458 459 static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) 460 { 461 struct imgu_media_pipe 
*imgu_pipe; 462 struct imgu_device *imgu = vb2_get_drv_priv(vq); 463 struct device *dev = &imgu->pci_dev->dev; 464 struct imgu_video_device *node = 465 container_of(vq, struct imgu_video_device, vbq); 466 int r; 467 unsigned int pipe; 468 469 dev_dbg(dev, "%s node name %s pipe %u id %u", __func__, 470 node->name, node->pipe, node->id); 471 472 mutex_lock(&imgu->streaming_lock); 473 if (imgu->streaming) { 474 r = -EBUSY; 475 mutex_unlock(&imgu->streaming_lock); 476 goto fail_return_bufs; 477 } 478 mutex_unlock(&imgu->streaming_lock); 479 480 if (!node->enabled) { 481 dev_err(dev, "IMGU node is not enabled"); 482 r = -EINVAL; 483 goto fail_return_bufs; 484 } 485 486 pipe = node->pipe; 487 imgu_pipe = &imgu->imgu_pipe[pipe]; 488 r = media_pipeline_start(&node->vdev.entity, &imgu_pipe->pipeline); 489 if (r < 0) 490 goto fail_return_bufs; 491 492 if (!imgu_all_nodes_streaming(imgu, node)) 493 return 0; 494 495 for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) { 496 r = v4l2_subdev_call(&imgu->imgu_pipe[pipe].imgu_sd.subdev, 497 video, s_stream, 1); 498 if (r < 0) 499 goto fail_stop_pipeline; 500 } 501 502 /* Start streaming of the whole pipeline now */ 503 dev_dbg(dev, "IMGU streaming is ready to start"); 504 mutex_lock(&imgu->streaming_lock); 505 r = imgu_s_stream(imgu, true); 506 if (!r) 507 imgu->streaming = true; 508 mutex_unlock(&imgu->streaming_lock); 509 510 return 0; 511 512 fail_stop_pipeline: 513 media_pipeline_stop(&node->vdev.entity); 514 fail_return_bufs: 515 imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_QUEUED); 516 517 return r; 518 } 519 520 static void imgu_vb2_stop_streaming(struct vb2_queue *vq) 521 { 522 struct imgu_media_pipe *imgu_pipe; 523 struct imgu_device *imgu = vb2_get_drv_priv(vq); 524 struct device *dev = &imgu->pci_dev->dev; 525 struct imgu_video_device *node = 526 container_of(vq, struct imgu_video_device, vbq); 527 int r; 528 unsigned int pipe; 529 530 WARN_ON(!node->enabled); 531 532 pipe = node->pipe; 533 
dev_dbg(dev, "Try to stream off node [%u][%u]", pipe, node->id); 534 imgu_pipe = &imgu->imgu_pipe[pipe]; 535 r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev, video, s_stream, 0); 536 if (r) 537 dev_err(&imgu->pci_dev->dev, 538 "failed to stop subdev streaming\n"); 539 540 mutex_lock(&imgu->streaming_lock); 541 /* Was this the first node with streaming disabled? */ 542 if (imgu->streaming && imgu_all_nodes_streaming(imgu, node)) { 543 /* Yes, really stop streaming now */ 544 dev_dbg(dev, "IMGU streaming is ready to stop"); 545 r = imgu_s_stream(imgu, false); 546 if (!r) 547 imgu->streaming = false; 548 } 549 550 imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR); 551 mutex_unlock(&imgu->streaming_lock); 552 553 media_pipeline_stop(&node->vdev.entity); 554 } 555 556 /******************** v4l2_ioctl_ops ********************/ 557 558 #define VID_CAPTURE 0 559 #define VID_OUTPUT 1 560 #define DEF_VID_CAPTURE 0 561 #define DEF_VID_OUTPUT 1 562 563 struct imgu_fmt { 564 u32 fourcc; 565 u16 type; /* VID_CAPTURE or VID_OUTPUT not both */ 566 }; 567 568 /* format descriptions for capture and preview */ 569 static const struct imgu_fmt formats[] = { 570 { V4L2_PIX_FMT_NV12, VID_CAPTURE }, 571 { V4L2_PIX_FMT_IPU3_SGRBG10, VID_OUTPUT }, 572 { V4L2_PIX_FMT_IPU3_SBGGR10, VID_OUTPUT }, 573 { V4L2_PIX_FMT_IPU3_SGBRG10, VID_OUTPUT }, 574 { V4L2_PIX_FMT_IPU3_SRGGB10, VID_OUTPUT }, 575 }; 576 577 /* Find the first matched format, return default if not found */ 578 static const struct imgu_fmt *find_format(struct v4l2_format *f, u32 type) 579 { 580 unsigned int i; 581 582 for (i = 0; i < ARRAY_SIZE(formats); i++) { 583 if (formats[i].fourcc == f->fmt.pix_mp.pixelformat && 584 formats[i].type == type) 585 return &formats[i]; 586 } 587 588 return type == VID_CAPTURE ? 
&formats[DEF_VID_CAPTURE] : 589 &formats[DEF_VID_OUTPUT]; 590 } 591 592 static int imgu_vidioc_querycap(struct file *file, void *fh, 593 struct v4l2_capability *cap) 594 { 595 struct imgu_device *imgu = video_drvdata(file); 596 597 strscpy(cap->driver, IMGU_NAME, sizeof(cap->driver)); 598 strscpy(cap->card, IMGU_NAME, sizeof(cap->card)); 599 snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", 600 pci_name(imgu->pci_dev)); 601 602 return 0; 603 } 604 605 static int enum_fmts(struct v4l2_fmtdesc *f, u32 type) 606 { 607 unsigned int i, j; 608 609 if (f->mbus_code != 0 && f->mbus_code != MEDIA_BUS_FMT_FIXED) 610 return -EINVAL; 611 612 for (i = j = 0; i < ARRAY_SIZE(formats); ++i) { 613 if (formats[i].type == type) { 614 if (j == f->index) 615 break; 616 ++j; 617 } 618 } 619 620 if (i < ARRAY_SIZE(formats)) { 621 f->pixelformat = formats[i].fourcc; 622 return 0; 623 } 624 625 return -EINVAL; 626 } 627 628 static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, 629 struct v4l2_fmtdesc *f) 630 { 631 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) 632 return -EINVAL; 633 634 return enum_fmts(f, VID_CAPTURE); 635 } 636 637 static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, 638 struct v4l2_fmtdesc *f) 639 { 640 if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) 641 return -EINVAL; 642 643 return enum_fmts(f, VID_OUTPUT); 644 } 645 646 /* Propagate forward always the format from the CIO2 subdev */ 647 static int imgu_vidioc_g_fmt(struct file *file, void *fh, 648 struct v4l2_format *f) 649 { 650 struct imgu_video_device *node = file_to_intel_imgu_node(file); 651 652 f->fmt = node->vdev_fmt.fmt; 653 654 return 0; 655 } 656 657 /* 658 * Set input/output format. Unless it is just a try, this also resets 659 * selections (ie. effective and BDS resolutions) to defaults. 
660 */ 661 static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node, 662 struct v4l2_format *f, bool try) 663 { 664 struct device *dev = &imgu->pci_dev->dev; 665 struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL }; 666 struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL }; 667 struct v4l2_mbus_framefmt pad_fmt; 668 unsigned int i, css_q; 669 int ret; 670 struct imgu_css_pipe *css_pipe = &imgu->css.pipes[pipe]; 671 struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe]; 672 struct imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd; 673 674 dev_dbg(dev, "set fmt node [%u][%u](try = %u)", pipe, node, try); 675 676 for (i = 0; i < IMGU_NODE_NUM; i++) 677 dev_dbg(dev, "IMGU pipe %u node %u enabled = %u", 678 pipe, i, imgu_pipe->nodes[i].enabled); 679 680 if (imgu_pipe->nodes[IMGU_NODE_VF].enabled) 681 css_pipe->vf_output_en = true; 682 683 if (atomic_read(&imgu_sd->running_mode) == IPU3_RUNNING_MODE_VIDEO) 684 css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO; 685 else 686 css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE; 687 688 dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id); 689 690 css_q = imgu_node_to_queue(node); 691 for (i = 0; i < IPU3_CSS_QUEUES; i++) { 692 unsigned int inode = imgu_map_node(imgu, i); 693 694 /* Skip the meta node */ 695 if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS) 696 continue; 697 698 /* CSS expects some format on OUT queue */ 699 if (i != IPU3_CSS_QUEUE_OUT && 700 !imgu_pipe->nodes[inode].enabled && !try) { 701 fmts[i] = NULL; 702 continue; 703 } 704 705 if (i == css_q) { 706 fmts[i] = &f->fmt.pix_mp; 707 continue; 708 } 709 710 if (try) { 711 fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp, 712 sizeof(struct v4l2_pix_format_mplane), 713 GFP_KERNEL); 714 if (!fmts[i]) { 715 ret = -ENOMEM; 716 goto out; 717 } 718 } else { 719 fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp; 720 } 721 722 } 723 724 if (!try) { 725 /* eff and bds res got by imgu_s_sel */ 726 struct 
imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd; 727 728 rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff; 729 rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds; 730 rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc; 731 732 /* suppose that pad fmt was set by subdev s_fmt before */ 733 pad_fmt = imgu_pipe->nodes[IMGU_NODE_IN].pad_fmt; 734 rects[IPU3_CSS_RECT_GDC]->width = pad_fmt.width; 735 rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height; 736 } 737 738 if (!fmts[css_q]) { 739 ret = -EINVAL; 740 goto out; 741 } 742 743 if (try) 744 ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe); 745 else 746 ret = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe); 747 748 /* ret is the binary number in the firmware blob */ 749 if (ret < 0) 750 goto out; 751 752 /* 753 * imgu doesn't set the node to the value given by user 754 * before we return success from this function, so set it here. 755 */ 756 if (!try) 757 imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp; 758 759 out: 760 if (try) { 761 for (i = 0; i < IPU3_CSS_QUEUES; i++) 762 if (i != css_q) 763 kfree(fmts[i]); 764 } 765 766 return ret; 767 } 768 769 static int imgu_try_fmt(struct file *file, void *fh, struct v4l2_format *f) 770 { 771 struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp; 772 const struct imgu_fmt *fmt; 773 774 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) 775 fmt = find_format(f, VID_CAPTURE); 776 else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) 777 fmt = find_format(f, VID_OUTPUT); 778 else 779 return -EINVAL; 780 781 pixm->pixelformat = fmt->fourcc; 782 783 return 0; 784 } 785 786 static int imgu_vidioc_try_fmt(struct file *file, void *fh, 787 struct v4l2_format *f) 788 { 789 struct imgu_device *imgu = video_drvdata(file); 790 struct device *dev = &imgu->pci_dev->dev; 791 struct imgu_video_device *node = file_to_intel_imgu_node(file); 792 struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp; 793 int r; 794 795 dev_dbg(dev, "%s [%ux%u] for node %u\n", __func__, 796 pix_mp->width, 
pix_mp->height, node->id);

	r = imgu_try_fmt(file, fh, f);
	if (r)
		return r;

	return imgu_fmt(imgu, node->pipe, node->id, f, true);
}

static int imgu_vidioc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct imgu_device *imgu = video_drvdata(file);
	struct device *dev = &imgu->pci_dev->dev;
	struct imgu_video_device *node = file_to_intel_imgu_node(file);
	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
	int r;

	dev_dbg(dev, "%s [%ux%u] for node %u\n", __func__,
		pix_mp->width, pix_mp->height, node->id);

	r = imgu_try_fmt(file, fh, f);
	if (r)
		return r;

	return imgu_fmt(imgu, node->pipe, node->id, f, false);
}

struct imgu_meta_fmt {
	__u32	fourcc;
	char	*name;
};

/* From drivers/media/v4l2-core/v4l2-ioctl.c */
static const struct imgu_meta_fmt meta_fmts[] = {
	{ V4L2_META_FMT_IPU3_PARAMS, "IPU3 processing parameters" },
	{ V4L2_META_FMT_IPU3_STAT_3A, "IPU3 3A statistics" },
};

/* Enumerate the single meta format a meta node supports */
static int imgu_meta_enum_format(struct file *file, void *fh,
				 struct v4l2_fmtdesc *fmt)
{
	struct imgu_video_device *node = file_to_intel_imgu_node(file);
	/* index 0 = params (OUTPUT), index 1 = 3A stats (CAPTURE) */
	unsigned int i = fmt->type == V4L2_BUF_TYPE_META_OUTPUT ? 0 : 1;

	/* Each node is dedicated to only one meta format */
	if (fmt->index > 0 || fmt->type != node->vbq.type)
		return -EINVAL;

	if (fmt->mbus_code != 0 && fmt->mbus_code != MEDIA_BUS_FMT_FIXED)
		return -EINVAL;

	strscpy(fmt->description, meta_fmts[i].name, sizeof(fmt->description));
	fmt->pixelformat = meta_fmts[i].fourcc;

	return 0;
}

/* Meta formats are fixed: g/s/try all just report the node's format */
static int imgu_vidioc_g_meta_fmt(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	struct imgu_video_device *node = file_to_intel_imgu_node(file);

	if (f->type != node->vbq.type)
		return -EINVAL;

	f->fmt = node->vdev_fmt.fmt;

	return 0;
}

/******************** function pointers ********************/

static const struct v4l2_subdev_internal_ops imgu_subdev_internal_ops = {
	.open = imgu_subdev_open,
};

static const struct v4l2_subdev_core_ops imgu_subdev_core_ops = {
	.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};

static const struct v4l2_subdev_video_ops imgu_subdev_video_ops = {
	.s_stream = imgu_subdev_s_stream,
};

static const struct v4l2_subdev_pad_ops imgu_subdev_pad_ops = {
	.link_validate = v4l2_subdev_link_validate_default,
	.get_fmt = imgu_subdev_get_fmt,
	.set_fmt = imgu_subdev_set_fmt,
	.get_selection = imgu_subdev_get_selection,
	.set_selection = imgu_subdev_set_selection,
};

static const struct v4l2_subdev_ops imgu_subdev_ops = {
	.core = &imgu_subdev_core_ops,
	.video = &imgu_subdev_video_ops,
	.pad = &imgu_subdev_pad_ops,
};

static const struct media_entity_operations imgu_media_ops = {
	.link_setup = imgu_link_setup,
	.link_validate = v4l2_subdev_link_validate,
};

/****************** vb2_ops of the Q ********************/

static const struct vb2_ops imgu_vb2_ops = {
	.buf_init = imgu_vb2_buf_init,
	.buf_cleanup =
imgu_vb2_buf_cleanup,
	.buf_queue = imgu_vb2_buf_queue,
	.queue_setup = imgu_vb2_queue_setup,
	.start_streaming = imgu_vb2_start_streaming,
	.stop_streaming = imgu_vb2_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

/****************** v4l2_file_operations *****************/

static const struct v4l2_file_operations imgu_v4l2_fops = {
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

/******************** v4l2_ioctl_ops ********************/

static const struct v4l2_ioctl_ops imgu_v4l2_ioctl_ops = {
	.vidioc_querycap = imgu_vidioc_querycap,

	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap_mplane = imgu_vidioc_g_fmt,
	.vidioc_s_fmt_vid_cap_mplane = imgu_vidioc_s_fmt,
	.vidioc_try_fmt_vid_cap_mplane = imgu_vidioc_try_fmt,

	.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_out_mplane = imgu_vidioc_g_fmt,
	.vidioc_s_fmt_vid_out_mplane = imgu_vidioc_s_fmt,
	.vidioc_try_fmt_vid_out_mplane = imgu_vidioc_try_fmt,

	/* buffer queue management */
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_expbuf = vb2_ioctl_expbuf,
};

static const struct v4l2_ioctl_ops imgu_v4l2_meta_ioctl_ops = {
	.vidioc_querycap = imgu_vidioc_querycap,

	/* meta capture */
	.vidioc_enum_fmt_meta_cap = imgu_meta_enum_format,
	.vidioc_g_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
	/* meta formats are fixed, so s/try simply report the current one */
	.vidioc_s_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
	.vidioc_try_fmt_meta_cap = imgu_vidioc_g_meta_fmt,

	/* meta output */
	.vidioc_enum_fmt_meta_out = imgu_meta_enum_format,
	.vidioc_g_fmt_meta_out = imgu_vidioc_g_meta_fmt,
	.vidioc_s_fmt_meta_out = imgu_vidioc_g_meta_fmt,
	.vidioc_try_fmt_meta_out = imgu_vidioc_g_meta_fmt,

	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_expbuf = vb2_ioctl_expbuf,
};

/* s_ctrl: store the requested running mode (video/still) for the pipe */
static int imgu_sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct imgu_v4l2_subdev *imgu_sd =
		container_of(ctrl->handler, struct imgu_v4l2_subdev, ctrl_handler);
	struct imgu_device *imgu = v4l2_get_subdevdata(&imgu_sd->subdev);
	struct device *dev = &imgu->pci_dev->dev;

	dev_dbg(dev, "set val %d to ctrl 0x%8x for subdev %u",
		ctrl->val, ctrl->id, imgu_sd->pipe);

	switch (ctrl->id) {
	case V4L2_CID_INTEL_IPU3_MODE:
		/* Consumed later by s_stream / imgu_fmt to pick the pipe id */
		atomic_set(&imgu_sd->running_mode, ctrl->val);
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct v4l2_ctrl_ops imgu_subdev_ctrl_ops = {
	.s_ctrl = imgu_sd_s_ctrl,
};

static const char * const imgu_ctrl_mode_strings[] = {
	"Video mode",
	"Still mode",
};

static const struct v4l2_ctrl_config imgu_subdev_ctrl_mode = {
	.ops = &imgu_subdev_ctrl_ops,
	.id = V4L2_CID_INTEL_IPU3_MODE,
	.name = "IPU3 Pipe Mode",
	.type = V4L2_CTRL_TYPE_MENU,
	.max = ARRAY_SIZE(imgu_ctrl_mode_strings) - 1,
	.def = IPU3_RUNNING_MODE_VIDEO,
	.qmenu = imgu_ctrl_mode_strings,
};

/******************** Framework registration ********************/

/* helper function to config node's video properties */
static void imgu_node_to_v4l2(u32 node, struct video_device *vdev,
			      struct v4l2_format *f)
{
	u32 cap;

	/* Should not happen */
	WARN_ON(node >= IMGU_NODE_NUM);

	switch (node) {
	case IMGU_NODE_IN:
		cap = V4L2_CAP_VIDEO_OUTPUT_MPLANE;
		f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		vdev->ioctl_ops = &imgu_v4l2_ioctl_ops;
		break;
	case IMGU_NODE_PARAMS:
		cap = V4L2_CAP_META_OUTPUT;
		f->type = V4L2_BUF_TYPE_META_OUTPUT;
		f->fmt.meta.dataformat = V4L2_META_FMT_IPU3_PARAMS;
		vdev->ioctl_ops = &imgu_v4l2_meta_ioctl_ops;
		imgu_css_meta_fmt_set(&f->fmt.meta);
		break;
	case IMGU_NODE_STAT_3A:
		cap = V4L2_CAP_META_CAPTURE;
		f->type = V4L2_BUF_TYPE_META_CAPTURE;
		f->fmt.meta.dataformat = V4L2_META_FMT_IPU3_STAT_3A;
		vdev->ioctl_ops = &imgu_v4l2_meta_ioctl_ops;
		imgu_css_meta_fmt_set(&f->fmt.meta);
		break;
	default:
		/* All remaining nodes are mplane video capture nodes */
		cap = V4L2_CAP_VIDEO_CAPTURE_MPLANE;
		f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		vdev->ioctl_ops = &imgu_v4l2_ioctl_ops;
	}

	vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC | cap;
}

/*
 * Register one per-pipe subdev: media entity pads, v4l2_subdev init,
 * the pipe-mode control, and registration with the v4l2 device.
 * On failure the ctrl handler and media entity are cleaned up.
 */
static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
				     struct imgu_v4l2_subdev *imgu_sd,
				     unsigned int pipe)
{
	int i, r;
	struct v4l2_ctrl_handler *hdl = &imgu_sd->ctrl_handler;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	/* Initialize subdev media entity */
	r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM,
				   imgu_sd->subdev_pads);
	if (r) {
		dev_err(&imgu->pci_dev->dev,
			"failed initialize subdev media entity (%d)\n", r);
		return r;
	}
	imgu_sd->subdev.entity.ops = &imgu_media_ops;
	/* Output nodes feed the subdev (sink pads); the rest are sources */
	for (i = 0; i < IMGU_NODE_NUM; i++) {
		imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
			MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
	}

	/* Initialize subdev */
	v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops);
	imgu_sd->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_STATISTICS;
	imgu_sd->subdev.internal_ops = &imgu_subdev_internal_ops;
	imgu_sd->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
				V4L2_SUBDEV_FL_HAS_EVENTS;
	snprintf(imgu_sd->subdev.name, sizeof(imgu_sd->subdev.name),
		 "%s %u", IMGU_NAME, pipe);
	v4l2_set_subdevdata(&imgu_sd->subdev, imgu);
	atomic_set(&imgu_sd->running_mode, IPU3_RUNNING_MODE_VIDEO);
	v4l2_ctrl_handler_init(hdl, 1);
	imgu_sd->subdev.ctrl_handler = hdl;
	imgu_sd->ctrl = v4l2_ctrl_new_custom(hdl, &imgu_subdev_ctrl_mode, NULL);
	if (hdl->error) {
		r = hdl->error;
		dev_err(&imgu->pci_dev->dev,
			"failed to create subdev v4l2 ctrl with err %d", r);
		goto fail_subdev;
	}
	r = v4l2_device_register_subdev(&imgu->v4l2_dev, &imgu_sd->subdev);
	if (r) {
		dev_err(&imgu->pci_dev->dev,
			"failed initialize subdev (%d)\n", r);
		goto fail_subdev;
	}

	imgu_sd->pipe = pipe;
	return 0;

fail_subdev:
	v4l2_ctrl_handler_free(imgu_sd->subdev.ctrl_handler);
	media_entity_cleanup(&imgu_sd->subdev.entity);

	return r;
}

/*
 * Set up one video node of a pipe: defaults, media entity, vb2 queue.
 * NOTE(review): definition continues past the end of this chunk.
 */
static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
				int node_num)
{
	int r;
	u32 flags;
	struct v4l2_mbus_framefmt def_bus_fmt = { 0 };
	struct v4l2_pix_format_mplane def_pix_fmt = { 0 };
	struct device *dev = &imgu->pci_dev->dev;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
	struct v4l2_subdev *sd = &imgu_pipe->imgu_sd.subdev;
	struct imgu_video_device *node = &imgu_pipe->nodes[node_num];
	struct video_device *vdev = &node->vdev;
	struct vb2_queue *vbq = &node->vbq;

	/* Initialize formats to default values */
	def_bus_fmt.width = 1920;
	def_bus_fmt.height = 1080;
	def_bus_fmt.code = MEDIA_BUS_FMT_FIXED;
	def_bus_fmt.field = V4L2_FIELD_NONE;
	def_bus_fmt.colorspace = V4L2_COLORSPACE_RAW;
	def_bus_fmt.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	def_bus_fmt.quantization = V4L2_QUANTIZATION_DEFAULT;
	def_bus_fmt.xfer_func = V4L2_XFER_FUNC_DEFAULT;

	def_pix_fmt.width = def_bus_fmt.width;
	def_pix_fmt.height = def_bus_fmt.height;
	def_pix_fmt.field = def_bus_fmt.field;
	def_pix_fmt.num_planes = 1;
	def_pix_fmt.plane_fmt[0].bytesperline =
		imgu_bytesperline(def_pix_fmt.width,
				  IMGU_ABI_FRAME_FORMAT_RAW_PACKED);
	def_pix_fmt.plane_fmt[0].sizeimage =
		def_pix_fmt.height * def_pix_fmt.plane_fmt[0].bytesperline;
	def_pix_fmt.flags = 0;
	def_pix_fmt.colorspace = def_bus_fmt.colorspace;
	def_pix_fmt.ycbcr_enc = def_bus_fmt.ycbcr_enc;
	def_pix_fmt.quantization = def_bus_fmt.quantization;
	def_pix_fmt.xfer_func = def_bus_fmt.xfer_func;

	/* Initialize miscellaneous variables */
	mutex_init(&node->lock);
	INIT_LIST_HEAD(&node->buffers);

	/* Initialize formats to default values */
	node->pad_fmt = def_bus_fmt;
	node->id = node_num;
	node->pipe = pipe;
	imgu_node_to_v4l2(node_num, vdev, &node->vdev_fmt);
	if (node->vdev_fmt.type ==
	    V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
	    node->vdev_fmt.type ==
	    V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		def_pix_fmt.pixelformat = node->output ?
			V4L2_PIX_FMT_IPU3_SGRBG10 :
			V4L2_PIX_FMT_NV12;
		node->vdev_fmt.fmt.pix_mp = def_pix_fmt;
	}

	/* Initialize media entities */
	r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad);
	if (r) {
		dev_err(dev, "failed initialize media entity (%d)\n", r);
		mutex_destroy(&node->lock);
		return r;
	}
	node->vdev_pad.flags = node->output ?
1178 MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK; 1179 vdev->entity.ops = NULL; 1180 1181 /* Initialize vbq */ 1182 vbq->type = node->vdev_fmt.type; 1183 vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF; 1184 vbq->ops = &imgu_vb2_ops; 1185 vbq->mem_ops = &vb2_dma_sg_memops; 1186 if (imgu->buf_struct_size <= 0) 1187 imgu->buf_struct_size = 1188 sizeof(struct imgu_vb2_buffer); 1189 vbq->buf_struct_size = imgu->buf_struct_size; 1190 vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1191 /* can streamon w/o buffers */ 1192 vbq->min_buffers_needed = 0; 1193 vbq->drv_priv = imgu; 1194 vbq->lock = &node->lock; 1195 r = vb2_queue_init(vbq); 1196 if (r) { 1197 dev_err(dev, "failed to initialize video queue (%d)", r); 1198 media_entity_cleanup(&vdev->entity); 1199 return r; 1200 } 1201 1202 /* Initialize vdev */ 1203 snprintf(vdev->name, sizeof(vdev->name), "%s %u %s", 1204 IMGU_NAME, pipe, node->name); 1205 vdev->release = video_device_release_empty; 1206 vdev->fops = &imgu_v4l2_fops; 1207 vdev->lock = &node->lock; 1208 vdev->v4l2_dev = &imgu->v4l2_dev; 1209 vdev->queue = &node->vbq; 1210 vdev->vfl_dir = node->output ? 
VFL_DIR_TX : VFL_DIR_RX; 1211 video_set_drvdata(vdev, imgu); 1212 r = video_register_device(vdev, VFL_TYPE_VIDEO, -1); 1213 if (r) { 1214 dev_err(dev, "failed to register video device (%d)", r); 1215 media_entity_cleanup(&vdev->entity); 1216 return r; 1217 } 1218 1219 /* Create link between video node and the subdev pad */ 1220 flags = 0; 1221 if (node->enabled) 1222 flags |= MEDIA_LNK_FL_ENABLED; 1223 if (node->output) { 1224 r = media_create_pad_link(&vdev->entity, 0, &sd->entity, 1225 node_num, flags); 1226 } else { 1227 if (node->id == IMGU_NODE_OUT) { 1228 flags |= MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE; 1229 node->enabled = true; 1230 } 1231 1232 r = media_create_pad_link(&sd->entity, node_num, &vdev->entity, 1233 0, flags); 1234 } 1235 if (r) { 1236 dev_err(dev, "failed to create pad link (%d)", r); 1237 video_unregister_device(vdev); 1238 return r; 1239 } 1240 1241 return 0; 1242 } 1243 1244 static void imgu_v4l2_nodes_cleanup_pipe(struct imgu_device *imgu, 1245 unsigned int pipe, int node) 1246 { 1247 int i; 1248 struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe]; 1249 1250 for (i = 0; i < node; i++) { 1251 video_unregister_device(&imgu_pipe->nodes[i].vdev); 1252 media_entity_cleanup(&imgu_pipe->nodes[i].vdev.entity); 1253 mutex_destroy(&imgu_pipe->nodes[i].lock); 1254 } 1255 } 1256 1257 static int imgu_v4l2_nodes_setup_pipe(struct imgu_device *imgu, int pipe) 1258 { 1259 int i; 1260 1261 for (i = 0; i < IMGU_NODE_NUM; i++) { 1262 int r = imgu_v4l2_node_setup(imgu, pipe, i); 1263 1264 if (r) { 1265 imgu_v4l2_nodes_cleanup_pipe(imgu, pipe, i); 1266 return r; 1267 } 1268 } 1269 return 0; 1270 } 1271 1272 static void imgu_v4l2_subdev_cleanup(struct imgu_device *imgu, unsigned int i) 1273 { 1274 struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[i]; 1275 1276 v4l2_device_unregister_subdev(&imgu_pipe->imgu_sd.subdev); 1277 v4l2_ctrl_handler_free(imgu_pipe->imgu_sd.subdev.ctrl_handler); 1278 
media_entity_cleanup(&imgu_pipe->imgu_sd.subdev.entity); 1279 } 1280 1281 static void imgu_v4l2_cleanup_pipes(struct imgu_device *imgu, unsigned int pipe) 1282 { 1283 int i; 1284 1285 for (i = 0; i < pipe; i++) { 1286 imgu_v4l2_nodes_cleanup_pipe(imgu, i, IMGU_NODE_NUM); 1287 imgu_v4l2_subdev_cleanup(imgu, i); 1288 } 1289 } 1290 1291 static int imgu_v4l2_register_pipes(struct imgu_device *imgu) 1292 { 1293 struct imgu_media_pipe *imgu_pipe; 1294 int i, r; 1295 1296 for (i = 0; i < IMGU_MAX_PIPE_NUM; i++) { 1297 imgu_pipe = &imgu->imgu_pipe[i]; 1298 r = imgu_v4l2_subdev_register(imgu, &imgu_pipe->imgu_sd, i); 1299 if (r) { 1300 dev_err(&imgu->pci_dev->dev, 1301 "failed to register subdev%u ret (%d)\n", i, r); 1302 goto pipes_cleanup; 1303 } 1304 r = imgu_v4l2_nodes_setup_pipe(imgu, i); 1305 if (r) { 1306 imgu_v4l2_subdev_cleanup(imgu, i); 1307 goto pipes_cleanup; 1308 } 1309 } 1310 1311 return 0; 1312 1313 pipes_cleanup: 1314 imgu_v4l2_cleanup_pipes(imgu, i); 1315 return r; 1316 } 1317 1318 int imgu_v4l2_register(struct imgu_device *imgu) 1319 { 1320 int r; 1321 1322 /* Initialize miscellaneous variables */ 1323 imgu->streaming = false; 1324 1325 /* Set up media device */ 1326 media_device_pci_init(&imgu->media_dev, imgu->pci_dev, IMGU_NAME); 1327 1328 /* Set up v4l2 device */ 1329 imgu->v4l2_dev.mdev = &imgu->media_dev; 1330 imgu->v4l2_dev.ctrl_handler = NULL; 1331 r = v4l2_device_register(&imgu->pci_dev->dev, &imgu->v4l2_dev); 1332 if (r) { 1333 dev_err(&imgu->pci_dev->dev, 1334 "failed to register V4L2 device (%d)\n", r); 1335 goto fail_v4l2_dev; 1336 } 1337 1338 r = imgu_v4l2_register_pipes(imgu); 1339 if (r) { 1340 dev_err(&imgu->pci_dev->dev, 1341 "failed to register pipes (%d)\n", r); 1342 goto fail_v4l2_pipes; 1343 } 1344 1345 r = v4l2_device_register_subdev_nodes(&imgu->v4l2_dev); 1346 if (r) { 1347 dev_err(&imgu->pci_dev->dev, 1348 "failed to register subdevs (%d)\n", r); 1349 goto fail_subdevs; 1350 } 1351 1352 r = media_device_register(&imgu->media_dev); 
1353 if (r) { 1354 dev_err(&imgu->pci_dev->dev, 1355 "failed to register media device (%d)\n", r); 1356 goto fail_subdevs; 1357 } 1358 1359 return 0; 1360 1361 fail_subdevs: 1362 imgu_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM); 1363 fail_v4l2_pipes: 1364 v4l2_device_unregister(&imgu->v4l2_dev); 1365 fail_v4l2_dev: 1366 media_device_cleanup(&imgu->media_dev); 1367 1368 return r; 1369 } 1370 1371 int imgu_v4l2_unregister(struct imgu_device *imgu) 1372 { 1373 media_device_unregister(&imgu->media_dev); 1374 imgu_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM); 1375 v4l2_device_unregister(&imgu->v4l2_dev); 1376 media_device_cleanup(&imgu->media_dev); 1377 1378 return 0; 1379 } 1380 1381 void imgu_v4l2_buffer_done(struct vb2_buffer *vb, 1382 enum vb2_buffer_state state) 1383 { 1384 struct imgu_vb2_buffer *b = 1385 container_of(vb, struct imgu_vb2_buffer, vbb.vb2_buf); 1386 1387 list_del(&b->list); 1388 vb2_buffer_done(&b->vbb.vb2_buf, state); 1389 } 1390