// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_v4l2.h"
#include "vpu_msgs.h"
#include "vpu_helpers.h"

/*
 * Per-instance lock helpers. Note that inst->lock is also installed as the
 * vb2 queue lock in vpu_m2m_queue_init(), so vb2 callbacks run with it held.
 */
void vpu_inst_lock(struct vpu_inst *inst)
{
	mutex_lock(&inst->lock);
}

void vpu_inst_unlock(struct vpu_inst *inst)
{
	mutex_unlock(&inst->lock);
}

/*
 * Return the DMA address of the payload of one plane of a dma-contig vb2
 * buffer (base address plus data_offset), or 0 if plane_no is out of range.
 */
dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_dma_contig_plane_dma_addr(vb, plane_no) +
			vb->planes[plane_no].data_offset;
}

/*
 * Return the usable length of one plane (plane size minus data_offset),
 * or 0 if plane_no is out of range.
 */
unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_plane_size(vb, plane_no) - vb->planes[plane_no].data_offset;
}

/* Store the driver-private buffer state (VPU_BUF_STATE_*) in the vb2 buffer. */
void vpu_set_buffer_state(struct vb2_v4l2_buffer *vbuf, unsigned int state)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	vpu_buf->state = state;
}

/* Read back the driver-private buffer state set by vpu_set_buffer_state(). */
unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	return vpu_buf->state;
}

/*
 * Put both m2m queues into the error state so userspace gets -EIO on
 * further dequeue attempts. Called when the codec reports a fatal error.
 */
void vpu_v4l2_set_error(struct vpu_inst *inst)
{
	vpu_inst_lock(inst);
	dev_err(inst->dev, "some error occurs in codec\n");
	if (inst->fh.m2m_ctx) {
		vb2_queue_error(v4l2_m2m_get_src_vq(inst->fh.m2m_ctx));
		vb2_queue_error(v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx));
	}
	vpu_inst_unlock(inst);
}

/* Queue a V4L2_EVENT_EOS event to the file handle of this instance. */
int vpu_notify_eos(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_EOS
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);

	return 0;
}

/* Queue a source-change (resolution change) event to this instance. */
int vpu_notify_source_change(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);
	return 0;
}

/*
 * Mark the capture queue's last buffer as dequeued and signal EOS.
 *
 * Refuses (-EINVAL) while buffers are still pending on the done list, so the
 * EOS marker is only raised once userspace has drained all completed capture
 * buffers. Idempotent: returns 0 immediately if already flagged.
 */
int vpu_set_last_buffer_dequeued(struct vpu_inst *inst)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
	if (!list_empty(&q->done_list))
		return -EINVAL;

	if (q->last_buffer_dequeued)
		return 0;
	vpu_trace(inst->dev, "last buffer dequeued\n");
	q->last_buffer_dequeued = true;
	wake_up(&q->done_wq);
	vpu_notify_eos(inst);
	return 0;
}

/*
 * Return true when no unconsumed source data is queued. A src buffer in
 * VPU_BUF_STATE_IDLE is one queued by userspace but not yet handed to the
 * firmware (IDLE is set in buf_init and picked up by
 * vpu_process_output_buffer()), so any IDLE buffer means pending input.
 */
bool vpu_is_source_empty(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;

	if (!inst->fh.m2m_ctx)
		return true;
	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		if (vpu_get_buffer_state(&buf->vb) == VPU_BUF_STATE_IDLE)
			return false;
	}
	return true;
}

/*
 * Common TRY_FMT handling for both queue directions.
 *
 * Snaps the pixelformat to a supported one (falling back to the first
 * enumerable format), clamps width/height through the helper validators,
 * defaults FIELD_ANY to FIELD_NONE, and computes per-plane bytesperline and
 * sizeimage, never shrinking values userspace already supplied (max_t).
 * Returns the matched format descriptor, or NULL if no format is available.
 */
const struct vpu_format *vpu_try_fmt_common(struct vpu_inst *inst, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
	u32 type = f->type;
	u32 stride = 1;
	u32 bytesperline;
	u32 sizeimage;
	const struct vpu_format *fmt;
	const struct vpu_core_resources *res;
	int i;

	fmt = vpu_helper_find_format(inst, type, pixmp->pixelformat);
	if (!fmt) {
		fmt = vpu_helper_enum_format(inst, type, 0);
		if (!fmt)
			return NULL;
		pixmp->pixelformat = fmt->pixfmt;
	}

	res = vpu_get_resource(inst);
	if (res)
		stride = res->stride;
	if (pixmp->width)
		pixmp->width = vpu_helper_valid_frame_width(inst, pixmp->width);
	if (pixmp->height)
		pixmp->height = vpu_helper_valid_frame_height(inst, pixmp->height);
	pixmp->flags = fmt->flags;
	pixmp->num_planes = fmt->num_planes;
	if (pixmp->field == V4L2_FIELD_ANY)
		pixmp->field = V4L2_FIELD_NONE;
	for (i = 0; i < pixmp->num_planes; i++) {
		bytesperline = max_t(s32, pixmp->plane_fmt[i].bytesperline, 0);
		sizeimage = vpu_helper_get_plane_size(pixmp->pixelformat,
						      pixmp->width,
						      pixmp->height,
						      i,
						      stride,
						      pixmp->field > V4L2_FIELD_NONE ? 1 : 0,
						      &bytesperline);
		/* never shrink a size userspace already requested */
		sizeimage = max_t(s32, pixmp->plane_fmt[i].sizeimage, sizeimage);
		pixmp->plane_fmt[i].bytesperline = bytesperline;
		pixmp->plane_fmt[i].sizeimage = sizeimage;
	}

	return fmt;
}

/*
 * Check whether the instance can accept work on the given queue type.
 * Not ready before the instance is registered (DEINIT state or no id);
 * otherwise defers to the codec-specific check_ready op when provided.
 */
static bool vpu_check_ready(struct vpu_inst *inst, u32 type)
{
	if (!inst)
		return false;
	if (inst->state == VPU_CODEC_STATE_DEINIT || inst->id < 0)
		return false;
	if (!inst->ops->check_ready)
		return true;
	return call_vop(inst, check_ready, type);
}

/*
 * Hand the next idle OUTPUT (source) buffer to the codec via the
 * process_output op. Returns -EINVAL when not ready or nothing is pending.
 */
int vpu_process_output_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->out_format.type))
		return -EINVAL;

	/* find the first buffer still in IDLE (not yet given to firmware) */
	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}

	if (!vbuf)
		return -EINVAL;

	dev_dbg(inst->dev, "[%d]frame id = %d / %d\n",
		inst->id, vbuf->sequence, inst->sequence);
	return call_vop(inst, process_output, &vbuf->vb2_buf);
}

/*
 * Hand the next idle CAPTURE (destination) buffer to the codec via the
 * process_capture op. Mirrors vpu_process_output_buffer().
 */
int vpu_process_capture_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->cap_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}
	if (!vbuf)
		return -EINVAL;

	return call_vop(inst, process_capture, &vbuf->vb2_buf);
}

/*
 * Peek at the next source buffer that carries actual frame data.
 *
 * Codec-config buffers (vpu_vb_is_codecconfig()) are consumed on the fly:
 * removed from the queue and completed as DONE. Returns NULL when the queue
 * is empty or the head buffer is still IDLE (not yet processed).
 */
struct vb2_v4l2_buffer *vpu_next_src_buf(struct vpu_inst *inst)
{
	struct vb2_v4l2_buffer *src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);

	if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
		return NULL;

	while (vpu_vb_is_codecconfig(src_buf)) {
		v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);

		src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return NULL;
	}

	return src_buf;
}

/*
 * Drop up to @count consumed source buffers from the head of the queue.
 * DECODED buffers complete as DONE, anything else as ERROR. Stops early if
 * an IDLE (unprocessed) buffer is reached, so pending input is preserved.
 */
void vpu_skip_frame(struct vpu_inst *inst, int count)
{
	struct vb2_v4l2_buffer *src_buf;
	enum vb2_buffer_state state;
	int i = 0;

	if (count <= 0)
		return;

	while (i < count) {
		src_buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return;
		if (vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_DECODED)
			state = VB2_BUF_STATE_DONE;
		else
			state = VB2_BUF_STATE_ERROR;
		i++;
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, state);
	}
}

/*
 * Look up a queued buffer by its v4l2 sequence number on the src or dst
 * queue depending on @type. Returns NULL when not found.
 */
struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

/*
 * Look up a queued buffer by its vb2 index on the src or dst queue
 * depending on @type. Returns NULL when not found.
 */
struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

/* Number of allocated buffers on the src or dst queue, or -EINVAL. */
int vpu_get_num_buffers(struct vpu_inst *inst, u32 type)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (V4L2_TYPE_IS_OUTPUT(type))
		q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
	else
		q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);

	return q->num_buffers;
}

/*
 * Intentionally empty: this driver pushes buffers to the firmware directly
 * from buf_queue (see vpu_vb2_buf_queue) rather than from device_run.
 */
static void vpu_m2m_device_run(void *priv)
{
}

/* Since device_run does nothing, an aborted job can finish immediately. */
static void vpu_m2m_job_abort(void *priv)
{
	struct vpu_inst *inst = priv;
	struct v4l2_m2m_ctx *m2m_ctx = inst->fh.m2m_ctx;

	v4l2_m2m_job_finish(m2m_ctx->m2m_dev, m2m_ctx);
}

static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = vpu_m2m_device_run,
	.job_abort = vpu_m2m_job_abort
};

/*
 * vb2 .queue_setup: validate an explicit plane layout against the current
 * format, or fill in plane count/sizes and enforce the per-direction
 * minimum buffer counts.
 */
static int vpu_vb2_queue_setup(struct vb2_queue *vq,
			       unsigned int *buf_count,
			       unsigned int *plane_count,
			       unsigned int psize[],
			       struct device *allocators[])
{
	struct vpu_inst *inst = vb2_get_drv_priv(vq);
	struct vpu_format *cur_fmt;
	int i;

	cur_fmt = vpu_get_format(inst, vq->type);

	if (*plane_count) {
		/* CREATE_BUFS-style call: sizes must fit the current format */
		if (*plane_count != cur_fmt->num_planes)
			return -EINVAL;
		for (i = 0; i < cur_fmt->num_planes; i++) {
			if (psize[i] < cur_fmt->sizeimage[i])
				return -EINVAL;
		}
		return 0;
	}

	if (V4L2_TYPE_IS_OUTPUT(vq->type))
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_out);
	else
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_cap);
	*plane_count = cur_fmt->num_planes;
	for (i = 0; i < cur_fmt->num_planes; i++)
		psize[i] = cur_fmt->sizeimage[i];

	return 0;
}

/* vb2 .buf_init: new buffers start in the driver's IDLE state. */
static int vpu_vb2_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
	return 0;
}

/* vb2 .buf_out_validate: only progressive input is supported. */
static int vpu_vb2_buf_out_validate(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->field = V4L2_FIELD_NONE;

	return 0;
}

/*
 * vb2 .buf_prepare: flag undersized planes by moving the buffer to the
 * ERROR state. Note this deliberately still returns 0, so the queue
 * operation succeeds and the buffer completes with an error later.
 */
static int vpu_vb2_buf_prepare(struct vb2_buffer *vb)
{
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_format *cur_fmt;
	u32 i;

	cur_fmt = vpu_get_format(inst, vb->type);
	for (i = 0; i < cur_fmt->num_planes; i++) {
		if (vpu_get_vb_length(vb, i) < cur_fmt->sizeimage[i]) {
			dev_dbg(inst->dev, "[%d] %s buf[%d] is invalid\n",
				inst->id, vpu_type_name(vb->type), vb->index);
			vpu_set_buffer_state(vbuf, VPU_BUF_STATE_ERROR);
		}
	}

	return 0;
}

/*
 * vb2 .buf_finish: raise EOS when the LAST-flagged buffer is handed back,
 * and tell the codec when the done list drains so it can refill it.
 */
static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_queue *q = vb->vb2_queue;

	if (vbuf->flags & V4L2_BUF_FLAG_LAST)
		vpu_notify_eos(inst);

	if (list_empty(&q->done_list))
		call_void_vop(inst, on_queue_empty, q->type);
}

/*
 * Complete every buffer still owned by the driver on one queue direction
 * with @state, resetting each to IDLE first. Used on streamoff and on
 * start_streaming failure (with VB2_BUF_STATE_QUEUED).
 */
void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *buf;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		while ((buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	} else {
		while ((buf = v4l2_m2m_dst_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	}
}

/*
 * vb2 .start_streaming: register the instance with a VPU core, then start
 * the codec via the start op. On any failure all queued buffers are given
 * back in the QUEUED state as vb2 requires.
 *
 * inst->lock is the vb2 queue lock and therefore held on entry; it is
 * dropped around vpu_inst_register() — presumably because registration
 * takes other locks (not visible in this file) — and retaken afterwards.
 */
static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);
	struct vpu_format *fmt = vpu_get_format(inst, q->type);
	int ret;

	vpu_inst_unlock(inst);
	ret = vpu_inst_register(inst);
	vpu_inst_lock(inst);
	if (ret) {
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	vpu_trace(inst->dev, "[%d] %s %c%c%c%c %dx%d %u(%u) %u(%u) %u(%u) %d\n",
		  inst->id, vpu_type_name(q->type),
		  fmt->pixfmt,
		  fmt->pixfmt >> 8,
		  fmt->pixfmt >> 16,
		  fmt->pixfmt >> 24,
		  fmt->width, fmt->height,
		  fmt->sizeimage[0], fmt->bytesperline[0],
		  fmt->sizeimage[1], fmt->bytesperline[1],
		  fmt->sizeimage[2], fmt->bytesperline[2],
		  q->num_buffers);
	vb2_clear_last_buffer_dequeued(q);
	ret = call_vop(inst, start, q->type);
	if (ret)
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);

	return ret;
}

/*
 * vb2 .stop_streaming: stop the codec for this direction, error out all
 * owned buffers, and reset the output sequence counter on the src queue.
 */
static void vpu_vb2_stop_streaming(struct vb2_queue *q)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);

	vpu_trace(inst->dev, "[%d] %s\n", inst->id, vpu_type_name(q->type));

	call_void_vop(inst, stop, q->type);
	vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_ERROR);
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		inst->sequence = 0;
}

/*
 * vb2 .buf_queue: number output buffers, queue into the m2m context and
 * immediately try to push work in both directions (this driver does not
 * wait for device_run).
 */
static void vpu_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);

	if (V4L2_TYPE_IS_OUTPUT(vb->type))
		vbuf->sequence = inst->sequence++;

	v4l2_m2m_buf_queue(inst->fh.m2m_ctx, vbuf);
	vpu_process_output_buffer(inst);
	vpu_process_capture_buffer(inst);
}

static const struct vb2_ops vpu_vb2_ops = {
	.queue_setup        = vpu_vb2_queue_setup,
	.buf_init           = vpu_vb2_buf_init,
	.buf_out_validate   = vpu_vb2_buf_out_validate,
	.buf_prepare        = vpu_vb2_buf_prepare,
	.buf_finish         = vpu_vb2_buf_finish,
	.start_streaming    = vpu_vb2_start_streaming,
	.stop_streaming     = vpu_vb2_stop_streaming,
	.buf_queue          = vpu_vb2_buf_queue,
	.wait_prepare       = vb2_ops_wait_prepare,
	.wait_finish        = vb2_ops_wait_finish,
};

/*
 * m2m queue-init callback: configure both vb2 queues. dma-contig memops are
 * the default; the stream-buffer direction (decoder input / encoder output)
 * switches to vmalloc memops when use_stream_buffer is set. Both queues
 * share inst->lock as the vb2 lock.
 */
static int vpu_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct vpu_inst *inst = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	inst->out_format.type = src_vq->type;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->ops = &vpu_vb2_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_DEC && inst->use_stream_buffer)
		src_vq->mem_ops = &vb2_vmalloc_memops;
	src_vq->drv_priv = inst;
	src_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	src_vq->min_buffers_needed = 1;
	src_vq->dev = inst->vpu->dev;
	src_vq->lock = &inst->lock;
	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	inst->cap_format.type = dst_vq->type;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->ops = &vpu_vb2_ops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_ENC && inst->use_stream_buffer)
		dst_vq->mem_ops = &vb2_vmalloc_memops;
	dst_vq->drv_priv = inst;
	dst_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	dst_vq->min_buffers_needed = 1;
	dst_vq->dev = inst->vpu->dev;
	dst_vq->lock = &inst->lock;
	ret = vb2_queue_init(dst_vq);
	if (ret) {
		/* keep queue setup symmetric: undo the src queue on failure */
		vb2_queue_release(src_vq);
		return ret;
	}

	return 0;
}

/*
 * Final teardown of an instance, installed as inst->release in
 * vpu_v4l2_open() and invoked when the last reference is dropped
 * (see vpu_inst_put in vpu_v4l2_close). Releases the core, device
 * reference, workqueue, m2m context, controls and file handle, then
 * gives the codec a last cleanup callback.
 */
static int vpu_v4l2_release(struct vpu_inst *inst)
{
	vpu_trace(inst->vpu->dev, "%p\n", inst);

	vpu_release_core(inst->core);
	put_device(inst->dev);

	if (inst->workqueue) {
		cancel_work_sync(&inst->msg_work);
		destroy_workqueue(inst->workqueue);
		inst->workqueue = NULL;
	}

	if (inst->fh.m2m_ctx) {
		v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
		inst->fh.m2m_ctx = NULL;
	}
	v4l2_ctrl_handler_free(&inst->ctrl_handler);
	mutex_destroy(&inst->lock);
	v4l2_fh_del(&inst->fh);
	v4l2_fh_exit(&inst->fh);

	call_void_vop(inst, cleanup);

	return 0;
}

/*
 * Open an encoder/decoder instance: take the first reference, bind a VPU
 * core, initialize controls, the m2m context and the message workqueue.
 *
 * Returns 0 on success. On error the single reference taken here is
 * dropped, which funnels cleanup through vpu_v4l2_release(). A workqueue
 * allocation failure is tolerated (inst->workqueue stays NULL).
 */
int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_func *func;
	int ret = 0;

	if (!inst || !inst->ops)
		return -EINVAL;

	if (inst->type == VPU_CORE_TYPE_ENC)
		func = &vpu->encoder;
	else
		func = &vpu->decoder;

	atomic_set(&inst->ref_count, 0);
	vpu_inst_get(inst);
	inst->vpu = vpu;
	inst->core = vpu_request_core(vpu, inst->type);
	if (inst->core)
		inst->dev = get_device(inst->core->dev);
	mutex_init(&inst->lock);
	INIT_LIST_HEAD(&inst->cmd_q);
	inst->id = VPU_INST_NULL_ID;
	inst->release = vpu_v4l2_release;
	inst->pid = current->pid;
	inst->tgid = current->tgid;
	inst->min_buffer_cap = 2;
	inst->min_buffer_out = 2;
	v4l2_fh_init(&inst->fh, func->vfd);
	v4l2_fh_add(&inst->fh);

	ret = call_vop(inst, ctrl_init);
	if (ret)
		goto error;

	inst->fh.m2m_ctx = v4l2_m2m_ctx_init(func->m2m_dev, inst, vpu_m2m_queue_init);
	if (IS_ERR(inst->fh.m2m_ctx)) {
		dev_err(vpu->dev, "v4l2_m2m_ctx_init fail\n");
		ret = PTR_ERR(inst->fh.m2m_ctx);
		goto error;
	}

	inst->fh.ctrl_handler = &inst->ctrl_handler;
	file->private_data = &inst->fh;
	inst->state = VPU_CODEC_STATE_DEINIT;
	inst->workqueue = alloc_workqueue("vpu_inst", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (inst->workqueue) {
		INIT_WORK(&inst->msg_work, vpu_inst_run_work);
		/* kfifo requires a power-of-two size */
		ret = kfifo_init(&inst->msg_fifo,
				 inst->msg_buffer,
				 rounddown_pow_of_two(sizeof(inst->msg_buffer)));
		if (ret) {
			destroy_workqueue(inst->workqueue);
			inst->workqueue = NULL;
		}
	}
	vpu_trace(vpu->dev, "tgid = %d, pid = %d, type = %s, inst = %p\n",
		  inst->tgid, inst->pid, vpu_core_type_desc(inst->type), inst);

	return 0;
error:
	vpu_inst_put(inst);
	return ret;
}

/*
 * Close an instance: let the codec release its resources, unregister the
 * instance from its core, and drop the reference taken in vpu_v4l2_open()
 * (final teardown happens in vpu_v4l2_release via the refcount).
 */
int vpu_v4l2_close(struct file *file)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_inst *inst = to_inst(file);

	vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst);

	call_void_vop(inst, release);
	vpu_inst_unregister(inst);
	vpu_inst_put(inst);

	return 0;
}

/*
 * Register one function (encoder or decoder): create the m2m device,
 * allocate and register the video device with the matching fops/ioctl ops,
 * and hook it into the media controller. Idempotent: returns 0 if the
 * video device already exists. Each error path unwinds what was set up.
 */
int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func)
{
	struct video_device *vfd;
	int ret;

	if (!vpu || !func)
		return -EINVAL;

	if (func->vfd)
		return 0;

	func->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(func->m2m_dev)) {
		dev_err(vpu->dev, "v4l2_m2m_init fail\n");
		func->vfd = NULL;
		return PTR_ERR(func->m2m_dev);
	}

	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_m2m_release(func->m2m_dev);
		dev_err(vpu->dev, "alloc vpu decoder video device fail\n");
		return -ENOMEM;
	}
	vfd->release = video_device_release;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
	if (func->type == VPU_CORE_TYPE_ENC) {
		strscpy(vfd->name, "amphion-vpu-encoder", sizeof(vfd->name));
		vfd->fops = venc_get_fops();
		vfd->ioctl_ops = venc_get_ioctl_ops();
	} else {
		strscpy(vfd->name, "amphion-vpu-decoder", sizeof(vfd->name));
		vfd->fops = vdec_get_fops();
		vfd->ioctl_ops = vdec_get_ioctl_ops();
	}

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		video_device_release(vfd);
		v4l2_m2m_release(func->m2m_dev);
		return ret;
	}
	video_set_drvdata(vfd, vpu);
	func->vfd = vfd;

	ret = v4l2_m2m_register_media_controller(func->m2m_dev, func->vfd, func->function);
	if (ret) {
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
		/* unregister triggers vfd->release (video_device_release) */
		video_unregister_device(func->vfd);
		func->vfd = NULL;
		return ret;
	}

	return 0;
}

/*
 * Tear down one function: undo vpu_add_func(). NULL-tolerant and safe to
 * call on a partially initialized func.
 */
void vpu_remove_func(struct vpu_func *func)
{
	if (!func)
		return;

	if (func->m2m_dev) {
		v4l2_m2m_unregister_media_controller(func->m2m_dev);
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
	}
	if (func->vfd) {
		video_unregister_device(func->vfd);
		func->vfd = NULL;
	}
}