/*
 * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 * Copyright (C) 2017 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <media/videobuf2-dma-sg.h>
#include <media/v4l2-mem2mem.h>
#include <asm/div64.h>

#include "core.h"
#include "helpers.h"
#include "hfi_helper.h"

struct intbuf {
	struct list_head list;
	u32 type;
	size_t size;
	void *va;
	dma_addr_t da;
	unsigned long attrs;
};

bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
{
	struct venus_core *core = inst->core;
	u32 session_type = inst->session_type;
	u32 codec;

	switch (v4l2_pixfmt) {
	case V4L2_PIX_FMT_H264:
		codec = HFI_VIDEO_CODEC_H264;
		break;
	case V4L2_PIX_FMT_H263:
		codec = HFI_VIDEO_CODEC_H263;
		break;
	case V4L2_PIX_FMT_MPEG1:
		codec = HFI_VIDEO_CODEC_MPEG1;
		break;
	case V4L2_PIX_FMT_MPEG2:
		codec = HFI_VIDEO_CODEC_MPEG2;
		break;
	case V4L2_PIX_FMT_MPEG4:
		codec = HFI_VIDEO_CODEC_MPEG4;
		break;
	case V4L2_PIX_FMT_VC1_ANNEX_G:
	case V4L2_PIX_FMT_VC1_ANNEX_L:
		codec = HFI_VIDEO_CODEC_VC1;
		break;
	case V4L2_PIX_FMT_VP8:
		codec = HFI_VIDEO_CODEC_VP8;
		break;
	case V4L2_PIX_FMT_VP9:
		codec = HFI_VIDEO_CODEC_VP9;
		break;
	case V4L2_PIX_FMT_XVID:
		codec = HFI_VIDEO_CODEC_DIVX;
		break;
	default:
		return false;
	}

	if (session_type == VIDC_SESSION_TYPE_ENC && core->enc_codecs & codec)
		return true;

	if (session_type == VIDC_SESSION_TYPE_DEC && core->dec_codecs & codec)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(venus_helper_check_codec);

/*
 * Allocate the firmware-internal (scratch/persist) buffers of @type and
 * register them with the session.
 */
static int intbufs_set_buffer(struct venus_inst *inst, u32 type)
{
	struct venus_core *core = inst->core;
	struct device *dev = core->dev;
	struct hfi_buffer_requirements bufreq;
	struct hfi_buffer_desc bd;
	struct intbuf *buf;
	unsigned int i;
	int ret;

	ret = venus_helper_get_bufreq(inst, type, &bufreq);
	if (ret)
		return 0;

	if (!bufreq.size)
		return 0;

	for (i = 0; i < bufreq.count_actual; i++) {
		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto fail;
		}

		buf->type = bufreq.type;
		buf->size = bufreq.size;
		buf->attrs = DMA_ATTR_WRITE_COMBINE |
			     DMA_ATTR_NO_KERNEL_MAPPING;
		buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
					  buf->attrs);
		if (!buf->va) {
			ret = -ENOMEM;
			goto fail;
		}

		memset(&bd, 0, sizeof(bd));
		bd.buffer_size = buf->size;
		bd.buffer_type = buf->type;
		bd.num_buffers = 1;
		bd.device_addr = buf->da;

		ret = hfi_session_set_buffers(inst, &bd);
		if (ret) {
			dev_err(dev, "set session buffers failed\n");
			goto dma_free;
		}

		list_add_tail(&buf->list, &inst->internalbufs);
	}

	return 0;

dma_free:
	dma_free_attrs(dev, buf->size, buf->va, buf->da, buf->attrs);
fail:
	kfree(buf);
	return ret;
}

static int intbufs_unset_buffers(struct venus_inst *inst)
{
	struct hfi_buffer_desc bd = {0};
	struct intbuf *buf, *n;
	int ret = 0;

	list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
		bd.buffer_size = buf->size;
		bd.buffer_type = buf->type;
		bd.num_buffers = 1;
		bd.device_addr = buf->da;
		bd.response_required = true;

		ret = hfi_session_unset_buffers(inst, &bd);

		list_del_init(&buf->list);
		dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
			       buf->attrs);
		kfree(buf);
	}

	return ret;
}

static const unsigned int intbuf_types[] = {
	HFI_BUFFER_INTERNAL_SCRATCH,
	HFI_BUFFER_INTERNAL_SCRATCH_1,
	HFI_BUFFER_INTERNAL_SCRATCH_2,
	HFI_BUFFER_INTERNAL_PERSIST,
	HFI_BUFFER_INTERNAL_PERSIST_1,
};

static int intbufs_alloc(struct venus_inst *inst)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(intbuf_types); i++) {
		ret = intbufs_set_buffer(inst, intbuf_types[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	intbufs_unset_buffers(inst);
	return ret;
}

static int intbufs_free(struct venus_inst *inst)
{
	return intbufs_unset_buffers(inst);
}

/* Instance load in macroblocks per second */
static u32 load_per_instance(struct venus_inst *inst)
{
	u32 mbs;

	if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP))
		return 0;

	mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16);

	return mbs * inst->fps;
}

static u32 load_per_type(struct venus_core *core, u32 session_type)
{
	struct venus_inst *inst = NULL;
	u32 mbs_per_sec = 0;

	mutex_lock(&core->lock);
	list_for_each_entry(inst, &core->instances, list) {
		if (inst->session_type != session_type)
			continue;

		mbs_per_sec += load_per_instance(inst);
	}
	mutex_unlock(&core->lock);

	return mbs_per_sec;
}

/* Scale the core clocks to the aggregated encoder + decoder load */
static int load_scale_clocks(struct venus_core *core)
{
	const struct freq_tbl *table = core->res->freq_tbl;
	unsigned int num_rows = core->res->freq_tbl_size;
	unsigned long freq = table[0].freq;
	struct clk *clk = core->clks[0];
	struct device *dev = core->dev;
	u32 mbs_per_sec;
	unsigned int i;
	int ret;

	mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
		      load_per_type(core, VIDC_SESSION_TYPE_DEC);

	if (mbs_per_sec > core->res->max_load)
		dev_warn(dev, "HW is overloaded, needed: %d max: %d\n",
			 mbs_per_sec, core->res->max_load);

	if (!mbs_per_sec && num_rows > 1) {
		freq = table[num_rows - 1].freq;
		goto set_freq;
	}

	for (i = 0; i < num_rows; i++) {
		if (mbs_per_sec > table[i].load)
			break;
		freq = table[i].freq;
	}

set_freq:

	if (core->res->hfi_version == HFI_VERSION_3XX) {
		ret = clk_set_rate(clk, freq);
		ret |= clk_set_rate(core->core0_clk, freq);
		ret |= clk_set_rate(core->core1_clk, freq);
	} else {
		ret = clk_set_rate(clk, freq);
	}

	if (ret) {
		dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
		return ret;
	}

	return 0;
}

static void fill_buffer_desc(const struct venus_buffer *buf,
			     struct hfi_buffer_desc *bd, bool response)
{
	memset(bd, 0, sizeof(*bd));
	bd->buffer_type = HFI_BUFFER_OUTPUT;
	bd->buffer_size = buf->size;
	bd->num_buffers = 1;
	bd->device_addr = buf->dma_addr;
	bd->response_required = response;
}

static void
return_buf_error(struct venus_inst *inst,
		 struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;

	if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf);
	else
		v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf);

	v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
}

static int
session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
	struct venus_buffer *buf = to_venus_buffer(vbuf);
	struct vb2_buffer *vb = &vbuf->vb2_buf;
	unsigned int type = vb->type;
	struct hfi_frame_data fdata;
	int ret;

	memset(&fdata, 0, sizeof(fdata));
	fdata.alloc_len = buf->size;
	fdata.device_addr = buf->dma_addr;
	fdata.timestamp = vb->timestamp;
	do_div(fdata.timestamp, NSEC_PER_USEC);
	fdata.flags = 0;
	fdata.clnt_data = vbuf->vb2_buf.index;

	if (!fdata.timestamp)
		fdata.flags |= HFI_BUFFERFLAG_TIMESTAMPINVALID;

	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		fdata.buffer_type = HFI_BUFFER_INPUT;
		fdata.filled_len = vb2_get_plane_payload(vb, 0);
		fdata.offset = vb->planes[0].data_offset;

		if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len)
			fdata.flags |= HFI_BUFFERFLAG_EOS;
	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		fdata.buffer_type = HFI_BUFFER_OUTPUT;
		fdata.filled_len = 0;
		fdata.offset = 0;
	}

	ret = hfi_session_process_buf(inst, &fdata);
	if (ret)
		return ret;

	return 0;
}

/*
 * Explicit capture buffer (un)registration with the firmware is not needed
 * for decoder sessions on 3xx firmware, nor for decoder sessions running in
 * dynamic buffer mode on 1xx firmware.
 */
static inline int is_reg_unreg_needed(struct venus_inst *inst)
{
	if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
	    inst->core->res->hfi_version == HFI_VERSION_3XX)
		return 0;

	if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
	    inst->cap_bufs_mode_dynamic &&
	    inst->core->res->hfi_version == HFI_VERSION_1XX)
		return 0;

	return 1;
}

static int session_unregister_bufs(struct venus_inst *inst)
{
	struct venus_buffer *buf, *n;
	struct hfi_buffer_desc bd;
	int ret = 0;

	if (!is_reg_unreg_needed(inst))
		return 0;

	list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) {
		fill_buffer_desc(buf, &bd, true);
		ret = hfi_session_unset_buffers(inst, &bd);
		list_del_init(&buf->reg_list);
	}

	return ret;
}

static int session_register_bufs(struct venus_inst *inst)
{
	struct venus_core *core = inst->core;
	struct device *dev = core->dev;
	struct hfi_buffer_desc bd;
	struct venus_buffer *buf;
	int ret = 0;

	if (!is_reg_unreg_needed(inst))
		return 0;

	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
		fill_buffer_desc(buf, &bd, false);
		ret = hfi_session_set_buffers(inst, &bd);
		if (ret) {
			dev_err(dev, "%s: set buffer failed\n", __func__);
			break;
		}
	}

	return ret;
}

int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
			    struct hfi_buffer_requirements *req)
{
	u32 ptype = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS;
	union hfi_get_property hprop;
	unsigned int i;
	int ret;

	if (req)
		memset(req, 0, sizeof(*req));

	ret = hfi_session_get_property(inst, ptype, &hprop);
	if (ret)
		return ret;

	ret = -EINVAL;

	for (i = 0; i < HFI_BUFFER_TYPE_MAX; i++) {
		if (hprop.bufreq[i].type != type)
			continue;

		if (req)
			memcpy(req, &hprop.bufreq[i], sizeof(*req));
		ret = 0;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_get_bufreq);

int venus_helper_set_input_resolution(struct venus_inst *inst,
				      unsigned int width, unsigned int height)
{
	u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
	struct hfi_framesize fs;

	fs.buffer_type = HFI_BUFFER_INPUT;
	fs.width = width;
	fs.height = height;

	return hfi_session_set_property(inst, ptype, &fs);
}
EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution);

int venus_helper_set_output_resolution(struct venus_inst *inst,
				       unsigned int width, unsigned int height)
{
	u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
	struct hfi_framesize fs;

	fs.buffer_type = HFI_BUFFER_OUTPUT;
	fs.width = width;
	fs.height = height;

	return hfi_session_set_property(inst, ptype, &fs);
}
EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution);

int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
			      unsigned int output_bufs)
{
	u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
	struct hfi_buffer_count_actual buf_count;
	int ret;

	buf_count.type = HFI_BUFFER_INPUT;
	buf_count.count_actual = input_bufs;

	ret = hfi_session_set_property(inst, ptype, &buf_count);
	if (ret)
		return ret;

	buf_count.type = HFI_BUFFER_OUTPUT;
	buf_count.count_actual = output_bufs;

	return hfi_session_set_property(inst, ptype, &buf_count);
}
EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs);

int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
{
	struct hfi_uncompressed_format_select fmt;
	u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
	int ret;

	if (inst->session_type == VIDC_SESSION_TYPE_DEC)
		fmt.buffer_type = HFI_BUFFER_OUTPUT;
	else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
		fmt.buffer_type = HFI_BUFFER_INPUT;
	else
		return -EINVAL;

	switch (pixfmt) {
	case V4L2_PIX_FMT_NV12:
		fmt.format = HFI_COLOR_FORMAT_NV12;
		break;
	case V4L2_PIX_FMT_NV21:
		fmt.format = HFI_COLOR_FORMAT_NV21;
		break;
	default:
		return -EINVAL;
	}

	ret = hfi_session_set_property(inst, ptype, &fmt);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_set_color_format);

/*
 * Queue to the firmware any delayed buffers whose read-only reference has
 * been released in the meantime.
 */
static void delayed_process_buf_func(struct work_struct *work)
{
	struct venus_buffer *buf, *n;
	struct venus_inst *inst;
	int ret;

	inst = container_of(work, struct venus_inst, delayed_process_work);

	mutex_lock(&inst->lock);

	if (!(inst->streamon_out & inst->streamon_cap))
		goto unlock;

	list_for_each_entry_safe(buf, n, &inst->delayed_process, ref_list) {
		if (buf->flags & HFI_BUFFERFLAG_READONLY)
			continue;

		ret = session_process_buf(inst, &buf->vb);
		if (ret)
			return_buf_error(inst, &buf->vb);

		list_del_init(&buf->ref_list);
	}
unlock:
	mutex_unlock(&inst->lock);
}

void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx)
{
	struct venus_buffer *buf;

	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
		if (buf->vb.vb2_buf.index == idx) {
			buf->flags &= ~HFI_BUFFERFLAG_READONLY;
			schedule_work(&inst->delayed_process_work);
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(venus_helper_release_buf_ref);

void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf)
{
	struct venus_buffer *buf = to_venus_buffer(vbuf);

	buf->flags |= HFI_BUFFERFLAG_READONLY;
}
EXPORT_SYMBOL_GPL(venus_helper_acquire_buf_ref);

static int is_buf_refed(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
	struct venus_buffer *buf = to_venus_buffer(vbuf);

	if (buf->flags & HFI_BUFFERFLAG_READONLY) {
		list_add_tail(&buf->ref_list, &inst->delayed_process);
		schedule_work(&inst->delayed_process_work);
		return 1;
	}

	return 0;
}

struct vb2_v4l2_buffer *
venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
{
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;

	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
	else
		return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
}
EXPORT_SYMBOL_GPL(venus_helper_find_buf);

int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
{
	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct venus_buffer *buf = to_venus_buffer(vbuf);
	struct sg_table *sgt;

	sgt = vb2_dma_sg_plane_desc(vb, 0);
	if (!sgt)
		return -EFAULT;

	buf->size = vb2_plane_size(vb, 0);
	buf->dma_addr = sg_dma_address(sgt->sgl);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		list_add_tail(&buf->reg_list, &inst->registeredbufs);

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init);

int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
{
	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
	    vb2_plane_size(vb, 0) < inst->output_buf_size)
		return -EINVAL;
	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
	    vb2_plane_size(vb, 0) < inst->input_buf_size)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare);

void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	int ret;

	mutex_lock(&inst->lock);

	v4l2_m2m_buf_queue(m2m_ctx, vbuf);

	if (!(inst->streamon_out & inst->streamon_cap))
		goto unlock;

	ret = is_buf_refed(inst, vbuf);
	if (ret)
		goto unlock;

	ret = session_process_buf(inst, vbuf);
	if (ret)
		return_buf_error(inst, vbuf);

unlock:
	mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_queue);

void venus_helper_buffers_done(struct venus_inst *inst,
			       enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *buf;

	while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx)))
		v4l2_m2m_buf_done(buf, state);
	while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
		v4l2_m2m_buf_done(buf, state);
}
EXPORT_SYMBOL_GPL(venus_helper_buffers_done);

void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
{
	struct venus_inst *inst = vb2_get_drv_priv(q);
	struct venus_core *core = inst->core;
	int ret;

	mutex_lock(&inst->lock);

	if (inst->streamon_out & inst->streamon_cap) {
		ret = hfi_session_stop(inst);
		ret |= hfi_session_unload_res(inst);
		ret |= session_unregister_bufs(inst);
		ret |= intbufs_free(inst);
		ret |= hfi_session_deinit(inst);

		if (inst->session_error || core->sys_error)
			ret = -EIO;

		if (ret)
			hfi_session_abort(inst);

		load_scale_clocks(core);
		INIT_LIST_HEAD(&inst->registeredbufs);
	}

	venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);

	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		inst->streamon_out = 0;
	else
		inst->streamon_cap = 0;

	mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);

int venus_helper_vb2_start_streaming(struct venus_inst *inst)
{
	struct venus_core *core = inst->core;
	int ret;

	ret = intbufs_alloc(inst);
	if (ret)
		return ret;

	ret = session_register_bufs(inst);
	if (ret)
		goto err_bufs_free;

	load_scale_clocks(core);

	ret = hfi_session_load_res(inst);
	if (ret)
		goto err_unreg_bufs;

	ret = hfi_session_start(inst);
	if (ret)
		goto err_unload_res;

	return 0;

err_unload_res:
	hfi_session_unload_res(inst);
err_unreg_bufs:
	session_unregister_bufs(inst);
err_bufs_free:
	intbufs_free(inst);
	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_start_streaming);

void venus_helper_m2m_device_run(void *priv)
{
	struct venus_inst *inst = priv;
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	struct v4l2_m2m_buffer *buf, *n;
	int ret;

	mutex_lock(&inst->lock);

	v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
		ret = session_process_buf(inst, &buf->vb);
		if (ret)
			return_buf_error(inst, &buf->vb);
	}

	v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
		ret = session_process_buf(inst, &buf->vb);
		if (ret)
			return_buf_error(inst, &buf->vb);
	}

	mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_m2m_device_run);

void venus_helper_m2m_job_abort(void *priv)
{
	struct venus_inst *inst = priv;

	v4l2_m2m_job_finish(inst->m2m_dev, inst->m2m_ctx);
}
EXPORT_SYMBOL_GPL(venus_helper_m2m_job_abort);

void venus_helper_init_instance(struct venus_inst *inst)
{
	if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
		INIT_LIST_HEAD(&inst->delayed_process);
		INIT_WORK(&inst->delayed_process_work,
			  delayed_process_buf_func);
	}
}
EXPORT_SYMBOL_GPL(venus_helper_init_instance);