1 /*
2  * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
3  * Copyright (C) 2017 Linaro Ltd.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 and
7  * only version 2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  */
15 #include <linux/clk.h>
16 #include <linux/list.h>
17 #include <linux/mutex.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/slab.h>
20 #include <media/videobuf2-dma-sg.h>
21 #include <media/v4l2-mem2mem.h>
22 #include <asm/div64.h>
23 
24 #include "core.h"
25 #include "helpers.h"
26 #include "hfi_helper.h"
27 
/*
 * An internal (scratch/persist) buffer allocated on behalf of the firmware
 * for the lifetime of a session.
 */
struct intbuf {
	struct list_head list;	/* entry in inst->internalbufs */
	u32 type;		/* HFI_BUFFER_INTERNAL_* buffer type */
	size_t size;		/* allocation size in bytes */
	void *va;		/* CPU address (unused due to NO_KERNEL_MAPPING) */
	dma_addr_t da;		/* device address handed to the firmware */
	unsigned long attrs;	/* attrs passed to dma_alloc_attrs() */
};
36 
37 static int intbufs_set_buffer(struct venus_inst *inst, u32 type)
38 {
39 	struct venus_core *core = inst->core;
40 	struct device *dev = core->dev;
41 	struct hfi_buffer_requirements bufreq;
42 	struct hfi_buffer_desc bd;
43 	struct intbuf *buf;
44 	unsigned int i;
45 	int ret;
46 
47 	ret = venus_helper_get_bufreq(inst, type, &bufreq);
48 	if (ret)
49 		return 0;
50 
51 	if (!bufreq.size)
52 		return 0;
53 
54 	for (i = 0; i < bufreq.count_actual; i++) {
55 		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
56 		if (!buf) {
57 			ret = -ENOMEM;
58 			goto fail;
59 		}
60 
61 		buf->type = bufreq.type;
62 		buf->size = bufreq.size;
63 		buf->attrs = DMA_ATTR_WRITE_COMBINE |
64 			     DMA_ATTR_NO_KERNEL_MAPPING;
65 		buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
66 					  buf->attrs);
67 		if (!buf->va) {
68 			ret = -ENOMEM;
69 			goto fail;
70 		}
71 
72 		memset(&bd, 0, sizeof(bd));
73 		bd.buffer_size = buf->size;
74 		bd.buffer_type = buf->type;
75 		bd.num_buffers = 1;
76 		bd.device_addr = buf->da;
77 
78 		ret = hfi_session_set_buffers(inst, &bd);
79 		if (ret) {
80 			dev_err(dev, "set session buffers failed\n");
81 			goto dma_free;
82 		}
83 
84 		list_add_tail(&buf->list, &inst->internalbufs);
85 	}
86 
87 	return 0;
88 
89 dma_free:
90 	dma_free_attrs(dev, buf->size, buf->va, buf->da, buf->attrs);
91 fail:
92 	kfree(buf);
93 	return ret;
94 }
95 
96 static int intbufs_unset_buffers(struct venus_inst *inst)
97 {
98 	struct hfi_buffer_desc bd = {0};
99 	struct intbuf *buf, *n;
100 	int ret = 0;
101 
102 	list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
103 		bd.buffer_size = buf->size;
104 		bd.buffer_type = buf->type;
105 		bd.num_buffers = 1;
106 		bd.device_addr = buf->da;
107 		bd.response_required = true;
108 
109 		ret = hfi_session_unset_buffers(inst, &bd);
110 
111 		list_del_init(&buf->list);
112 		dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
113 			       buf->attrs);
114 		kfree(buf);
115 	}
116 
117 	return ret;
118 }
119 
/* Internal buffer types the firmware may require for a session. */
static const unsigned int intbuf_types[] = {
	HFI_BUFFER_INTERNAL_SCRATCH,
	HFI_BUFFER_INTERNAL_SCRATCH_1,
	HFI_BUFFER_INTERNAL_SCRATCH_2,
	HFI_BUFFER_INTERNAL_PERSIST,
	HFI_BUFFER_INTERNAL_PERSIST_1,
};
127 
128 static int intbufs_alloc(struct venus_inst *inst)
129 {
130 	unsigned int i;
131 	int ret;
132 
133 	for (i = 0; i < ARRAY_SIZE(intbuf_types); i++) {
134 		ret = intbufs_set_buffer(inst, intbuf_types[i]);
135 		if (ret)
136 			goto error;
137 	}
138 
139 	return 0;
140 
141 error:
142 	intbufs_unset_buffers(inst);
143 	return ret;
144 }
145 
/* Unregister and free all internal session buffers. */
static int intbufs_free(struct venus_inst *inst)
{
	return intbufs_unset_buffers(inst);
}
150 
151 static u32 load_per_instance(struct venus_inst *inst)
152 {
153 	u32 mbs;
154 
155 	if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP))
156 		return 0;
157 
158 	mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16);
159 
160 	return mbs * inst->fps;
161 }
162 
163 static u32 load_per_type(struct venus_core *core, u32 session_type)
164 {
165 	struct venus_inst *inst = NULL;
166 	u32 mbs_per_sec = 0;
167 
168 	mutex_lock(&core->lock);
169 	list_for_each_entry(inst, &core->instances, list) {
170 		if (inst->session_type != session_type)
171 			continue;
172 
173 		mbs_per_sec += load_per_instance(inst);
174 	}
175 	mutex_unlock(&core->lock);
176 
177 	return mbs_per_sec;
178 }
179 
180 static int load_scale_clocks(struct venus_core *core)
181 {
182 	const struct freq_tbl *table = core->res->freq_tbl;
183 	unsigned int num_rows = core->res->freq_tbl_size;
184 	unsigned long freq = table[0].freq;
185 	struct clk *clk = core->clks[0];
186 	struct device *dev = core->dev;
187 	u32 mbs_per_sec;
188 	unsigned int i;
189 	int ret;
190 
191 	mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
192 		      load_per_type(core, VIDC_SESSION_TYPE_DEC);
193 
194 	if (mbs_per_sec > core->res->max_load)
195 		dev_warn(dev, "HW is overloaded, needed: %d max: %d\n",
196 			 mbs_per_sec, core->res->max_load);
197 
198 	if (!mbs_per_sec && num_rows > 1) {
199 		freq = table[num_rows - 1].freq;
200 		goto set_freq;
201 	}
202 
203 	for (i = 0; i < num_rows; i++) {
204 		if (mbs_per_sec > table[i].load)
205 			break;
206 		freq = table[i].freq;
207 	}
208 
209 set_freq:
210 
211 	if (core->res->hfi_version == HFI_VERSION_3XX) {
212 		ret = clk_set_rate(clk, freq);
213 		ret |= clk_set_rate(core->core0_clk, freq);
214 		ret |= clk_set_rate(core->core1_clk, freq);
215 	} else {
216 		ret = clk_set_rate(clk, freq);
217 	}
218 
219 	if (ret) {
220 		dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
221 		return ret;
222 	}
223 
224 	return 0;
225 }
226 
227 static void fill_buffer_desc(const struct venus_buffer *buf,
228 			     struct hfi_buffer_desc *bd, bool response)
229 {
230 	memset(bd, 0, sizeof(*bd));
231 	bd->buffer_type = HFI_BUFFER_OUTPUT;
232 	bd->buffer_size = buf->size;
233 	bd->num_buffers = 1;
234 	bd->device_addr = buf->dma_addr;
235 	bd->response_required = response;
236 }
237 
238 static void return_buf_error(struct venus_inst *inst,
239 			     struct vb2_v4l2_buffer *vbuf)
240 {
241 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
242 
243 	if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
244 		v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf);
245 	else
246 		v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf);
247 
248 	v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
249 }
250 
251 static int
252 session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
253 {
254 	struct venus_buffer *buf = to_venus_buffer(vbuf);
255 	struct vb2_buffer *vb = &vbuf->vb2_buf;
256 	unsigned int type = vb->type;
257 	struct hfi_frame_data fdata;
258 	int ret;
259 
260 	memset(&fdata, 0, sizeof(fdata));
261 	fdata.alloc_len = buf->size;
262 	fdata.device_addr = buf->dma_addr;
263 	fdata.timestamp = vb->timestamp;
264 	do_div(fdata.timestamp, NSEC_PER_USEC);
265 	fdata.flags = 0;
266 	fdata.clnt_data = vbuf->vb2_buf.index;
267 
268 	if (!fdata.timestamp)
269 		fdata.flags |= HFI_BUFFERFLAG_TIMESTAMPINVALID;
270 
271 	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
272 		fdata.buffer_type = HFI_BUFFER_INPUT;
273 		fdata.filled_len = vb2_get_plane_payload(vb, 0);
274 		fdata.offset = vb->planes[0].data_offset;
275 
276 		if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len)
277 			fdata.flags |= HFI_BUFFERFLAG_EOS;
278 	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
279 		fdata.buffer_type = HFI_BUFFER_OUTPUT;
280 		fdata.filled_len = 0;
281 		fdata.offset = 0;
282 	}
283 
284 	ret = hfi_session_process_buf(inst, &fdata);
285 	if (ret)
286 		return ret;
287 
288 	return 0;
289 }
290 
291 static inline int is_reg_unreg_needed(struct venus_inst *inst)
292 {
293 	if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
294 	    inst->core->res->hfi_version == HFI_VERSION_3XX)
295 		return 0;
296 
297 	if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
298 	    inst->cap_bufs_mode_dynamic &&
299 	    inst->core->res->hfi_version == HFI_VERSION_1XX)
300 		return 0;
301 
302 	return 1;
303 }
304 
305 static int session_unregister_bufs(struct venus_inst *inst)
306 {
307 	struct venus_buffer *buf, *n;
308 	struct hfi_buffer_desc bd;
309 	int ret = 0;
310 
311 	if (!is_reg_unreg_needed(inst))
312 		return 0;
313 
314 	list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) {
315 		fill_buffer_desc(buf, &bd, true);
316 		ret = hfi_session_unset_buffers(inst, &bd);
317 		list_del_init(&buf->reg_list);
318 	}
319 
320 	return ret;
321 }
322 
323 static int session_register_bufs(struct venus_inst *inst)
324 {
325 	struct venus_core *core = inst->core;
326 	struct device *dev = core->dev;
327 	struct hfi_buffer_desc bd;
328 	struct venus_buffer *buf;
329 	int ret = 0;
330 
331 	if (!is_reg_unreg_needed(inst))
332 		return 0;
333 
334 	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
335 		fill_buffer_desc(buf, &bd, false);
336 		ret = hfi_session_set_buffers(inst, &bd);
337 		if (ret) {
338 			dev_err(dev, "%s: set buffer failed\n", __func__);
339 			break;
340 		}
341 	}
342 
343 	return ret;
344 }
345 
346 int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
347 			    struct hfi_buffer_requirements *req)
348 {
349 	u32 ptype = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS;
350 	union hfi_get_property hprop;
351 	unsigned int i;
352 	int ret;
353 
354 	if (req)
355 		memset(req, 0, sizeof(*req));
356 
357 	ret = hfi_session_get_property(inst, ptype, &hprop);
358 	if (ret)
359 		return ret;
360 
361 	ret = -EINVAL;
362 
363 	for (i = 0; i < HFI_BUFFER_TYPE_MAX; i++) {
364 		if (hprop.bufreq[i].type != type)
365 			continue;
366 
367 		if (req)
368 			memcpy(req, &hprop.bufreq[i], sizeof(*req));
369 		ret = 0;
370 		break;
371 	}
372 
373 	return ret;
374 }
375 EXPORT_SYMBOL_GPL(venus_helper_get_bufreq);
376 
377 int venus_helper_set_input_resolution(struct venus_inst *inst,
378 				      unsigned int width, unsigned int height)
379 {
380 	u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
381 	struct hfi_framesize fs;
382 
383 	fs.buffer_type = HFI_BUFFER_INPUT;
384 	fs.width = width;
385 	fs.height = height;
386 
387 	return hfi_session_set_property(inst, ptype, &fs);
388 }
389 EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution);
390 
391 int venus_helper_set_output_resolution(struct venus_inst *inst,
392 				       unsigned int width, unsigned int height)
393 {
394 	u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
395 	struct hfi_framesize fs;
396 
397 	fs.buffer_type = HFI_BUFFER_OUTPUT;
398 	fs.width = width;
399 	fs.height = height;
400 
401 	return hfi_session_set_property(inst, ptype, &fs);
402 }
403 EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution);
404 
405 int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
406 			      unsigned int output_bufs)
407 {
408 	u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
409 	struct hfi_buffer_count_actual buf_count;
410 	int ret;
411 
412 	buf_count.type = HFI_BUFFER_INPUT;
413 	buf_count.count_actual = input_bufs;
414 
415 	ret = hfi_session_set_property(inst, ptype, &buf_count);
416 	if (ret)
417 		return ret;
418 
419 	buf_count.type = HFI_BUFFER_OUTPUT;
420 	buf_count.count_actual = output_bufs;
421 
422 	return hfi_session_set_property(inst, ptype, &buf_count);
423 }
424 EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs);
425 
426 int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
427 {
428 	struct hfi_uncompressed_format_select fmt;
429 	u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
430 	int ret;
431 
432 	if (inst->session_type == VIDC_SESSION_TYPE_DEC)
433 		fmt.buffer_type = HFI_BUFFER_OUTPUT;
434 	else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
435 		fmt.buffer_type = HFI_BUFFER_INPUT;
436 	else
437 		return -EINVAL;
438 
439 	switch (pixfmt) {
440 	case V4L2_PIX_FMT_NV12:
441 		fmt.format = HFI_COLOR_FORMAT_NV12;
442 		break;
443 	case V4L2_PIX_FMT_NV21:
444 		fmt.format = HFI_COLOR_FORMAT_NV21;
445 		break;
446 	default:
447 		return -EINVAL;
448 	}
449 
450 	ret = hfi_session_set_property(inst, ptype, &fmt);
451 	if (ret)
452 		return ret;
453 
454 	return 0;
455 }
456 EXPORT_SYMBOL_GPL(venus_helper_set_color_format);
457 
458 static void delayed_process_buf_func(struct work_struct *work)
459 {
460 	struct venus_buffer *buf, *n;
461 	struct venus_inst *inst;
462 	int ret;
463 
464 	inst = container_of(work, struct venus_inst, delayed_process_work);
465 
466 	mutex_lock(&inst->lock);
467 
468 	if (!(inst->streamon_out & inst->streamon_cap))
469 		goto unlock;
470 
471 	list_for_each_entry_safe(buf, n, &inst->delayed_process, ref_list) {
472 		if (buf->flags & HFI_BUFFERFLAG_READONLY)
473 			continue;
474 
475 		ret = session_process_buf(inst, &buf->vb);
476 		if (ret)
477 			return_buf_error(inst, &buf->vb);
478 
479 		list_del_init(&buf->ref_list);
480 	}
481 unlock:
482 	mutex_unlock(&inst->lock);
483 }
484 
485 void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx)
486 {
487 	struct venus_buffer *buf;
488 
489 	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
490 		if (buf->vb.vb2_buf.index == idx) {
491 			buf->flags &= ~HFI_BUFFERFLAG_READONLY;
492 			schedule_work(&inst->delayed_process_work);
493 			break;
494 		}
495 	}
496 }
497 EXPORT_SYMBOL_GPL(venus_helper_release_buf_ref);
498 
/* Mark a buffer as read-only referenced (held by the firmware). */
void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf)
{
	struct venus_buffer *buf = to_venus_buffer(vbuf);

	buf->flags |= HFI_BUFFERFLAG_READONLY;
}
EXPORT_SYMBOL_GPL(venus_helper_acquire_buf_ref);
506 
507 static int is_buf_refed(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
508 {
509 	struct venus_buffer *buf = to_venus_buffer(vbuf);
510 
511 	if (buf->flags & HFI_BUFFERFLAG_READONLY) {
512 		list_add_tail(&buf->ref_list, &inst->delayed_process);
513 		schedule_work(&inst->delayed_process_work);
514 		return 1;
515 	}
516 
517 	return 0;
518 }
519 
520 struct vb2_v4l2_buffer *
521 venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
522 {
523 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
524 
525 	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
526 		return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
527 	else
528 		return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
529 }
530 EXPORT_SYMBOL_GPL(venus_helper_find_buf);
531 
532 int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
533 {
534 	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
535 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
536 	struct venus_buffer *buf = to_venus_buffer(vbuf);
537 	struct sg_table *sgt;
538 
539 	sgt = vb2_dma_sg_plane_desc(vb, 0);
540 	if (!sgt)
541 		return -EFAULT;
542 
543 	buf->size = vb2_plane_size(vb, 0);
544 	buf->dma_addr = sg_dma_address(sgt->sgl);
545 
546 	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
547 		list_add_tail(&buf->reg_list, &inst->registeredbufs);
548 
549 	return 0;
550 }
551 EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init);
552 
553 int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
554 {
555 	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
556 
557 	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
558 	    vb2_plane_size(vb, 0) < inst->output_buf_size)
559 		return -EINVAL;
560 	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
561 	    vb2_plane_size(vb, 0) < inst->input_buf_size)
562 		return -EINVAL;
563 
564 	return 0;
565 }
566 EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare);
567 
/*
 * vb2 buf_queue: hand a buffer to the m2m framework and, when both
 * queues are streaming, submit it to the firmware session.
 *
 * Ordering matters and is protected by inst->lock: a pending stop
 * command completes the buffer immediately with the LAST flag; buffers
 * still read-only referenced by the firmware are deferred via
 * is_buf_refed(); anything else is processed right away and returned
 * with an error state if submission fails.
 */
void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	int ret;

	mutex_lock(&inst->lock);

	/* a stop command drains the queue: mark this buffer as last */
	if (inst->cmd_stop) {
		vbuf->flags |= V4L2_BUF_FLAG_LAST;
		v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
		inst->cmd_stop = false;
		goto unlock;
	}

	v4l2_m2m_buf_queue(m2m_ctx, vbuf);

	/* defer submission until both queues are streaming */
	if (!(inst->streamon_out & inst->streamon_cap))
		goto unlock;

	/* firmware still holds a reference: delayed work will resubmit */
	ret = is_buf_refed(inst, vbuf);
	if (ret)
		goto unlock;

	ret = session_process_buf(inst, vbuf);
	if (ret)
		return_buf_error(inst, vbuf);

unlock:
	mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_queue);
601 
602 void venus_helper_buffers_done(struct venus_inst *inst,
603 			       enum vb2_buffer_state state)
604 {
605 	struct vb2_v4l2_buffer *buf;
606 
607 	while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx)))
608 		v4l2_m2m_buf_done(buf, state);
609 	while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
610 		v4l2_m2m_buf_done(buf, state);
611 }
612 EXPORT_SYMBOL_GPL(venus_helper_buffers_done);
613 
/*
 * vb2 stop_streaming: tear down the firmware session when the other
 * queue was also streaming, release all buffers with an error state,
 * and clear this queue's streaming flag.
 *
 * The session teardown accumulates errors across stop/unload/unregister/
 * free/deinit (any nonzero result, or a session/system error, triggers a
 * session abort as a last resort) and rescales the clocks for the
 * remaining load.  All of this runs under inst->lock.
 */
void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
{
	struct venus_inst *inst = vb2_get_drv_priv(q);
	struct venus_core *core = inst->core;
	int ret;

	mutex_lock(&inst->lock);

	/* both queues were streaming: full session teardown */
	if (inst->streamon_out & inst->streamon_cap) {
		ret = hfi_session_stop(inst);
		ret |= hfi_session_unload_res(inst);
		ret |= session_unregister_bufs(inst);
		ret |= intbufs_free(inst);
		ret |= hfi_session_deinit(inst);

		if (inst->session_error || core->sys_error)
			ret = -EIO;

		/* any failure above: abort the session as a last resort */
		if (ret)
			hfi_session_abort(inst);

		load_scale_clocks(core);
	}

	venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);

	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		inst->streamon_out = 0;
	else
		inst->streamon_cap = 0;

	mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);
648 
/*
 * vb2 start_streaming helper: bring the firmware session up by
 * allocating internal buffers, registering capture buffers, scaling
 * clocks for the new load, loading session resources, and finally
 * starting the session.
 *
 * The goto chain unwinds in exact reverse order of acquisition; each
 * failure label undoes only the steps that already succeeded.
 */
int venus_helper_vb2_start_streaming(struct venus_inst *inst)
{
	struct venus_core *core = inst->core;
	int ret;

	ret = intbufs_alloc(inst);
	if (ret)
		return ret;

	ret = session_register_bufs(inst);
	if (ret)
		goto err_bufs_free;

	load_scale_clocks(core);

	ret = hfi_session_load_res(inst);
	if (ret)
		goto err_unreg_bufs;

	ret = hfi_session_start(inst);
	if (ret)
		goto err_unload_res;

	return 0;

err_unload_res:
	hfi_session_unload_res(inst);
err_unreg_bufs:
	session_unregister_bufs(inst);
err_bufs_free:
	intbufs_free(inst);
	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_start_streaming);
683 
684 void venus_helper_m2m_device_run(void *priv)
685 {
686 	struct venus_inst *inst = priv;
687 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
688 	struct v4l2_m2m_buffer *buf, *n;
689 	int ret;
690 
691 	mutex_lock(&inst->lock);
692 
693 	v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
694 		ret = session_process_buf(inst, &buf->vb);
695 		if (ret)
696 			return_buf_error(inst, &buf->vb);
697 	}
698 
699 	v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
700 		ret = session_process_buf(inst, &buf->vb);
701 		if (ret)
702 			return_buf_error(inst, &buf->vb);
703 	}
704 
705 	mutex_unlock(&inst->lock);
706 }
707 EXPORT_SYMBOL_GPL(venus_helper_m2m_device_run);
708 
/* m2m job_abort: nothing can be cancelled mid-flight, just finish the job. */
void venus_helper_m2m_job_abort(void *priv)
{
	struct venus_inst *inst = priv;

	v4l2_m2m_job_finish(inst->m2m_dev, inst->m2m_ctx);
}
EXPORT_SYMBOL_GPL(venus_helper_m2m_job_abort);
716 
/*
 * Per-instance init: decoder sessions need the delayed-process machinery
 * for buffers the firmware holds read-only references on.
 */
void venus_helper_init_instance(struct venus_inst *inst)
{
	if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
		INIT_LIST_HEAD(&inst->delayed_process);
		INIT_WORK(&inst->delayed_process_work,
			  delayed_process_buf_func);
	}
}
EXPORT_SYMBOL_GPL(venus_helper_init_instance);
726