// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 * Copyright (C) 2017 Linaro Ltd.
 */
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <media/videobuf2-dma-sg.h>
#include <media/v4l2-mem2mem.h>
#include <asm/div64.h>

#include "core.h"
#include "helpers.h"
#include "hfi_helper.h"
#include "hfi_venus_io.h"

struct intbuf {
	struct list_head list;
	u32 type;
	size_t size;
	void *va;
	dma_addr_t da;
	unsigned long attrs;
};

bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
{
	struct venus_core *core = inst->core;
	u32 session_type = inst->session_type;
	u32 codec;

	switch (v4l2_pixfmt) {
	case V4L2_PIX_FMT_H264:
		codec = HFI_VIDEO_CODEC_H264;
		break;
	case V4L2_PIX_FMT_H263:
		codec = HFI_VIDEO_CODEC_H263;
		break;
	case V4L2_PIX_FMT_MPEG1:
		codec = HFI_VIDEO_CODEC_MPEG1;
		break;
	case V4L2_PIX_FMT_MPEG2:
		codec = HFI_VIDEO_CODEC_MPEG2;
		break;
	case V4L2_PIX_FMT_MPEG4:
		codec = HFI_VIDEO_CODEC_MPEG4;
		break;
	case V4L2_PIX_FMT_VC1_ANNEX_G:
	case V4L2_PIX_FMT_VC1_ANNEX_L:
		codec = HFI_VIDEO_CODEC_VC1;
		break;
	case V4L2_PIX_FMT_VP8:
		codec = HFI_VIDEO_CODEC_VP8;
		break;
	case V4L2_PIX_FMT_VP9:
		codec = HFI_VIDEO_CODEC_VP9;
		break;
	case V4L2_PIX_FMT_XVID:
		codec = HFI_VIDEO_CODEC_DIVX;
		break;
	case V4L2_PIX_FMT_HEVC:
		codec = HFI_VIDEO_CODEC_HEVC;
		break;
	default:
		return false;
	}

	if (session_type == VIDC_SESSION_TYPE_ENC && core->enc_codecs & codec)
		return true;

	if (session_type == VIDC_SESSION_TYPE_DEC && core->dec_codecs & codec)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(venus_helper_check_codec);

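/*
 * Queue all preallocated DPB (decoded picture buffer) buffers to the
 * firmware so they can be used for reference picture storage once the
 * session starts.
 */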
static int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
{
	struct intbuf *buf;
	int ret = 0;

	list_for_each_entry(buf, &inst->dpbbufs, list) {
		struct hfi_frame_data fdata;

		memset(&fdata, 0, sizeof(fdata));
		fdata.alloc_len = buf->size;
		fdata.device_addr = buf->da;
		fdata.buffer_type = buf->type;

		ret = hfi_session_process_buf(inst, &fdata);
		if (ret)
			goto fail;
	}

fail:
	return ret;
}

int venus_helper_free_dpb_bufs(struct venus_inst *inst)
{
	struct intbuf *buf, *n;

	list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) {
		list_del_init(&buf->list);
		dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
			       buf->attrs);
		kfree(buf);
	}

	INIT_LIST_HEAD(&inst->dpbbufs);

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_free_dpb_bufs);

int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
{
	struct venus_core *core = inst->core;
	struct device *dev = core->dev;
	enum hfi_version ver = core->res->hfi_version;
	struct hfi_buffer_requirements bufreq;
	u32 buftype = inst->dpb_buftype;
	unsigned int dpb_size = 0;
	struct intbuf *buf;
	unsigned int i;
	u32 count;
	int ret;

	/* no need to allocate dpb buffers */
	if (!inst->dpb_fmt)
		return 0;

	if (inst->dpb_buftype == HFI_BUFFER_OUTPUT)
		dpb_size = inst->output_buf_size;
	else if (inst->dpb_buftype == HFI_BUFFER_OUTPUT2)
		dpb_size = inst->output2_buf_size;

	if (!dpb_size)
		return 0;

	ret = venus_helper_get_bufreq(inst, buftype, &bufreq);
	if (ret)
		return ret;

	count = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);

	for (i = 0; i < count; i++) {
		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto fail;
		}

		buf->type = buftype;
		buf->size = dpb_size;
		buf->attrs = DMA_ATTR_WRITE_COMBINE |
			     DMA_ATTR_NO_KERNEL_MAPPING;
		buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
					  buf->attrs);
		if (!buf->va) {
			kfree(buf);
			ret = -ENOMEM;
			goto fail;
		}

		list_add_tail(&buf->list, &inst->dpbbufs);
	}

	return 0;

fail:
	venus_helper_free_dpb_bufs(inst);
	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_alloc_dpb_bufs);

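/*
 * Allocate the firmware-internal (scratch/persist) buffers of the given
 * type according to the requirements reported by the firmware and
 * register them with the session.
 */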
static int intbufs_set_buffer(struct venus_inst *inst, u32 type)
{
	struct venus_core *core = inst->core;
	struct device *dev = core->dev;
	struct hfi_buffer_requirements bufreq;
	struct hfi_buffer_desc bd;
	struct intbuf *buf;
	unsigned int i;
	int ret;

	ret = venus_helper_get_bufreq(inst, type, &bufreq);
	if (ret)
		return 0;

	if (!bufreq.size)
		return 0;

	for (i = 0; i < bufreq.count_actual; i++) {
		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto fail;
		}

		buf->type = bufreq.type;
		buf->size = bufreq.size;
		buf->attrs = DMA_ATTR_WRITE_COMBINE |
			     DMA_ATTR_NO_KERNEL_MAPPING;
		buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
					  buf->attrs);
		if (!buf->va) {
			ret = -ENOMEM;
			goto fail;
		}

		memset(&bd, 0, sizeof(bd));
		bd.buffer_size = buf->size;
		bd.buffer_type = buf->type;
		bd.num_buffers = 1;
		bd.device_addr = buf->da;

		ret = hfi_session_set_buffers(inst, &bd);
		if (ret) {
			dev_err(dev, "set session buffers failed\n");
			goto dma_free;
		}

		list_add_tail(&buf->list, &inst->internalbufs);
	}

	return 0;

dma_free:
	dma_free_attrs(dev, buf->size, buf->va, buf->da, buf->attrs);
fail:
	kfree(buf);
	return ret;
}

static int intbufs_unset_buffers(struct venus_inst *inst)
{
	struct hfi_buffer_desc bd = {0};
	struct intbuf *buf, *n;
	int ret = 0;

	list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
		bd.buffer_size = buf->size;
		bd.buffer_type = buf->type;
		bd.num_buffers = 1;
		bd.device_addr = buf->da;
		bd.response_required = true;

		ret = hfi_session_unset_buffers(inst, &bd);

		list_del_init(&buf->list);
		dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
			       buf->attrs);
		kfree(buf);
	}

	return ret;
}

static const unsigned int intbuf_types_1xx[] = {
	HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_1XX),
	HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_1XX),
	HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_1XX),
	HFI_BUFFER_INTERNAL_PERSIST,
	HFI_BUFFER_INTERNAL_PERSIST_1,
};

static const unsigned int intbuf_types_4xx[] = {
	HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_4XX),
	HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_4XX),
	HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_4XX),
	HFI_BUFFER_INTERNAL_PERSIST,
	HFI_BUFFER_INTERNAL_PERSIST_1,
};

static int intbufs_alloc(struct venus_inst *inst)
{
	const unsigned int *intbuf;
	size_t arr_sz, i;
	int ret;

	if (IS_V4(inst->core)) {
		arr_sz = ARRAY_SIZE(intbuf_types_4xx);
		intbuf = intbuf_types_4xx;
	} else {
		arr_sz = ARRAY_SIZE(intbuf_types_1xx);
		intbuf = intbuf_types_1xx;
	}

	for (i = 0; i < arr_sz; i++) {
		ret = intbufs_set_buffer(inst, intbuf[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	intbufs_unset_buffers(inst);
	return ret;
}

static int intbufs_free(struct venus_inst *inst)
{
	return intbufs_unset_buffers(inst);
}

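/* Instance load in macroblocks per second: 16x16 macroblocks times fps. */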
static u32 load_per_instance(struct venus_inst *inst)
{
	u32 mbs;

	if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP))
		return 0;

	mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16);

	return mbs * inst->fps;
}

static u32 load_per_type(struct venus_core *core, u32 session_type)
{
	struct venus_inst *inst = NULL;
	u32 mbs_per_sec = 0;

	mutex_lock(&core->lock);
	list_for_each_entry(inst, &core->instances, list) {
		if (inst->session_type != session_type)
			continue;

		mbs_per_sec += load_per_instance(inst);
	}
	mutex_unlock(&core->lock);

	return mbs_per_sec;
}

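/*
 * Scale the Venus core clocks according to the aggregated encoder and
 * decoder load, expressed in macroblocks per second.  A warning is
 * printed when the load exceeds the platform maximum.
 */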
static int load_scale_clocks(struct venus_core *core)
{
	const struct freq_tbl *table = core->res->freq_tbl;
	unsigned int num_rows = core->res->freq_tbl_size;
	unsigned long freq = table[0].freq;
	struct clk *clk = core->clks[0];
	struct device *dev = core->dev;
	u32 mbs_per_sec;
	unsigned int i;
	int ret;

	mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
		      load_per_type(core, VIDC_SESSION_TYPE_DEC);

	if (mbs_per_sec > core->res->max_load)
		dev_warn(dev, "HW is overloaded, needed: %d max: %d\n",
			 mbs_per_sec, core->res->max_load);

	if (!mbs_per_sec && num_rows > 1) {
		freq = table[num_rows - 1].freq;
		goto set_freq;
	}

	for (i = 0; i < num_rows; i++) {
		if (mbs_per_sec > table[i].load)
			break;
		freq = table[i].freq;
	}

set_freq:

	ret = clk_set_rate(clk, freq);
	if (ret)
		goto err;

	ret = clk_set_rate(core->core0_clk, freq);
	if (ret)
		goto err;

	ret = clk_set_rate(core->core1_clk, freq);
	if (ret)
		goto err;

	return 0;

err:
	dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
	return ret;
}

static void fill_buffer_desc(const struct venus_buffer *buf,
			     struct hfi_buffer_desc *bd, bool response)
{
	memset(bd, 0, sizeof(*bd));
	bd->buffer_type = HFI_BUFFER_OUTPUT;
	bd->buffer_size = buf->size;
	bd->num_buffers = 1;
	bd->device_addr = buf->dma_addr;
	bd->response_required = response;
}

static void return_buf_error(struct venus_inst *inst,
			     struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;

	if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf);
	else
		v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf);

	v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
}

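/*
 * Translate a vb2 buffer into an HFI frame descriptor and queue it to the
 * firmware.  The vb2 timestamp is converted from nanoseconds to
 * microseconds, and an empty or LAST-flagged input buffer is marked EOS.
 */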
static int
session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
	struct venus_buffer *buf = to_venus_buffer(vbuf);
	struct vb2_buffer *vb = &vbuf->vb2_buf;
	unsigned int type = vb->type;
	struct hfi_frame_data fdata;
	int ret;

	memset(&fdata, 0, sizeof(fdata));
	fdata.alloc_len = buf->size;
	fdata.device_addr = buf->dma_addr;
	fdata.timestamp = vb->timestamp;
	do_div(fdata.timestamp, NSEC_PER_USEC);
	fdata.flags = 0;
	fdata.clnt_data = vbuf->vb2_buf.index;

	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		fdata.buffer_type = HFI_BUFFER_INPUT;
		fdata.filled_len = vb2_get_plane_payload(vb, 0);
		fdata.offset = vb->planes[0].data_offset;

		if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len)
			fdata.flags |= HFI_BUFFERFLAG_EOS;
	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		if (inst->session_type == VIDC_SESSION_TYPE_ENC)
			fdata.buffer_type = HFI_BUFFER_OUTPUT;
		else
			fdata.buffer_type = inst->opb_buftype;
		fdata.filled_len = 0;
		fdata.offset = 0;
	}

	ret = hfi_session_process_buf(inst, &fdata);
	if (ret)
		return ret;

	return 0;
}

static bool is_dynamic_bufmode(struct venus_inst *inst)
{
	struct venus_core *core = inst->core;
	struct venus_caps *caps;

	/*
	 * v4 doesn't send BUFFER_ALLOC_MODE_SUPPORTED property and supports
	 * dynamic buffer mode by default for HFI_BUFFER_OUTPUT/OUTPUT2.
	 */
	if (IS_V4(core))
		return true;

	caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
	if (!caps)
		return false;

	return caps->cap_bufs_mode_dynamic;
}

static int session_unregister_bufs(struct venus_inst *inst)
{
	struct venus_buffer *buf, *n;
	struct hfi_buffer_desc bd;
	int ret = 0;

	if (is_dynamic_bufmode(inst))
		return 0;

	list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) {
		fill_buffer_desc(buf, &bd, true);
		ret = hfi_session_unset_buffers(inst, &bd);
		list_del_init(&buf->reg_list);
	}

	return ret;
}

static int session_register_bufs(struct venus_inst *inst)
{
	struct venus_core *core = inst->core;
	struct device *dev = core->dev;
	struct hfi_buffer_desc bd;
	struct venus_buffer *buf;
	int ret = 0;

	if (is_dynamic_bufmode(inst))
		return 0;

	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
		fill_buffer_desc(buf, &bd, false);
		ret = hfi_session_set_buffers(inst, &bd);
		if (ret) {
			dev_err(dev, "%s: set buffer failed\n", __func__);
			break;
		}
	}

	return ret;
}

static u32 to_hfi_raw_fmt(u32 v4l2_fmt)
{
	switch (v4l2_fmt) {
	case V4L2_PIX_FMT_NV12:
		return HFI_COLOR_FORMAT_NV12;
	case V4L2_PIX_FMT_NV21:
		return HFI_COLOR_FORMAT_NV21;
	default:
		break;
	}

	return 0;
}

int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
			    struct hfi_buffer_requirements *req)
{
	u32 ptype = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS;
	union hfi_get_property hprop;
	unsigned int i;
	int ret;

	if (req)
		memset(req, 0, sizeof(*req));

	ret = hfi_session_get_property(inst, ptype, &hprop);
	if (ret)
		return ret;

	ret = -EINVAL;

	for (i = 0; i < HFI_BUFFER_TYPE_MAX; i++) {
		if (hprop.bufreq[i].type != type)
			continue;

		if (req)
			memcpy(req, &hprop.bufreq[i], sizeof(*req));
		ret = 0;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_get_bufreq);

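/*
 * Raw NV12 frame size: 128-byte aligned strides, luma scanlines aligned
 * to 32 and chroma scanlines to 16, plus inter-plane padding, rounded up
 * to a 4K boundary.
 */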
static u32 get_framesize_raw_nv12(u32 width, u32 height)
{
	u32 y_stride, uv_stride, y_plane;
	u32 y_sclines, uv_sclines, uv_plane;
	u32 size;

	y_stride = ALIGN(width, 128);
	uv_stride = ALIGN(width, 128);
	y_sclines = ALIGN(height, 32);
	uv_sclines = ALIGN(((height + 1) >> 1), 16);

	y_plane = y_stride * y_sclines;
	uv_plane = uv_stride * uv_sclines + SZ_4K;
	size = y_plane + uv_plane + SZ_8K;

	return ALIGN(size, SZ_4K);
}

static u32 get_framesize_raw_nv12_ubwc(u32 width, u32 height)
{
	u32 y_meta_stride, y_meta_plane;
	u32 y_stride, y_plane;
	u32 uv_meta_stride, uv_meta_plane;
	u32 uv_stride, uv_plane;
	u32 extradata = SZ_16K;

	y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64);
	y_meta_plane = y_meta_stride * ALIGN(DIV_ROUND_UP(height, 8), 16);
	y_meta_plane = ALIGN(y_meta_plane, SZ_4K);

	y_stride = ALIGN(width, 128);
	y_plane = ALIGN(y_stride * ALIGN(height, 32), SZ_4K);

	uv_meta_stride = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
	uv_meta_plane = uv_meta_stride * ALIGN(DIV_ROUND_UP(height / 2, 8), 16);
	uv_meta_plane = ALIGN(uv_meta_plane, SZ_4K);

	uv_stride = ALIGN(width, 128);
	uv_plane = ALIGN(uv_stride * ALIGN(height / 2, 32), SZ_4K);

	return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane +
		     max(extradata, y_stride * 48), SZ_4K);
}

u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height)
{
	switch (hfi_fmt) {
	case HFI_COLOR_FORMAT_NV12:
	case HFI_COLOR_FORMAT_NV21:
		return get_framesize_raw_nv12(width, height);
	case HFI_COLOR_FORMAT_NV12_UBWC:
		return get_framesize_raw_nv12_ubwc(width, height);
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(venus_helper_get_framesz_raw);

u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height)
{
	u32 hfi_fmt, sz;
	bool compressed;

	switch (v4l2_fmt) {
	case V4L2_PIX_FMT_MPEG:
	case V4L2_PIX_FMT_H264:
	case V4L2_PIX_FMT_H264_NO_SC:
	case V4L2_PIX_FMT_H264_MVC:
	case V4L2_PIX_FMT_H263:
	case V4L2_PIX_FMT_MPEG1:
	case V4L2_PIX_FMT_MPEG2:
	case V4L2_PIX_FMT_MPEG4:
	case V4L2_PIX_FMT_XVID:
	case V4L2_PIX_FMT_VC1_ANNEX_G:
	case V4L2_PIX_FMT_VC1_ANNEX_L:
	case V4L2_PIX_FMT_VP8:
	case V4L2_PIX_FMT_VP9:
	case V4L2_PIX_FMT_HEVC:
		compressed = true;
		break;
	default:
		compressed = false;
		break;
	}

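	/*
	 * Compressed bitstream buffer size: estimated as half of the
	 * corresponding aligned 4:2:0 raw frame, rounded up to 4K.
	 */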
	if (compressed) {
		sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2;
		return ALIGN(sz, SZ_4K);
	}

	hfi_fmt = to_hfi_raw_fmt(v4l2_fmt);
	if (!hfi_fmt)
		return 0;

	return venus_helper_get_framesz_raw(hfi_fmt, width, height);
}
EXPORT_SYMBOL_GPL(venus_helper_get_framesz);

int venus_helper_set_input_resolution(struct venus_inst *inst,
				      unsigned int width, unsigned int height)
{
	u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
	struct hfi_framesize fs;

	fs.buffer_type = HFI_BUFFER_INPUT;
	fs.width = width;
	fs.height = height;

	return hfi_session_set_property(inst, ptype, &fs);
}
EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution);

int venus_helper_set_output_resolution(struct venus_inst *inst,
				       unsigned int width, unsigned int height,
				       u32 buftype)
{
	u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
	struct hfi_framesize fs;

	fs.buffer_type = buftype;
	fs.width = width;
	fs.height = height;

	return hfi_session_set_property(inst, ptype, &fs);
}
EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution);

int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode)
{
	const u32 ptype = HFI_PROPERTY_PARAM_WORK_MODE;
	struct hfi_video_work_mode wm;

	if (!IS_V4(inst->core))
		return 0;

	wm.video_work_mode = mode;

	return hfi_session_set_property(inst, ptype, &wm);
}
EXPORT_SYMBOL_GPL(venus_helper_set_work_mode);

int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
{
	const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
	struct hfi_videocores_usage_type cu;

	if (!IS_V4(inst->core))
		return 0;

	cu.video_core_enable_mask = usage;

	return hfi_session_set_property(inst, ptype, &cu);
}
EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);

int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
			      unsigned int output_bufs,
			      unsigned int output2_bufs)
{
	u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
	struct hfi_buffer_count_actual buf_count;
	int ret;

	buf_count.type = HFI_BUFFER_INPUT;
	buf_count.count_actual = input_bufs;

	ret = hfi_session_set_property(inst, ptype, &buf_count);
	if (ret)
		return ret;

	buf_count.type = HFI_BUFFER_OUTPUT;
	buf_count.count_actual = output_bufs;

	ret = hfi_session_set_property(inst, ptype, &buf_count);
	if (ret)
		return ret;

	if (output2_bufs) {
		buf_count.type = HFI_BUFFER_OUTPUT2;
		buf_count.count_actual = output2_bufs;

		ret = hfi_session_set_property(inst, ptype, &buf_count);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs);

int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format,
				u32 buftype)
{
	const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
	struct hfi_uncompressed_format_select fmt;

	fmt.buffer_type = buftype;
	fmt.format = hfi_format;

	return hfi_session_set_property(inst, ptype, &fmt);
}
EXPORT_SYMBOL_GPL(venus_helper_set_raw_format);

int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
{
	u32 hfi_format, buftype;

	if (inst->session_type == VIDC_SESSION_TYPE_DEC)
		buftype = HFI_BUFFER_OUTPUT;
	else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
		buftype = HFI_BUFFER_INPUT;
	else
		return -EINVAL;

	hfi_format = to_hfi_raw_fmt(pixfmt);
	if (!hfi_format)
		return -EINVAL;

	return venus_helper_set_raw_format(inst, hfi_format, buftype);
}
EXPORT_SYMBOL_GPL(venus_helper_set_color_format);

int venus_helper_set_multistream(struct venus_inst *inst, bool out_en,
				 bool out2_en)
{
	struct hfi_multi_stream multi = {0};
	u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
	int ret;

	multi.buffer_type = HFI_BUFFER_OUTPUT;
	multi.enable = out_en;

	ret = hfi_session_set_property(inst, ptype, &multi);
	if (ret)
		return ret;

	multi.buffer_type = HFI_BUFFER_OUTPUT2;
	multi.enable = out2_en;

	return hfi_session_set_property(inst, ptype, &multi);
}
EXPORT_SYMBOL_GPL(venus_helper_set_multistream);

int venus_helper_set_dyn_bufmode(struct venus_inst *inst)
{
	const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
	struct hfi_buffer_alloc_mode mode;
	int ret;

	if (!is_dynamic_bufmode(inst))
		return 0;

	mode.type = HFI_BUFFER_OUTPUT;
	mode.mode = HFI_BUFFER_MODE_DYNAMIC;

	ret = hfi_session_set_property(inst, ptype, &mode);
	if (ret)
		return ret;

	mode.type = HFI_BUFFER_OUTPUT2;

	return hfi_session_set_property(inst, ptype, &mode);
}
EXPORT_SYMBOL_GPL(venus_helper_set_dyn_bufmode);

int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype)
{
	const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
	struct hfi_buffer_size_actual bufsz;

	bufsz.type = buftype;
	bufsz.size = bufsize;

	return hfi_session_set_property(inst, ptype, &bufsz);
}
EXPORT_SYMBOL_GPL(venus_helper_set_bufsize);

unsigned int venus_helper_get_opb_size(struct venus_inst *inst)
{
	/* the encoder has only one output */
	if (inst->session_type == VIDC_SESSION_TYPE_ENC)
		return inst->output_buf_size;

	if (inst->opb_buftype == HFI_BUFFER_OUTPUT)
		return inst->output_buf_size;
	else if (inst->opb_buftype == HFI_BUFFER_OUTPUT2)
		return inst->output2_buf_size;

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_get_opb_size);

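/*
 * Worker that re-queues capture buffers whose processing was deferred
 * because the firmware still held a reference on them; buffers still
 * marked READONLY are skipped until their reference is released.
 */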
static void delayed_process_buf_func(struct work_struct *work)
{
	struct venus_buffer *buf, *n;
	struct venus_inst *inst;
	int ret;

	inst = container_of(work, struct venus_inst, delayed_process_work);

	mutex_lock(&inst->lock);

	if (!(inst->streamon_out & inst->streamon_cap))
		goto unlock;

	list_for_each_entry_safe(buf, n, &inst->delayed_process, ref_list) {
		if (buf->flags & HFI_BUFFERFLAG_READONLY)
			continue;

		ret = session_process_buf(inst, &buf->vb);
		if (ret)
			return_buf_error(inst, &buf->vb);

		list_del_init(&buf->ref_list);
	}
unlock:
	mutex_unlock(&inst->lock);
}

void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx)
{
	struct venus_buffer *buf;

	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
		if (buf->vb.vb2_buf.index == idx) {
			buf->flags &= ~HFI_BUFFERFLAG_READONLY;
			schedule_work(&inst->delayed_process_work);
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(venus_helper_release_buf_ref);

void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf)
{
	struct venus_buffer *buf = to_venus_buffer(vbuf);

	buf->flags |= HFI_BUFFERFLAG_READONLY;
}
EXPORT_SYMBOL_GPL(venus_helper_acquire_buf_ref);

static int is_buf_refed(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
	struct venus_buffer *buf = to_venus_buffer(vbuf);

	if (buf->flags & HFI_BUFFERFLAG_READONLY) {
		list_add_tail(&buf->ref_list, &inst->delayed_process);
		schedule_work(&inst->delayed_process_work);
		return 1;
	}

	return 0;
}

struct vb2_v4l2_buffer *
venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
{
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;

	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
	else
		return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
}
EXPORT_SYMBOL_GPL(venus_helper_find_buf);

int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
{
	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct venus_buffer *buf = to_venus_buffer(vbuf);
	struct sg_table *sgt;

	sgt = vb2_dma_sg_plane_desc(vb, 0);
	if (!sgt)
		return -EFAULT;

	buf->size = vb2_plane_size(vb, 0);
	buf->dma_addr = sg_dma_address(sgt->sgl);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		list_add_tail(&buf->reg_list, &inst->registeredbufs);

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init);

int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
{
	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	unsigned int out_buf_size = venus_helper_get_opb_size(inst);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
	    vb2_plane_size(vb, 0) < out_buf_size)
		return -EINVAL;
	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
	    vb2_plane_size(vb, 0) < inst->input_buf_size)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare);

void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	int ret;

	mutex_lock(&inst->lock);

	v4l2_m2m_buf_queue(m2m_ctx, vbuf);

	if (!(inst->streamon_out & inst->streamon_cap))
		goto unlock;

	ret = is_buf_refed(inst, vbuf);
	if (ret)
		goto unlock;

	ret = session_process_buf(inst, vbuf);
	if (ret)
		return_buf_error(inst, vbuf);

unlock:
	mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_queue);

void venus_helper_buffers_done(struct venus_inst *inst,
			       enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *buf;

	while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx)))
		v4l2_m2m_buf_done(buf, state);
	while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
		v4l2_m2m_buf_done(buf, state);
}
EXPORT_SYMBOL_GPL(venus_helper_buffers_done);

void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
{
	struct venus_inst *inst = vb2_get_drv_priv(q);
	struct venus_core *core = inst->core;
	int ret;

	mutex_lock(&inst->lock);

	if (inst->streamon_out & inst->streamon_cap) {
		ret = hfi_session_stop(inst);
		ret |= hfi_session_unload_res(inst);
		ret |= session_unregister_bufs(inst);
		ret |= intbufs_free(inst);
		ret |= hfi_session_deinit(inst);

		if (inst->session_error || core->sys_error)
			ret = -EIO;

		if (ret)
			hfi_session_abort(inst);

		venus_helper_free_dpb_bufs(inst);

		load_scale_clocks(core);
		INIT_LIST_HEAD(&inst->registeredbufs);
	}

	venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);

	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		inst->streamon_out = 0;
	else
		inst->streamon_cap = 0;

	mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);

int venus_helper_vb2_start_streaming(struct venus_inst *inst)
{
	struct venus_core *core = inst->core;
	int ret;

	ret = intbufs_alloc(inst);
	if (ret)
		return ret;

	ret = session_register_bufs(inst);
	if (ret)
		goto err_bufs_free;

	load_scale_clocks(core);

	ret = hfi_session_load_res(inst);
	if (ret)
		goto err_unreg_bufs;

	ret = hfi_session_start(inst);
	if (ret)
		goto err_unload_res;

	ret = venus_helper_queue_dpb_bufs(inst);
	if (ret)
		goto err_session_stop;

	return 0;

err_session_stop:
	hfi_session_stop(inst);
err_unload_res:
	hfi_session_unload_res(inst);
err_unreg_bufs:
	session_unregister_bufs(inst);
err_bufs_free:
	intbufs_free(inst);
	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_start_streaming);

void venus_helper_m2m_device_run(void *priv)
{
	struct venus_inst *inst = priv;
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	struct v4l2_m2m_buffer *buf, *n;
	int ret;

	mutex_lock(&inst->lock);

	v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
		ret = session_process_buf(inst, &buf->vb);
		if (ret)
			return_buf_error(inst, &buf->vb);
	}

	v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
		ret = session_process_buf(inst, &buf->vb);
		if (ret)
			return_buf_error(inst, &buf->vb);
	}

	mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_m2m_device_run);

void venus_helper_m2m_job_abort(void *priv)
{
	struct venus_inst *inst = priv;

	v4l2_m2m_job_finish(inst->m2m_dev, inst->m2m_ctx);
}
EXPORT_SYMBOL_GPL(venus_helper_m2m_job_abort);

void venus_helper_init_instance(struct venus_inst *inst)
{
	if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
		INIT_LIST_HEAD(&inst->delayed_process);
		INIT_WORK(&inst->delayed_process_work,
			  delayed_process_buf_func);
	}
}
EXPORT_SYMBOL_GPL(venus_helper_init_instance);

static bool find_fmt_from_caps(struct venus_caps *caps, u32 buftype, u32 fmt)
{
	unsigned int i;

	for (i = 0; i < caps->num_fmts; i++) {
		if (caps->fmts[i].buftype == buftype &&
		    caps->fmts[i].fmt == fmt)
			return true;
	}

	return false;
}

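/*
 * Resolve the HFI output formats for a raw v4l2 pixel format.  When UBWC
 * is requested and supported, the primary output (OUTPUT) gets the UBWC
 * variant and the secondary output (OUTPUT2) the linear format; otherwise
 * the linear format is assigned to whichever output plane supports it.
 */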
int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
			      u32 *out_fmt, u32 *out2_fmt, bool ubwc)
{
	struct venus_core *core = inst->core;
	struct venus_caps *caps;
	u32 ubwc_fmt, fmt = to_hfi_raw_fmt(v4l2_fmt);
	bool found, found_ubwc;

	*out_fmt = *out2_fmt = 0;

	if (!fmt)
		return -EINVAL;

	caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
	if (!caps)
		return -EINVAL;

	if (ubwc) {
		ubwc_fmt = fmt | HFI_COLOR_FORMAT_UBWC_BASE;
		found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
						ubwc_fmt);
		found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);

		if (found_ubwc && found) {
			*out_fmt = ubwc_fmt;
			*out2_fmt = fmt;
			return 0;
		}
	}

	found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt);
	if (found) {
		*out_fmt = fmt;
		*out2_fmt = 0;
		return 0;
	}

	found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
	if (found) {
		*out_fmt = 0;
		*out2_fmt = fmt;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(venus_helper_get_out_fmts);

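/*
 * Toggle the per-codec power control in the Venus wrapper registers.
 * This is a no-op on pre-v3 cores; v3 only writes the control register,
 * while v4 additionally polls the MMCC power status bit until it reflects
 * the requested state.
 */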
int venus_helper_power_enable(struct venus_core *core, u32 session_type,
			      bool enable)
{
	void __iomem *ctrl, *stat;
	u32 val;
	int ret;

	if (!IS_V3(core) && !IS_V4(core))
		return 0;

	if (IS_V3(core)) {
		if (session_type == VIDC_SESSION_TYPE_DEC)
			ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
		else
			ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
		if (enable)
			writel(0, ctrl);
		else
			writel(1, ctrl);

		return 0;
	}

	if (session_type == VIDC_SESSION_TYPE_DEC) {
		ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
		stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
	} else {
		ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
		stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
	}

	if (enable) {
		writel(0, ctrl);

		ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100);
		if (ret)
			return ret;
	} else {
		writel(1, ctrl);

		ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_power_enable);