// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_v4l2.h"
#include "vpu_msgs.h"
#include "vpu_helpers.h"

void vpu_inst_lock(struct vpu_inst *inst)
{
	mutex_lock(&inst->lock);
}

void vpu_inst_unlock(struct vpu_inst *inst)
{
	mutex_unlock(&inst->lock);
}

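/*
 * Plane accessors below account for the plane's data_offset: the
 * returned DMA address points at the payload start and the returned
 * length excludes the offset bytes.
 */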
dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_dma_contig_plane_dma_addr(vb, plane_no) +
			vb->planes[plane_no].data_offset;
}

unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_plane_size(vb, plane_no) - vb->planes[plane_no].data_offset;
}

void vpu_set_buffer_state(struct vb2_v4l2_buffer *vbuf, unsigned int state)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	vpu_buf->state = state;
}

unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	return vpu_buf->state;
}

void vpu_v4l2_set_error(struct vpu_inst *inst)
{
	vpu_inst_lock(inst);
	dev_err(inst->dev, "an error occurred in the codec\n");
	if (inst->fh.m2m_ctx) {
		vb2_queue_error(v4l2_m2m_get_src_vq(inst->fh.m2m_ctx));
		vb2_queue_error(v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx));
	}
	vpu_inst_unlock(inst);
}

int vpu_notify_eos(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_EOS
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);

	return 0;
}

int vpu_notify_source_change(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);
	return 0;
}

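/*
 * Mark the capture queue as drained, so a subsequent DQBUF reports
 * EPIPE per the V4L2 stateful codec API. This is refused (-EINVAL)
 * while done buffers are still pending dequeue, and optionally emits
 * V4L2_EVENT_EOS as well.
 */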
int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
	if (!list_empty(&q->done_list))
		return -EINVAL;

	if (q->last_buffer_dequeued)
		return 0;
	vpu_trace(inst->dev, "last buffer dequeued\n");
	q->last_buffer_dequeued = true;
	wake_up(&q->done_wq);
	if (eos)
		vpu_notify_eos(inst);
	return 0;
}

bool vpu_is_source_empty(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;

	if (!inst->fh.m2m_ctx)
		return true;
	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		if (vpu_get_buffer_state(&buf->vb) == VPU_BUF_STATE_IDLE)
			return false;
	}
	return true;
}

static int vpu_init_format(struct vpu_inst *inst, struct vpu_format *fmt)
{
	const struct vpu_format *info;

	info = vpu_helper_find_format(inst, fmt->type, fmt->pixfmt);
	if (!info) {
		info = vpu_helper_enum_format(inst, fmt->type, 0);
		if (!info)
			return -EINVAL;
	}
	memcpy(fmt, info, sizeof(*fmt));

	return 0;
}

static int vpu_calc_fmt_bytesperline(struct v4l2_format *f, struct vpu_format *fmt)
{
	struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
	int i;

	if (fmt->flags & V4L2_FMT_FLAG_COMPRESSED) {
		for (i = 0; i < fmt->comp_planes; i++)
			fmt->bytesperline[i] = 0;
		return 0;
	}
	if (pixmp->num_planes == fmt->comp_planes) {
		for (i = 0; i < fmt->comp_planes; i++)
			fmt->bytesperline[i] = pixmp->plane_fmt[i].bytesperline;
		return 0;
	}
	if (pixmp->num_planes > 1)
		return -EINVAL;

	/*
	 * amphion vpu only supports nv12 and nv12 tiled,
	 * so the bytesperline of luma and chroma should be the same
	 */
	for (i = 0; i < fmt->comp_planes; i++)
		fmt->bytesperline[i] = pixmp->plane_fmt[0].bytesperline;

	return 0;
}

static int vpu_calc_fmt_sizeimage(struct vpu_inst *inst, struct vpu_format *fmt)
{
	u32 stride = 1;
	int i;

	if (!(fmt->flags & V4L2_FMT_FLAG_COMPRESSED)) {
		const struct vpu_core_resources *res = vpu_get_resource(inst);

		if (res)
			stride = res->stride;
	}

	for (i = 0; i < fmt->comp_planes; i++) {
		fmt->sizeimage[i] = vpu_helper_get_plane_size(fmt->pixfmt,
							      fmt->width,
							      fmt->height,
							      i,
							      stride,
							      fmt->field != V4L2_FIELD_NONE ? 1 : 0,
							      &fmt->bytesperline[i]);
		fmt->sizeimage[i] = max_t(u32, fmt->sizeimage[i], PAGE_SIZE);
		if (fmt->flags & V4L2_FMT_FLAG_COMPRESSED) {
			fmt->sizeimage[i] = clamp_val(fmt->sizeimage[i], SZ_128K, SZ_8M);
			fmt->bytesperline[i] = 0;
		}
	}

	return 0;
}

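/*
 * Size of one memory plane. When the format packs more component
 * planes than memory planes (the contiguous NV12 case), the last
 * memory plane also carries all remaining component planes, so
 * their sizes are summed into it.
 */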
u32 vpu_get_fmt_plane_size(struct vpu_format *fmt, u32 plane_no)
{
	u32 size;
	int i;

	if (plane_no >= fmt->mem_planes)
		return 0;

	if (fmt->comp_planes == fmt->mem_planes)
		return fmt->sizeimage[plane_no];
	if (plane_no < fmt->mem_planes - 1)
		return fmt->sizeimage[plane_no];

	size = fmt->sizeimage[plane_no];
	for (i = fmt->mem_planes; i < fmt->comp_planes; i++)
		size += fmt->sizeimage[i];

	return size;
}

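/*
 * Common TRY_FMT/S_FMT negotiation: fall back to the first supported
 * format if the requested pixelformat is unknown, clamp the
 * resolution to a valid frame size, recompute bytesperline and
 * sizeimage, then write the negotiated values back to userspace.
 */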
int vpu_try_fmt_common(struct vpu_inst *inst, struct v4l2_format *f, struct vpu_format *fmt)
{
	struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
	int i;
	int ret;

	fmt->pixfmt = pixmp->pixelformat;
	fmt->type = f->type;
	ret = vpu_init_format(inst, fmt);
	if (ret < 0)
		return ret;

	fmt->width = pixmp->width;
	fmt->height = pixmp->height;
	if (fmt->width)
		fmt->width = vpu_helper_valid_frame_width(inst, fmt->width);
	if (fmt->height)
		fmt->height = vpu_helper_valid_frame_height(inst, fmt->height);
	fmt->field = pixmp->field == V4L2_FIELD_ANY ? V4L2_FIELD_NONE : pixmp->field;
	vpu_calc_fmt_bytesperline(f, fmt);
	vpu_calc_fmt_sizeimage(inst, fmt);
	if ((fmt->flags & V4L2_FMT_FLAG_COMPRESSED) && pixmp->plane_fmt[0].sizeimage)
		fmt->sizeimage[0] = clamp_val(pixmp->plane_fmt[0].sizeimage, SZ_128K, SZ_8M);

	pixmp->pixelformat = fmt->pixfmt;
	pixmp->width = fmt->width;
	pixmp->height = fmt->height;
	pixmp->flags = fmt->flags;
	pixmp->num_planes = fmt->mem_planes;
	pixmp->field = fmt->field;
	memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
	for (i = 0; i < pixmp->num_planes; i++) {
		pixmp->plane_fmt[i].bytesperline = fmt->bytesperline[i];
		pixmp->plane_fmt[i].sizeimage = vpu_get_fmt_plane_size(fmt, i);
		memset(pixmp->plane_fmt[i].reserved, 0, sizeof(pixmp->plane_fmt[i].reserved));
	}

	return 0;
}

static bool vpu_check_ready(struct vpu_inst *inst, u32 type)
{
	if (!inst)
		return false;
	if (inst->state == VPU_CODEC_STATE_DEINIT || inst->id < 0)
		return false;
	if (!inst->ops->check_ready)
		return true;
	return call_vop(inst, check_ready, type);
}

int vpu_process_output_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->out_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}

	if (!vbuf)
		return -EINVAL;

	dev_dbg(inst->dev, "[%d]frame id = %d / %d\n",
		inst->id, vbuf->sequence, inst->sequence);
	return call_vop(inst, process_output, &vbuf->vb2_buf);
}

int vpu_process_capture_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->cap_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}
	if (!vbuf)
		return -EINVAL;

	return call_vop(inst, process_capture, &vbuf->vb2_buf);
}

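/*
 * Return the next pending source buffer carrying frame data.
 * Codec-config (header) buffers at the head of the queue are
 * completed on the fly and never handed back to the caller.
 */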
struct vb2_v4l2_buffer *vpu_next_src_buf(struct vpu_inst *inst)
{
	struct vb2_v4l2_buffer *src_buf = NULL;

	if (!inst->fh.m2m_ctx)
		return NULL;

	src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
	if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
		return NULL;

	while (vpu_vb_is_codecconfig(src_buf)) {
		v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);

		src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return NULL;
	}

	return src_buf;
}

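/*
 * Drop up to @count pending source buffers. Buffers the firmware has
 * already decoded complete with VB2_BUF_STATE_DONE; the rest
 * complete with VB2_BUF_STATE_ERROR.
 */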
void vpu_skip_frame(struct vpu_inst *inst, int count)
{
	struct vb2_v4l2_buffer *src_buf;
	enum vb2_buffer_state state;
	int i = 0;

	if (count <= 0 || !inst->fh.m2m_ctx)
		return;

	while (i < count) {
		src_buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return;
		if (vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_DECODED)
			state = VB2_BUF_STATE_DONE;
		else
			state = VB2_BUF_STATE_ERROR;
		i++;
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, state);
	}
}

struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

int vpu_get_num_buffers(struct vpu_inst *inst, u32 type)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (V4L2_TYPE_IS_OUTPUT(type))
		q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
	else
		q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);

	return q->num_buffers;
}

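/*
 * device_run is deliberately empty: buffers are handed to the
 * firmware straight from buf_queue via vpu_process_output_buffer()
 * and vpu_process_capture_buffer(), so the m2m job scheduler has
 * nothing left to do here.
 */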
static void vpu_m2m_device_run(void *priv)
{
}

static void vpu_m2m_job_abort(void *priv)
{
	struct vpu_inst *inst = priv;
	struct v4l2_m2m_ctx *m2m_ctx = inst->fh.m2m_ctx;

	v4l2_m2m_job_finish(m2m_ctx->m2m_dev, m2m_ctx);
}

static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = vpu_m2m_device_run,
	.job_abort = vpu_m2m_job_abort
};

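/*
 * When *plane_count is non-zero this is a CREATE_BUFS-style request
 * and the given sizes are only validated against the current format;
 * otherwise the plane count and sizes are filled in and the buffer
 * count is raised to the per-instance minimum.
 */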
static int vpu_vb2_queue_setup(struct vb2_queue *vq,
			       unsigned int *buf_count,
			       unsigned int *plane_count,
			       unsigned int psize[],
			       struct device *allocators[])
{
	struct vpu_inst *inst = vb2_get_drv_priv(vq);
	struct vpu_format *cur_fmt;
	int i;

	cur_fmt = vpu_get_format(inst, vq->type);

	if (*plane_count) {
		if (*plane_count != cur_fmt->mem_planes)
			return -EINVAL;
		for (i = 0; i < cur_fmt->mem_planes; i++) {
			if (psize[i] < vpu_get_fmt_plane_size(cur_fmt, i))
				return -EINVAL;
		}
		return 0;
	}

	if (V4L2_TYPE_IS_OUTPUT(vq->type))
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_out);
	else
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_cap);
	*plane_count = cur_fmt->mem_planes;
	for (i = 0; i < cur_fmt->mem_planes; i++)
		psize[i] = vpu_get_fmt_plane_size(cur_fmt, i);

	if (V4L2_TYPE_IS_OUTPUT(vq->type) && inst->state == VPU_CODEC_STATE_SEEK) {
		vpu_trace(inst->dev, "reinit when VIDIOC_REQBUFS(OUTPUT, 0)\n");
		call_void_vop(inst, release);
	}

	return 0;
}

static int vpu_vb2_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
	return 0;
}

static int vpu_vb2_buf_out_validate(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->field = V4L2_FIELD_NONE;

	return 0;
}

static int vpu_vb2_buf_prepare(struct vb2_buffer *vb)
{
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_format *cur_fmt;
	u32 i;

	cur_fmt = vpu_get_format(inst, vb->type);
	for (i = 0; i < cur_fmt->mem_planes; i++) {
		if (vpu_get_vb_length(vb, i) < vpu_get_fmt_plane_size(cur_fmt, i)) {
			dev_dbg(inst->dev, "[%d] %s buf[%d] is invalid\n",
				inst->id, vpu_type_name(vb->type), vb->index);
			vpu_set_buffer_state(vbuf, VPU_BUF_STATE_ERROR);
		}
	}

	return 0;
}

static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_queue *q = vb->vb2_queue;

	if (vbuf->flags & V4L2_BUF_FLAG_LAST)
		vpu_notify_eos(inst);

	if (list_empty(&q->done_list))
		call_void_vop(inst, on_queue_empty, q->type);
}

void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *buf;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		while ((buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	} else {
		while ((buf = v4l2_m2m_dst_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	}
}

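/*
 * The instance lock is dropped around vpu_inst_register(),
 * presumably to avoid lock-order inversion while the instance is
 * attached to a core. On failure the queued buffers are returned in
 * the QUEUED state, as vb2 requires when start_streaming fails.
 */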
static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);
	struct vpu_format *fmt = vpu_get_format(inst, q->type);
	int ret;

	vpu_inst_unlock(inst);
	ret = vpu_inst_register(inst);
	vpu_inst_lock(inst);
	if (ret) {
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	vpu_trace(inst->dev, "[%d] %s %c%c%c%c %dx%d %u(%u) %u(%u) %u(%u) %d\n",
		  inst->id, vpu_type_name(q->type),
		  fmt->pixfmt,
		  fmt->pixfmt >> 8,
		  fmt->pixfmt >> 16,
		  fmt->pixfmt >> 24,
		  fmt->width, fmt->height,
		  fmt->sizeimage[0], fmt->bytesperline[0],
		  fmt->sizeimage[1], fmt->bytesperline[1],
		  fmt->sizeimage[2], fmt->bytesperline[2],
		  q->num_buffers);
	vb2_clear_last_buffer_dequeued(q);
	ret = call_vop(inst, start, q->type);
	if (ret)
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);

	return ret;
}

static void vpu_vb2_stop_streaming(struct vb2_queue *q)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);

	vpu_trace(inst->dev, "[%d] %s\n", inst->id, vpu_type_name(q->type));

	call_void_vop(inst, stop, q->type);
	vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_ERROR);
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		inst->sequence = 0;
}

static void vpu_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);

	if (V4L2_TYPE_IS_OUTPUT(vb->type))
		vbuf->sequence = inst->sequence++;

	v4l2_m2m_buf_queue(inst->fh.m2m_ctx, vbuf);
	vpu_process_output_buffer(inst);
	vpu_process_capture_buffer(inst);
}

static const struct vb2_ops vpu_vb2_ops = {
	.queue_setup        = vpu_vb2_queue_setup,
	.buf_init           = vpu_vb2_buf_init,
	.buf_out_validate   = vpu_vb2_buf_out_validate,
	.buf_prepare        = vpu_vb2_buf_prepare,
	.buf_finish         = vpu_vb2_buf_finish,
	.start_streaming    = vpu_vb2_start_streaming,
	.stop_streaming     = vpu_vb2_stop_streaming,
	.buf_queue          = vpu_vb2_buf_queue,
	.wait_prepare       = vb2_ops_wait_prepare,
	.wait_finish        = vb2_ops_wait_finish,
};

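/*
 * Both queues default to DMA contiguous memory. The compressed side
 * (decoder OUTPUT, encoder CAPTURE) falls back to vmalloc memops
 * when a stream buffer is in use, presumably because the bitstream
 * is then copied through the firmware's ring buffer instead of
 * being DMA-mapped directly.
 */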
static int vpu_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct vpu_inst *inst = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	inst->out_format.type = src_vq->type;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->ops = &vpu_vb2_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_DEC && inst->use_stream_buffer)
		src_vq->mem_ops = &vb2_vmalloc_memops;
	src_vq->drv_priv = inst;
	src_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	src_vq->min_buffers_needed = 1;
	src_vq->dev = inst->vpu->dev;
	src_vq->lock = &inst->lock;
	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	inst->cap_format.type = dst_vq->type;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->ops = &vpu_vb2_ops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_ENC && inst->use_stream_buffer)
		dst_vq->mem_ops = &vb2_vmalloc_memops;
	dst_vq->drv_priv = inst;
	dst_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	dst_vq->min_buffers_needed = 1;
	dst_vq->dev = inst->vpu->dev;
	dst_vq->lock = &inst->lock;
	ret = vb2_queue_init(dst_vq);
	if (ret) {
		vb2_queue_release(src_vq);
		return ret;
	}

	return 0;
}

static int vpu_v4l2_release(struct vpu_inst *inst)
{
	vpu_trace(inst->vpu->dev, "%p\n", inst);

	vpu_release_core(inst->core);
	put_device(inst->dev);

	if (inst->workqueue) {
		cancel_work_sync(&inst->msg_work);
		destroy_workqueue(inst->workqueue);
		inst->workqueue = NULL;
	}

	v4l2_ctrl_handler_free(&inst->ctrl_handler);
	mutex_destroy(&inst->lock);
	v4l2_fh_del(&inst->fh);
	v4l2_fh_exit(&inst->fh);

	call_void_vop(inst, cleanup);

	return 0;
}

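/*
 * Open one codec instance: bind it to a core, initialize the v4l2
 * file handle, controls and m2m context, and set up an ordered
 * workqueue plus message fifo for deferred firmware-message
 * handling. Note that a missing workqueue is tolerated here; only
 * ctrl_init or m2m_ctx failures abort the open.
 */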
int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_func *func;
	int ret = 0;

	if (!inst || !inst->ops)
		return -EINVAL;

	if (inst->type == VPU_CORE_TYPE_ENC)
		func = &vpu->encoder;
	else
		func = &vpu->decoder;

	atomic_set(&inst->ref_count, 0);
	atomic_long_set(&inst->last_response_cmd, 0);
	vpu_inst_get(inst);
	inst->vpu = vpu;
	inst->core = vpu_request_core(vpu, inst->type);
	if (inst->core)
		inst->dev = get_device(inst->core->dev);
	mutex_init(&inst->lock);
	INIT_LIST_HEAD(&inst->cmd_q);
	inst->id = VPU_INST_NULL_ID;
	inst->release = vpu_v4l2_release;
	inst->pid = current->pid;
	inst->tgid = current->tgid;
	inst->min_buffer_cap = 2;
	inst->min_buffer_out = 2;
	v4l2_fh_init(&inst->fh, func->vfd);
	v4l2_fh_add(&inst->fh);

	ret = call_vop(inst, ctrl_init);
	if (ret)
		goto error;

	inst->fh.m2m_ctx = v4l2_m2m_ctx_init(func->m2m_dev, inst, vpu_m2m_queue_init);
	if (IS_ERR(inst->fh.m2m_ctx)) {
		dev_err(vpu->dev, "v4l2_m2m_ctx_init failed\n");
		ret = PTR_ERR(inst->fh.m2m_ctx);
		goto error;
	}

	inst->fh.ctrl_handler = &inst->ctrl_handler;
	file->private_data = &inst->fh;
	inst->state = VPU_CODEC_STATE_DEINIT;
	inst->workqueue = alloc_ordered_workqueue("vpu_inst", WQ_MEM_RECLAIM);
	if (inst->workqueue) {
		INIT_WORK(&inst->msg_work, vpu_inst_run_work);
		ret = kfifo_init(&inst->msg_fifo,
				 inst->msg_buffer,
				 rounddown_pow_of_two(sizeof(inst->msg_buffer)));
		if (ret) {
			destroy_workqueue(inst->workqueue);
			inst->workqueue = NULL;
		}
	}
	vpu_trace(vpu->dev, "tgid = %d, pid = %d, type = %s, inst = %p\n",
		  inst->tgid, inst->pid, vpu_core_type_desc(inst->type), inst);

	return 0;
error:
	vpu_inst_put(inst);
	return ret;
}

int vpu_v4l2_close(struct file *file)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_inst *inst = to_inst(file);

	vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst);

	vpu_inst_lock(inst);
	if (inst->fh.m2m_ctx) {
		v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
		inst->fh.m2m_ctx = NULL;
	}
	call_void_vop(inst, release);
	vpu_inst_unlock(inst);

	vpu_inst_unregister(inst);
	vpu_inst_put(inst);

	return 0;
}

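/*
 * Register the encoder or decoder video device and attach it to the
 * media controller. Error paths unwind whatever was set up before
 * the failing step.
 */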
int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func)
{
	struct video_device *vfd;
	int ret;

	if (!vpu || !func)
		return -EINVAL;

	if (func->vfd)
		return 0;

	func->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(func->m2m_dev)) {
		dev_err(vpu->dev, "v4l2_m2m_init failed\n");
		func->vfd = NULL;
		return PTR_ERR(func->m2m_dev);
	}

	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_m2m_release(func->m2m_dev);
		dev_err(vpu->dev, "failed to alloc vpu video device\n");
		return -ENOMEM;
	}
	vfd->release = video_device_release;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
	if (func->type == VPU_CORE_TYPE_ENC) {
		strscpy(vfd->name, "amphion-vpu-encoder", sizeof(vfd->name));
		vfd->fops = venc_get_fops();
		vfd->ioctl_ops = venc_get_ioctl_ops();
	} else {
		strscpy(vfd->name, "amphion-vpu-decoder", sizeof(vfd->name));
		vfd->fops = vdec_get_fops();
		vfd->ioctl_ops = vdec_get_ioctl_ops();
	}

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		video_device_release(vfd);
		v4l2_m2m_release(func->m2m_dev);
		return ret;
	}
	video_set_drvdata(vfd, vpu);
	func->vfd = vfd;

	ret = v4l2_m2m_register_media_controller(func->m2m_dev, func->vfd, func->function);
	if (ret) {
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
		video_unregister_device(func->vfd);
		func->vfd = NULL;
		return ret;
	}

	return 0;
}

void vpu_remove_func(struct vpu_func *func)
{
	if (!func)
		return;

	if (func->m2m_dev) {
		v4l2_m2m_unregister_media_controller(func->m2m_dev);
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
	}
	if (func->vfd) {
		video_unregister_device(func->vfd);
		func->vfd = NULL;
	}
}