// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_rpc.h"
#include "vpu_mbox.h"
#include "vpu_defs.h"
#include "vpu_cmds.h"
#include "vpu_msgs.h"
#include "vpu_v4l2.h"

#define VPU_PKT_HEADER_LENGTH		3

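/* Associates a host message id with the callback that handles it for an instance. */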
struct vpu_msg_handler {
	u32 id;
	void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
};

static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);
}

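/*
 * The firmware requests encoder frame, reference frame and activation
 * buffers; unpack the sizes and counts and forward them to the instance
 * ops under the instance lock.
 */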
static void vpu_session_handle_mem_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_pkt_mem_req_data req_data = { 0 };

	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&req_data);
	vpu_trace(inst->dev, "[%d] %d:%d %d:%d %d:%d\n",
		  inst->id,
		  req_data.enc_frame_size,
		  req_data.enc_frame_num,
		  req_data.ref_frame_size,
		  req_data.ref_frame_num,
		  req_data.act_buf_size,
		  req_data.act_buf_num);
	vpu_inst_lock(inst);
	call_void_vop(inst, mem_request,
		      req_data.enc_frame_size,
		      req_data.enc_frame_num,
		      req_data.ref_frame_size,
		      req_data.ref_frame_num,
		      req_data.act_buf_size,
		      req_data.act_buf_num);
	vpu_inst_unlock(inst);
}

static void vpu_session_handle_stop_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);

	call_void_vop(inst, stop_done);
}

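/*
 * A sequence header was found in the bitstream: preset the stride from the
 * core resources, then unpack the codec info and notify the client.
 */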
static void vpu_session_handle_seq_hdr(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_dec_codec_info info;
	const struct vpu_core_resources *res;

	memset(&info, 0, sizeof(info));
	res = vpu_get_resource(inst);
	info.stride = res ? res->stride : 1;
	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
	call_void_vop(inst, event_notify, VPU_MSG_ID_SEQ_HDR_FOUND, &info);
}

static void vpu_session_handle_resolution_change(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	call_void_vop(inst, event_notify, VPU_MSG_ID_RES_CHANGE, NULL);
}

static void vpu_session_handle_enc_frame_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_enc_pic_info info = { 0 };

	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
	dev_dbg(inst->dev, "[%d] frame id = %d, wptr = 0x%x, size = %d\n",
		inst->id, info.frame_id, info.wptr, info.frame_size);
	call_void_vop(inst, get_one_frame, &info);
}

static void vpu_session_handle_frame_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_fs_info fs = { 0 };

	vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
	call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_REQ, &fs);
}

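/*
 * Frame release is reported differently per core type: the encoder
 * identifies the released frame by sequence number, the decoder by its
 * frame store info.
 */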
static void vpu_session_handle_frame_release(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	if (inst->core->type == VPU_CORE_TYPE_ENC) {
		struct vpu_frame_info info;

		memset(&info, 0, sizeof(info));
		vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info.sequence);
		dev_dbg(inst->dev, "[%d] %d\n", inst->id, info.sequence);
		info.type = inst->out_format.type;
		call_void_vop(inst, buf_done, &info);
	} else if (inst->core->type == VPU_CORE_TYPE_DEC) {
		struct vpu_fs_info fs = { 0 };

		vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
		call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_RELEASE, &fs);
	}
}

static void vpu_session_handle_input_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	dev_dbg(inst->dev, "[%d]\n", inst->id);
	call_void_vop(inst, input_done);
}

static void vpu_session_handle_pic_decoded(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_dec_pic_info info = { 0 };

	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
	call_void_vop(inst, get_one_frame, &info);
}

static void vpu_session_handle_pic_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_dec_pic_info info = { 0 };
	struct vpu_frame_info frame;

	memset(&frame, 0, sizeof(frame));
	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
	if (inst->core->type == VPU_CORE_TYPE_DEC)
		frame.type = inst->cap_format.type;
	frame.id = info.id;
	frame.luma = info.luma;
	frame.skipped = info.skipped;
	frame.timestamp = info.timestamp;

	call_void_vop(inst, buf_done, &frame);
}

static void vpu_session_handle_eos(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	call_void_vop(inst, event_notify, VPU_MSG_ID_PIC_EOS, NULL);
}

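/*
 * The firmware reported an error or an unsupported stream: notify the
 * client and put the instance into the error state.
 */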
static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	char *str = (char *)pkt->data;

	if (strlen(str))
		dev_err(inst->dev, "instance %d firmware error : %s\n", inst->id, str);
	else
		dev_err(inst->dev, "instance %d is unsupported stream\n", inst->id);
	call_void_vop(inst, event_notify, VPU_MSG_ID_UNSUPPORTED, NULL);
	vpu_v4l2_set_error(inst);
}

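/*
 * A firmware exception is fatal for the instance: mark it as hung on the
 * core and put it into the error state.
 */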
static void vpu_session_handle_firmware_xcpt(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	char *str = (char *)pkt->data;

	dev_err(inst->dev, "%s firmware xcpt: %s\n",
		vpu_core_type_desc(inst->core->type), str);
	call_void_vop(inst, event_notify, VPU_MSG_ID_FIRMWARE_XCPT, NULL);
	set_bit(inst->id, &inst->core->hang_mask);
	vpu_v4l2_set_error(inst);
}

static void vpu_session_handle_pic_skipped(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	vpu_inst_lock(inst);
	vpu_skip_frame(inst, 1);
	vpu_inst_unlock(inst);
}

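/* Dispatch table mapping each host message id to its per-instance handler. */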
static struct vpu_msg_handler handlers[] = {
	{VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
	{VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
	{VPU_MSG_ID_MEM_REQUEST, vpu_session_handle_mem_request},
	{VPU_MSG_ID_SEQ_HDR_FOUND, vpu_session_handle_seq_hdr},
	{VPU_MSG_ID_RES_CHANGE, vpu_session_handle_resolution_change},
	{VPU_MSG_ID_FRAME_INPUT_DONE, vpu_session_handle_input_done},
	{VPU_MSG_ID_FRAME_REQ, vpu_session_handle_frame_request},
	{VPU_MSG_ID_FRAME_RELEASE, vpu_session_handle_frame_release},
	{VPU_MSG_ID_ENC_DONE, vpu_session_handle_enc_frame_done},
	{VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
	{VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
	{VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
	{VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error},
	{VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt},
	{VPU_MSG_ID_PIC_SKIPPED, vpu_session_handle_pic_skipped},
};

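/*
 * Convert the firmware message id to the host id, run the matching handler
 * if one is registered, and acknowledge any command that is waiting on a
 * response for this message id.
 */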
static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
{
	int ret;
	u32 msg_id;
	struct vpu_msg_handler *handler = NULL;
	unsigned int i;

	ret = vpu_iface_convert_msg_id(inst->core, msg->hdr.id);
	if (ret < 0)
		return -EINVAL;

	msg_id = ret;
	dev_dbg(inst->dev, "[%d] receive event(%s)\n", inst->id, vpu_id_name(msg_id));

	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
		if (handlers[i].id == msg_id) {
			handler = &handlers[i];
			break;
		}
	}

	if (handler && handler->done)
		handler->done(inst, msg);

	vpu_response_cmd(inst, msg_id, 1);

	return 0;
}

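/*
 * Pull one complete packet (header plus optional payload words) from the
 * instance message fifo; return false if a full packet is not available.
 */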
static bool vpu_inst_receive_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	unsigned long bytes = sizeof(struct vpu_rpc_event_header);
	u32 ret;

	memset(pkt, 0, sizeof(*pkt));
	if (kfifo_len(&inst->msg_fifo) < bytes)
		return false;

	ret = kfifo_out(&inst->msg_fifo, pkt, bytes);
	if (ret != bytes)
		return false;

	if (pkt->hdr.num > 0) {
		bytes = pkt->hdr.num * sizeof(u32);
		ret = kfifo_out(&inst->msg_fifo, pkt->data, bytes);
		if (ret != bytes)
			return false;
	}

	return true;
}

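/* Per-instance worker: drain the message fifo and handle every queued packet. */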
void vpu_inst_run_work(struct work_struct *work)
{
	struct vpu_inst *inst = container_of(work, struct vpu_inst, msg_work);
	struct vpu_rpc_event pkt;

	while (vpu_inst_receive_msg(inst, &pkt))
		vpu_session_handle_msg(inst, &pkt);
}

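/*
 * Copy the packet into the instance message fifo and kick the instance
 * worker; the message is handled later in process context.
 */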
static void vpu_inst_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	unsigned long bytes;
	u32 id = pkt->hdr.id;
	int ret;

	if (!inst->workqueue)
		return;

	bytes = sizeof(pkt->hdr) + pkt->hdr.num * sizeof(u32);
	ret = kfifo_in(&inst->msg_fifo, pkt, bytes);
	if (ret != bytes)
		dev_err(inst->dev, "[%d:%d]overflow: %d\n", inst->core->id, inst->id, id);
	queue_work(inst->workqueue, &inst->msg_work);
}

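/*
 * Drain all pending messages from the firmware, convert each message id
 * and route the packet to the instance addressed by its header index.
 */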
static int vpu_handle_msg(struct vpu_core *core)
{
	struct vpu_rpc_event pkt;
	struct vpu_inst *inst;
	int ret;

	memset(&pkt, 0, sizeof(pkt));
	while (!vpu_iface_receive_msg(core, &pkt)) {
		dev_dbg(core->dev, "event index = %d, id = %d, num = %d\n",
			pkt.hdr.index, pkt.hdr.id, pkt.hdr.num);

		ret = vpu_iface_convert_msg_id(core, pkt.hdr.id);
		if (ret < 0)
			continue;

		inst = vpu_core_find_instance(core, pkt.hdr.index);
		if (inst) {
			vpu_response_cmd(inst, ret, 0);
			mutex_lock(&core->cmd_lock);
			vpu_inst_record_flow(inst, ret);
			mutex_unlock(&core->cmd_lock);

			vpu_inst_handle_msg(inst, &pkt);
			vpu_inst_put(inst);
		}
		memset(&pkt, 0, sizeof(pkt));
	}

	return 0;
}

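/*
 * Process one interrupt code: a sync request is answered over the mailbox
 * with the rpc buffer offset, boot address and init done; boot done and
 * snapshot done are completed in vpu_isr(); any other code means firmware
 * messages are pending.
 */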
static int vpu_isr_thread(struct vpu_core *core, u32 irq_code)
{
	dev_dbg(core->dev, "irq code = 0x%x\n", irq_code);
	switch (irq_code) {
	case VPU_IRQ_CODE_SYNC:
		vpu_mbox_send_msg(core, PRC_BUF_OFFSET, core->rpc.phys - core->fw.phys);
		vpu_mbox_send_msg(core, BOOT_ADDRESS, core->fw.phys);
		vpu_mbox_send_msg(core, INIT_DONE, 2);
		break;
	case VPU_IRQ_CODE_BOOT_DONE:
		break;
	case VPU_IRQ_CODE_SNAPSHOT_DONE:
		break;
	default:
		vpu_handle_msg(core);
		break;
	}

	return 0;
}

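/* Handle every interrupt code currently queued in the core message fifo. */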
static void vpu_core_run_msg_work(struct vpu_core *core)
{
	const unsigned int SIZE = sizeof(u32);

	while (kfifo_len(&core->msg_fifo) >= SIZE) {
		u32 data = 0;

		if (kfifo_out(&core->msg_fifo, &data, SIZE) == SIZE)
			vpu_isr_thread(core, data);
	}
}

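/*
 * Core worker: process the queued interrupt codes, then schedule the
 * delayed work so that anything left behind is picked up later.
 */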
void vpu_msg_run_work(struct work_struct *work)
{
	struct vpu_core *core = container_of(work, struct vpu_core, msg_work);
	unsigned long delay = msecs_to_jiffies(10);

	vpu_core_run_msg_work(core);
	queue_delayed_work(core->workqueue, &core->msg_delayed_work, delay);
}

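/*
 * Delayed work: catch up on any interrupt codes or per-instance messages
 * still sitting in the fifos and make sure their workers run.
 */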
void vpu_msg_delayed_work(struct work_struct *work)
{
	struct vpu_core *core;
	struct delayed_work *dwork;
	unsigned long bytes = sizeof(u32);
	u32 i;

	if (!work)
		return;

	dwork = to_delayed_work(work);
	core = container_of(dwork, struct vpu_core, msg_delayed_work);
	if (kfifo_len(&core->msg_fifo) >= bytes)
		vpu_core_run_msg_work(core);

	bytes = sizeof(struct vpu_rpc_event_header);
	for (i = 0; i < core->supported_instance_count; i++) {
		struct vpu_inst *inst = vpu_core_find_instance(core, i);

		if (!inst)
			continue;

		if (inst->workqueue && kfifo_len(&inst->msg_fifo) >= bytes)
			queue_work(inst->workqueue, &inst->msg_work);

		vpu_inst_put(inst);
	}
}

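/*
 * Interrupt handler: complete boot/snapshot waits immediately, then queue
 * the interrupt code for the core worker to handle in process context.
 */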
int vpu_isr(struct vpu_core *core, u32 irq)
{
	switch (irq) {
	case VPU_IRQ_CODE_SYNC:
		break;
	case VPU_IRQ_CODE_BOOT_DONE:
		complete(&core->cmp);
		break;
	case VPU_IRQ_CODE_SNAPSHOT_DONE:
		complete(&core->cmp);
		break;
	default:
		break;
	}

	if (kfifo_in(&core->msg_fifo, &irq, sizeof(irq)) != sizeof(irq))
		dev_err(core->dev, "[%d]overflow: %d\n", core->id, irq);
	queue_work(core->workqueue, &core->msg_work);

	return 0;
}