1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2020-2021 NXP
4  */
5 
6 #include <linux/init.h>
7 #include <linux/interconnect.h>
8 #include <linux/ioctl.h>
9 #include <linux/list.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/platform_device.h>
13 #include <linux/slab.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/vmalloc.h>
17 #include "vpu.h"
18 #include "vpu_defs.h"
19 #include "vpu_cmds.h"
20 #include "vpu_rpc.h"
21 #include "vpu_mbox.h"
22 
/*
 * Pairing of a command id with the firmware message that acknowledges it.
 * @request: command id (VPU_CMD_ID_*) that expects a response.
 * @response: firmware message id (VPU_MSG_ID_*) that completes the command.
 * @handled: value matched against the @handled argument of
 *           vpu_response_cmd() before the pending command is cleared.
 */
struct vpu_cmd_request {
	u32 request;
	u32 response;
	u32 handled;
};
28 
/*
 * One queued command for an instance.
 * @list: link in vpu_inst->cmd_q.
 * @id: command id (VPU_CMD_ID_*).
 * @request: response descriptor if this command must wait for a firmware
 *           ack (see vpu_cmd_requests[]), NULL for fire-and-forget commands.
 * @pkt: packed rpc event that is sent to the firmware.
 * @key: per-core sequence number (core->cmd_seq) used by waiters to tell
 *       whether this particular command is still outstanding.
 */
struct vpu_cmd_t {
	struct list_head list;
	u32 id;
	struct vpu_cmd_request *request;
	struct vpu_rpc_event *pkt;
	unsigned long key;
};
36 
/*
 * Table of the synchronous commands: any command id listed here stalls the
 * per-instance queue until the matching response message arrives.  Command
 * ids not in this table complete immediately after being sent.
 */
static struct vpu_cmd_request vpu_cmd_requests[] = {
	{
		.request = VPU_CMD_ID_CONFIGURE_CODEC,
		.response = VPU_MSG_ID_MEM_REQUEST,
		.handled = 1,
	},
	{
		.request = VPU_CMD_ID_START,
		.response = VPU_MSG_ID_START_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_STOP,
		.response = VPU_MSG_ID_STOP_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_ABORT,
		.response = VPU_MSG_ID_ABORT_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_RST_BUF,
		.response = VPU_MSG_ID_BUF_RST,
		.handled = 1,
	},
};
64 
65 static int vpu_cmd_send(struct vpu_core *core, struct vpu_rpc_event *pkt)
66 {
67 	int ret = 0;
68 
69 	ret = vpu_iface_send_cmd(core, pkt);
70 	if (ret)
71 		return ret;
72 
73 	/*write cmd data to cmd buffer before trigger a cmd interrupt*/
74 	mb();
75 	vpu_mbox_send_type(core, COMMAND);
76 
77 	return ret;
78 }
79 
80 static struct vpu_cmd_t *vpu_alloc_cmd(struct vpu_inst *inst, u32 id, void *data)
81 {
82 	struct vpu_cmd_t *cmd;
83 	int i;
84 	int ret;
85 
86 	cmd = vzalloc(sizeof(*cmd));
87 	if (!cmd)
88 		return NULL;
89 
90 	cmd->pkt = vzalloc(sizeof(*cmd->pkt));
91 	if (!cmd->pkt) {
92 		vfree(cmd);
93 		return NULL;
94 	}
95 
96 	cmd->id = id;
97 	ret = vpu_iface_pack_cmd(inst->core, cmd->pkt, inst->id, id, data);
98 	if (ret) {
99 		dev_err(inst->dev, "iface pack cmd(%d) fail\n", id);
100 		vfree(cmd->pkt);
101 		vfree(cmd);
102 		return NULL;
103 	}
104 	for (i = 0; i < ARRAY_SIZE(vpu_cmd_requests); i++) {
105 		if (vpu_cmd_requests[i].request == id) {
106 			cmd->request = &vpu_cmd_requests[i];
107 			break;
108 		}
109 	}
110 
111 	return cmd;
112 }
113 
114 static void vpu_free_cmd(struct vpu_cmd_t *cmd)
115 {
116 	if (!cmd)
117 		return;
118 	vfree(cmd->pkt);
119 	vfree(cmd);
120 }
121 
122 static int vpu_session_process_cmd(struct vpu_inst *inst, struct vpu_cmd_t *cmd)
123 {
124 	int ret;
125 
126 	dev_dbg(inst->dev, "[%d]send cmd(0x%x)\n", inst->id, cmd->id);
127 	vpu_iface_pre_send_cmd(inst);
128 	ret = vpu_cmd_send(inst->core, cmd->pkt);
129 	if (!ret) {
130 		vpu_iface_post_send_cmd(inst);
131 		vpu_inst_record_flow(inst, cmd->id);
132 	} else {
133 		dev_err(inst->dev, "[%d] iface send cmd(0x%x) fail\n", inst->id, cmd->id);
134 	}
135 
136 	return ret;
137 }
138 
139 static void vpu_process_cmd_request(struct vpu_inst *inst)
140 {
141 	struct vpu_cmd_t *cmd;
142 	struct vpu_cmd_t *tmp;
143 
144 	if (!inst || inst->pending)
145 		return;
146 
147 	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
148 		list_del_init(&cmd->list);
149 		if (vpu_session_process_cmd(inst, cmd))
150 			dev_err(inst->dev, "[%d] process cmd(%d) fail\n", inst->id, cmd->id);
151 		if (cmd->request) {
152 			inst->pending = (void *)cmd;
153 			break;
154 		}
155 		vpu_free_cmd(cmd);
156 	}
157 }
158 
159 static int vpu_request_cmd(struct vpu_inst *inst, u32 id, void *data,
160 			   unsigned long *key, int *sync)
161 {
162 	struct vpu_core *core;
163 	struct vpu_cmd_t *cmd;
164 
165 	if (!inst || !inst->core)
166 		return -EINVAL;
167 
168 	core = inst->core;
169 	cmd = vpu_alloc_cmd(inst, id, data);
170 	if (!cmd)
171 		return -ENOMEM;
172 
173 	mutex_lock(&core->cmd_lock);
174 	cmd->key = core->cmd_seq++;
175 	if (key)
176 		*key = cmd->key;
177 	if (sync)
178 		*sync = cmd->request ? true : false;
179 	list_add_tail(&cmd->list, &inst->cmd_q);
180 	vpu_process_cmd_request(inst);
181 	mutex_unlock(&core->cmd_lock);
182 
183 	return 0;
184 }
185 
186 static void vpu_clear_pending(struct vpu_inst *inst)
187 {
188 	if (!inst || !inst->pending)
189 		return;
190 
191 	vpu_free_cmd(inst->pending);
192 	wake_up_all(&inst->core->ack_wq);
193 	inst->pending = NULL;
194 }
195 
196 static bool vpu_check_response(struct vpu_cmd_t *cmd, u32 response, u32 handled)
197 {
198 	struct vpu_cmd_request *request;
199 
200 	if (!cmd || !cmd->request)
201 		return false;
202 
203 	request = cmd->request;
204 	if (request->response != response)
205 		return false;
206 	if (request->handled != handled)
207 		return false;
208 
209 	return true;
210 }
211 
212 int vpu_response_cmd(struct vpu_inst *inst, u32 response, u32 handled)
213 {
214 	struct vpu_core *core;
215 
216 	if (!inst || !inst->core)
217 		return -EINVAL;
218 
219 	core = inst->core;
220 	mutex_lock(&core->cmd_lock);
221 	if (vpu_check_response(inst->pending, response, handled))
222 		vpu_clear_pending(inst);
223 
224 	vpu_process_cmd_request(inst);
225 	mutex_unlock(&core->cmd_lock);
226 
227 	return 0;
228 }
229 
230 void vpu_clear_request(struct vpu_inst *inst)
231 {
232 	struct vpu_cmd_t *cmd;
233 	struct vpu_cmd_t *tmp;
234 
235 	mutex_lock(&inst->core->cmd_lock);
236 	if (inst->pending)
237 		vpu_clear_pending(inst);
238 
239 	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
240 		list_del_init(&cmd->list);
241 		vpu_free_cmd(cmd);
242 	}
243 	mutex_unlock(&inst->core->cmd_lock);
244 }
245 
246 static bool check_is_responsed(struct vpu_inst *inst, unsigned long key)
247 {
248 	struct vpu_core *core = inst->core;
249 	struct vpu_cmd_t *cmd;
250 	bool flag = true;
251 
252 	mutex_lock(&core->cmd_lock);
253 	cmd = inst->pending;
254 	if (cmd && key == cmd->key) {
255 		flag = false;
256 		goto exit;
257 	}
258 	list_for_each_entry(cmd, &inst->cmd_q, list) {
259 		if (key == cmd->key) {
260 			flag = false;
261 			break;
262 		}
263 	}
264 exit:
265 	mutex_unlock(&core->cmd_lock);
266 
267 	return flag;
268 }
269 
270 static int sync_session_response(struct vpu_inst *inst, unsigned long key, long timeout, int try)
271 {
272 	struct vpu_core *core;
273 
274 	if (!inst || !inst->core)
275 		return -EINVAL;
276 
277 	core = inst->core;
278 
279 	call_void_vop(inst, wait_prepare);
280 	wait_event_timeout(core->ack_wq, check_is_responsed(inst, key), timeout);
281 	call_void_vop(inst, wait_finish);
282 
283 	if (!check_is_responsed(inst, key)) {
284 		if (try)
285 			return -EINVAL;
286 		dev_err(inst->dev, "[%d] sync session timeout\n", inst->id);
287 		set_bit(inst->id, &core->hang_mask);
288 		mutex_lock(&inst->core->cmd_lock);
289 		vpu_clear_pending(inst);
290 		mutex_unlock(&inst->core->cmd_lock);
291 		return -EINVAL;
292 	}
293 
294 	return 0;
295 }
296 
297 static void vpu_core_keep_active(struct vpu_core *core)
298 {
299 	struct vpu_rpc_event pkt;
300 
301 	memset(&pkt, 0, sizeof(pkt));
302 	vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_NOOP, NULL);
303 
304 	dev_dbg(core->dev, "try to wake up\n");
305 	mutex_lock(&core->cmd_lock);
306 	vpu_cmd_send(core, &pkt);
307 	mutex_unlock(&core->cmd_lock);
308 }
309 
/*
 * Queue a command for @inst and, when the command expects a firmware
 * response, block until that response arrives (or timeout).
 * Returns 0 on success, a negative errno on failure or timeout.
 */
static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
{
	unsigned long key;
	int sync = false;
	int ret = -EINVAL;

	/* instance must have been attached to a core (valid firmware id) */
	if (inst->id < 0)
		return -EINVAL;

	ret = vpu_request_cmd(inst, id, data, &key, &sync);
	if (ret)
		goto exit;

	/* workaround for a firmware issue,
	 * firmware should be waked up by start or configure command,
	 * but there is a very small chance that firmware failed to wakeup.
	 * in such case, try to wakeup firmware again by sending a noop command
	 * (first wait with a short timeout and try=1 so a miss is non-fatal,
	 * then fall through to the full-timeout wait below)
	 */
	if (sync && (id == VPU_CMD_ID_CONFIGURE_CODEC || id == VPU_CMD_ID_START)) {
		if (sync_session_response(inst, key, VPU_TIMEOUT_WAKEUP, 1))
			vpu_core_keep_active(inst->core);
		else
			goto exit;
	}

	if (sync)
		ret = sync_session_response(inst, key, VPU_TIMEOUT, 0);

exit:
	if (ret)
		dev_err(inst->dev, "[%d] send cmd(0x%x) fail\n", inst->id, id);

	return ret;
}
344 
/* Ask the firmware to configure the codec; waits for VPU_MSG_ID_MEM_REQUEST. */
int vpu_session_configure_codec(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_CONFIGURE_CODEC, NULL);
}
349 
/* Start the session; waits for VPU_MSG_ID_START_DONE. */
int vpu_session_start(struct vpu_inst *inst)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);

	return vpu_session_send_cmd(inst, VPU_CMD_ID_START, NULL);
}
356 
/* Stop the session; waits for VPU_MSG_ID_STOP_DONE. */
int vpu_session_stop(struct vpu_inst *inst)
{
	int ret;

	vpu_trace(inst->dev, "[%d]\n", inst->id);

	ret = vpu_session_send_cmd(inst, VPU_CMD_ID_STOP, NULL);
	/* workaround for a firmware bug,
	 * if the next command is too close after stop cmd,
	 * the firmware may enter wfi wrongly.
	 */
	usleep_range(3000, 5000);
	return ret;
}
371 
/* Submit one frame for encoding, tagged with @timestamp (fire-and-forget). */
int vpu_session_encode_frame(struct vpu_inst *inst, s64 timestamp)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FRAME_ENCODE, &timestamp);
}
376 
/* Announce an allocated frame store described by @fs to the firmware. */
int vpu_session_alloc_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_ALLOC, fs);
}
381 
/* Tell the firmware the frame store described by @fs has been released. */
int vpu_session_release_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_RELEASE, fs);
}
386 
/* Abort the session; waits for VPU_MSG_ID_ABORT_DONE. */
int vpu_session_abort(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_ABORT, NULL);
}
391 
/* Reset the firmware-side buffers; waits for VPU_MSG_ID_BUF_RST. */
int vpu_session_rst_buf(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_RST_BUF, NULL);
}
396 
/* Pass timestamp information @info to the firmware (fire-and-forget). */
int vpu_session_fill_timestamp(struct vpu_inst *inst, struct vpu_ts_info *info)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_TIMESTAMP, info);
}
401 
/*
 * Push updated codec parameters to the interface layer (decode or encode
 * depending on the instance type), then notify the firmware.
 */
int vpu_session_update_parameters(struct vpu_inst *inst, void *arg)
{
	if (inst->type & VPU_CORE_TYPE_DEC)
		vpu_iface_set_decode_params(inst, arg, 1);
	else
		vpu_iface_set_encode_params(inst, arg, 1);

	return vpu_session_send_cmd(inst, VPU_CMD_ID_UPDATE_PARAMETER, arg);
}
411 
/* Request firmware-side debug output for this session (fire-and-forget). */
int vpu_session_debug(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_DEBUG, NULL);
}
416 
417 int vpu_core_snapshot(struct vpu_core *core)
418 {
419 	struct vpu_inst *inst;
420 	int ret;
421 
422 	if (!core || list_empty(&core->instances))
423 		return 0;
424 
425 	inst = list_first_entry(&core->instances, struct vpu_inst, list);
426 
427 	reinit_completion(&core->cmp);
428 	ret = vpu_session_send_cmd(inst, VPU_CMD_ID_SNAPSHOT, NULL);
429 	if (ret)
430 		return ret;
431 	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
432 	if (!ret) {
433 		dev_err(core->dev, "snapshot timeout\n");
434 		return -EINVAL;
435 	}
436 
437 	return 0;
438 }
439 
440 int vpu_core_sw_reset(struct vpu_core *core)
441 {
442 	struct vpu_rpc_event pkt;
443 	int ret;
444 
445 	memset(&pkt, 0, sizeof(pkt));
446 	vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_FIRM_RESET, NULL);
447 
448 	reinit_completion(&core->cmp);
449 	mutex_lock(&core->cmd_lock);
450 	ret = vpu_cmd_send(core, &pkt);
451 	mutex_unlock(&core->cmd_lock);
452 	if (ret)
453 		return ret;
454 	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
455 	if (!ret) {
456 		dev_err(core->dev, "sw reset timeout\n");
457 		return -EINVAL;
458 	}
459 
460 	return 0;
461 }
462