// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include "vpu.h"
#include "vpu_defs.h"
#include "vpu_cmds.h"
#include "vpu_rpc.h"
#include "vpu_mbox.h"

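/*
 * Maps a command id to the firmware response that completes it; both
 * @response and @handled must match in vpu_check_response() before the
 * pending command is cleared.
 */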
struct vpu_cmd_request {
	u32 request;
	u32 response;
	u32 handled;
};

struct vpu_cmd_t {
	struct list_head list;
	u32 id;
	struct vpu_cmd_request *request;
	struct vpu_rpc_event *pkt;
	unsigned long key;
};

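/*
 * Commands listed here are treated as synchronous: once sent they stay
 * pending on the instance and hold back further queued commands until the
 * matching response arrives.
 */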
static struct vpu_cmd_request vpu_cmd_requests[] = {
	{
		.request = VPU_CMD_ID_CONFIGURE_CODEC,
		.response = VPU_MSG_ID_MEM_REQUEST,
		.handled = 1,
	},
	{
		.request = VPU_CMD_ID_START,
		.response = VPU_MSG_ID_START_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_STOP,
		.response = VPU_MSG_ID_STOP_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_ABORT,
		.response = VPU_MSG_ID_ABORT_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_RST_BUF,
		.response = VPU_MSG_ID_BUF_RST,
		.handled = 1,
	},
};

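/*
 * Hand a packed command over to the command buffer and notify the firmware
 * through the mailbox. The memory barrier ensures the command data is
 * visible before the COMMAND interrupt is raised.
 */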
static int vpu_cmd_send(struct vpu_core *core, struct vpu_rpc_event *pkt)
{
	int ret = 0;

	ret = vpu_iface_send_cmd(core, pkt);
	if (ret)
		return ret;

	/* write cmd data to cmd buffer before triggering a cmd interrupt */
	mb();
	vpu_mbox_send_type(core, COMMAND);

	return ret;
}

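/*
 * Allocate a command descriptor, pack the command payload for the firmware
 * interface, and look up the expected response in vpu_cmd_requests[].
 * cmd->request stays NULL for fire-and-forget commands.
 */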
static struct vpu_cmd_t *vpu_alloc_cmd(struct vpu_inst *inst, u32 id, void *data)
{
	struct vpu_cmd_t *cmd;
	int i;
	int ret;

	cmd = vzalloc(sizeof(*cmd));
	if (!cmd)
		return NULL;

	cmd->pkt = vzalloc(sizeof(*cmd->pkt));
	if (!cmd->pkt) {
		vfree(cmd);
		return NULL;
	}

	cmd->id = id;
	ret = vpu_iface_pack_cmd(inst->core, cmd->pkt, inst->id, id, data);
	if (ret) {
		dev_err(inst->dev, "iface pack cmd(%d) fail\n", id);
		vfree(cmd->pkt);
		vfree(cmd);
		return NULL;
	}
	for (i = 0; i < ARRAY_SIZE(vpu_cmd_requests); i++) {
		if (vpu_cmd_requests[i].request == id) {
			cmd->request = &vpu_cmd_requests[i];
			break;
		}
	}

	return cmd;
}

static void vpu_free_cmd(struct vpu_cmd_t *cmd)
{
	if (!cmd)
		return;
	vfree(cmd->pkt);
	vfree(cmd);
}

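/*
 * Send one queued command to the firmware; on success the command id is
 * recorded via vpu_inst_record_flow().
 */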
static int vpu_session_process_cmd(struct vpu_inst *inst, struct vpu_cmd_t *cmd)
{
	int ret;

	dev_dbg(inst->dev, "[%d]send cmd(0x%x)\n", inst->id, cmd->id);
	vpu_iface_pre_send_cmd(inst);
	ret = vpu_cmd_send(inst->core, cmd->pkt);
	if (!ret) {
		vpu_iface_post_send_cmd(inst);
		vpu_inst_record_flow(inst, cmd->id);
	} else {
		dev_err(inst->dev, "[%d] iface send cmd(0x%x) fail\n", inst->id, cmd->id);
	}

	return ret;
}

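/*
 * Drain the instance command queue until a synchronous command is met.
 * That command becomes inst->pending and blocks the queue until its response
 * arrives (see vpu_response_cmd()). Callers hold core->cmd_lock.
 */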
static void vpu_process_cmd_request(struct vpu_inst *inst)
{
	struct vpu_cmd_t *cmd;
	struct vpu_cmd_t *tmp;

	if (!inst || inst->pending)
		return;

	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
		list_del_init(&cmd->list);
		if (vpu_session_process_cmd(inst, cmd))
			dev_err(inst->dev, "[%d] process cmd(%d) fail\n", inst->id, cmd->id);
		if (cmd->request) {
			inst->pending = (void *)cmd;
			break;
		}
		vpu_free_cmd(cmd);
	}
}

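/*
 * Queue a command for the instance. The sequence key of the queued command
 * is returned through @key, and @sync reports whether the caller needs to
 * wait for a response.
 */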
static int vpu_request_cmd(struct vpu_inst *inst, u32 id, void *data,
			   unsigned long *key, int *sync)
{
	struct vpu_core *core;
	struct vpu_cmd_t *cmd;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;
	cmd = vpu_alloc_cmd(inst, id, data);
	if (!cmd)
		return -ENOMEM;

	mutex_lock(&core->cmd_lock);
	cmd->key = core->cmd_seq++;
	if (key)
		*key = cmd->key;
	if (sync)
		*sync = cmd->request ? true : false;
	list_add_tail(&cmd->list, &inst->cmd_q);
	vpu_process_cmd_request(inst);
	mutex_unlock(&core->cmd_lock);

	return 0;
}

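/*
 * Drop the pending synchronous command and wake anyone waiting in
 * sync_session_response(). Callers hold core->cmd_lock.
 */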
static void vpu_clear_pending(struct vpu_inst *inst)
{
	if (!inst || !inst->pending)
		return;

	vpu_free_cmd(inst->pending);
	wake_up_all(&inst->core->ack_wq);
	inst->pending = NULL;
}

static bool vpu_check_response(struct vpu_cmd_t *cmd, u32 response, u32 handled)
{
	struct vpu_cmd_request *request;

	if (!cmd || !cmd->request)
		return false;

	request = cmd->request;
	if (request->response != response)
		return false;
	if (request->handled != handled)
		return false;

	return true;
}

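/*
 * Complete the pending command if @response/@handled match what it expects,
 * then let the next queued commands go out.
 */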
int vpu_response_cmd(struct vpu_inst *inst, u32 response, u32 handled)
{
	struct vpu_core *core;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;
	mutex_lock(&core->cmd_lock);
	if (vpu_check_response(inst->pending, response, handled))
		vpu_clear_pending(inst);

	vpu_process_cmd_request(inst);
	mutex_unlock(&core->cmd_lock);

	return 0;
}

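/*
 * Abandon the pending command and free every command still queued on the
 * instance.
 */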
void vpu_clear_request(struct vpu_inst *inst)
{
	struct vpu_cmd_t *cmd;
	struct vpu_cmd_t *tmp;

	mutex_lock(&inst->core->cmd_lock);
	if (inst->pending)
		vpu_clear_pending(inst);

	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
		list_del_init(&cmd->list);
		vpu_free_cmd(cmd);
	}
	mutex_unlock(&inst->core->cmd_lock);
}

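/*
 * A command is considered responded once it is neither pending nor still
 * sitting in the command queue; @key is the sequence number handed out by
 * vpu_request_cmd().
 */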
static bool check_is_responsed(struct vpu_inst *inst, unsigned long key)
{
	struct vpu_core *core = inst->core;
	struct vpu_cmd_t *cmd;
	bool flag = true;

	mutex_lock(&core->cmd_lock);
	cmd = inst->pending;
	if (cmd && key == cmd->key) {
		flag = false;
		goto exit;
	}
	list_for_each_entry(cmd, &inst->cmd_q, list) {
		if (key == cmd->key) {
			flag = false;
			break;
		}
	}
exit:
	mutex_unlock(&core->cmd_lock);

	return flag;
}

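/*
 * Wait until the command identified by @key has been responded to. When @try
 * is not set, a timeout marks the instance as hung and drops the pending
 * command.
 */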
static int sync_session_response(struct vpu_inst *inst, unsigned long key, long timeout, int try)
{
	struct vpu_core *core;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;

	call_void_vop(inst, wait_prepare);
	wait_event_timeout(core->ack_wq, check_is_responsed(inst, key), timeout);
	call_void_vop(inst, wait_finish);

	if (!check_is_responsed(inst, key)) {
		if (try)
			return -EINVAL;
		dev_err(inst->dev, "[%d] sync session timeout\n", inst->id);
		set_bit(inst->id, &core->hang_mask);
		mutex_lock(&inst->core->cmd_lock);
		vpu_clear_pending(inst);
		mutex_unlock(&inst->core->cmd_lock);
		return -EINVAL;
	}

	return 0;
}

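/*
 * Nudge the firmware with a noop command; used as a fallback when the
 * wake-up expected from a configure or start command did not happen.
 */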
static void vpu_core_keep_active(struct vpu_core *core)
{
	struct vpu_rpc_event pkt;

	memset(&pkt, 0, sizeof(pkt));
	vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_NOOP, NULL);

	dev_dbg(core->dev, "try to wake up\n");
	mutex_lock(&core->cmd_lock);
	vpu_cmd_send(core, &pkt);
	mutex_unlock(&core->cmd_lock);
}

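/*
 * Common path for all per-instance commands: queue the command and, for
 * synchronous ones, wait for the firmware response before returning.
 */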
static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
{
	unsigned long key;
	int sync = false;
	int ret = -EINVAL;

	if (inst->id < 0)
		return -EINVAL;

	ret = vpu_request_cmd(inst, id, data, &key, &sync);
	if (ret)
		goto exit;

	/*
	 * Workaround for a firmware issue:
	 * the firmware should be woken up by the start or configure command,
	 * but there is a very small chance that it fails to wake up.
	 * In that case, try to wake the firmware up again by sending a noop command.
	 */
	if (sync && (id == VPU_CMD_ID_CONFIGURE_CODEC || id == VPU_CMD_ID_START)) {
		if (sync_session_response(inst, key, VPU_TIMEOUT_WAKEUP, 1))
			vpu_core_keep_active(inst->core);
		else
			goto exit;
	}

	if (sync)
		ret = sync_session_response(inst, key, VPU_TIMEOUT, 0);

exit:
	if (ret)
		dev_err(inst->dev, "[%d] send cmd(0x%x) fail\n", inst->id, id);

	return ret;
}

int vpu_session_configure_codec(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_CONFIGURE_CODEC, NULL);
}

int vpu_session_start(struct vpu_inst *inst)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);

	return vpu_session_send_cmd(inst, VPU_CMD_ID_START, NULL);
}

int vpu_session_stop(struct vpu_inst *inst)
{
	int ret;

	vpu_trace(inst->dev, "[%d]\n", inst->id);

	ret = vpu_session_send_cmd(inst, VPU_CMD_ID_STOP, NULL);
	/*
	 * Workaround for a firmware bug:
	 * if the next command follows the stop command too closely,
	 * the firmware may wrongly enter WFI.
	 */
	usleep_range(3000, 5000);
	return ret;
}

int vpu_session_encode_frame(struct vpu_inst *inst, s64 timestamp)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FRAME_ENCODE, &timestamp);
}

int vpu_session_alloc_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_ALLOC, fs);
}

int vpu_session_release_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_RELEASE, fs);
}

int vpu_session_abort(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_ABORT, NULL);
}

int vpu_session_rst_buf(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_RST_BUF, NULL);
}

int vpu_session_fill_timestamp(struct vpu_inst *inst, struct vpu_ts_info *info)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_TIMESTAMP, info);
}

int vpu_session_update_parameters(struct vpu_inst *inst, void *arg)
{
	if (inst->type & VPU_CORE_TYPE_DEC)
		vpu_iface_set_decode_params(inst, arg, 1);
	else
		vpu_iface_set_encode_params(inst, arg, 1);

	return vpu_session_send_cmd(inst, VPU_CMD_ID_UPDATE_PARAMETER, arg);
}

int vpu_session_debug(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_DEBUG, NULL);
}

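/*
 * Ask the firmware to snapshot its state via the first instance on the core,
 * then wait on core->cmp for the acknowledgement.
 */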
int vpu_core_snapshot(struct vpu_core *core)
{
	struct vpu_inst *inst;
	int ret;

	if (!core || list_empty(&core->instances))
		return 0;

	inst = list_first_entry(&core->instances, struct vpu_inst, list);

	reinit_completion(&core->cmp);
	ret = vpu_session_send_cmd(inst, VPU_CMD_ID_SNAPSHOT, NULL);
	if (ret)
		return ret;
	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "snapshot timeout\n");
		return -EINVAL;
	}

	return 0;
}

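/*
 * Issue a firmware-level reset on the core and wait on core->cmp for it to
 * be acknowledged; -EINVAL is returned if no acknowledgement arrives within
 * VPU_TIMEOUT.
 */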
int vpu_core_sw_reset(struct vpu_core *core)
{
	struct vpu_rpc_event pkt;
	int ret;

	memset(&pkt, 0, sizeof(pkt));
	vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_FIRM_RESET, NULL);

	reinit_completion(&core->cmp);
	mutex_lock(&core->cmd_lock);
	ret = vpu_cmd_send(core, &pkt);
	mutex_unlock(&core->cmd_lock);
	if (ret)
		return ret;
	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "sw reset timeout\n");
		return -EINVAL;
	}

	return 0;
}