1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2020-2021 NXP
4  */
5 
6 #include <linux/init.h>
7 #include <linux/interconnect.h>
8 #include <linux/ioctl.h>
9 #include <linux/list.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/platform_device.h>
13 #include <linux/slab.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/vmalloc.h>
17 #include "vpu.h"
18 #include "vpu_defs.h"
19 #include "vpu_cmds.h"
20 #include "vpu_rpc.h"
21 #include "vpu_mbox.h"
22 
/*
 * Pairing of a host command with the firmware message that answers it.
 * @request:  VPU_CMD_ID_* that expects a firmware response
 * @response: VPU_MSG_ID_* the firmware is expected to send back
 * @handled:  handled-state the responder must pass to vpu_response_cmd()
 *            for the response to match (compared in vpu_check_response())
 */
struct vpu_cmd_request {
	u32 request;
	u32 response;
	u32 handled;
};
28 
/*
 * One queued command on an instance's cmd_q.
 * @list:    node on inst->cmd_q
 * @id:      VPU_CMD_ID_* of this command
 * @request: matching entry in vpu_cmd_requests[], or NULL when the
 *           command does not wait for a firmware response
 * @pkt:     packed RPC event sent to the firmware
 * @key:     per-instance sequence number (from inst->cmd_seq), used by
 *           waiters to tell whether this command has completed
 * @last_response_cmd: points at inst->last_response_cmd; updated with
 *           @key when the command is freed (see vpu_free_cmd())
 */
struct vpu_cmd_t {
	struct list_head list;
	u32 id;
	struct vpu_cmd_request *request;
	struct vpu_rpc_event *pkt;
	unsigned long key;
	atomic_long_t *last_response_cmd;
};
37 
/*
 * Commands that stall the queue until the firmware responds.
 * A command id absent from this table is fire-and-forget.
 */
static struct vpu_cmd_request vpu_cmd_requests[] = {
	{
		.request = VPU_CMD_ID_CONFIGURE_CODEC,
		.response = VPU_MSG_ID_MEM_REQUEST,
		.handled = 1,
	},
	{
		.request = VPU_CMD_ID_START,
		.response = VPU_MSG_ID_START_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_STOP,
		.response = VPU_MSG_ID_STOP_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_ABORT,
		.response = VPU_MSG_ID_ABORT_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_RST_BUF,
		.response = VPU_MSG_ID_BUF_RST,
		.handled = 1,
	},
};
65 
66 static int vpu_cmd_send(struct vpu_core *core, struct vpu_rpc_event *pkt)
67 {
68 	int ret = 0;
69 
70 	ret = vpu_iface_send_cmd(core, pkt);
71 	if (ret)
72 		return ret;
73 
74 	/*write cmd data to cmd buffer before trigger a cmd interrupt*/
75 	mb();
76 	vpu_mbox_send_type(core, COMMAND);
77 
78 	return ret;
79 }
80 
81 static struct vpu_cmd_t *vpu_alloc_cmd(struct vpu_inst *inst, u32 id, void *data)
82 {
83 	struct vpu_cmd_t *cmd;
84 	int i;
85 	int ret;
86 
87 	cmd = vzalloc(sizeof(*cmd));
88 	if (!cmd)
89 		return NULL;
90 
91 	cmd->pkt = vzalloc(sizeof(*cmd->pkt));
92 	if (!cmd->pkt) {
93 		vfree(cmd);
94 		return NULL;
95 	}
96 
97 	cmd->id = id;
98 	ret = vpu_iface_pack_cmd(inst->core, cmd->pkt, inst->id, id, data);
99 	if (ret) {
100 		dev_err(inst->dev, "iface pack cmd %s fail\n", vpu_id_name(id));
101 		vfree(cmd->pkt);
102 		vfree(cmd);
103 		return NULL;
104 	}
105 	for (i = 0; i < ARRAY_SIZE(vpu_cmd_requests); i++) {
106 		if (vpu_cmd_requests[i].request == id) {
107 			cmd->request = &vpu_cmd_requests[i];
108 			break;
109 		}
110 	}
111 
112 	return cmd;
113 }
114 
115 static void vpu_free_cmd(struct vpu_cmd_t *cmd)
116 {
117 	if (!cmd)
118 		return;
119 	if (cmd->last_response_cmd)
120 		atomic_long_set(cmd->last_response_cmd, cmd->key);
121 	vfree(cmd->pkt);
122 	vfree(cmd);
123 }
124 
125 static int vpu_session_process_cmd(struct vpu_inst *inst, struct vpu_cmd_t *cmd)
126 {
127 	int ret;
128 
129 	dev_dbg(inst->dev, "[%d]send cmd %s\n", inst->id, vpu_id_name(cmd->id));
130 	vpu_iface_pre_send_cmd(inst);
131 	ret = vpu_cmd_send(inst->core, cmd->pkt);
132 	if (!ret) {
133 		vpu_iface_post_send_cmd(inst);
134 		vpu_inst_record_flow(inst, cmd->id);
135 	} else {
136 		dev_err(inst->dev, "[%d] iface send cmd %s fail\n", inst->id, vpu_id_name(cmd->id));
137 	}
138 
139 	return ret;
140 }
141 
/*
 * Drain the instance command queue: send queued commands in order until
 * either the queue is empty or a command that waits for a firmware
 * response is sent — that command becomes inst->pending and stalls
 * further processing until vpu_response_cmd() clears it.
 * Fire-and-forget commands are freed immediately after sending.
 * Callers hold core->cmd_lock (see vpu_request_cmd()/vpu_response_cmd()).
 */
static void vpu_process_cmd_request(struct vpu_inst *inst)
{
	struct vpu_cmd_t *cmd;
	struct vpu_cmd_t *tmp;

	/* nothing to do while a previous command still awaits its response */
	if (!inst || inst->pending)
		return;

	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
		list_del_init(&cmd->list);
		if (vpu_session_process_cmd(inst, cmd))
			dev_err(inst->dev, "[%d] process cmd %s fail\n",
				inst->id, vpu_id_name(cmd->id));
		if (cmd->request) {
			/* response expected: keep cmd alive as the pending marker */
			inst->pending = (void *)cmd;
			break;
		}
		vpu_free_cmd(cmd);
	}
}
162 
/*
 * Build a command, queue it on inst->cmd_q and try to dispatch the queue.
 * @key:  out (optional) — sequence key of this command, for waiting on
 *        its completion via sync_session_response()
 * @sync: out (optional) — true when the command waits for a firmware
 *        response (it has a vpu_cmd_requests[] entry)
 * Returns 0, -EINVAL on bad arguments, or -ENOMEM.
 */
static int vpu_request_cmd(struct vpu_inst *inst, u32 id, void *data,
			   unsigned long *key, int *sync)
{
	struct vpu_core *core;
	struct vpu_cmd_t *cmd;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;
	cmd = vpu_alloc_cmd(inst, id, data);
	if (!cmd)
		return -ENOMEM;

	mutex_lock(&core->cmd_lock);
	/* sequence key is assigned under the lock so keys stay monotonic */
	cmd->key = ++inst->cmd_seq;
	cmd->last_response_cmd = &inst->last_response_cmd;
	if (key)
		*key = cmd->key;
	if (sync)
		*sync = cmd->request ? true : false;
	list_add_tail(&cmd->list, &inst->cmd_q);
	vpu_process_cmd_request(inst);
	mutex_unlock(&core->cmd_lock);

	return 0;
}
190 
/*
 * Drop the in-flight pending command and wake anyone waiting on it.
 * vpu_free_cmd() publishes the command's key to last_response_cmd first,
 * so woken waiters observe the command as completed.
 * Callers hold core->cmd_lock.
 */
static void vpu_clear_pending(struct vpu_inst *inst)
{
	if (!inst || !inst->pending)
		return;

	vpu_free_cmd(inst->pending);
	wake_up_all(&inst->core->ack_wq);
	inst->pending = NULL;
}
200 
201 static bool vpu_check_response(struct vpu_cmd_t *cmd, u32 response, u32 handled)
202 {
203 	struct vpu_cmd_request *request;
204 
205 	if (!cmd || !cmd->request)
206 		return false;
207 
208 	request = cmd->request;
209 	if (request->response != response)
210 		return false;
211 	if (request->handled != handled)
212 		return false;
213 
214 	return true;
215 }
216 
/*
 * Called when a firmware message arrives: if it answers the pending
 * command, clear the pending slot (waking waiters), then resume
 * dispatching the command queue.
 * Returns 0, or -EINVAL on bad arguments.
 */
int vpu_response_cmd(struct vpu_inst *inst, u32 response, u32 handled)
{
	struct vpu_core *core;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;
	mutex_lock(&core->cmd_lock);
	if (vpu_check_response(inst->pending, response, handled))
		vpu_clear_pending(inst);

	vpu_process_cmd_request(inst);
	mutex_unlock(&core->cmd_lock);

	return 0;
}
234 
235 void vpu_clear_request(struct vpu_inst *inst)
236 {
237 	struct vpu_cmd_t *cmd;
238 	struct vpu_cmd_t *tmp;
239 
240 	mutex_lock(&inst->core->cmd_lock);
241 	if (inst->pending)
242 		vpu_clear_pending(inst);
243 
244 	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
245 		list_del_init(&cmd->list);
246 		vpu_free_cmd(cmd);
247 	}
248 	mutex_unlock(&inst->core->cmd_lock);
249 }
250 
251 static bool check_is_responsed(struct vpu_inst *inst, unsigned long key)
252 {
253 	unsigned long last_response = atomic_long_read(&inst->last_response_cmd);
254 
255 	if (key <= last_response && (last_response - key) < (ULONG_MAX >> 1))
256 		return true;
257 
258 	return false;
259 }
260 
/*
 * Wait until the command identified by @key has been answered.
 * @timeout: jiffies to wait
 * @try:     when non-zero, a timeout just returns -EINVAL so the caller
 *           can retry; when zero, a timeout marks the instance as hung
 *           (core->hang_mask) and discards the pending command.
 * Returns 0 when the response arrived, -EINVAL otherwise.
 */
static int sync_session_response(struct vpu_inst *inst, unsigned long key, long timeout, int try)
{
	struct vpu_core *core;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;

	/* let the v4l2 queue lock go while sleeping (wait_prepare/wait_finish) */
	call_void_vop(inst, wait_prepare);
	wait_event_timeout(core->ack_wq, check_is_responsed(inst, key), timeout);
	call_void_vop(inst, wait_finish);

	if (!check_is_responsed(inst, key)) {
		if (try)
			return -EINVAL;
		dev_err(inst->dev, "[%d] sync session timeout\n", inst->id);
		set_bit(inst->id, &core->hang_mask);
		mutex_lock(&inst->core->cmd_lock);
		vpu_clear_pending(inst);
		mutex_unlock(&inst->core->cmd_lock);
		return -EINVAL;
	}

	return 0;
}
287 
288 static void vpu_core_keep_active(struct vpu_core *core)
289 {
290 	struct vpu_rpc_event pkt;
291 
292 	memset(&pkt, 0, sizeof(pkt));
293 	vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_NOOP, NULL);
294 
295 	dev_dbg(core->dev, "try to wake up\n");
296 	mutex_lock(&core->cmd_lock);
297 	if (vpu_cmd_send(core, &pkt))
298 		dev_err(core->dev, "fail to keep active\n");
299 	mutex_unlock(&core->cmd_lock);
300 }
301 
/*
 * Queue command @id for @inst and, when the command expects a firmware
 * response, block until that response arrives (or timeout).
 * Returns 0 on success or a negative errno.
 */
static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
{
	unsigned long key;
	int sync = false;
	int ret;

	if (inst->id < 0)
		return -EINVAL;

	ret = vpu_request_cmd(inst, id, data, &key, &sync);
	if (ret)
		goto exit;

	/* workaround for a firmware issue,
	 * firmware should be waked up by start or configure command,
	 * but there is a very small chance that firmware failed to wakeup.
	 * in such case, try to wakeup firmware again by sending a noop command
	 */
	if (sync && (id == VPU_CMD_ID_CONFIGURE_CODEC || id == VPU_CMD_ID_START)) {
		if (sync_session_response(inst, key, VPU_TIMEOUT_WAKEUP, 1))
			vpu_core_keep_active(inst->core);
		else
			goto exit;
	}

	/* normal wait for the command's response */
	if (sync)
		ret = sync_session_response(inst, key, VPU_TIMEOUT, 0);

exit:
	if (ret)
		dev_err(inst->dev, "[%d] send cmd %s fail\n", inst->id, vpu_id_name(id));

	return ret;
}
336 
/* Configure the codec for @inst; waits for the firmware MEM_REQUEST reply. */
int vpu_session_configure_codec(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_CONFIGURE_CODEC, NULL);
}
341 
/* Start the session; waits for the firmware START_DONE reply. */
int vpu_session_start(struct vpu_inst *inst)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);

	return vpu_session_send_cmd(inst, VPU_CMD_ID_START, NULL);
}
348 
/* Stop the session; waits for STOP_DONE, then pauses briefly (see below). */
int vpu_session_stop(struct vpu_inst *inst)
{
	int ret;

	vpu_trace(inst->dev, "[%d]\n", inst->id);

	ret = vpu_session_send_cmd(inst, VPU_CMD_ID_STOP, NULL);
	/* workaround for a firmware bug,
	 * if the next command is too close after stop cmd,
	 * the firmware may enter wfi wrongly.
	 */
	usleep_range(3000, 5000);
	return ret;
}
363 
/* Ask the firmware to encode the next frame, tagged with @timestamp. */
int vpu_session_encode_frame(struct vpu_inst *inst, s64 timestamp)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FRAME_ENCODE, &timestamp);
}
368 
/* Hand a frame-store described by @fs to the firmware. */
int vpu_session_alloc_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_ALLOC, fs);
}
373 
/* Tell the firmware to release the frame-store described by @fs. */
int vpu_session_release_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_RELEASE, fs);
}
378 
/* Abort the session; waits for the firmware ABORT_DONE reply. */
int vpu_session_abort(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_ABORT, NULL);
}
383 
/* Reset the firmware-side buffers; waits for the BUF_RST reply. */
int vpu_session_rst_buf(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_RST_BUF, NULL);
}
388 
/* Send timestamp information @info to the firmware. */
int vpu_session_fill_timestamp(struct vpu_inst *inst, struct vpu_ts_info *info)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_TIMESTAMP, info);
}
393 
/*
 * Push updated codec parameters @arg into the interface layer
 * (decode or encode, depending on the instance type), then notify
 * the firmware with an UPDATE_PARAMETER command.
 */
int vpu_session_update_parameters(struct vpu_inst *inst, void *arg)
{
	if (inst->type & VPU_CORE_TYPE_DEC)
		vpu_iface_set_decode_params(inst, arg, 1);
	else
		vpu_iface_set_encode_params(inst, arg, 1);

	return vpu_session_send_cmd(inst, VPU_CMD_ID_UPDATE_PARAMETER, arg);
}
403 
/* Send a DEBUG command to the firmware for this instance. */
int vpu_session_debug(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_DEBUG, NULL);
}
408 
/*
 * Ask the firmware to snapshot its state (used around suspend).
 * The SNAPSHOT command is sent through the core's first instance and
 * completion is signalled via core->cmp by the message handler.
 * Returns 0 on success (or when there is nothing to snapshot),
 * a negative errno on send failure, -EINVAL on timeout.
 */
int vpu_core_snapshot(struct vpu_core *core)
{
	struct vpu_inst *inst;
	int ret;

	/* no instances: nothing to snapshot */
	if (!core || list_empty(&core->instances))
		return 0;

	inst = list_first_entry(&core->instances, struct vpu_inst, list);

	reinit_completion(&core->cmp);
	ret = vpu_session_send_cmd(inst, VPU_CMD_ID_SNAPSHOT, NULL);
	if (ret)
		return ret;
	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "snapshot timeout\n");
		return -EINVAL;
	}

	return 0;
}
431 
/*
 * Issue a firmware software reset (FIRM_RESET) on @core and wait for
 * the completion signalled via core->cmp by the message handler.
 * Returns 0 on success, a negative errno on send failure,
 * -EINVAL on timeout.
 */
int vpu_core_sw_reset(struct vpu_core *core)
{
	struct vpu_rpc_event pkt;
	int ret;

	memset(&pkt, 0, sizeof(pkt));
	vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_FIRM_RESET, NULL);

	reinit_completion(&core->cmp);
	mutex_lock(&core->cmd_lock);
	ret = vpu_cmd_send(core, &pkt);
	mutex_unlock(&core->cmd_lock);
	if (ret)
		return ret;
	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "sw reset timeout\n");
		return -EINVAL;
	}

	return 0;
}
454