// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include "vpu.h"
#include "vpu_defs.h"
#include "vpu_cmds.h"
#include "vpu_rpc.h"
#include "vpu_mbox.h"

struct vpu_cmd_request {
	u32 request;
	u32 response;
	u32 handled;
};

struct vpu_cmd_t {
	struct list_head list;
	u32 id;
	struct vpu_cmd_request *request;
	struct vpu_rpc_event *pkt;
	unsigned long key;
};

static struct vpu_cmd_request vpu_cmd_requests[] = {
	{
		.request = VPU_CMD_ID_CONFIGURE_CODEC,
		.response = VPU_MSG_ID_MEM_REQUEST,
		.handled = 1,
	},
	{
		.request = VPU_CMD_ID_START,
		.response = VPU_MSG_ID_START_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_STOP,
		.response = VPU_MSG_ID_STOP_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_ABORT,
		.response = VPU_MSG_ID_ABORT_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_RST_BUF,
		.response = VPU_MSG_ID_BUF_RST,
		.handled = 1,
	},
};

static int vpu_cmd_send(struct vpu_core *core, struct vpu_rpc_event *pkt)
{
	int ret = 0;

	ret = vpu_iface_send_cmd(core, pkt);
	if (ret)
		return ret;

	/* write the cmd data to the cmd buffer before triggering a cmd interrupt */
	mb();
	vpu_mbox_send_type(core, COMMAND);

	return ret;
}

static struct vpu_cmd_t *vpu_alloc_cmd(struct vpu_inst *inst, u32 id, void *data)
{
	struct vpu_cmd_t *cmd;
	int i;
	int ret;

	cmd = vzalloc(sizeof(*cmd));
	if (!cmd)
		return NULL;

	cmd->pkt = vzalloc(sizeof(*cmd->pkt));
	if (!cmd->pkt) {
		vfree(cmd);
		return NULL;
	}

	cmd->id = id;
	ret = vpu_iface_pack_cmd(inst->core, cmd->pkt, inst->id, id, data);
	if (ret) {
		dev_err(inst->dev, "iface pack cmd %s fail\n", vpu_id_name(id));
		vfree(cmd->pkt);
		vfree(cmd);
		return NULL;
	}
	for (i = 0; i < ARRAY_SIZE(vpu_cmd_requests); i++) {
		if (vpu_cmd_requests[i].request == id) {
			cmd->request = &vpu_cmd_requests[i];
			break;
		}
	}

	return cmd;
}

static void vpu_free_cmd(struct vpu_cmd_t *cmd)
{
	if (!cmd)
		return;
	vfree(cmd->pkt);
	vfree(cmd);
}

static int vpu_session_process_cmd(struct vpu_inst *inst, struct vpu_cmd_t *cmd)
{
	int ret;

	dev_dbg(inst->dev, "[%d]send cmd %s\n", inst->id, vpu_id_name(cmd->id));
	vpu_iface_pre_send_cmd(inst);
	ret = vpu_cmd_send(inst->core, cmd->pkt);
	if (!ret) {
		vpu_iface_post_send_cmd(inst);
		vpu_inst_record_flow(inst, cmd->id);
	} else {
		dev_err(inst->dev, "[%d] iface send cmd %s fail\n", inst->id, vpu_id_name(cmd->id));
	}

	return ret;
}

static void vpu_process_cmd_request(struct vpu_inst *inst)
{
	struct vpu_cmd_t *cmd;
	struct vpu_cmd_t *tmp;

	if (!inst || inst->pending)
		return;

	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
		list_del_init(&cmd->list);
		if (vpu_session_process_cmd(inst, cmd))
			dev_err(inst->dev, "[%d] process cmd %s fail\n",
				inst->id, vpu_id_name(cmd->id));
		if (cmd->request) {
			inst->pending = (void *)cmd;
			break;
		}
		vpu_free_cmd(cmd);
	}
}
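/*
 * Dispatch model: commands whose id appears in vpu_cmd_requests[] are
 * synchronous - once sent, the command is parked in inst->pending and
 * vpu_process_cmd_request() stops dispatching until vpu_response_cmd()
 * matches the expected response id and clears the pending slot. All other
 * commands are fire-and-forget and are freed right after being handed to
 * the firmware.
 */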
static int vpu_request_cmd(struct vpu_inst *inst, u32 id, void *data,
			   unsigned long *key, int *sync)
{
	struct vpu_core *core;
	struct vpu_cmd_t *cmd;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;
	cmd = vpu_alloc_cmd(inst, id, data);
	if (!cmd)
		return -ENOMEM;

	mutex_lock(&core->cmd_lock);
	cmd->key = core->cmd_seq++;
	if (key)
		*key = cmd->key;
	if (sync)
		*sync = cmd->request ? true : false;
	list_add_tail(&cmd->list, &inst->cmd_q);
	vpu_process_cmd_request(inst);
	mutex_unlock(&core->cmd_lock);

	return 0;
}

static void vpu_clear_pending(struct vpu_inst *inst)
{
	if (!inst || !inst->pending)
		return;

	vpu_free_cmd(inst->pending);
	wake_up_all(&inst->core->ack_wq);
	inst->pending = NULL;
}

static bool vpu_check_response(struct vpu_cmd_t *cmd, u32 response, u32 handled)
{
	struct vpu_cmd_request *request;

	if (!cmd || !cmd->request)
		return false;

	request = cmd->request;
	if (request->response != response)
		return false;
	if (request->handled != handled)
		return false;

	return true;
}

int vpu_response_cmd(struct vpu_inst *inst, u32 response, u32 handled)
{
	struct vpu_core *core;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;
	mutex_lock(&core->cmd_lock);
	if (vpu_check_response(inst->pending, response, handled))
		vpu_clear_pending(inst);

	vpu_process_cmd_request(inst);
	mutex_unlock(&core->cmd_lock);

	return 0;
}

void vpu_clear_request(struct vpu_inst *inst)
{
	struct vpu_cmd_t *cmd;
	struct vpu_cmd_t *tmp;

	mutex_lock(&inst->core->cmd_lock);
	if (inst->pending)
		vpu_clear_pending(inst);

	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
		list_del_init(&cmd->list);
		vpu_free_cmd(cmd);
	}
	mutex_unlock(&inst->core->cmd_lock);
}

static bool check_is_responsed(struct vpu_inst *inst, unsigned long key)
{
	struct vpu_core *core = inst->core;
	struct vpu_cmd_t *cmd;
	bool flag = true;

	mutex_lock(&core->cmd_lock);
	cmd = inst->pending;
	if (cmd && key == cmd->key) {
		flag = false;
		goto exit;
	}
	list_for_each_entry(cmd, &inst->cmd_q, list) {
		if (key == cmd->key) {
			flag = false;
			break;
		}
	}
exit:
	mutex_unlock(&core->cmd_lock);

	return flag;
}

static int sync_session_response(struct vpu_inst *inst, unsigned long key, long timeout, int try)
{
	struct vpu_core *core;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;

	call_void_vop(inst, wait_prepare);
	wait_event_timeout(core->ack_wq, check_is_responsed(inst, key), timeout);
	call_void_vop(inst, wait_finish);

	if (!check_is_responsed(inst, key)) {
		if (try)
			return -EINVAL;
		dev_err(inst->dev, "[%d] sync session timeout\n", inst->id);
		set_bit(inst->id, &core->hang_mask);
		mutex_lock(&inst->core->cmd_lock);
		vpu_clear_pending(inst);
		mutex_unlock(&inst->core->cmd_lock);
		return -EINVAL;
	}

	return 0;
}
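/*
 * Each queued command is tagged with a key drawn from core->cmd_seq under
 * core->cmd_lock. check_is_responsed() returns true once no pending or
 * queued command still carries that key, and sync_session_response()
 * sleeps on core->ack_wq until that happens. On a real timeout (try == 0)
 * the instance is flagged in core->hang_mask so the core can be recovered
 * later.
 */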
static void vpu_core_keep_active(struct vpu_core *core)
{
	struct vpu_rpc_event pkt;

	memset(&pkt, 0, sizeof(pkt));
	vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_NOOP, NULL);

	dev_dbg(core->dev, "try to wake up\n");
	mutex_lock(&core->cmd_lock);
	if (vpu_cmd_send(core, &pkt))
		dev_err(core->dev, "fail to keep active\n");
	mutex_unlock(&core->cmd_lock);
}

static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
{
	unsigned long key;
	int sync = false;
	int ret;

	if (inst->id < 0)
		return -EINVAL;

	ret = vpu_request_cmd(inst, id, data, &key, &sync);
	if (ret)
		goto exit;

	/*
	 * Workaround for a firmware issue:
	 * the firmware should be woken up by a start or configure command,
	 * but there is a very small chance that it fails to wake up.
	 * In that case, try to wake it up again by sending a noop command.
	 */
	if (sync && (id == VPU_CMD_ID_CONFIGURE_CODEC || id == VPU_CMD_ID_START)) {
		if (sync_session_response(inst, key, VPU_TIMEOUT_WAKEUP, 1))
			vpu_core_keep_active(inst->core);
		else
			goto exit;
	}

	if (sync)
		ret = sync_session_response(inst, key, VPU_TIMEOUT, 0);

exit:
	if (ret)
		dev_err(inst->dev, "[%d] send cmd %s fail\n", inst->id, vpu_id_name(id));

	return ret;
}

int vpu_session_configure_codec(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_CONFIGURE_CODEC, NULL);
}

int vpu_session_start(struct vpu_inst *inst)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);

	return vpu_session_send_cmd(inst, VPU_CMD_ID_START, NULL);
}

int vpu_session_stop(struct vpu_inst *inst)
{
	int ret;

	vpu_trace(inst->dev, "[%d]\n", inst->id);

	ret = vpu_session_send_cmd(inst, VPU_CMD_ID_STOP, NULL);
	/*
	 * Workaround for a firmware bug:
	 * if the next command follows the stop command too closely,
	 * the firmware may wrongly enter WFI.
	 */
	usleep_range(3000, 5000);
	return ret;
}

int vpu_session_encode_frame(struct vpu_inst *inst, s64 timestamp)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FRAME_ENCODE, &timestamp);
}

int vpu_session_alloc_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_ALLOC, fs);
}

int vpu_session_release_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_RELEASE, fs);
}

int vpu_session_abort(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_ABORT, NULL);
}

int vpu_session_rst_buf(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_RST_BUF, NULL);
}

int vpu_session_fill_timestamp(struct vpu_inst *inst, struct vpu_ts_info *info)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_TIMESTAMP, info);
}

int vpu_session_update_parameters(struct vpu_inst *inst, void *arg)
{
	if (inst->type & VPU_CORE_TYPE_DEC)
		vpu_iface_set_decode_params(inst, arg, 1);
	else
		vpu_iface_set_encode_params(inst, arg, 1);

	return vpu_session_send_cmd(inst, VPU_CMD_ID_UPDATE_PARAMETER, arg);
}

int vpu_session_debug(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_DEBUG, NULL);
}
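/*
 * Illustrative session flow, assuming the usual driver usage of the
 * wrappers above (not spelled out in this file):
 * vpu_session_configure_codec() and vpu_session_start() bring a session
 * up; per-frame work goes through vpu_session_alloc_fs(),
 * vpu_session_encode_frame() and vpu_session_fill_timestamp(); teardown
 * uses vpu_session_abort(), vpu_session_rst_buf() and vpu_session_stop().
 */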
int vpu_core_snapshot(struct vpu_core *core)
{
	struct vpu_inst *inst;
	int ret;

	if (!core || list_empty(&core->instances))
		return 0;

	inst = list_first_entry(&core->instances, struct vpu_inst, list);

	reinit_completion(&core->cmp);
	ret = vpu_session_send_cmd(inst, VPU_CMD_ID_SNAPSHOT, NULL);
	if (ret)
		return ret;
	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "snapshot timeout\n");
		return -EINVAL;
	}

	return 0;
}

int vpu_core_sw_reset(struct vpu_core *core)
{
	struct vpu_rpc_event pkt;
	int ret;

	memset(&pkt, 0, sizeof(pkt));
	vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_FIRM_RESET, NULL);

	reinit_completion(&core->cmp);
	mutex_lock(&core->cmd_lock);
	ret = vpu_cmd_send(core, &pkt);
	mutex_unlock(&core->cmd_lock);
	if (ret)
		return ret;
	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "sw reset timeout\n");
		return -EINVAL;
	}

	return 0;
}
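/*
 * Both vpu_core_snapshot() and vpu_core_sw_reset() block on core->cmp,
 * which is presumably completed by the message handling path (outside
 * this file) when the corresponding firmware notification arrives.
 */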