// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"

/* Register doorbell db_id and bind it to the job queue of context ctx_id. */
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
			 u64 jobq_base, u32 jobq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.register_db.db_idx = db_id;
	req.payload.register_db.jobq_base = jobq_base;
	req.payload.register_db.jobq_size = jobq_size;
	req.payload.register_db.host_ssid = ctx_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
		return ret;
	}

	ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);

	return 0;
}

/* Unregister a previously registered doorbell. */
int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.unregister_db.db_idx = db_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
		return ret;
	}

	ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);

	return 0;
}

/* Read the heartbeat counter of a single engine. */
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err(vdev, "Failed to get heartbeat from engine %d: %d\n", engine, ret);
		return ret;
	}

	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
	return ret;
}

/* Request a reset of the given engine. */
int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_reset.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err(vdev, "Failed to reset engine %d: %d\n", engine, ret);

	return ret;
}

/* Request preemption of the given engine. */
int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_preempt.engine_idx = engine;
	req.payload.engine_preempt.preempt_id = preempt_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

	return ret;
}

/* Pass a dynamic-debug command string to the VPU firmware. */
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

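	/*
	 * strscpy() always NUL-terminates the destination, so a command that
	 * gets truncated to VPU_DYNDBG_CMD_MAX_LEN is still a valid string
	 * by the time it reaches the firmware. Note that strncpy() cannot be
	 * error-checked here: it returns its destination pointer, never NULL.
	 */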
	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn(vdev, "Failed to send command \"%s\": ret %d\n", command, ret);

	return ret;
}

/* Query which trace destinations and HW components the firmware supports. */
int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
				  u64 *trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
	struct vpu_jsm_msg resp;
	int ret;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn(vdev, "Failed to get trace capability: %d\n", ret);
		return ret;
	}

	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;

	return ret;
}

/* Apply a trace level, destination mask and HW component mask. */
int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
			      u64 trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.trace_config.trace_level = trace_level;
	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn(vdev, "Failed to set trace config: %d\n", ret);

	return ret;
}

/* Ask the firmware to release all resources tied to a host context (SSID). */
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
	struct vpu_jsm_msg resp;

	req.payload.ssid_release.host_ssid = host_ssid;

	return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
				     VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}