// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"

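/*
 * Register a doorbell for the given host context (SSID) and job queue by
 * sending VPU_JSM_MSG_REGISTER_DB on the asynchronous IPC command channel
 * and waiting for the VPU_JSM_MSG_REGISTER_DB_DONE response.
 */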
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
			 u64 jobq_base, u32 jobq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.register_db.db_idx = db_id;
	req.payload.register_db.jobq_base = jobq_base;
	req.payload.register_db.jobq_size = jobq_size;
	req.payload.register_db.host_ssid = ctx_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
		return ret;
	}

	ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);

	return 0;
}

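/*
 * Unregister a previously registered doorbell via VPU_JSM_MSG_UNREGISTER_DB.
 * A failure is reported as a warning rather than an error.
 */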
int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.unregister_db.db_idx = db_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
		return ret;
	}

	ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);

	return 0;
}

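/*
 * Query an engine's heartbeat value via VPU_JSM_MSG_QUERY_ENGINE_HB and
 * return the value reported in the VPU_JSM_MSG_QUERY_ENGINE_HB_DONE
 * response through @heartbeat.
 */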
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err(vdev, "Failed to get heartbeat from engine %d: %d\n", engine, ret);
		return ret;
	}

	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
	return ret;
}

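/*
 * Request a reset of the given engine via VPU_JSM_MSG_ENGINE_RESET and wait
 * for VPU_JSM_MSG_ENGINE_RESET_DONE.
 */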
int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_reset.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err(vdev, "Failed to reset engine %d: %d\n", engine, ret);

	return ret;
}

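/*
 * Request preemption of the given engine via VPU_JSM_MSG_ENGINE_PREEMPT,
 * passing @preempt_id in the request payload.
 */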
int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_preempt.engine_idx = engine;
	req.payload.engine_preempt.preempt_id = preempt_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

	return ret;
}

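/*
 * Pass a dynamic debug command string to the VPU via
 * VPU_JSM_MSG_DYNDBG_CONTROL. The command is copied with strscpy() and thus
 * truncated to fit VPU_DYNDBG_CMD_MAX_LEN.
 */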
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn(vdev, "Failed to send command \"%s\": ret %d\n", command, ret);

	return ret;
}

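/*
 * Query the supported trace destinations and HW components via
 * VPU_JSM_MSG_TRACE_GET_CAPABILITY and return the masks from the response
 * through @trace_destination_mask and @trace_hw_component_mask.
 */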
int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
				  u64 *trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
	struct vpu_jsm_msg resp;
	int ret;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn(vdev, "Failed to get trace capability: %d\n", ret);
		return ret;
	}

	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;

	return ret;
}

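/*
 * Apply a trace configuration (level, destination mask, HW component mask)
 * via VPU_JSM_MSG_TRACE_SET_CONFIG.
 */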
int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
			      u64 trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.trace_config.trace_level = trace_level;
	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn(vdev, "Failed to set config: %d\n", ret);

	return ret;
}

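/*
 * Release the given host SSID (context) on the VPU side via
 * VPU_JSM_MSG_SSID_RELEASE.
 */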
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
	struct vpu_jsm_msg resp;

	req.payload.ssid_release.host_ssid = host_ssid;

	return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
				     VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}