xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c (revision 68198dca)
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "vega10/soc15ip.h"
#include "vega10/NBIO/nbio_6_1_offset.h"
#include "vega10/NBIO/nbio_6_1_sh_mask.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

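/*
 * Ack the message just read from the PF: set RCV_MSG_ACK in the mailbox
 * control register, then poll until the PF clears RCV_MSG_VALID, giving
 * up after the AI_MAILBOX_TIMEDOUT budget is consumed in 1 ms steps.
 */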
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	u32 reg;
	int timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_CONTROL), reg);

	/* Wait for RCV_MSG_VALID to be 0 */
	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (reg & mask) {
		if (timeout <= 0) {
			pr_err("RCV_MSG_VALID is not cleared\n");
			break;
		}
		mdelay(1);
		timeout -= 1;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}
}

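/* Raise or drop TRN_MSG_VALID to tell the PF whether our outgoing message is ready. */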
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
			    TRN_MSG_VALID, val ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL),
		      reg);
}

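/*
 * Check the receive mailbox for a specific event. Returns 0 and acks the
 * message on a match, -ENOENT otherwise. For IDH_FLR_NOTIFICATION_CMPL the
 * RCV_MSG_VALID check is skipped and the message buffer is read directly.
 */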
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
		if (!(reg & mask))
			return -ENOENT;
	}

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

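/*
 * Poll TRN_MSG_ACK until the PF acknowledges our transmitted message;
 * returns -ETIME once the AI_MAILBOX_TIMEDOUT budget, consumed in 5 ms
 * steps, runs out.
 */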
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK);
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (!(reg & mask)) {
		if (timeout <= 0) {
			pr_err("Didn't get ack from PF\n");
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}

	return r;
}

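/* Poll the receive mailbox for @event, returning -ETIME on timeout. */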
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;

	r = xgpu_ai_mailbox_rcv_msg(adev, event);
	while (r) {
		if (timeout <= 0) {
			pr_err("Didn't get msg %d from PF\n", event);
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		r = xgpu_ai_mailbox_rcv_msg(adev, event);
	}

	return r;
}

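/*
 * Send a request plus up to three data words to the PF: fill the transmit
 * message buffer, raise TRN_MSG_VALID, wait for the PF's ack, then drop
 * TRN_MSG_VALID again.
 */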
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1,
				      u32 data2, u32 data3)
{
	u32 reg;
	int r;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from PF, continuing\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

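/*
 * Issue an access request to the PF. For the init/fini/reset access
 * requests the PF answers with IDH_READY_TO_ACCESS_GPU, so wait for that
 * reply; a successful init request additionally delivers the firmware
 * reserve checksum key in RCV_DW2.
 */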
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* wait for the PF's reply to init/fini/reset access requests */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from PF, giving up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

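/* Request full GPU access from the hypervisor, for driver init or fini. */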
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("Got ack interrupt, nothing to do.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

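/*
 * Deferred FLR handling: wait for the PF to signal that the function-level
 * reset has completed, then trigger an SR-IOV GPU reset to recover.
 */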
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* wait until RCV_MSG becomes IDH_FLR_NOTIFICATION_CMPL */
	if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

	/* Trigger recovery due to world switch failure */
	amdgpu_sriov_gpu_reset(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

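/*
 * Receive-interrupt handler. When TDR is disabled (lockup timeout of 0), a
 * pending IDH_FLR_NOTIFICATION from the hypervisor schedules the FLR work
 * item; all other events are ignored here.
 */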
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* trigger gpu-reset by hypervisor only if TDR is disabled */
	if (amdgpu_lockup_timeout == 0) {
		/* see which event we got */
		r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

		/* only handle FLR_NOTIFY for now */
		if (!r)
			schedule_work(&adev->virt.flr_work);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

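/*
 * Register the two BIF mailbox interrupt sources: src_id 135 feeds the
 * receive (message valid) interrupt, src_id 138 the transmit-ack interrupt.
 */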
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

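/*
 * Enable both mailbox interrupts and set up the deferred FLR work; if
 * enabling the ack interrupt fails, the receive interrupt is released again.
 */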
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

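/* Mailbox-backed virtualization ops used by SR-IOV VFs on SOC15 (Vega10) parts. */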
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu	= xgpu_ai_request_full_gpu_access,
	.rel_full_gpu	= xgpu_ai_release_full_gpu_access,
	.reset_gpu	= xgpu_ai_request_reset,
	.trans_msg	= xgpu_ai_mailbox_trans_msg,
};
358