/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"

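/* Acknowledge a message from the host; writing 2 presumably sets the
 * RCV_MSG_ACK bit (bit 1) in the mailbox RCV control byte.
 */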
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

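/* Set or clear the VF's TRN_MSG_VALID bit to tell the host whether the
 * transmit message buffer holds a valid message.
 */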
static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * this peek_msg could *only* be called in IRQ routine because in IRQ routine
 * RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL must already be set to 1
 * by host.
 *
 * if called not in IRQ routine, this peek_msg cannot be guaranteed to return
 * the correct value since it doesn't return the RCV_DW0 in the case that
 * RCV_MSG_VALID is set by host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}

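/* Non-blocking check of the VF's TRN_MSG_ACK bit (presumably bit 1 of the
 * TRN control byte); non-zero means the host has acknowledged our message.
 */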
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

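/* Poll for the host's TRN_MSG_ACK in 5 ms steps, for up to
 * NV_MAILBOX_POLL_ACK_TIMEDOUT ms.
 */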
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

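/* Poll the RCV message buffer for a specific event in 10 ms steps, for up
 * to NV_MAILBOX_POLL_MSG_TIMEDOUT ms.
 */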
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	return -ETIME;
}

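/* Send a 4-dword message to the host: write the request and payload into
 * the TRN message buffer, raise TRN_MSG_VALID, wait for the host's ack,
 * then lower TRN_MSG_VALID again.
 */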
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; once the
	 * host's RCV_MSG_ACK is cleared, hw automatically clears the VF's
	 * TRN_MSG_ACK. Otherwise the xgpu_nv_poll_ack() below would
	 * return immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

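/* Send a request to the host and, for requests that expect a reply, poll
 * for the matching event; the send is retried once on timeout.
 */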
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r, retry = 1;
	enum idh_event event = -1;

send_request:
	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 2)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
				return r;
			} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

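/* Request a GPU reset from the host, retrying the mailbox handshake up to
 * NV_MAILBOX_POLL_MSG_REP_MAX times.
 */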
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;

	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

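/* Enable or disable the mailbox ack interrupt (presumably bit 1 of
 * MAILBOX_INT_CNTL).
 */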
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

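/* Worker that completes a host-initiated function level reset (FLR):
 * notify the host we are ready, wait for FLR_NOTIFICATION_CMPL, and
 * trigger GPU recovery if needed.
 */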
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

	/* block amdgpu_gpu_recover till msg FLR COMPLETE is received,
	 * otherwise the mailbox msg will be ruined/reset by
	 * the VF FLR.
	 */
	if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
		return;

	down_write(&adev->reset_domain->sem);

	amdgpu_virt_fini_data_exchange(adev);

	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->reset_domain->in_gpu_reset, 0);
	up_write(&adev->reset_domain->sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

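/* Enable or disable the mailbox receive interrupt (presumably bit 0 of
 * MAILBOX_INT_CNTL).
 */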
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

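/* IRQ handler for incoming mailbox messages; only FLR notifications are
 * acted on here, by scheduling flr_work in the reset domain.
 */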
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
				   &adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
		/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ
		 * can ignore it for now since the polling thread will handle
		 * it; other msgs like flr complete are not handled here.
		 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}


static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

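/* Register the mailbox rcv and ack interrupt sources with the BIF IH
 * client, using source IDs 135 (rcv) and 138 (ack).
 */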
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

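/* Forward a RAS poison consumption notification to the host. */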
static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev)
{
	xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu	= xgpu_nv_request_full_gpu_access,
	.rel_full_gpu	= xgpu_nv_release_full_gpu_access,
	.req_init_data  = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
	.ras_poison_handler = xgpu_nv_ras_poison_handler,
};