/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"

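/*
 * VF <-> PF (host) communication goes through the BIF mailbox: single-byte
 * control registers carry the TRN/RCV "message valid" and "ack" handshake
 * bits, while MAILBOX_MSGBUF_TRN_DW0..DW3 and MAILBOX_MSGBUF_RCV_DW0..DW3
 * hold the four-dword message payload.  Note: the bit assignments assumed
 * below (valid in bit 0, ack in bit 1 of each control byte) are inferred
 * from how these helpers use them, not from a register spec.
 */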
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since it returns RCV_DW0 without checking whether
 * RCV_MSG_VALID has been set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

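/*
 * Consume the pending mailbox message if it matches @event: ack the host
 * and return 0, or return -ENOENT when the current RCV_DW0 differs.
 */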
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}

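/*
 * TRN_MSG_ACK handling: the host acknowledges a VF message by setting bit 1
 * of the transmit control byte; peek reads it once, poll waits for it in
 * 5 ms steps for up to NV_MAILBOX_POLL_ACK_TIMEDOUT ms.
 */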
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Didn't get TRN_MSG_ACK from pf within %d ms\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

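/*
 * Wait for a specific message from the host, re-reading RCV_DW0 every
 * 10 ms until NV_MAILBOX_POLL_MSG_TIMEDOUT ms have elapsed.
 */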
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	return -ETIME;
}

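/*
 * Send one request to the host: wait for any stale ack to drop, write the
 * four payload dwords, raise TRN_MSG_VALID, then poll for the host's ack
 * before lowering TRN_MSG_VALID again.
 */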
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; once the
	 * host's RCV_MSG_ACK is cleared, hardware automatically clears the
	 * VF's TRN_MSG_ACK.  Otherwise the xgpu_nv_poll_ack() below would
	 * return immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not be asserted! waiting again!\n", trn);
			msleep(1);
		}
	} while (trn);

	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continuing\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

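/*
 * Transmit a request and, for requests that expect a reply, poll for the
 * matching response event.  The request is retried once on timeout;
 * IDH_REQ_GPU_INIT_DATA tolerates a missing reply for hosts that predate
 * that handshake.
 */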
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r, retry = 1;
	enum idh_event event = -1;

send_request:
	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 2)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);
				return r;
			} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

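/* Ask the host for a VF reset, retrying up to NV_MAILBOX_POLL_MSG_REP_MAX times. */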
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

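/*
 * MAILBOX_INT_CNTL enable bits, inferred from the two .set handlers below:
 * bit 0 gates the receive (valid) interrupt, bit 1 the ack interrupt.
 */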
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

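/*
 * Deferred FLR handler: serialize against GPU recovery via the reset
 * domain, stop VF<->PF data exchange, tell the host we are ready for the
 * function-level reset, then wait for FLR completion before deciding
 * whether to kick full GPU recovery.
 */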
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

	/* block amdgpu_gpu_recover until the FLR COMPLETE message is
	 * received, otherwise the mailbox msg will be ruined/reset by
	 * the VF FLR.
	 */
	if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
		return;

	down_write(&adev->reset_domain->sem);

	amdgpu_virt_fini_data_exchange(adev);

	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->reset_domain->in_gpu_reset, 0);
	up_write(&adev->reset_domain->sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

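/*
 * Receive interrupt: peek the pending message and, on an FLR notification,
 * hand the heavy lifting to flr_work on the reset domain's queue so the
 * IRQ path stays short.
 */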
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
				   &adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
		/* READY_TO_ACCESS_GPU is fetched by the kernel polling thread,
		 * so the IRQ handler can safely ignore it here; other messages
		 * such as FLR complete are not handled here either.
		 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

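/*
 * Register the mailbox interrupt sources on the BIF IH client; source ids
 * 135 and 138 feed the receive and ack paths respectively (the numbering
 * is hardware-specific, taken as-is from the registration below).
 */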
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

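/* Report a consumed RAS poison event to the host via the mailbox. */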
static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev)
{
	xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu	= xgpu_nv_request_full_gpu_access,
	.rel_full_gpu	= xgpu_nv_release_full_gpu_access,
	.req_init_data  = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
	.ras_poison_handler = xgpu_nv_ras_poison_handler,
};