/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "sdma1/sdma1_4_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"
#include "soc15_common.h"
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
#include "gfx_v9_0.h"
#include "amdgpu_amdkfd_gfx_v9.h"
#include <uapi/linux/kfd_ioctl.h>

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES,
	SAVE_WAVES
};

static void kgd_gfx_v9_lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid, uint32_t inst)
{
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, mec, pipe, queue, vmid, GET_INST(GC, inst));
}

static void kgd_gfx_v9_unlock_srbm(struct amdgpu_device *adev, uint32_t inst)
{
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, inst));
	mutex_unlock(&adev->srbm_mutex);
}

void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
				uint32_t queue_id, uint32_t inst)
{
	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	kgd_gfx_v9_lock_srbm(adev, mec, pipe, queue_id, 0, inst);
}

uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
			queue_id;

	return 1ull << bit;
}
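
/*
 * Worked example for kgd_gfx_v9_get_queue_mask() above (illustrative only;
 * the real num_queue_per_pipe comes from the MEC configuration at init):
 * assuming 8 queues per pipe, pipe_id = 1 and queue_id = 2 select
 * bit = 1 * 8 + 2 = 10, so the returned mask is 1ull << 10 = 0x400.
 * This per-queue bit layout is what kgd_gfx_v9_hqd_load() writes (truncated
 * to 32 bits) into mmCP_PQ_WPTR_POLL_CNTL1.
 */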

void kgd_gfx_v9_release_queue(struct amdgpu_device *adev, uint32_t inst)
{
	kgd_gfx_v9_unlock_srbm(adev, inst);
}

void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases, uint32_t inst)
{
	kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);

	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG), sh_mem_config);
	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_BASES), sh_mem_bases);
	/* APE1 no longer exists on GFX9 */

	kgd_gfx_v9_unlock_srbm(adev, inst);
}

int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
					unsigned int vmid, uint32_t inst)
{
	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	/*
	 * need to do this twice, once for gfx and once for mmhub
	 * for ATC add 16 to VMID for mmhub, for IH different registers.
	 * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
	 */

	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
	       pasid_mapping);

	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << vmid)))
		cpu_relax();

	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
	       pasid_mapping);

	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID16_PASID_MAPPING) + vmid,
	       pasid_mapping);

	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << (vmid + 16))))
		cpu_relax();

	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << (vmid + 16));

	/* Mapping vmid to pasid also for IH block */
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid,
	       pasid_mapping);
	return 0;
}
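
/*
 * Illustrative summary of the handshake in kgd_gfx_v9_set_pasid_vmid_mapping()
 * above (derived from the code itself, not from register documentation):
 * each mapping write is acknowledged by a per-VMID bit in
 * ATC_VMID_PASID_MAPPING_UPDATE_STATUS -- bit 'vmid' for the GFX ATC copy
 * and bit 'vmid + 16' for the MMHUB copy. The driver spins until the bit
 * is set, then writes it back to clear it, so the status register is
 * always left clean for the next update.
 */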

/* TODO - RING0 form of field is obsolete, seems to date back to SI
 * but still works
 */

int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
				uint32_t inst)
{
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	kgd_gfx_v9_lock_srbm(adev, mec, pipe, 0, 0, inst);

	WREG32_SOC15(GC, GET_INST(GC, inst), mmCPC_INT_CNTL,
		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	kgd_gfx_v9_unlock_srbm(adev, inst);

	return 0;
}

static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base = 0;
	uint32_t sdma_rlc_reg_offset;

	switch (engine_id) {
	default:
		dev_warn(adev->dev,
			 "Invalid sdma engine id (%d), using engine id 0\n",
			 engine_id);
		fallthrough;
	case 0:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
		break;
	case 1:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
				mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
		break;
	}

	sdma_rlc_reg_offset = sdma_engine_reg_base
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
		 queue_id, sdma_rlc_reg_offset);

	return sdma_rlc_reg_offset;
}
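
/*
 * Sketch of the arithmetic in get_sdma_rlc_reg_offset() above (illustrative;
 * the numeric values depend on the IP base addresses discovered at init):
 * the engine base is chosen so that 'base + mmSDMA0_RLC0_RB_CNTL' lands on
 * the selected engine's RLC0 block, and queues within an engine are spaced
 * by a fixed stride, (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL). So for
 * engine 1, queue 2:
 *	offset = base(SDMA1) + 2 * stride
 * and callers then add individual SDMA0-relative register names such as
 * mmSDMA0_RLC0_RB_RPTR on top of the returned value.
 */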

static inline struct v9_mqd *get_mqd(void *mqd)
{
	return (struct v9_mqd *)mqd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t __user *wptr, uint32_t wptr_shift,
			uint32_t wptr_mask, struct mm_struct *mm,
			uint32_t inst)
{
	struct v9_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);


	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL),
					data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. WPTR==RPTR before starting the poll so
		 * the CP starts fetching new commands from the right
		 * place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
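		/*
		 * Mechanical example of the reconstruction below
		 * (illustrative numbers, not from any real MQD): with a
		 * QUEUE_SIZE field of 9, queue_size = 2 << 9 = 0x400
		 * dwords. For a saved rptr of 0x050 and saved wptr_lo of
		 * 0x8c70, the ring offsets are 0x050 and 0x070; the wptr
		 * has not wrapped past the rptr offset, so
		 * guessed_wptr = 0x8c00 + 0x050 = 0x8c50. Had wptr_lo
		 * been 0x8c30 (ring offset 0x030 < 0x050), one extra
		 * queue_size would be added:
		 * guessed_wptr = 0x050 + 0x400 + 0x8c00 = 0x9050.
		 */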
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

		WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO),
		       lower_32_bits(guessed_wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI),
		       upper_32_bits(guessed_wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR),
		       lower_32_bits((uintptr_t)wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
		       upper_32_bits((uintptr_t)wptr));
		WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1,
		       (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
	}

	/* Start the EOP fetcher */
	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR),
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE), data);

	kgd_gfx_v9_release_queue(adev, inst);

	return 0;
}

int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off, uint32_t inst)
{
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[inst].ring;
	struct v9_mqd *m;
	uint32_t mec, pipe;
	int r;

	m = get_mqd(mqd);

	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
		 mec, pipe, queue_id);

	spin_lock(&adev->gfx.kiq[inst].ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, 7);
	if (r) {
		pr_err("Failed to alloc KIQ (%d).\n", r);
		goto out_unlock;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
			  PACKET3_MAP_QUEUES_PIPE(pipe) |
			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
	amdgpu_ring_commit(kiq_ring);

out_unlock:
	spin_unlock(&adev->gfx.kiq[inst].ring_lock);
	kgd_gfx_v9_release_queue(adev, inst);

	return r;
}

int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)
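	/*
	 * Each dump entry is an {address, value} pair. SOC15 register
	 * offsets are dword-indexed, so '(addr) << 2' in the macro above
	 * converts the offset into the byte address reported to the caller.
	 */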

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);

	for (reg = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	kgd_gfx_v9_release_queue(adev, inst);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
				uint64_t queue_address, uint32_t pipe_id,
				uint32_t queue_id, uint32_t inst)
{
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
	act = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE) &&
		   high == RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	kgd_gfx_v9_release_queue(adev, inst);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id, uint32_t inst)
{
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v9_mqd *m = get_mqd(mqd);

	if (amdgpu_in_reset(adev))
		return -EIO;

	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15_RLC(GC, GET_INST(GC, inst), RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
		type = SAVE_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST), type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			kgd_gfx_v9_release_queue(adev, inst);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	kgd_gfx_v9_release_queue(adev, inst);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
				unsigned int utimeout)
{
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
					uint32_t gfx_index_val,
					uint32_t sq_cmd, uint32_t inst)
{
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32_SOC15_RLC_SHADOW(GC, GET_INST(GC, inst), mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_CMD, sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32_SOC15_RLC_SHADOW(GC, GET_INST(GC, inst), mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

/*
 * GFX9 helper for wave launch stall requirements on debug trap setting.
 *
 * vmid:
 *   Target VMID to stall/unstall.
 *
 * stall:
 *   0-unstall wave launch (enable), 1-stall wave launch (disable).
 *   After wavefront launch has been stalled, allocated waves must drain from
 *   SPI in order for debug trap settings to take effect on those waves.
 *   This is roughly a ~96 clock cycle wait on SPI where a read on
 *   SPI_GDBG_WAVE_CNTL translates to ~32 clock cycles.
 *   KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY indicates the number of reads required.
 *
 *   NOTE: We can afford to clear the entire STALL_VMID field on unstall
 *   because GFX9.4.1 cannot support multi-process debugging due to trap
 *   configuration and masking being limited to global scope.  Always assume
 *   single process conditions.
 */
#define KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY	3
void kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev,
					uint32_t vmid,
					bool stall)
{
	int i;
	uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1))
		data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_VMID,
							stall ? 1 << vmid : 0);
	else
		data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA,
							stall ? 1 : 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	if (!stall)
		return;

	for (i = 0; i < KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY; i++)
		RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
}
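
/*
 * The drain latency used above follows directly from the timing stated in
 * the comment before kgd_gfx_v9_set_wave_launch_stall(): ~96 SPI clocks
 * needed / ~32 clocks per SPI_GDBG_WAVE_CNTL read = 3 dummy reads, hence
 * KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY == 3.
 */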

/*
 * restore_dbg_registers is ignored here but is a general interface requirement
 * for devices that support GFXOFF and where the RLC save/restore list
 * does not support hw registers for debugging i.e. the driver has to manually
 * initialize the debug mode registers after it has disabled GFX off during the
 * debug session.
 */
uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev,
				bool restore_dbg_registers,
				uint32_t vmid)
{
	mutex_lock(&adev->grbm_idx_mutex);

	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);

	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

/*
 * keep_trap_enabled is ignored here but is a general interface requirement
 * for devices that support multi-process debugging where the performance
 * overhead from trap temporary setup needs to be bypassed when the debug
 * session has ended.
 */
uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev,
					bool keep_trap_enabled,
					uint32_t vmid)
{
	mutex_lock(&adev->grbm_idx_mutex);

	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);

	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

int kgd_gfx_v9_validate_trap_override_request(struct amdgpu_device *adev,
					uint32_t trap_override,
					uint32_t *trap_mask_supported)
{
	*trap_mask_supported &= KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH;

	/* The SPI_GDBG_TRAP_MASK register is global and affects all
	 * processes. Only allow OR-ing the address-watch bit, since
	 * this only affects processes under the debugger. Other bits
	 * should stay 0 to avoid the debugger interfering with other
	 * processes.
	 */
	if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR)
		return -EINVAL;

	return 0;
}

uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev,
					     uint32_t vmid,
					     uint32_t trap_override,
					     uint32_t trap_mask_bits,
					     uint32_t trap_mask_request,
					     uint32_t *trap_mask_prev,
					     uint32_t kfd_dbg_cntl_prev)
{
	uint32_t data, wave_cntl_prev;

	mutex_lock(&adev->grbm_idx_mutex);

	wave_cntl_prev = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));

	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);

	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK));
	*trap_mask_prev = REG_GET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN);

	trap_mask_bits = (trap_mask_bits & trap_mask_request) |
		(*trap_mask_prev & ~trap_mask_request);

	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN, trap_mask_bits);
	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, REPLACE, trap_override);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);

	/* We need to preserve wave launch mode stall settings. */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), wave_cntl_prev);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
794101827e1SJonathan Kim 
kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device * adev,uint8_t wave_launch_mode,uint32_t vmid)795aea1b473SJonathan Kim uint32_t kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device *adev,
796aea1b473SJonathan Kim 					uint8_t wave_launch_mode,
797aea1b473SJonathan Kim 					uint32_t vmid)
798aea1b473SJonathan Kim {
799aea1b473SJonathan Kim 	uint32_t data = 0;
800aea1b473SJonathan Kim 	bool is_mode_set = !!wave_launch_mode;
801aea1b473SJonathan Kim 
802aea1b473SJonathan Kim 	mutex_lock(&adev->grbm_idx_mutex);
803aea1b473SJonathan Kim 
804aea1b473SJonathan Kim 	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
805aea1b473SJonathan Kim 
806aea1b473SJonathan Kim 	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
807aea1b473SJonathan Kim 		VMID_MASK, is_mode_set ? 1 << vmid : 0);
808aea1b473SJonathan Kim 	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
809aea1b473SJonathan Kim 		MODE, is_mode_set ? wave_launch_mode : 0);
810aea1b473SJonathan Kim 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);
811aea1b473SJonathan Kim 
812aea1b473SJonathan Kim 	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
813aea1b473SJonathan Kim 
814aea1b473SJonathan Kim 	mutex_unlock(&adev->grbm_idx_mutex);
815aea1b473SJonathan Kim 
816aea1b473SJonathan Kim 	return 0;
817aea1b473SJonathan Kim }
818aea1b473SJonathan Kim 
819e0f85f46SJonathan Kim #define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H)
kgd_gfx_v9_set_address_watch(struct amdgpu_device * adev,uint64_t watch_address,uint32_t watch_address_mask,uint32_t watch_id,uint32_t watch_mode,uint32_t debug_vmid,uint32_t inst)820e0f85f46SJonathan Kim uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev,
821e0f85f46SJonathan Kim 					uint64_t watch_address,
822e0f85f46SJonathan Kim 					uint32_t watch_address_mask,
823e0f85f46SJonathan Kim 					uint32_t watch_id,
824e0f85f46SJonathan Kim 					uint32_t watch_mode,
825036e348fSEric Huang 					uint32_t debug_vmid,
826036e348fSEric Huang 					uint32_t inst)
827e0f85f46SJonathan Kim {
828e0f85f46SJonathan Kim 	uint32_t watch_address_high;
829e0f85f46SJonathan Kim 	uint32_t watch_address_low;
830e0f85f46SJonathan Kim 	uint32_t watch_address_cntl;
831e0f85f46SJonathan Kim 
832e0f85f46SJonathan Kim 	watch_address_cntl = 0;
833e0f85f46SJonathan Kim 
834e0f85f46SJonathan Kim 	watch_address_low = lower_32_bits(watch_address);
835e0f85f46SJonathan Kim 	watch_address_high = upper_32_bits(watch_address) & 0xffff;
836e0f85f46SJonathan Kim 
837e0f85f46SJonathan Kim 	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
838e0f85f46SJonathan Kim 			TCP_WATCH0_CNTL,
839e0f85f46SJonathan Kim 			VMID,
840e0f85f46SJonathan Kim 			debug_vmid);
841e0f85f46SJonathan Kim 	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
842e0f85f46SJonathan Kim 			TCP_WATCH0_CNTL,
843e0f85f46SJonathan Kim 			MODE,
844e0f85f46SJonathan Kim 			watch_mode);
845e0f85f46SJonathan Kim 	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
846e0f85f46SJonathan Kim 			TCP_WATCH0_CNTL,
847e0f85f46SJonathan Kim 			MASK,
848e0f85f46SJonathan Kim 			watch_address_mask >> 6);
849e0f85f46SJonathan Kim 
850e0f85f46SJonathan Kim 	/* Turning off this watch point until we set all the registers */
851e0f85f46SJonathan Kim 	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
852e0f85f46SJonathan Kim 			TCP_WATCH0_CNTL,
853e0f85f46SJonathan Kim 			VALID,
854e0f85f46SJonathan Kim 			0);
855e0f85f46SJonathan Kim 
856e0f85f46SJonathan Kim 	WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
857e0f85f46SJonathan Kim 			(watch_id * TCP_WATCH_STRIDE)),
858e0f85f46SJonathan Kim 			watch_address_cntl);
859e0f85f46SJonathan Kim 
860e0f85f46SJonathan Kim 	WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) +
861e0f85f46SJonathan Kim 			(watch_id * TCP_WATCH_STRIDE)),
862e0f85f46SJonathan Kim 			watch_address_high);
863e0f85f46SJonathan Kim 
864e0f85f46SJonathan Kim 	WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) +
865e0f85f46SJonathan Kim 			(watch_id * TCP_WATCH_STRIDE)),
866e0f85f46SJonathan Kim 			watch_address_low);
867e0f85f46SJonathan Kim 
868e0f85f46SJonathan Kim 	/* Enable the watch point */
869e0f85f46SJonathan Kim 	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
870e0f85f46SJonathan Kim 			TCP_WATCH0_CNTL,
871e0f85f46SJonathan Kim 			VALID,
872e0f85f46SJonathan Kim 			1);
873e0f85f46SJonathan Kim 
874e0f85f46SJonathan Kim 	WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
875e0f85f46SJonathan Kim 			(watch_id * TCP_WATCH_STRIDE)),
876e0f85f46SJonathan Kim 			watch_address_cntl);
877e0f85f46SJonathan Kim 
878e0f85f46SJonathan Kim 	return 0;
879e0f85f46SJonathan Kim }
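
/*
 * Example encoding for kgd_gfx_v9_set_address_watch() above (inferred from
 * the field programming in the code, not from hardware documentation): a
 * watch covering a 4 KiB range would pass watch_address_mask = 0xfff,
 * which is shifted right by 6 before being written to TCP_WATCH0_CNTL.MASK
 * (0xfff >> 6 = 0x3f), suggesting the CNTL mask field describes the range
 * in 64-byte granules.
 */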

uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
					uint32_t watch_id)
{
	uint32_t watch_address_cntl;

	watch_address_cntl = 0;

	WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
			(watch_id * TCP_WATCH_STRIDE)),
			watch_address_cntl);

	return 0;
}

/* kgd_gfx_v9_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
 * The values read are:
 *     ib_offload_wait_time     -- Wait Count for Indirect Buffer Offloads.
 *     atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
 *     wrm_offload_wait_time    -- Wait Count for WAIT_REG_MEM Offloads.
 *     gws_wait_time            -- Wait Count for Global Wave Syncs.
 *     que_sleep_wait_time      -- Wait Count for Dequeue Retry.
 *     sch_wave_wait_time       -- Wait Count for Scheduling Wave Message.
 *     sem_rearm_wait_time      -- Wait Count for Semaphore re-arm.
 *     deq_retry_wait_time      -- Wait Count for Global Wave Syncs.
 */
void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
					uint32_t *wait_times,
					uint32_t inst)
{
	*wait_times = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
			mmCP_IQ_WAIT_TIME2));
}

void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
			uint32_t vmid, uint64_t page_table_base)
{
	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID %u\n",
		       vmid);
		return;
	}

	adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);

	adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
lock_spi_csq_mutexes(struct amdgpu_device * adev)92943a4bc82SRamesh Errabolu static void lock_spi_csq_mutexes(struct amdgpu_device *adev)
93043a4bc82SRamesh Errabolu {
93143a4bc82SRamesh Errabolu 	mutex_lock(&adev->srbm_mutex);
93243a4bc82SRamesh Errabolu 	mutex_lock(&adev->grbm_idx_mutex);
93343a4bc82SRamesh Errabolu 
93443a4bc82SRamesh Errabolu }
93543a4bc82SRamesh Errabolu 
unlock_spi_csq_mutexes(struct amdgpu_device * adev)93643a4bc82SRamesh Errabolu static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
93743a4bc82SRamesh Errabolu {
93843a4bc82SRamesh Errabolu 	mutex_unlock(&adev->grbm_idx_mutex);
93943a4bc82SRamesh Errabolu 	mutex_unlock(&adev->srbm_mutex);
94043a4bc82SRamesh Errabolu }
94143a4bc82SRamesh Errabolu 
94243a4bc82SRamesh Errabolu /**
9431fdbbc12SFabio M. De Francesco  * get_wave_count: Read device registers to get number of waves in flight for
94443a4bc82SRamesh Errabolu  * a particular queue. The method also returns the VMID associated with the
94543a4bc82SRamesh Errabolu  * queue.
94643a4bc82SRamesh Errabolu  *
94743a4bc82SRamesh Errabolu  * @adev: Handle of device whose registers are to be read
94843a4bc82SRamesh Errabolu  * @queue_idx: Index of queue in the queue-map bit-field
94943a4bc82SRamesh Errabolu  * @wave_cnt: Output parameter updated with number of waves in flight
95043a4bc82SRamesh Errabolu  * @vmid: Output parameter updated with VMID of queue whose wave count
95143a4bc82SRamesh Errabolu  *        is being collected
9523eeb0d03SSrinivasan Shanmugam  * @inst: XCC instance number on a multi-XCC setup
95343a4bc82SRamesh Errabolu  */
95443a4bc82SRamesh Errabolu static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
955e2069a7bSMukul Joshi 		int *wave_cnt, int *vmid, uint32_t inst)
95643a4bc82SRamesh Errabolu {
95743a4bc82SRamesh Errabolu 	int pipe_idx;
95843a4bc82SRamesh Errabolu 	int queue_slot;
95943a4bc82SRamesh Errabolu 	unsigned int reg_val;
96043a4bc82SRamesh Errabolu 
96143a4bc82SRamesh Errabolu 	/*
96243a4bc82SRamesh Errabolu 	 * Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID
96343a4bc82SRamesh Errabolu 	 * parameters to read out waves in flight. Get VMID if there are
96443a4bc82SRamesh Errabolu 	 * non-zero waves in flight.
96543a4bc82SRamesh Errabolu 	 */
96643a4bc82SRamesh Errabolu 	*vmid = 0xFF;
96743a4bc82SRamesh Errabolu 	*wave_cnt = 0;
96843a4bc82SRamesh Errabolu 	pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
96943a4bc82SRamesh Errabolu 	queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
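	/* e.g. with 8 queues per pipe, queue_idx 11 maps to pipe 1, slot 3 */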
970e2069a7bSMukul Joshi 	soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, inst);
971e2069a7bSMukul Joshi 	reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, inst, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
97243a4bc82SRamesh Errabolu 			 queue_slot);
97343a4bc82SRamesh Errabolu 	*wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
97443a4bc82SRamesh Errabolu 	if (*wave_cnt != 0)
975e2069a7bSMukul Joshi 		*vmid = (RREG32_SOC15(GC, inst, mmCP_HQD_VMID) &
97643a4bc82SRamesh Errabolu 			 CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT;
97743a4bc82SRamesh Errabolu }
97843a4bc82SRamesh Errabolu 
97943a4bc82SRamesh Errabolu /**
9801fdbbc12SFabio M. De Francesco  * kgd_gfx_v9_get_cu_occupancy: Reads relevant registers associated with each
98143a4bc82SRamesh Errabolu  * shader engine and aggregates the number of waves that are in flight for the
98243a4bc82SRamesh Errabolu  * process whose pasid is provided as a parameter. The process could have ZERO
98343a4bc82SRamesh Errabolu  * or more queues running and submitting waves to compute units.
98443a4bc82SRamesh Errabolu  *
98577608faaSRajneesh Bhardwaj  * @adev: Handle of device from which to get number of waves in flight
98643a4bc82SRamesh Errabolu  * @pasid: Identifies the process for which this query call is invoked
9871fdbbc12SFabio M. De Francesco  * @pasid_wave_cnt: Output parameter updated with number of waves in flight that
98843a4bc82SRamesh Errabolu  *                  belong to process with given pasid
98943a4bc82SRamesh Errabolu  * @max_waves_per_cu: Output parameter updated with maximum number of waves
99043a4bc82SRamesh Errabolu  *                    possible per Compute Unit
9913eeb0d03SSrinivasan Shanmugam  * @inst: XCC instance number on a multi-XCC setup
99243a4bc82SRamesh Errabolu  *
9931fdbbc12SFabio M. De Francesco  * Note: It's possible that the device has too many queues (oversubscription),
99443a4bc82SRamesh Errabolu  * in which case a VMID could be remapped to a different PASID. This could lead
99577608faaSRajneesh Bhardwaj  * to an inaccurate wave count. The following is a high-level sequence:
99643a4bc82SRamesh Errabolu  *    Time T1: vmid = getVmid(); vmid is associated with Pasid P1
99743a4bc82SRamesh Errabolu  *    Time T2: pasid = getPasid(vmid); vmid is now associated with Pasid P2
99843a4bc82SRamesh Errabolu  * In the sequence above, the wave count obtained at time T1 will be
99943a4bc82SRamesh Errabolu  * incorrectly lost or added to the total wave count.
100043a4bc82SRamesh Errabolu  *
100143a4bc82SRamesh Errabolu  * The registers that provide the waves in flight are:
100243a4bc82SRamesh Errabolu  *
100343a4bc82SRamesh Errabolu  *  SPI_CSQ_WF_ACTIVE_STATUS - bit-map of queues per pipe. The bit is ON if a
100443a4bc82SRamesh Errabolu  *  queue is slotted, OFF if there is no queue. A process could have ZERO or
100543a4bc82SRamesh Errabolu  *  more queues slotted and submitting waves to be run on compute units. Even
100643a4bc82SRamesh Errabolu  *  when a queue is slotted it is possible that it contains zero wavefronts;
100743a4bc82SRamesh Errabolu  *  this can happen when the queue is waiting on top-of-pipe events, e.g. a
100843a4bc82SRamesh Errabolu  *  waitRegMem command.
100943a4bc82SRamesh Errabolu  *
101043a4bc82SRamesh Errabolu  *  For each bit that is ON from above:
101143a4bc82SRamesh Errabolu  *
101243a4bc82SRamesh Errabolu  *    Read (SPI_CSQ_WF_ACTIVE_COUNT_0 + queue_idx) register. It provides the
101343a4bc82SRamesh Errabolu  *    number of waves that are in flight for the queue at specified index. The
101443a4bc82SRamesh Errabolu  *    index ranges from 0 to 7.
101543a4bc82SRamesh Errabolu  *
101643a4bc82SRamesh Errabolu  *    If non-zero waves are in flight, read CP_HQD_VMID register to obtain VMID
101743a4bc82SRamesh Errabolu  *    of the wave(s).
101843a4bc82SRamesh Errabolu  *
101943a4bc82SRamesh Errabolu  *    Determine if the VMID from the above step maps to the pasid provided as
102043a4bc82SRamesh Errabolu  *    a parameter. If it matches, aggregate the wave count. A VMID that does
102143a4bc82SRamesh Errabolu  *    not match the pasid is a normal condition, i.e. a device is expected to
102243a4bc82SRamesh Errabolu  *    support multiple queues from multiple processes.
102343a4bc82SRamesh Errabolu  *
102443a4bc82SRamesh Errabolu  *  Reading the registers referenced above involves programming GRBM appropriately.
102543a4bc82SRamesh Errabolu  */
10263356c38dSGraham Sider void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
1027e2069a7bSMukul Joshi 		int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst)
102843a4bc82SRamesh Errabolu {
102943a4bc82SRamesh Errabolu 	int qidx;
103043a4bc82SRamesh Errabolu 	int vmid;
103143a4bc82SRamesh Errabolu 	int se_idx;
103243a4bc82SRamesh Errabolu 	int sh_idx;
103343a4bc82SRamesh Errabolu 	int se_cnt;
103443a4bc82SRamesh Errabolu 	int sh_cnt;
103543a4bc82SRamesh Errabolu 	int wave_cnt;
103643a4bc82SRamesh Errabolu 	int queue_map;
103743a4bc82SRamesh Errabolu 	int pasid_tmp;
103843a4bc82SRamesh Errabolu 	int max_queue_cnt;
103943a4bc82SRamesh Errabolu 	int vmid_wave_cnt = 0;
104043a4bc82SRamesh Errabolu 	DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
104143a4bc82SRamesh Errabolu 
104243a4bc82SRamesh Errabolu 	lock_spi_csq_mutexes(adev);
1043e2069a7bSMukul Joshi 	soc15_grbm_select(adev, 1, 0, 0, 0, inst);
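	/* Select the compute micro-engine (ME 1); get_wave_count() below
	 * narrows the selection further to a specific pipe and queue slot.
	 */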
104443a4bc82SRamesh Errabolu 
104543a4bc82SRamesh Errabolu 	/*
104643a4bc82SRamesh Errabolu 	 * Iterate through the shader engines and arrays of the device
104743a4bc82SRamesh Errabolu 	 * to get number of waves in flight
104843a4bc82SRamesh Errabolu 	 */
1049be697aa3SLe Ma 	bitmap_complement(cp_queue_bitmap, adev->gfx.mec_bitmap[0].queue_bitmap,
105043a4bc82SRamesh Errabolu 			  KGD_MAX_QUEUES);
105143a4bc82SRamesh Errabolu 	max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
105243a4bc82SRamesh Errabolu 			adev->gfx.mec.num_queue_per_pipe;
105343a4bc82SRamesh Errabolu 	sh_cnt = adev->gfx.config.max_sh_per_se;
105443a4bc82SRamesh Errabolu 	se_cnt = adev->gfx.config.max_shader_engines;
105543a4bc82SRamesh Errabolu 	for (se_idx = 0; se_idx < se_cnt; se_idx++) {
105643a4bc82SRamesh Errabolu 		for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
1058e2069a7bSMukul Joshi 			amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, inst);
1059e2069a7bSMukul Joshi 			queue_map = RREG32_SOC15(GC, inst, mmSPI_CSQ_WF_ACTIVE_STATUS);
106043a4bc82SRamesh Errabolu 
106143a4bc82SRamesh Errabolu 			/*
106243a4bc82SRamesh Errabolu 			 * Assumption: the queue map encodes the following
106343a4bc82SRamesh Errabolu 			 * schema: four pipes per micro-engine, with each pipe
106443a4bc82SRamesh Errabolu 			 * mapping eight queues. This schema holds for GFX9
106543a4bc82SRamesh Errabolu 			 * devices and must be verified for newer families.
106643a4bc82SRamesh Errabolu 			 */
106743a4bc82SRamesh Errabolu 			for (qidx = 0; qidx < max_queue_cnt; qidx++) {
106943a4bc82SRamesh Errabolu 				/* Skip queues that are not associated with
107043a4bc82SRamesh Errabolu 				 * compute functions
107143a4bc82SRamesh Errabolu 				 */
107243a4bc82SRamesh Errabolu 				if (!test_bit(qidx, cp_queue_bitmap))
107343a4bc82SRamesh Errabolu 					continue;
107443a4bc82SRamesh Errabolu 
107543a4bc82SRamesh Errabolu 				if (!(queue_map & (1 << qidx)))
107643a4bc82SRamesh Errabolu 					continue;
107743a4bc82SRamesh Errabolu 
107843a4bc82SRamesh Errabolu 				/* Get number of waves in flight and aggregate them */
1079e2069a7bSMukul Joshi 				get_wave_count(adev, qidx, &wave_cnt, &vmid,
1080e2069a7bSMukul Joshi 						inst);
108143a4bc82SRamesh Errabolu 				if (wave_cnt != 0) {
108243a4bc82SRamesh Errabolu 					pasid_tmp =
1083e2069a7bSMukul Joshi 					  RREG32(SOC15_REG_OFFSET(OSSSYS, inst,
108443a4bc82SRamesh Errabolu 						 mmIH_VMID_0_LUT) + vmid);
108543a4bc82SRamesh Errabolu 					if (pasid_tmp == pasid)
108643a4bc82SRamesh Errabolu 						vmid_wave_cnt += wave_cnt;
108743a4bc82SRamesh Errabolu 				}
108843a4bc82SRamesh Errabolu 			}
108943a4bc82SRamesh Errabolu 		}
109043a4bc82SRamesh Errabolu 	}
109143a4bc82SRamesh Errabolu 
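	/* Restore broadcast SE/SH selection and the default GRBM state */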
1092e2069a7bSMukul Joshi 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, inst);
1093e2069a7bSMukul Joshi 	soc15_grbm_select(adev, 0, 0, 0, 0, inst);
109443a4bc82SRamesh Errabolu 	unlock_spi_csq_mutexes(adev);
109543a4bc82SRamesh Errabolu 
109643a4bc82SRamesh Errabolu 	/* Update the output parameters and return */
109743a4bc82SRamesh Errabolu 	*pasid_wave_cnt = vmid_wave_cnt;
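	/* e.g. on Vega10: 4 SIMDs per CU * 10 waves per SIMD = 40 waves/CU */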
109843a4bc82SRamesh Errabolu 	*max_waves_per_cu = adev->gfx.cu_info.simd_per_cu *
109943a4bc82SRamesh Errabolu 				adev->gfx.cu_info.max_waves_per_simd;
1100d5a114a6SFelix Kuehling }
11013e205a08SOak Zeng 
11027cee6a68SJonathan Kim void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
11037cee6a68SJonathan Kim 		uint32_t wait_times,
11047cee6a68SJonathan Kim 		uint32_t grace_period,
11057cee6a68SJonathan Kim 		uint32_t *reg_offset,
1106*81faf9e0SMukul Joshi 		uint32_t *reg_data)
11077cee6a68SJonathan Kim {
11087cee6a68SJonathan Kim 	*reg_data = wait_times;
11097cee6a68SJonathan Kim 
11107cee6a68SJonathan Kim 	/*
11111879e009SMukul Joshi 	 * The CP cannot handle a grace period of 0 and would set an
11127cee6a68SJonathan Kim 	 * infinite grace period instead, so clamp the input to 1.
11137cee6a68SJonathan Kim 	 */
11147cee6a68SJonathan Kim 	if (grace_period == 0)
11157cee6a68SJonathan Kim 		grace_period = 1;
11167cee6a68SJonathan Kim 
11177cee6a68SJonathan Kim 	*reg_data = REG_SET_FIELD(*reg_data,
11187cee6a68SJonathan Kim 			CP_IQ_WAIT_TIME2,
11197cee6a68SJonathan Kim 			SCH_WAVE,
11207cee6a68SJonathan Kim 			grace_period);
11217cee6a68SJonathan Kim 
1122*81faf9e0SMukul Joshi 	*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
11237cee6a68SJonathan Kim }
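/*
 * Usage sketch (illustrative): the two helpers above are intended to be
 * paired. A caller snapshots the wait times once, derives the register
 * write for a given grace period, and applies it, e.g.:
 *
 *	uint32_t wait_times, reg_offset, reg_data;
 *
 *	kgd_gfx_v9_get_iq_wait_times(adev, &wait_times, 0);
 *	kgd_gfx_v9_build_grace_period_packet_info(adev, wait_times,
 *						  grace_period, &reg_offset,
 *						  &reg_data);
 *	WREG32(reg_offset, reg_data);
 */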
11247cee6a68SJonathan Kim 
11253356c38dSGraham Sider void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
1126e2069a7bSMukul Joshi 		uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst)
1127b53ef0dfSMukul Joshi {
1128e2069a7bSMukul Joshi 	kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);
1129b53ef0dfSMukul Joshi 
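	/* The TBA/TMA registers hold 256-byte-aligned addresses, hence the
	 * right shift by 8 on the values programmed below.
	 */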
1130b53ef0dfSMukul Joshi 	/*
1131b53ef0dfSMukul Joshi 	 * Program TBA registers
1132b53ef0dfSMukul Joshi 	 */
113302ee3b02SMukul Joshi 	WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_LO,
1134b53ef0dfSMukul Joshi 			lower_32_bits(tba_addr >> 8));
113502ee3b02SMukul Joshi 	WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_HI,
1136b53ef0dfSMukul Joshi 			upper_32_bits(tba_addr >> 8));
1137b53ef0dfSMukul Joshi 
1138b53ef0dfSMukul Joshi 	/*
1139b53ef0dfSMukul Joshi 	 * Program TMA registers
1140b53ef0dfSMukul Joshi 	 */
114102ee3b02SMukul Joshi 	WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TMA_LO,
1142b53ef0dfSMukul Joshi 			lower_32_bits(tma_addr >> 8));
114302ee3b02SMukul Joshi 	WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TMA_HI,
1144b53ef0dfSMukul Joshi 			upper_32_bits(tma_addr >> 8));
1145b53ef0dfSMukul Joshi 
1146e2069a7bSMukul Joshi 	kgd_gfx_v9_unlock_srbm(adev, inst);
1147b53ef0dfSMukul Joshi }
1148b53ef0dfSMukul Joshi 
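/*
 * Dispatch table exposing the GFX9 helpers to KFD. Illustrative sketch of
 * a call through the table (the kfd_dev field layout is assumed here,
 * shown for orientation only):
 *
 *	dev->kfd2kgd->get_cu_occupancy(dev->adev, pasid, &wave_cnt,
 *				       &max_waves_per_cu, 0);
 */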
1149e392c887SYong Zhao const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
11503e205a08SOak Zeng 	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
11513e205a08SOak Zeng 	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
11523e205a08SOak Zeng 	.init_interrupts = kgd_gfx_v9_init_interrupts,
11533e205a08SOak Zeng 	.hqd_load = kgd_gfx_v9_hqd_load,
115435cd89d5SAaron Liu 	.hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
11553e205a08SOak Zeng 	.hqd_sdma_load = kgd_hqd_sdma_load,
11563e205a08SOak Zeng 	.hqd_dump = kgd_gfx_v9_hqd_dump,
11573e205a08SOak Zeng 	.hqd_sdma_dump = kgd_hqd_sdma_dump,
11583e205a08SOak Zeng 	.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
11593e205a08SOak Zeng 	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
11603e205a08SOak Zeng 	.hqd_destroy = kgd_gfx_v9_hqd_destroy,
11613e205a08SOak Zeng 	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
11623e205a08SOak Zeng 	.wave_control_execute = kgd_gfx_v9_wave_control_execute,
116356fc40abSYong Zhao 	.get_atc_vmid_pasid_mapping_info =
116456fc40abSYong Zhao 			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
11653e205a08SOak Zeng 	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
1166cde2e087SJonathan Kim 	.enable_debug_trap = kgd_gfx_v9_enable_debug_trap,
1167cde2e087SJonathan Kim 	.disable_debug_trap = kgd_gfx_v9_disable_debug_trap,
1168101827e1SJonathan Kim 	.validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request,
1169101827e1SJonathan Kim 	.set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override,
1170aea1b473SJonathan Kim 	.set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode,
1171e0f85f46SJonathan Kim 	.set_address_watch = kgd_gfx_v9_set_address_watch,
1172e0f85f46SJonathan Kim 	.clear_address_watch = kgd_gfx_v9_clear_address_watch,
11737cee6a68SJonathan Kim 	.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
11747cee6a68SJonathan Kim 	.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
117543a4bc82SRamesh Errabolu 	.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
1176b53ef0dfSMukul Joshi 	.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
11773e205a08SOak Zeng };
1178