/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "sdma0/sdma0_4_2_2_offset.h"
#include "sdma0/sdma0_4_2_2_sh_mask.h"
#include "sdma1/sdma1_4_2_2_offset.h"
#include "sdma1/sdma1_4_2_2_sh_mask.h"
#include "sdma2/sdma2_4_2_2_offset.h"
#include "sdma2/sdma2_4_2_2_sh_mask.h"
#include "sdma3/sdma3_4_2_2_offset.h"
#include "sdma3/sdma3_4_2_2_sh_mask.h"
#include "sdma4/sdma4_4_2_2_offset.h"
#include "sdma4/sdma4_4_2_2_sh_mask.h"
#include "sdma5/sdma5_4_2_2_offset.h"
#include "sdma5/sdma5_4_2_2_sh_mask.h"
#include "sdma6/sdma6_4_2_2_offset.h"
#include "sdma6/sdma6_4_2_2_sh_mask.h"
#include "sdma7/sdma7_4_2_2_offset.h"
#include "sdma7/sdma7_4_2_2_sh_mask.h"
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_amdkfd_gfx_v9.h"

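/*
 * Helper for the HQD dump callbacks: record one register's byte offset
 * ((addr) << 2 converts the dword offset) and its current value into the
 * caller-provided (*dump)[][2] array, bounds-checked against HQD_N_REGS.
 * Relies on the locals 'dump' and 'i' being in scope at the call site.
 */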
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

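/*
 * Compute the register offset of the RLC queue registers for the given
 * SDMA engine and queue. Arcturus has eight SDMA engines, each in its own
 * register space, so the per-engine base is looked up in a table; the
 * per-queue blocks within an engine are spaced
 * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL) dwords apart.
 */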
static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base[8] = {
		SOC15_REG_OFFSET(SDMA0, 0,
				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA1, 0,
				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA2, 0,
				 mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA3, 0,
				 mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA4, 0,
				 mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA5, 0,
				 mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA6, 0,
				 mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA7, 0,
				 mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL
	};

	uint32_t retval = sdma_engine_reg_base[engine_id]
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
			queue_id, retval);

	return retval;
}

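/*
 * Load an SDMA user queue from its MQD: disable the ring buffer, wait for
 * the RLC context to go idle, restore the doorbell and read pointer,
 * program the write pointer (from user space if readable, otherwise from
 * the saved read pointer) and the ring base, then re-enable the queue.
 */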
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

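	/*
	 * Set MINOR_PTR_UPDATE before programming a wptr that may be
	 * smaller than the current one, and clear it again once both
	 * halves have been written (mirrors the SDMA 4.x ring setup).
	 */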
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

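/*
 * Dump the RLC queue registers for debugging: snapshot four contiguous
 * register ranges (19 + 6 + 7 + 10 registers) into a freshly allocated
 * array of {byte offset, value} pairs returned to the caller.
 */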
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

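/*
 * A queue counts as occupied while its ring buffer is still enabled,
 * i.e. RB_ENABLE is set in RB_CNTL.
 */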
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

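/*
 * Tear down an SDMA user queue: disable the ring buffer, wait up to
 * 'utimeout' ms for the RLC context to go idle, turn off the doorbell,
 * and save the final read pointer back into the MQD so the queue can be
 * restored later by kgd_hqd_sdma_load().
 */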
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

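/*
 * KFD->KGD interface for Arcturus. GFX HQD handling is shared with the
 * generic GFX v9 code; only the SDMA queue callbacks are Arcturus-specific,
 * since its eight SDMA engines live in separate register spaces.
 */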
const struct kfd2kgd_calls arcturus_kfd2kgd = {
	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
	.init_interrupts = kgd_gfx_v9_init_interrupts,
	.hqd_load = kgd_gfx_v9_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_gfx_v9_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_gfx_v9_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_gfx_v9_address_watch_disable,
	.address_watch_execute = kgd_gfx_v9_address_watch_execute,
	.wave_control_execute = kgd_gfx_v9_wave_control_execute,
	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_info =
			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
	.get_tile_config = kgd_gfx_v9_get_tile_config,
	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
	.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
	.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
	.get_hive_id = amdgpu_amdkfd_get_hive_id,
};