/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
#include "athub/athub_2_0_0_offset.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"
#include "soc15_common.h"
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
#include "gfxhub_v2_0.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES,
	SAVE_WAVES
};

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->srbm_mutex);
	nv_grbm_select(adev, mec, pipe, queue, vmid);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

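/*
 * Select the queue's MEC/pipe/queue in GRBM so that subsequent HQD register
 * accesses address that queue. KFD numbers compute pipes across all MECs,
 * so the MEC index is derived from pipe_id, offset by 1 because ME 0 is the
 * graphics engine.
 */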
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

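/*
 * CP_PQ_WPTR_POLL_CNTL1 carries one bit per queue, in pipe-major order, so
 * the mask for a single queue is
 * 1 << (pipe_id * num_queue_per_pipe + queue_id).
 */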
static uint64_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
			queue_id;

	return 1ull << bit;
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
	/* APE1 no longer exists on GFX9 and later ASICs */

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);

	pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
	       pasid_mapping);

#if 0
	/* TODO: uncomment this code when the hardware support is ready. */
	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << vmid)))
		cpu_relax();

	pr_debug("ATHUB mapping update finished\n");
	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << vmid);
#endif

	/* Map the VMID to the PASID for the IH block as well */
	pr_debug("update mapping for IH block and mmhub\n");
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
	       pasid_mapping);

	return 0;
}

/* TODO: the RING0 form of these interrupt-enable fields is obsolete; it
 * appears to date back to SI, but it still works.
 */

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

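/*
 * Compute the register offset of a per-queue (RLC) SDMA register block.
 * Queues within an engine are spaced a fixed stride apart, given by
 * mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL.
 */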
static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base[2] = {
		SOC15_REG_OFFSET(SDMA0, 0,
				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
		/* On gfx10, the mmSDMA1_xxx registers are defined relative
		 * to the SDMA0 base address (dw 0x1260), NOT the SDMA1 base
		 * address (dw 0x1860). Therefore use mmSDMA0_RLC0_RB_CNTL
		 * instead of mmSDMA1_RLC0_RB_CNTL in the base-address
		 * calculation below.
		 */
		SOC15_REG_OFFSET(SDMA1, 0,
				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
	};

	uint32_t retval = sdma_engine_reg_base[engine_id]
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
			queue_id, retval);

	return retval;
}

#if 0
static uint32_t get_watch_base_addr(struct amdgpu_device *adev)
{
	uint32_t retval = SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) -
			mmTCP_WATCH0_ADDR_H;

	pr_debug("kfd: reg watch base address: 0x%x\n", retval);

	return retval;
}
#endif

static inline struct v10_compute_mqd *get_mqd(void *mqd)
{
	return (struct v10_compute_mqd *)mqd;
}

static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v10_sdma_mqd *)mqd;
}

static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_compute_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
	acquire_queue(kgd, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32(reg, mqd_hqd[reg - hqd_base]);

	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. Set WPTR==RPTR before starting the poll
		 * so that the CP starts fetching new commands from
		 * the right place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
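		/* Worked example with hypothetical numbers: for a
		 * 256-dword ring (QUEUE_SIZE field == 7, so
		 * queue_size = 2 << 7 = 0x100), a saved RPTR of 0x10
		 * and a saved 64-bit WPTR of 0x120 give
		 * guessed_wptr = 0x10 + (0x120 & ~0xff) = 0x110:
		 * RPTR's offset within the ring, placed in the same
		 * lap as the saved WPTR.
		 */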
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
		       lower_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
		       upper_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
		       lower_32_bits((uint64_t)wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
		       upper_32_bits((uint64_t)wptr));
		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
	}

	/* Start the EOP fetcher */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);

	release_queue(kgd);

	return 0;
}

static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	struct v10_compute_mqd *m;
	uint32_t mec, pipe;
	int r;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
		 mec, pipe, queue_id);

	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, 7);
	if (r) {
		pr_err("Failed to alloc KIQ (%d).\n", r);
		goto out_unlock;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
			  PACKET3_MAP_QUEUES_PIPE(pipe) |
			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
	amdgpu_ring_commit(kiq_ring);

out_unlock:
	spin_unlock(&adev->gfx.kiq.ring_lock);
	release_queue(kgd);

	return r;
}

static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)
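	/* Each dump entry is a (byte address, value) pair; SOC15 register
	 * offsets are in dwords, hence the << 2 in DUMP_REG above.
	 */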

	*dump = kmalloc(HQD_N_REGS * 2 * sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

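	/* Wait up to 2 seconds for the engine to report idle in
	 * CONTEXT_STATUS before reprogramming the queue registers.
	 */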
	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc(HQD_N_REGS * 2 * sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
		   high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v10_compute_mqd *m = get_mqd(mqd);

	if (adev->in_gpu_reset)
		return -EIO;

#if 0
	unsigned long flags;
	int retry;
#endif

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

#if 0 /* Is this still needed? */
	/* Workaround: if the IQ timer is active and the wait time is close to
	 * or equal to 0, dequeueing is not safe. Wait until either the wait
	 * time is larger or the timer is cleared. Also ensure that
	 * IQ_REQ_PEND is cleared before continuing, and that wait times are
	 * set to at least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();
#endif

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);

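	/* Wait up to utimeout ms for the CP to clear the HQD ACTIVE bit. */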
	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

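	/* Save the current read pointers back into the MQD so the queue can
	 * be restored later.
	 */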
	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);

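	/* Restore GRBM_GFX_INDEX to broadcast mode so that later register
	 * writes reach all SEs, SAs and instances.
	 */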
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SA_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID %u\n",
		       vmid);
		return;
	}

	/* SDMA is on gfxhub as well for Navi1* series */
	gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}

const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hiq_mqd_load = kgd_hiq_mqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_info =
			get_atc_vmid_pasid_mapping_info,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.get_hive_id = amdgpu_amdkfd_get_hive_id,
	.get_unique_id = amdgpu_amdkfd_get_unique_id,
};