// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v9_structs.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "amdgpu_amdkfd.h"
#include "kfd_device_queue_manager.h"

static void update_mqd(struct mqd_manager *mm, void *mqd,
		       struct queue_properties *q,
		       struct mqd_update_info *minfo);

static uint64_t mqd_stride_v9(struct mqd_manager *mm,
				struct queue_properties *q)
{
	if (mm->dev->kfd->cwsr_enabled &&
	    q->type == KFD_QUEUE_TYPE_COMPUTE)
		return ALIGN(q->ctl_stack_size, PAGE_SIZE) +
			ALIGN(sizeof(struct v9_mqd), PAGE_SIZE);

	return mm->mqd_size;
}
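
/*
 * Illustrative numbers only: with 4 KiB pages, sizeof(struct v9_mqd)
 * rounds up to a single page, so a CWSR-enabled compute queue with an
 * 8 KiB control stack would get a per-XCC stride of 8 KiB + 4 KiB =
 * 12 KiB, while every other queue type uses the plain mqd_size.
 */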

static inline struct v9_mqd *get_mqd(void *mqd)
{
	return (struct v9_mqd *)mqd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

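/*
 * mqd_symmetrically_map_cu_mask() (in kfd_mqd_manager.c) splits the flat
 * user CU mask into one 32-bit mask per shader engine, so that enabled
 * CUs are spread evenly across SEs rather than packed into the first
 * ones; the per-SE results land in se_mask[0..7] and are written to the
 * compute_static_thread_mgmt_se* fields below.
 */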
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			struct mqd_update_info *minfo)
{
	struct v9_mqd *m;
	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};

	if (!minfo || !minfo->cu_mask.ptr)
		return;

	mqd_symmetrically_map_cu_mask(mm,
		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);

	m = get_mqd(mqd);
	m->compute_static_thread_mgmt_se0 = se_mask[0];
	m->compute_static_thread_mgmt_se1 = se_mask[1];
	m->compute_static_thread_mgmt_se2 = se_mask[2];
	m->compute_static_thread_mgmt_se3 = se_mask[3];
	m->compute_static_thread_mgmt_se4 = se_mask[4];
	m->compute_static_thread_mgmt_se5 = se_mask[5];
	m->compute_static_thread_mgmt_se6 = se_mask[6];
	m->compute_static_thread_mgmt_se7 = se_mask[7];

	pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
		m->compute_static_thread_mgmt_se0,
		m->compute_static_thread_mgmt_se1,
		m->compute_static_thread_mgmt_se2,
		m->compute_static_thread_mgmt_se3,
		m->compute_static_thread_mgmt_se4,
		m->compute_static_thread_mgmt_se5,
		m->compute_static_thread_mgmt_se6,
		m->compute_static_thread_mgmt_se7);
}

static void set_priority(struct v9_mqd *m, struct queue_properties *q)
{
	m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
	m->cp_hqd_queue_priority = q->priority;
}

static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
		struct queue_properties *q)
{
	int retval;
	struct kfd_mem_obj *mqd_mem_obj = NULL;

	/* On V9 only, due to a HW bug, the control stack of a user mode
	 * compute queue must be allocated immediately after the page boundary
	 * of its regular MQD buffer. So we allocate an enlarged MQD buffer:
	 * the first page of the buffer serves as the regular MQD buffer
	 * and the remainder holds the control stack. Although the two
	 * parts are in the same buffer object, they need different memory
	 * types: the MQD part needs UC (uncached) as usual, while the
	 * control stack needs NC (non coherent), unlike the UC type used
	 * when the control stack is allocated in user space.
	 *
	 * Because of this, we use the GTT allocation function instead of
	 * the sub-allocation function for this enlarged MQD buffer. Moreover,
	 * to achieve two memory types in a single buffer object, we pass
	 * the special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct
	 * the amdgpu memory functions to do so.
	 */
	if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
		mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
		if (!mqd_mem_obj)
			return NULL;
		retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev,
			(ALIGN(q->ctl_stack_size, PAGE_SIZE) +
			ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) *
			NUM_XCC(node->xcc_mask),
			&(mqd_mem_obj->gtt_mem),
			&(mqd_mem_obj->gpu_addr),
			(void *)&(mqd_mem_obj->cpu_ptr), true);

		if (retval) {
			kfree(mqd_mem_obj);
			return NULL;
		}
	} else {
		retval = kfd_gtt_sa_allocate(node, sizeof(struct v9_mqd),
				&mqd_mem_obj);
		if (retval)
			return NULL;
	}

	return mqd_mem_obj;
}
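
/*
 * Resulting per-XCC layout of the enlarged MQD buffer (illustrative,
 * assuming the MQD fits in a single 4 KiB page):
 *
 *	+------------------------------+ offset 0
 *	| struct v9_mqd (UC)           |
 *	+------------------------------+ PAGE_SIZE
 *	| control stack (NC)           |
 *	+------------------------------+ + ALIGN(ctl_stack_size, PAGE_SIZE)
 *
 * get_wave_state() and checkpoint_mqd() below depend on the control
 * stack starting exactly one page after the MQD.
 */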

static void init_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	uint64_t addr;
	struct v9_mqd *m;

	m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memset(m, 0, sizeof(struct v9_mqd));

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;
	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;

	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi        = upper_32_bits(addr);

	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

	/* Set cp_hqd_hq_status0 bit 14 to 1 to have the CP set up the
	 * DISPATCH_PTR. This is required for the kfd debugger.
	 */
	m->cp_hqd_hq_status0 = 1 << 14;

	if (q->format == KFD_QUEUE_FORMAT_AQL)
		m->cp_hqd_aql_control =
			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;

	if (q->tba_addr) {
		m->compute_pgm_rsrc2 |=
			(1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
	}

	if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
		m->cp_hqd_persistent_state |=
			(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
		m->cp_hqd_ctx_save_base_addr_lo =
			lower_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_base_addr_hi =
			upper_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
		m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
		m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
		m->cp_hqd_wg_state_offset = q->ctl_stack_size;
	}

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	update_mqd(mm, m, q, NULL);
}
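
/*
 * A note on the CWSR offsets initialized above: cp_hqd_cntl_stack_offset
 * equal to cp_hqd_cntl_stack_size denotes an empty control stack (it
 * fills from the end of its region towards the header), and
 * cp_hqd_wg_state_offset == ctl_stack_size places the wave save area
 * right after the control stack region in the user-allocated
 * ctx_save_restore area. See get_wave_state() for the matching reads.
 */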

static int load_mqd(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

	return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
					  (uint32_t __user *)p->write_ptr,
					  wptr_shift, 0, mms, 0);
}
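
/*
 * Worked example for wptr_shift (illustrative): one AQL packet is 64
 * bytes = 16 dwords, so an AQL write pointer of 10 packets corresponds
 * to 10 << 4 = 160 dwords, the unit the CP expects; PM4 queues already
 * count dwords and therefore pass a shift of 0.
 */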

static void update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q,
			struct mqd_update_info *minfo)
{
	struct v9_mqd *m;

	m = get_mqd(mqd);

	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
	m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

	m->cp_hqd_pq_doorbell_control =
		q->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	m->cp_hqd_ib_control =
		3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
		1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;

	/*
	 * HW does not clamp this field correctly. Maximum EOP queue size
	 * is constrained by per-SE EOP done signal count, which is 8-bit.
	 * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
	 * more than (EOP entry count - 1), so a queue size of 0x800 dwords
	 * is safe, giving a maximum field value of 0xA.
	 *
	 * Also, do the calculation only if the EOP queue is used (size > 0);
	 * otherwise order_base_2 gives an incorrect result.
	 */
	m->cp_hqd_eop_control = q->eop_ring_buffer_size ?
		min(0xA, order_base_2(q->eop_ring_buffer_size / 4) - 1) : 0;

	m->cp_hqd_eop_base_addr_lo =
			lower_32_bits(q->eop_ring_buffer_address >> 8);
	m->cp_hqd_eop_base_addr_hi =
			upper_32_bits(q->eop_ring_buffer_address >> 8);

	m->cp_hqd_iq_timer = 0;

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
				1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
				1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
		m->cp_hqd_pq_doorbell_control |= 1 <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
	}
	if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
		m->cp_hqd_ctx_save_control = 0;

	update_cu_mask(mm, mqd, minfo);
	set_priority(m, q);

	q->is_active = QUEUE_IS_ACTIVE(*q);
}
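
/*
 * Sketch of the ring-size encoding used in update_mqd() (illustrative
 * numbers): a 4 KiB ring holds 1024 dwords, order_base_2(1024) = 10, so
 * the low bits of cp_hqd_pq_control are programmed to 9 and the HW
 * derives the ring size from that log2 encoding.
 */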

static uint32_t read_doorbell_id(void *mqd)
{
	struct v9_mqd *m = (struct v9_mqd *)mqd;

	return m->queue_doorbell_id0;
}

static int get_wave_state(struct mqd_manager *mm, void *mqd,
			  struct queue_properties *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct v9_mqd *m;
	struct kfd_context_save_area_header header;

	/* Control stack is located one page after MQD. */
	void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);

	m = get_mqd(mqd);

	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
		m->cp_hqd_cntl_stack_offset;
	*save_area_used_size = m->cp_hqd_wg_state_offset -
		m->cp_hqd_cntl_stack_size;

	header.wave_state.control_stack_size = *ctl_stack_used_size;
	header.wave_state.wave_state_size = *save_area_used_size;

	header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
	header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;

	if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
		return -EFAULT;

	if (copy_to_user(ctl_stack + m->cp_hqd_cntl_stack_offset,
				mqd_ctl_stack + m->cp_hqd_cntl_stack_offset,
				*ctl_stack_used_size))
		return -EFAULT;

	return 0;
}
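
/*
 * Example of the size arithmetic above (hypothetical values): with an
 * 8 KiB control stack region and a current cp_hqd_cntl_stack_offset of
 * 6 KiB, 8 KiB - 6 KiB = 2 KiB of control stack is in use; the save
 * area in use is likewise cp_hqd_wg_state_offset minus the control
 * stack region size.
 */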

static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
{
	struct v9_mqd *m = get_mqd(mqd);

	*ctl_stack_size = m->cp_hqd_cntl_stack_size;
}

static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
	struct v9_mqd *m;
	/* Control stack is located one page after MQD. */
	void *ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);

	m = get_mqd(mqd);

	memcpy(mqd_dst, m, sizeof(struct v9_mqd));
	memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
}

static void restore_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *qp,
			const void *mqd_src,
			const void *ctl_stack_src, u32 ctl_stack_size)
{
	uint64_t addr;
	struct v9_mqd *m;
	void *ctl_stack;

	m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memcpy(m, mqd_src, sizeof(*m));

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;

	/* Control stack is located one page after MQD. */
	ctl_stack = (void *)((uintptr_t)*mqd + PAGE_SIZE);
	memcpy(ctl_stack, ctl_stack_src, ctl_stack_size);

	m->cp_hqd_pq_doorbell_control =
		qp->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
				m->cp_hqd_pq_doorbell_control);

	qp->is_active = 0;
}

static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v9_mqd *m;

	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

	m = get_mqd(*mqd);

	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}

static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	struct v9_sdma_mqd *m;

	m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;

	memset(m, 0, sizeof(struct v9_sdma_mqd));

	*mqd = m;
	if (gart_addr)
		*gart_addr = mqd_mem_obj->gpu_addr;

	mm->update_mqd(mm, m, q, NULL);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q,
			struct mqd_update_info *minfo)
{
	struct v9_sdma_mqd *m;

	m = get_sdma_mqd(mqd);
	m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
		<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
		q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
		1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
		6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;

	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_doorbell_offset =
		q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;
	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

	q->is_active = QUEUE_IS_ACTIVE(*q);
}
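
/*
 * Field sketch for sdmax_rlcx_rb_cntl (illustrative): a 4 KiB SDMA ring
 * is 1024 dwords, so RB_SIZE is order_base_2(1024) = 10; read-pointer
 * writeback is enabled with a timer of 6, and the queue's VMID goes in
 * RB_VMID.
 */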

static void checkpoint_mqd_sdma(struct mqd_manager *mm,
				void *mqd,
				void *mqd_dst,
				void *ctl_stack_dst)
{
	struct v9_sdma_mqd *m;

	m = get_sdma_mqd(mqd);

	memcpy(mqd_dst, m, sizeof(struct v9_sdma_mqd));
}

static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
			     struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			     struct queue_properties *qp,
			     const void *mqd_src,
			     const void *ctl_stack_src, const u32 ctl_stack_size)
{
	uint64_t addr;
	struct v9_sdma_mqd *m;

	m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memcpy(m, mqd_src, sizeof(*m));

	m->sdmax_rlcx_doorbell_offset =
		qp->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;

	qp->is_active = 0;
}

static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v9_mqd *m;
	int xcc = 0;
	struct kfd_mem_obj xcc_mqd_mem_obj;
	uint64_t xcc_gart_addr = 0;

	memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));

	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc);

		init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);

		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
					1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
					1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
		m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev);
		if (xcc == 0) {
			/* Set no_update_rptr = 0 in Master XCC */
			m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;

			/* Set the MQD pointer and gart address to XCC0 MQD */
			*mqd = m;
			*gart_addr = xcc_gart_addr;
		}
	}
}

static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err = 0, inst = 0;
	void *xcc_mqd;
	uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);

	for_each_inst(xcc_id, xcc_mask) {
		xcc_mqd = mqd + hiq_mqd_size * inst;
		err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd,
						     pipe_id, queue_id,
						     p->doorbell_off, xcc_id);
		if (err) {
			pr_debug("Failed to load HIQ MQD for XCC: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type, unsigned int timeout,
			uint32_t pipe_id, uint32_t queue_id)
{
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err = 0, inst = 0;
	void *xcc_mqd;
	uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);

	for_each_inst(xcc_id, xcc_mask) {
		xcc_mqd = mqd + hiq_mqd_size * inst;
		err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd,
						    type, timeout, pipe_id,
						    queue_id, xcc_id);
		if (err) {
			pr_debug("Destroy MQD failed for xcc: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj,
			       struct kfd_mem_obj *xcc_mqd_mem_obj,
			       uint64_t offset)
{
	xcc_mqd_mem_obj->gtt_mem = (offset == 0) ?
					mqd_mem_obj->gtt_mem : NULL;
	xcc_mqd_mem_obj->gpu_addr = mqd_mem_obj->gpu_addr + offset;
	xcc_mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)mqd_mem_obj->cpu_ptr
						+ offset);
}
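
/*
 * Per-XCC addressing sketch: XCC i's view of the shared allocation
 * starts at gpu_addr/cpu_ptr + i * stride. Only the XCC 0 view keeps
 * the gtt_mem handle, so the underlying BO is freed exactly once.
 */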

static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v9_mqd *m;
	int xcc = 0;
	struct kfd_mem_obj xcc_mqd_mem_obj;
	uint64_t xcc_gart_addr = 0;
	uint64_t xcc_ctx_save_restore_area_address;
	uint64_t offset = mm->mqd_stride(mm, q);
	uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++;

	memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc);

		init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);

		m->cp_mqd_stride_size = offset;

		/*
		 * Update the CWSR address for each XCC if CWSR is enabled
		 * and the CWSR area is allocated in the Thunk (user mode)
		 */
		if (mm->dev->kfd->cwsr_enabled &&
		    q->ctx_save_restore_area_address) {
			xcc_ctx_save_restore_area_address =
				q->ctx_save_restore_area_address +
				(xcc * q->ctx_save_restore_area_size);

			m->cp_hqd_ctx_save_base_addr_lo =
				lower_32_bits(xcc_ctx_save_restore_area_address);
			m->cp_hqd_ctx_save_base_addr_hi =
				upper_32_bits(xcc_ctx_save_restore_area_address);
		}

		if (q->format == KFD_QUEUE_FORMAT_AQL) {
			m->compute_tg_chunk_size = 1;
			m->compute_current_logic_xcc_id =
					(local_xcc_start + xcc) %
					NUM_XCC(mm->dev->xcc_mask);

			switch (xcc) {
			case 0:
				/* Master XCC */
				m->cp_hqd_pq_control &=
					~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
				break;
			default:
				break;
			}
		} else {
			/* PM4 Queue */
			m->compute_current_logic_xcc_id = 0;
			m->compute_tg_chunk_size = 0;
			m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
		}

		if (xcc == 0) {
			/* Set the MQD pointer and gart address to XCC0 MQD */
			*mqd = m;
			*gart_addr = xcc_gart_addr;
		}
	}
}
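
/*
 * Example with 2 XCCs (hypothetical sizes): if the per-XCC CWSR area is
 * 64 MiB, XCC 0 saves at ctx_save_restore_area_address and XCC 1 at
 * +64 MiB, i.e. user space provides NUM_XCC contiguous save areas for
 * one queue.
 */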

static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
		      struct queue_properties *q, struct mqd_update_info *minfo)
{
	struct v9_mqd *m;
	int xcc = 0;
	uint64_t size = mm->mqd_stride(mm, q);

	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		m = get_mqd(mqd + size * xcc);
		update_mqd(mm, m, q, minfo);

		if (q->format == KFD_QUEUE_FORMAT_AQL) {
			switch (xcc) {
			case 0:
				/* Master XCC */
				m->cp_hqd_pq_control &=
					~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
				break;
			default:
				break;
			}
			m->compute_tg_chunk_size = 1;
		} else {
			/* PM4 Queue */
			m->compute_current_logic_xcc_id = 0;
			m->compute_tg_chunk_size = 0;
			m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
		}
	}
}

static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
		   enum kfd_preempt_type type, unsigned int timeout,
		   uint32_t pipe_id, uint32_t queue_id)
{
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err = 0, inst = 0;
	void *xcc_mqd;
	struct v9_mqd *m;
	uint64_t mqd_offset;

	m = get_mqd(mqd);
	mqd_offset = m->cp_mqd_stride_size;

	for_each_inst(xcc_id, xcc_mask) {
		xcc_mqd = mqd + mqd_offset * inst;
		err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd,
						    type, timeout, pipe_id,
						    queue_id, xcc_id);
		if (err) {
			pr_debug("Destroy MQD failed for xcc: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err = 0, inst = 0;
	void *xcc_mqd;
	uint64_t mqd_stride_size = mm->mqd_stride(mm, p);

	for_each_inst(xcc_id, xcc_mask) {
		xcc_mqd = mqd + mqd_stride_size * inst;
		err = mm->dev->kfd2kgd->hqd_load(
			mm->dev->adev, xcc_mqd, pipe_id, queue_id,
			(uint32_t __user *)p->write_ptr, wptr_shift, 0, mms,
			xcc_id);
		if (err) {
			pr_debug("Load MQD failed for xcc: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd,
				 struct queue_properties *q,
				 void __user *ctl_stack,
				 u32 *ctl_stack_used_size,
				 u32 *save_area_used_size)
{
	int xcc, err = 0;
	void *xcc_mqd;
	void __user *xcc_ctl_stack;
	uint64_t mqd_stride_size = mm->mqd_stride(mm, q);
	u32 tmp_ctl_stack_used_size = 0, tmp_save_area_used_size = 0;

	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		xcc_mqd = mqd + mqd_stride_size * xcc;
		xcc_ctl_stack = (void __user *)((uintptr_t)ctl_stack +
					q->ctx_save_restore_area_size * xcc);

		err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack,
				     &tmp_ctl_stack_used_size,
				     &tmp_save_area_used_size);
		if (err)
			break;

		/*
		 * Set ctl_stack_used_size and save_area_used_size to the
		 * values of XCC 0 when passing the info to user space.
		 * For multiple XCCs, user space has to look at the header
		 * info of each control stack area to determine the control
		 * stack size and save area used.
		 */
		if (xcc == 0) {
			*ctl_stack_used_size = tmp_ctl_stack_used_size;
			*save_area_used_size = tmp_save_area_used_size;
		}
	}

	return err;
}

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v9_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v9_sdma_mqd), false);
	return 0;
}

#endif

struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_node *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
		mqd->allocate_mqd = allocate_mqd;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->get_checkpoint_info = get_checkpoint_info;
		mqd->checkpoint_mqd = checkpoint_mqd;
		mqd->restore_mqd = restore_mqd;
		mqd->mqd_size = sizeof(struct v9_mqd);
		mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) {
			mqd->init_mqd = init_mqd_v9_4_3;
			mqd->load_mqd = load_mqd_v9_4_3;
			mqd->update_mqd = update_mqd_v9_4_3;
			mqd->destroy_mqd = destroy_mqd_v9_4_3;
			mqd->get_wave_state = get_wave_state_v9_4_3;
		} else {
			mqd->init_mqd = init_mqd;
			mqd->load_mqd = load_mqd;
			mqd->update_mqd = update_mqd;
			mqd->destroy_mqd = kfd_destroy_mqd_cp;
			mqd->get_wave_state = get_wave_state;
		}
		break;
	case KFD_MQD_TYPE_HIQ:
		mqd->allocate_mqd = allocate_hiq_mqd;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->update_mqd = update_mqd;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v9_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		mqd->read_doorbell_id = read_doorbell_id;
		if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) {
			mqd->init_mqd = init_mqd_hiq_v9_4_3;
			mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3;
			mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3;
		} else {
			mqd->init_mqd = init_mqd_hiq;
			mqd->load_mqd = kfd_hiq_load_mqd_kiq;
			mqd->destroy_mqd = kfd_destroy_mqd_cp;
		}
		break;
	case KFD_MQD_TYPE_DIQ:
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v9_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		mqd->allocate_mqd = allocate_sdma_mqd;
		mqd->init_mqd = init_mqd_sdma;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = kfd_destroy_mqd_sdma;
		mqd->is_occupied = kfd_is_occupied_sdma;
		mqd->checkpoint_mqd = checkpoint_mqd_sdma;
		mqd->restore_mqd = restore_mqd_sdma;
		mqd->mqd_size = sizeof(struct v9_sdma_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}
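
/*
 * Typical call flow on the returned manager, as a sketch (the real
 * sequencing lives in the device queue manager, not in this file):
 *
 *	mqd_mem_obj = mqd->allocate_mqd(node, &q->properties);
 *	mqd->init_mqd(mqd, &q->mqd, mqd_mem_obj, &q->gart_mqd_addr,
 *		      &q->properties);
 *	mqd->load_mqd(mqd, q->mqd, pipe, queue, &q->properties, mms);
 *	...
 *	mqd->destroy_mqd(mqd, q->mqd, type, timeout, pipe, queue);
 *	mqd->free_mqd(mqd, q->mqd, mqd_mem_obj);
 */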