/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "cik_structs.h"
#include "oss/oss_2_4_sh_mask.h"

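/*
 * The MQD (memory queue descriptor) is passed around as an opaque pointer
 * by the common MQD manager code; these helpers cast it to the CIK-specific
 * layouts for compute HQDs and SDMA RLC queues respectively.
 */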
static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}

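/*
 * init_mqd() allocates the MQD from the device's GTT sub-allocator and fills
 * in the default register values the CP uses when it assigns the queue to a
 * hardware queue descriptor (HQD).
 */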
static int init_mqd(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	uint64_t addr;
	struct cik_mqd *m;
	int retval;

	retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
					mqd_mem_obj);

	if (retval != 0)
		return -ENOMEM;

	m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
	addr = (*mqd_mem_obj)->gpu_addr;

	memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;
	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;

	/*
	 * Make sure the CP uses the last queue state saved in the MQD when it
	 * reassigns the queue, so that the context stays consistent when the
	 * queue is switched on and off (e.g. oversubscription or quantum
	 * timeout).
	 */
	m->cp_hqd_persistent_state =
				DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ;

	m->cp_mqd_control             = MQD_CONTROL_PRIV_STATE_EN;
	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi        = upper_32_bits(addr);

	m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE | IB_ATC_EN;
	/* Although WinKFD writes this, I suspect it should not be necessary */
	m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE;

	m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
				QUANTUM_DURATION(10);

	/*
	 * Pipe Priority
	 * Identifies the pipe relative priority when this queue is connected
	 * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
	 * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
	 * 0 = CS_LOW (typically below GFX)
	 * 1 = CS_MEDIUM (typically between HP3D and GFX)
	 * 2 = CS_HIGH (typically above HP3D)
	 */
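	/*
	 * Queue priority is separate from pipe priority: 15 is the highest
	 * value the field accepts (assuming the usual 0-15 range), so user
	 * compute queues run at maximum queue priority within the CS_MEDIUM
	 * pipe selected above.
	 */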
	m->cp_hqd_pipe_priority = 1;
	m->cp_hqd_queue_priority = 15;

	if (q->format == KFD_QUEUE_FORMAT_AQL)
		m->cp_hqd_iq_rptr = AQL_ENABLE;

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	retval = mm->update_mqd(mm, m, q);

	return retval;
}

static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	int retval;
	struct cik_sdma_rlc_registers *m;

	retval = kfd_gtt_sa_allocate(mm->dev,
					sizeof(struct cik_sdma_rlc_registers),
					mqd_mem_obj);

	if (retval != 0)
		return -ENOMEM;

	m = (struct cik_sdma_rlc_registers *) (*mqd_mem_obj)->cpu_ptr;

	memset(m, 0, sizeof(struct cik_sdma_rlc_registers));

	*mqd = m;
	if (gart_addr)
		*gart_addr = (*mqd_mem_obj)->gpu_addr;

	retval = mm->update_mqd(mm, m, q);

	return retval;
}

static void uninit_mqd(struct mqd_manager *mm, void *mqd,
			struct kfd_mem_obj *mqd_mem_obj)
{
	kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}

static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
				struct kfd_mem_obj *mqd_mem_obj)
{
	kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}

static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
		    uint32_t queue_id, struct queue_properties *p,
		    struct mm_struct *mms)
{
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
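	/*
	 * Queue sizes are powers of two (an assumption relied on here; the
	 * check lives in the queue creation path), so the size in dwords
	 * minus one doubles as the wrap-around mask for the write pointer.
	 */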
	uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);

	return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
					  (uint32_t __user *)p->write_ptr,
					  wptr_shift, wptr_mask, mms);
}

static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
			 uint32_t pipe_id, uint32_t queue_id,
			 struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
					       (uint32_t __user *)p->write_ptr,
					       mms);
}

static int __update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q, unsigned int atc_bit)
{
	struct cik_mqd *m;

	m = get_mqd(mqd);
	m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
				DEFAULT_MIN_AVAIL_SIZE;
	m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
	if (atc_bit) {
		m->cp_hqd_pq_control |= PQ_ATC_EN;
		m->cp_hqd_ib_control |= IB_ATC_EN;
	}

	/*
	 * The queue size field is log base 2 of the queue size in dwords,
	 * minus 1 to match the hardware encoding of the ring size.
	 */
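	/*
	 * For example (hypothetical size): a 4 KB ring is 1024 dwords, so
	 * order_base_2(1024) = 10 and the field below is programmed with 9.
	 */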
	m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL)
		m->cp_hqd_pq_control |= NO_UPDATE_RPTR;

	q->is_active = (q->queue_size > 0 &&
			q->queue_address != 0 &&
			q->queue_percent > 0);

	return 0;
}

static int update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	return __update_mqd(mm, mqd, q, 1);
}

static int update_mqd_hawaii(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	return __update_mqd(mm, mqd, q, 0);
}

static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
				struct queue_properties *q)
{
	struct cik_sdma_rlc_registers *m;

	m = get_sdma_mqd(mqd);
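	/*
	 * RB_CNTL packs the log2 ring size in dwords, the queue's VMID, and
	 * enables periodic read-pointer writeback (timer field set to 6).
	 */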
	m->sdma_rlc_rb_cntl = order_base_2(q->queue_size / 4)
			<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
			q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
			1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
			6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;

	m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdma_rlc_doorbell =
		q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;

	m->sdma_rlc_virtual_addr = q->sdma_vm_addr;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;

	q->is_active = (q->queue_size > 0 &&
			q->queue_address != 0 &&
			q->queue_percent > 0);

	return 0;
}

static int destroy_mqd(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type,
			unsigned int timeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout,
					pipe_id, queue_id);
}

/*
 * The preempt type is ignored here because there is only one way to
 * preempt an SDMA queue.
 */
static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
				enum kfd_preempt_type type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
}

static bool is_occupied(struct mqd_manager *mm, void *mqd,
			uint64_t queue_address, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
					pipe_id, queue_id);
}

static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
			uint64_t queue_address, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
}

/*
 * HIQ MQD implementation. The HIQ queue on Kaveri uses the same MQD
 * structure as the user mode queues, but with different initial values.
 */

static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	uint64_t addr;
	struct cik_mqd *m;
	int retval;

	retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
					mqd_mem_obj);

	if (retval != 0)
		return -ENOMEM;

	m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
	addr = (*mqd_mem_obj)->gpu_addr;

	memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;
	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;

	m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
					PRELOAD_REQ;
	m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
				QUANTUM_DURATION(10);

	m->cp_mqd_control             = MQD_CONTROL_PRIV_STATE_EN;
	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi        = upper_32_bits(addr);

	m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;

	/*
	 * Pipe Priority
	 * Identifies the pipe relative priority when this queue is connected
	 * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
	 * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
	 * 0 = CS_LOW (typically below GFX)
	 * 1 = CS_MEDIUM (typically between HP3D and GFX)
	 * 2 = CS_HIGH (typically above HP3D)
	 */
	m->cp_hqd_pipe_priority = 1;
	m->cp_hqd_queue_priority = 15;

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	retval = mm->update_mqd(mm, m, q);

	return retval;
}

static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
				struct queue_properties *q)
{
	struct cik_mqd *m;

	m = get_mqd(mqd);
	m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
				DEFAULT_MIN_AVAIL_SIZE |
				PRIV_STATE |
				KMD_QUEUE;

	/*
	 * The queue size field is log base 2 of the queue size in dwords,
	 * minus 1 to match the hardware encoding of the ring size.
	 */
	m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);

	m->cp_hqd_vmid = q->vmid;

	q->is_active = (q->queue_size > 0 &&
			q->queue_address != 0 &&
			q->queue_percent > 0);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct cik_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct cik_sdma_rlc_registers), false);
	return 0;
}

#endif

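/*
 * Factory for the CIK MQD managers: selects the init/load/update/destroy
 * callbacks that match the requested queue type (compute, HIQ or SDMA).
 */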
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
	case KFD_MQD_TYPE_COMPUTE:
		mqd->init_mqd = init_mqd;
		mqd->uninit_mqd = uninit_mqd;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = destroy_mqd;
		mqd->is_occupied = is_occupied;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_HIQ:
		mqd->init_mqd = init_mqd_hiq;
		mqd->uninit_mqd = uninit_mqd;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd_hiq;
		mqd->destroy_mqd = destroy_mqd;
		mqd->is_occupied = is_occupied;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		mqd->init_mqd = init_mqd_sdma;
		mqd->uninit_mqd = uninit_mqd_sdma;
		mqd->load_mqd = load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = destroy_mqd_sdma;
		mqd->is_occupied = is_occupied_sdma;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}

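/*
 * Hawaii variant: reuses the CIK manager but updates compute queues with the
 * ATC bits cleared (update_mqd_hawaii). The rationale is presumably that the
 * dGPU path does not use ATC translation; that reasoning is an assumption,
 * while the override itself is exactly what the code below does.
 */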
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
			struct kfd_dev *dev)
{
	struct mqd_manager *mqd;

	mqd = mqd_manager_init_cik(type, dev);
	if (!mqd)
		return NULL;
	if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
		mqd->update_mqd = update_mqd_hawaii;
	return mqd;
}