1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include <linux/module.h>
26 #include "amdgpu.h"
27 #include "soc15_common.h"
28 #include "soc21.h"
29 #include "gc/gc_11_0_0_offset.h"
30 #include "gc/gc_11_0_0_sh_mask.h"
31 #include "gc/gc_11_0_0_default.h"
32 #include "v11_structs.h"
33 #include "mes_v11_api_def.h"
34 
35 MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes.bin");
36 MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes1.bin");
37 MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes.bin");
38 MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes1.bin");
39 MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
40 MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
41 
42 static int mes_v11_0_hw_fini(void *handle);
43 static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
44 static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
45 
46 #define MES_EOP_SIZE   2048
47 
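/*
 * Ring accessors for the MES ring.  The ring is driven purely through its
 * doorbell: the write pointer is mirrored to a CPU-visible shadow and then
 * written to the doorbell, and the read pointer comes from the rptr
 * writeback slot.  Non-doorbell operation is not supported, hence the
 * BUG() paths below.
 */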
48 static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
49 {
50 	struct amdgpu_device *adev = ring->adev;
51 
52 	if (ring->use_doorbell) {
53 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
54 			     ring->wptr);
55 		WDOORBELL64(ring->doorbell_index, ring->wptr);
56 	} else {
57 		BUG();
58 	}
59 }
60 
61 static u64 mes_v11_0_ring_get_rptr(struct amdgpu_ring *ring)
62 {
63 	return *ring->rptr_cpu_addr;
64 }
65 
66 static u64 mes_v11_0_ring_get_wptr(struct amdgpu_ring *ring)
67 {
68 	u64 wptr;
69 
70 	if (ring->use_doorbell)
71 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
72 	else
73 		BUG();
74 	return wptr;
75 }
76 
77 static const struct amdgpu_ring_funcs mes_v11_0_ring_funcs = {
78 	.type = AMDGPU_RING_TYPE_MES,
79 	.align_mask = 1,
80 	.nop = 0,
81 	.support_64bit_ptrs = true,
82 	.get_rptr = mes_v11_0_ring_get_rptr,
83 	.get_wptr = mes_v11_0_ring_get_wptr,
84 	.set_wptr = mes_v11_0_ring_set_wptr,
85 	.insert_nop = amdgpu_ring_insert_nop,
86 };
87 
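/*
 * Submit a MES API packet on the scheduler ring and poll for completion.
 * Callers fill api_status.api_completion_fence_addr/value with the ring's
 * fence GPU address and a freshly incremented sync_seq; this helper emits
 * the packet and polls the fence until MES writes that value back or the
 * timeout expires.
 */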
88 static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
89 						    void *pkt, int size)
90 {
91 	int ndw = size / 4;
92 	signed long r;
93 	union MESAPI__ADD_QUEUE *x_pkt = pkt;
94 	struct amdgpu_device *adev = mes->adev;
95 	struct amdgpu_ring *ring = &mes->ring;
96 
97 	BUG_ON(size % 4 != 0);
98 
99 	if (amdgpu_ring_alloc(ring, ndw))
100 		return -ENOMEM;
101 
102 	amdgpu_ring_write_multiple(ring, pkt, ndw);
103 	amdgpu_ring_commit(ring);
104 
105 	DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode);
106 
107 	r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq,
108 		      adev->usec_timeout * (amdgpu_emu_mode ? 100 : 1));
109 	if (r < 1) {
		DRM_ERROR("MES failed to respond to msg=%d\n",
111 			  x_pkt->header.opcode);
112 		return -ETIMEDOUT;
113 	}
114 
115 	return 0;
116 }
117 
118 static int convert_to_mes_queue_type(int queue_type)
119 {
120 	if (queue_type == AMDGPU_RING_TYPE_GFX)
121 		return MES_QUEUE_TYPE_GFX;
122 	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
123 		return MES_QUEUE_TYPE_COMPUTE;
124 	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
125 		return MES_QUEUE_TYPE_SDMA;
126 	else
127 		BUG();
128 	return -1;
129 }
130 
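/*
 * Build and submit an ADD_QUEUE packet, handing a user queue's MQD,
 * doorbell offset and process/gang context addresses over to the MES
 * scheduler.  The VM context control value is copied from the GFX hub
 * configuration.
 */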
131 static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
132 				  struct mes_add_queue_input *input)
133 {
134 	struct amdgpu_device *adev = mes->adev;
135 	union MESAPI__ADD_QUEUE mes_add_queue_pkt;
136 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
137 	uint32_t vm_cntx_cntl = hub->vm_cntx_cntl;
138 
139 	memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));
140 
141 	mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
142 	mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE;
143 	mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
144 
145 	mes_add_queue_pkt.process_id = input->process_id;
146 	mes_add_queue_pkt.page_table_base_addr = input->page_table_base_addr;
147 	mes_add_queue_pkt.process_va_start = input->process_va_start;
148 	mes_add_queue_pkt.process_va_end = input->process_va_end;
149 	mes_add_queue_pkt.process_quantum = input->process_quantum;
150 	mes_add_queue_pkt.process_context_addr = input->process_context_addr;
151 	mes_add_queue_pkt.gang_quantum = input->gang_quantum;
152 	mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
153 	mes_add_queue_pkt.inprocess_gang_priority =
154 		input->inprocess_gang_priority;
155 	mes_add_queue_pkt.gang_global_priority_level =
156 		input->gang_global_priority_level;
157 	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
158 	mes_add_queue_pkt.mqd_addr = input->mqd_addr;
159 	mes_add_queue_pkt.wptr_addr = input->wptr_addr;
160 	mes_add_queue_pkt.queue_type =
161 		convert_to_mes_queue_type(input->queue_type);
162 	mes_add_queue_pkt.paging = input->paging;
163 	mes_add_queue_pkt.vm_context_cntl = vm_cntx_cntl;
164 	mes_add_queue_pkt.gws_base = input->gws_base;
165 	mes_add_queue_pkt.gws_size = input->gws_size;
166 	mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
167 	mes_add_queue_pkt.tma_addr = input->tma_addr;
168 
169 	mes_add_queue_pkt.api_status.api_completion_fence_addr =
170 		mes->ring.fence_drv.gpu_addr;
171 	mes_add_queue_pkt.api_status.api_completion_fence_value =
172 		++mes->ring.fence_drv.sync_seq;
173 
174 	return mes_v11_0_submit_pkt_and_poll_completion(mes,
175 			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt));
176 }
177 
178 static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
179 				     struct mes_remove_queue_input *input)
180 {
181 	union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
182 
183 	memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
184 
185 	mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
186 	mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
187 	mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
188 
189 	mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
190 	mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
191 
192 	mes_remove_queue_pkt.api_status.api_completion_fence_addr =
193 		mes->ring.fence_drv.gpu_addr;
194 	mes_remove_queue_pkt.api_status.api_completion_fence_value =
195 		++mes->ring.fence_drv.sync_seq;
196 
197 	return mes_v11_0_submit_pkt_and_poll_completion(mes,
198 			&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt));
199 }
200 
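/*
 * Unmap (or preempt) a legacy kernel queue through the REMOVE_QUEUE API.
 * For PREEMPT_QUEUES_NO_UNMAP the queue is only preempted and the trailing
 * fence address/data are passed along; otherwise the gfx queue or the KIQ
 * utility queue is fully unmapped.
 */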
201 static int mes_v11_0_unmap_legacy_queue(struct amdgpu_mes *mes,
202 			struct mes_unmap_legacy_queue_input *input)
203 {
204 	union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
205 
206 	memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
207 
208 	mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
209 	mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
210 	mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
211 
212 	mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset << 2;
213 	mes_remove_queue_pkt.gang_context_addr = 0;
214 
215 	mes_remove_queue_pkt.pipe_id = input->pipe_id;
216 	mes_remove_queue_pkt.queue_id = input->queue_id;
217 
218 	if (input->action == PREEMPT_QUEUES_NO_UNMAP) {
219 		mes_remove_queue_pkt.preempt_legacy_gfx_queue = 1;
220 		mes_remove_queue_pkt.tf_addr = input->trail_fence_addr;
221 		mes_remove_queue_pkt.tf_data =
222 			lower_32_bits(input->trail_fence_data);
223 	} else {
224 		if (input->queue_type == AMDGPU_RING_TYPE_GFX)
225 			mes_remove_queue_pkt.unmap_legacy_gfx_queue = 1;
226 		else
227 			mes_remove_queue_pkt.unmap_kiq_utility_queue = 1;
228 	}
229 
230 	mes_remove_queue_pkt.api_status.api_completion_fence_addr =
231 		mes->ring.fence_drv.gpu_addr;
232 	mes_remove_queue_pkt.api_status.api_completion_fence_value =
233 		++mes->ring.fence_drv.sync_seq;
234 
235 	return mes_v11_0_submit_pkt_and_poll_completion(mes,
236 			&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt));
237 }
238 
239 static int mes_v11_0_suspend_gang(struct amdgpu_mes *mes,
240 				  struct mes_suspend_gang_input *input)
241 {
242 	return 0;
243 }
244 
245 static int mes_v11_0_resume_gang(struct amdgpu_mes *mes,
246 				 struct mes_resume_gang_input *input)
247 {
248 	return 0;
249 }
250 
251 static int mes_v11_0_query_sched_status(struct amdgpu_mes *mes)
252 {
253 	union MESAPI__QUERY_MES_STATUS mes_status_pkt;
254 
255 	memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
256 
257 	mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
258 	mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
259 	mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
260 
261 	mes_status_pkt.api_status.api_completion_fence_addr =
262 		mes->ring.fence_drv.gpu_addr;
263 	mes_status_pkt.api_status.api_completion_fence_value =
264 		++mes->ring.fence_drv.sync_seq;
265 
266 	return mes_v11_0_submit_pkt_and_poll_completion(mes,
267 			&mes_status_pkt, sizeof(mes_status_pkt));
268 }
269 
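/*
 * SET_HW_RSRC tells the MES firmware which hardware resources it owns:
 * the VMID masks for both hubs, the per-pipe compute/gfx/SDMA HQD masks,
 * the aggregated doorbells per priority level, the GC/MMHUB/OSSSYS
 * register apertures, and the scheduler context and query-status fence
 * buffers set up during MES software init.
 */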
270 static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
271 {
272 	int i;
273 	struct amdgpu_device *adev = mes->adev;
274 	union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt;
275 
276 	memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));
277 
278 	mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
279 	mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC;
280 	mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
281 
282 	mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
283 	mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
284 	mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
285 	mes_set_hw_res_pkt.paging_vmid = 0;
286 	mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
287 	mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
288 		mes->query_status_fence_gpu_addr;
289 
290 	for (i = 0; i < MAX_COMPUTE_PIPES; i++)
291 		mes_set_hw_res_pkt.compute_hqd_mask[i] =
292 			mes->compute_hqd_mask[i];
293 
294 	for (i = 0; i < MAX_GFX_PIPES; i++)
295 		mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];
296 
297 	for (i = 0; i < MAX_SDMA_PIPES; i++)
298 		mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];
299 
300 	for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
301 		mes_set_hw_res_pkt.aggregated_doorbells[i] =
302 			mes->agreegated_doorbells[i];
303 
304 	for (i = 0; i < 5; i++) {
305 		mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
306 		mes_set_hw_res_pkt.mmhub_base[i] =
307 				adev->reg_offset[MMHUB_HWIP][0][i];
308 		mes_set_hw_res_pkt.osssys_base[i] =
309 		adev->reg_offset[OSSSYS_HWIP][0][i];
310 	}
311 
312 	mes_set_hw_res_pkt.disable_reset = 1;
313 	mes_set_hw_res_pkt.disable_mes_log = 1;
314 	mes_set_hw_res_pkt.use_different_vmid_compute = 1;
315 
316 	mes_set_hw_res_pkt.api_status.api_completion_fence_addr =
317 		mes->ring.fence_drv.gpu_addr;
318 	mes_set_hw_res_pkt.api_status.api_completion_fence_value =
319 		++mes->ring.fence_drv.sync_seq;
320 
321 	return mes_v11_0_submit_pkt_and_poll_completion(mes,
322 			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt));
323 }
324 
325 static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
326 	.add_hw_queue = mes_v11_0_add_hw_queue,
327 	.remove_hw_queue = mes_v11_0_remove_hw_queue,
328 	.unmap_legacy_queue = mes_v11_0_unmap_legacy_queue,
329 	.suspend_gang = mes_v11_0_suspend_gang,
330 	.resume_gang = mes_v11_0_resume_gang,
331 };
332 
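/*
 * Fetch and validate the per-pipe MES firmware: <ucode_prefix>_mes.bin for
 * the scheduler pipe and <ucode_prefix>_mes1.bin for the KIQ pipe.  When
 * PSP front-door loading is used, the ucode and data sections are also
 * registered in adev->firmware.ucode[] so PSP can load them.
 */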
333 static int mes_v11_0_init_microcode(struct amdgpu_device *adev,
334 				    enum admgpu_mes_pipe pipe)
335 {
336 	char fw_name[30];
337 	char ucode_prefix[30];
338 	int err;
339 	const struct mes_firmware_header_v1_0 *mes_hdr;
340 	struct amdgpu_firmware_info *info;
341 
342 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
343 
344 	if (pipe == AMDGPU_MES_SCHED_PIPE)
345 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
346 			 ucode_prefix);
347 	else
348 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
349 			 ucode_prefix);
350 
351 	err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
352 	if (err)
353 		return err;
354 
355 	err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
356 	if (err) {
357 		release_firmware(adev->mes.fw[pipe]);
358 		adev->mes.fw[pipe] = NULL;
359 		return err;
360 	}
361 
362 	mes_hdr = (const struct mes_firmware_header_v1_0 *)
363 		adev->mes.fw[pipe]->data;
	adev->mes.ucode_fw_version[pipe] =
		le32_to_cpu(mes_hdr->mes_ucode_version);
	adev->mes.data_fw_version[pipe] =
		le32_to_cpu(mes_hdr->mes_ucode_data_version);
368 	adev->mes.uc_start_addr[pipe] =
369 		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
370 		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
371 	adev->mes.data_start_addr[pipe] =
372 		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
373 		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
374 
375 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
376 		int ucode, ucode_data;
377 
378 		if (pipe == AMDGPU_MES_SCHED_PIPE) {
379 			ucode = AMDGPU_UCODE_ID_CP_MES;
380 			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
381 		} else {
382 			ucode = AMDGPU_UCODE_ID_CP_MES1;
383 			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
384 		}
385 
386 		info = &adev->firmware.ucode[ucode];
387 		info->ucode_id = ucode;
388 		info->fw = adev->mes.fw[pipe];
389 		adev->firmware.fw_size +=
390 			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
391 			      PAGE_SIZE);
392 
393 		info = &adev->firmware.ucode[ucode_data];
394 		info->ucode_id = ucode_data;
395 		info->fw = adev->mes.fw[pipe];
396 		adev->firmware.fw_size +=
397 			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
398 			      PAGE_SIZE);
399 	}
400 
401 	return 0;
402 }
403 
404 static void mes_v11_0_free_microcode(struct amdgpu_device *adev,
405 				     enum admgpu_mes_pipe pipe)
406 {
407 	release_firmware(adev->mes.fw[pipe]);
408 	adev->mes.fw[pipe] = NULL;
409 }
410 
411 static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
412 					   enum admgpu_mes_pipe pipe)
413 {
414 	int r;
415 	const struct mes_firmware_header_v1_0 *mes_hdr;
416 	const __le32 *fw_data;
417 	unsigned fw_size;
418 
419 	mes_hdr = (const struct mes_firmware_header_v1_0 *)
420 		adev->mes.fw[pipe]->data;
421 
422 	fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
423 		   le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
424 	fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
425 
426 	r = amdgpu_bo_create_reserved(adev, fw_size,
427 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
428 				      &adev->mes.ucode_fw_obj[pipe],
429 				      &adev->mes.ucode_fw_gpu_addr[pipe],
430 				      (void **)&adev->mes.ucode_fw_ptr[pipe]);
431 	if (r) {
432 		dev_err(adev->dev, "(%d) failed to create mes fw bo\n", r);
433 		return r;
434 	}
435 
436 	memcpy(adev->mes.ucode_fw_ptr[pipe], fw_data, fw_size);
437 
438 	amdgpu_bo_kunmap(adev->mes.ucode_fw_obj[pipe]);
439 	amdgpu_bo_unreserve(adev->mes.ucode_fw_obj[pipe]);
440 
441 	return 0;
442 }
443 
444 static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
445 						enum admgpu_mes_pipe pipe)
446 {
447 	int r;
448 	const struct mes_firmware_header_v1_0 *mes_hdr;
449 	const __le32 *fw_data;
450 	unsigned fw_size;
451 
452 	mes_hdr = (const struct mes_firmware_header_v1_0 *)
453 		adev->mes.fw[pipe]->data;
454 
455 	fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
456 		   le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
457 	fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
458 
459 	r = amdgpu_bo_create_reserved(adev, fw_size,
460 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
461 				      &adev->mes.data_fw_obj[pipe],
462 				      &adev->mes.data_fw_gpu_addr[pipe],
463 				      (void **)&adev->mes.data_fw_ptr[pipe]);
464 	if (r) {
465 		dev_err(adev->dev, "(%d) failed to create mes data fw bo\n", r);
466 		return r;
467 	}
468 
469 	memcpy(adev->mes.data_fw_ptr[pipe], fw_data, fw_size);
470 
471 	amdgpu_bo_kunmap(adev->mes.data_fw_obj[pipe]);
472 	amdgpu_bo_unreserve(adev->mes.data_fw_obj[pipe]);
473 
474 	return 0;
475 }
476 
477 static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
478 					 enum admgpu_mes_pipe pipe)
479 {
480 	amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
481 			      &adev->mes.data_fw_gpu_addr[pipe],
482 			      (void **)&adev->mes.data_fw_ptr[pipe]);
483 
484 	amdgpu_bo_free_kernel(&adev->mes.ucode_fw_obj[pipe],
485 			      &adev->mes.ucode_fw_gpu_addr[pipe],
486 			      (void **)&adev->mes.ucode_fw_ptr[pipe]);
487 }
488 
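/*
 * Enable: hold both MES pipes in reset, program each pipe's ucode start
 * address, then clear the halt/reset bits and activate pipe0 (and pipe1
 * when the MES KIQ is enabled).  Disable: deactivate both pipes,
 * invalidate the instruction cache, and put the pipes back into reset
 * with MES halted.
 */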
489 static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
490 {
491 	uint64_t ucode_addr;
492 	uint32_t pipe, data = 0;
493 
494 	if (enable) {
495 		data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
496 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
497 		data = REG_SET_FIELD(data, CP_MES_CNTL,
498 			     MES_PIPE1_RESET, adev->enable_mes_kiq ? 1 : 0);
499 		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
500 
501 		mutex_lock(&adev->srbm_mutex);
502 		for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
503 			if (!adev->enable_mes_kiq &&
504 			    pipe == AMDGPU_MES_KIQ_PIPE)
505 				continue;
506 
507 			soc21_grbm_select(adev, 3, pipe, 0, 0);
508 
509 			ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
510 			WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
511 				     lower_32_bits(ucode_addr));
512 			WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
513 				     upper_32_bits(ucode_addr));
514 		}
515 		soc21_grbm_select(adev, 0, 0, 0, 0);
516 		mutex_unlock(&adev->srbm_mutex);
517 
		/* unhalt MES and activate pipe0 (and pipe1 if the MES KIQ is enabled) */
519 		data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
520 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE,
521 				     adev->enable_mes_kiq ? 1 : 0);
522 		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
523 
524 		if (amdgpu_emu_mode)
525 			msleep(100);
526 		else
527 			udelay(50);
528 	} else {
529 		data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
530 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_ACTIVE, 0);
531 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 0);
532 		data = REG_SET_FIELD(data, CP_MES_CNTL,
533 				     MES_INVALIDATE_ICACHE, 1);
534 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
535 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
536 				     adev->enable_mes_kiq ? 1 : 0);
537 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
538 		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
539 	}
540 }
541 
/* This function is for backdoor (direct, non-PSP) loading of the MES firmware */
543 static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
544 				    enum admgpu_mes_pipe pipe, bool prime_icache)
545 {
546 	int r;
547 	uint32_t data;
548 	uint64_t ucode_addr;
549 
550 	mes_v11_0_enable(adev, false);
551 
552 	if (!adev->mes.fw[pipe])
553 		return -EINVAL;
554 
555 	r = mes_v11_0_allocate_ucode_buffer(adev, pipe);
556 	if (r)
557 		return r;
558 
559 	r = mes_v11_0_allocate_ucode_data_buffer(adev, pipe);
560 	if (r) {
561 		mes_v11_0_free_ucode_buffers(adev, pipe);
562 		return r;
563 	}
564 
565 	mutex_lock(&adev->srbm_mutex);
	/* me=3, queue=0; select the pipe being loaded */
567 	soc21_grbm_select(adev, 3, pipe, 0, 0);
568 
569 	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_CNTL, 0);
570 
571 	/* set ucode start address */
572 	ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
573 	WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
574 		     lower_32_bits(ucode_addr));
575 	WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
576 		     upper_32_bits(ucode_addr));
577 
	/* set ucode firmware address */
579 	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_LO,
580 		     lower_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));
581 	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_HI,
582 		     upper_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));
583 
584 	/* set ucode instruction cache boundary to 2M-1 */
585 	WREG32_SOC15(GC, 0, regCP_MES_MIBOUND_LO, 0x1FFFFF);
586 
587 	/* set ucode data firmware address */
588 	WREG32_SOC15(GC, 0, regCP_MES_MDBASE_LO,
589 		     lower_32_bits(adev->mes.data_fw_gpu_addr[pipe]));
590 	WREG32_SOC15(GC, 0, regCP_MES_MDBASE_HI,
591 		     upper_32_bits(adev->mes.data_fw_gpu_addr[pipe]));
592 
	/* set ucode data boundary to 256K-1 (0x3FFFF) */
594 	WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x3FFFF);
595 
596 	if (prime_icache) {
597 		/* invalidate ICACHE */
598 		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
599 		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
600 		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
601 		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
602 
603 		/* prime the ICACHE. */
604 		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
605 		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
606 		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
607 	}
608 
609 	soc21_grbm_select(adev, 0, 0, 0, 0);
610 	mutex_unlock(&adev->srbm_mutex);
611 
612 	return 0;
613 }
614 
615 static int mes_v11_0_allocate_eop_buf(struct amdgpu_device *adev,
616 				      enum admgpu_mes_pipe pipe)
617 {
618 	int r;
619 	u32 *eop;
620 
621 	r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE,
622 			      AMDGPU_GEM_DOMAIN_GTT,
623 			      &adev->mes.eop_gpu_obj[pipe],
624 			      &adev->mes.eop_gpu_addr[pipe],
625 			      (void **)&eop);
626 	if (r) {
		dev_warn(adev->dev, "(%d) failed to create EOP bo\n", r);
628 		return r;
629 	}
630 
631 	memset(eop, 0,
632 	       adev->mes.eop_gpu_obj[pipe]->tbo.base.size);
633 
634 	amdgpu_bo_kunmap(adev->mes.eop_gpu_obj[pipe]);
635 	amdgpu_bo_unreserve(adev->mes.eop_gpu_obj[pipe]);
636 
637 	return 0;
638 }
639 
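/*
 * Initialize the v11 compute MQD for a MES ring.  Only the CPU copy is
 * written here; the values are committed to the HQD either by a KIQ
 * MAP_QUEUES packet (scheduler pipe) or by the direct register writes in
 * mes_v11_0_queue_init_register() (KIQ pipe).
 */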
640 static int mes_v11_0_mqd_init(struct amdgpu_ring *ring)
641 {
642 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
643 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
644 	uint32_t tmp;
645 
646 	mqd->header = 0xC0310800;
647 	mqd->compute_pipelinestat_enable = 0x00000001;
648 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
649 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
650 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
651 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
652 	mqd->compute_misc_reserved = 0x00000007;
653 
654 	eop_base_addr = ring->eop_gpu_addr >> 8;
655 
656 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
657 	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
658 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
659 			(order_base_2(MES_EOP_SIZE / 4) - 1));
660 
661 	mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_base_addr);
662 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
663 	mqd->cp_hqd_eop_control = tmp;
664 
665 	/* disable the queue if it's active */
666 	ring->wptr = 0;
667 	mqd->cp_hqd_pq_rptr = 0;
668 	mqd->cp_hqd_pq_wptr_lo = 0;
669 	mqd->cp_hqd_pq_wptr_hi = 0;
670 
671 	/* set the pointer to the MQD */
672 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
673 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
674 
675 	/* set MQD vmid to 0 */
676 	tmp = regCP_MQD_CONTROL_DEFAULT;
677 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
678 	mqd->cp_mqd_control = tmp;
679 
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
681 	hqd_gpu_addr = ring->gpu_addr >> 8;
682 	mqd->cp_hqd_pq_base_lo = lower_32_bits(hqd_gpu_addr);
683 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
684 
685 	/* set the wb address whether it's enabled or not */
686 	wb_gpu_addr = ring->rptr_gpu_addr;
687 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
688 	mqd->cp_hqd_pq_rptr_report_addr_hi =
689 		upper_32_bits(wb_gpu_addr) & 0xffff;
690 
691 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
692 	wb_gpu_addr = ring->wptr_gpu_addr;
693 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8;
694 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
695 
696 	/* set up the HQD, this is similar to CP_RB0_CNTL */
697 	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
698 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
699 			    (order_base_2(ring->ring_size / 4) - 1));
700 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
701 			    ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
702 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
703 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
704 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
705 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
706 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, NO_UPDATE_RPTR, 1);
707 	mqd->cp_hqd_pq_control = tmp;
708 
	/* enable the doorbell if used */
710 	tmp = 0;
711 	if (ring->use_doorbell) {
712 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
713 				    DOORBELL_OFFSET, ring->doorbell_index);
714 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
715 				    DOORBELL_EN, 1);
716 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
717 				    DOORBELL_SOURCE, 0);
718 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
719 				    DOORBELL_HIT, 0);
720 	}
721 	else
722 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
723 				    DOORBELL_EN, 0);
724 	mqd->cp_hqd_pq_doorbell_control = tmp;
725 
726 	mqd->cp_hqd_vmid = 0;
727 	/* activate the queue */
728 	mqd->cp_hqd_active = 1;
729 
730 	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
731 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE,
732 			    PRELOAD_SIZE, 0x55);
733 	mqd->cp_hqd_persistent_state = tmp;
734 
735 	mqd->cp_hqd_ib_control = regCP_HQD_IB_CONTROL_DEFAULT;
736 	mqd->cp_hqd_iq_timer = regCP_HQD_IQ_TIMER_DEFAULT;
737 	mqd->cp_hqd_quantum = regCP_HQD_QUANTUM_DEFAULT;
738 
739 	return 0;
740 }
741 
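/*
 * Program the HQD registers directly from the MQD under GRBM pipe
 * selection.  Used for the MES KIQ pipe, which is brought up without
 * going through a KIQ MAP_QUEUES packet.
 */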
742 static void mes_v11_0_queue_init_register(struct amdgpu_ring *ring)
743 {
744 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
745 	struct amdgpu_device *adev = ring->adev;
746 	uint32_t data = 0;
747 
748 	mutex_lock(&adev->srbm_mutex);
749 	soc21_grbm_select(adev, 3, ring->pipe, 0, 0);
750 
751 	/* set CP_HQD_VMID.VMID = 0. */
752 	data = RREG32_SOC15(GC, 0, regCP_HQD_VMID);
753 	data = REG_SET_FIELD(data, CP_HQD_VMID, VMID, 0);
754 	WREG32_SOC15(GC, 0, regCP_HQD_VMID, data);
755 
756 	/* set CP_HQD_PQ_DOORBELL_CONTROL.DOORBELL_EN=0 */
757 	data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
758 	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
759 			     DOORBELL_EN, 0);
760 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);
761 
762 	/* set CP_MQD_BASE_ADDR/HI with the MQD base address */
763 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
764 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
765 
766 	/* set CP_MQD_CONTROL.VMID=0 */
767 	data = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
768 	data = REG_SET_FIELD(data, CP_MQD_CONTROL, VMID, 0);
769 	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 0);
770 
771 	/* set CP_HQD_PQ_BASE/HI with the ring buffer base address */
772 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
773 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);
774 
775 	/* set CP_HQD_PQ_RPTR_REPORT_ADDR/HI */
776 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
777 		     mqd->cp_hqd_pq_rptr_report_addr_lo);
778 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
779 		     mqd->cp_hqd_pq_rptr_report_addr_hi);
780 
781 	/* set CP_HQD_PQ_CONTROL */
782 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control);
783 
784 	/* set CP_HQD_PQ_WPTR_POLL_ADDR/HI */
785 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
786 		     mqd->cp_hqd_pq_wptr_poll_addr_lo);
787 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
788 		     mqd->cp_hqd_pq_wptr_poll_addr_hi);
789 
790 	/* set CP_HQD_PQ_DOORBELL_CONTROL */
791 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
792 		     mqd->cp_hqd_pq_doorbell_control);
793 
	/* set CP_HQD_PERSISTENT_STATE.PRELOAD_SIZE=0x55 */
795 	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state);
796 
797 	/* set CP_HQD_ACTIVE.ACTIVE=1 */
798 	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, mqd->cp_hqd_active);
799 
800 	soc21_grbm_select(adev, 0, 0, 0, 0);
801 	mutex_unlock(&adev->srbm_mutex);
802 }
803 
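/*
 * Map the MES scheduler ring through the KIQ: reserve space on the KIQ
 * ring, emit a MAP_QUEUES packet for adev->mes.ring, and test the KIQ
 * ring to make sure the packet was consumed.
 */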
804 static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
805 {
806 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
807 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
808 	int r;
809 
810 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
811 		return -EINVAL;
812 
813 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
814 	if (r) {
815 		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
816 		return r;
817 	}
818 
819 	kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
820 
821 	r = amdgpu_ring_test_ring(kiq_ring);
822 	if (r) {
		DRM_ERROR("KIQ enable failed\n");
824 		kiq_ring->sched.ready = false;
825 	}
826 	return r;
827 }
828 
829 static int mes_v11_0_queue_init(struct amdgpu_device *adev,
830 				enum admgpu_mes_pipe pipe)
831 {
832 	struct amdgpu_ring *ring;
833 	int r;
834 
835 	if (pipe == AMDGPU_MES_KIQ_PIPE)
836 		ring = &adev->gfx.kiq.ring;
837 	else if (pipe == AMDGPU_MES_SCHED_PIPE)
838 		ring = &adev->mes.ring;
839 	else
840 		BUG();
841 
842 	if ((pipe == AMDGPU_MES_SCHED_PIPE) &&
843 	    (amdgpu_in_reset(adev) || adev->in_suspend)) {
844 		*(ring->wptr_cpu_addr) = 0;
845 		*(ring->rptr_cpu_addr) = 0;
846 		amdgpu_ring_clear_ring(ring);
847 	}
848 
849 	r = mes_v11_0_mqd_init(ring);
850 	if (r)
851 		return r;
852 
853 	if (pipe == AMDGPU_MES_SCHED_PIPE) {
854 		r = mes_v11_0_kiq_enable_queue(adev);
855 		if (r)
856 			return r;
857 	} else {
858 		mes_v11_0_queue_init_register(ring);
859 	}
860 
861 	return 0;
862 }
863 
864 static int mes_v11_0_ring_init(struct amdgpu_device *adev)
865 {
866 	struct amdgpu_ring *ring;
867 
868 	ring = &adev->mes.ring;
869 
870 	ring->funcs = &mes_v11_0_ring_funcs;
871 
872 	ring->me = 3;
873 	ring->pipe = 0;
874 	ring->queue = 0;
875 
876 	ring->ring_obj = NULL;
877 	ring->use_doorbell = true;
878 	ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
879 	ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_SCHED_PIPE];
880 	ring->no_scheduler = true;
881 	sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
882 
883 	return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
884 				AMDGPU_RING_PRIO_DEFAULT, NULL);
885 }
886 
887 static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev)
888 {
889 	struct amdgpu_ring *ring;
890 
891 	spin_lock_init(&adev->gfx.kiq.ring_lock);
892 
893 	ring = &adev->gfx.kiq.ring;
894 
895 	ring->me = 3;
896 	ring->pipe = 1;
897 	ring->queue = 0;
898 
899 	ring->adev = NULL;
900 	ring->ring_obj = NULL;
901 	ring->use_doorbell = true;
902 	ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1;
903 	ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_KIQ_PIPE];
904 	ring->no_scheduler = true;
905 	sprintf(ring->name, "mes_kiq_%d.%d.%d",
906 		ring->me, ring->pipe, ring->queue);
907 
908 	return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
909 				AMDGPU_RING_PRIO_DEFAULT, NULL);
910 }
911 
912 static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
913 				 enum admgpu_mes_pipe pipe)
914 {
915 	int r, mqd_size = sizeof(struct v11_compute_mqd);
916 	struct amdgpu_ring *ring;
917 
918 	if (pipe == AMDGPU_MES_KIQ_PIPE)
919 		ring = &adev->gfx.kiq.ring;
920 	else if (pipe == AMDGPU_MES_SCHED_PIPE)
921 		ring = &adev->mes.ring;
922 	else
923 		BUG();
924 
925 	if (ring->mqd_obj)
926 		return 0;
927 
928 	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
929 				    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
930 				    &ring->mqd_gpu_addr, &ring->mqd_ptr);
931 	if (r) {
932 		dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
933 		return r;
934 	}
935 
936 	memset(ring->mqd_ptr, 0, mqd_size);
937 
938 	/* prepare MQD backup */
939 	adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
940 	if (!adev->mes.mqd_backup[pipe])
941 		dev_warn(adev->dev,
942 			 "no memory to create MQD backup for ring %s\n",
943 			 ring->name);
944 
945 	return 0;
946 }
947 
948 static int mes_v11_0_sw_init(void *handle)
949 {
950 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
951 	int pipe, r;
952 
953 	adev->mes.adev = adev;
954 	adev->mes.funcs = &mes_v11_0_funcs;
955 	adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
956 	adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;
957 
958 	r = amdgpu_mes_init(adev);
959 	if (r)
960 		return r;
961 
962 	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
963 		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
964 			continue;
965 
966 		r = mes_v11_0_init_microcode(adev, pipe);
967 		if (r)
968 			return r;
969 
970 		r = mes_v11_0_allocate_eop_buf(adev, pipe);
971 		if (r)
972 			return r;
973 
974 		r = mes_v11_0_mqd_sw_init(adev, pipe);
975 		if (r)
976 			return r;
977 	}
978 
979 	if (adev->enable_mes_kiq) {
980 		r = mes_v11_0_kiq_ring_init(adev);
981 		if (r)
982 			return r;
983 	}
984 
985 	r = mes_v11_0_ring_init(adev);
986 	if (r)
987 		return r;
988 
989 	return 0;
990 }
991 
992 static int mes_v11_0_sw_fini(void *handle)
993 {
994 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
995 	int pipe;
996 
997 	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
998 	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
999 
1000 	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
1001 		kfree(adev->mes.mqd_backup[pipe]);
1002 
1003 		amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
1004 				      &adev->mes.eop_gpu_addr[pipe],
1005 				      NULL);
1006 
1007 		mes_v11_0_free_microcode(adev, pipe);
1008 	}
1009 
1010 	amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
1011 			      &adev->gfx.kiq.ring.mqd_gpu_addr,
1012 			      &adev->gfx.kiq.ring.mqd_ptr);
1013 
1014 	amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
1015 			      &adev->mes.ring.mqd_gpu_addr,
1016 			      &adev->mes.ring.mqd_ptr);
1017 
1018 	amdgpu_ring_fini(&adev->gfx.kiq.ring);
1019 	amdgpu_ring_fini(&adev->mes.ring);
1020 
1021 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1022 		mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
1023 		mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_SCHED_PIPE);
1024 	}
1025 
1026 	amdgpu_mes_fini(adev);
1027 	return 0;
1028 }
1029 
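/*
 * Tell the RLC which queue is the KIQ by encoding me/pipe/queue into the
 * low byte of RLC_CP_SCHEDULERS; bit 7 is set with a second write.
 */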
1030 static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
1031 {
1032 	uint32_t tmp;
1033 	struct amdgpu_device *adev = ring->adev;
1034 
	/* tell RLC which queue is the KIQ queue */
1036 	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
1037 	tmp &= 0xffffff00;
1038 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
1039 	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
1040 	tmp |= 0x80;
1041 	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
1042 }
1043 
1044 static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
1045 {
1046 	int r = 0;
1047 
1048 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1049 
1050 		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false);
1051 		if (r) {
1052 			DRM_ERROR("failed to load MES fw, r=%d\n", r);
1053 			return r;
1054 		}
1055 
1056 		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true);
1057 		if (r) {
1058 			DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
1059 			return r;
1060 		}
1061 
1062 	}
1063 
1064 	mes_v11_0_enable(adev, true);
1065 
1066 	mes_v11_0_kiq_setting(&adev->gfx.kiq.ring);
1067 
1068 	r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
1069 	if (r)
1070 		goto failure;
1071 
1072 	return r;
1073 
1074 failure:
1075 	mes_v11_0_hw_fini(adev);
1076 	return r;
1077 }
1078 
1079 static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
1080 {
1081 	mes_v11_0_enable(adev, false);
1082 	return 0;
1083 }
1084 
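/*
 * Bring-up of the scheduler pipe: map its ring through the KIQ, hand over
 * hardware resources with SET_HW_RSRC, then issue a QUERY_SCHEDULER_STATUS
 * packet as a sanity check that MES is responding.  When the MES KIQ is
 * not used, MES is enabled here first (backdoor-loading the firmware in
 * the direct-load case).
 */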
1085 static int mes_v11_0_hw_init(void *handle)
1086 {
1087 	int r;
1088 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1089 
1090 	if (!adev->enable_mes_kiq) {
1091 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1092 			r = mes_v11_0_load_microcode(adev,
1093 					     AMDGPU_MES_SCHED_PIPE, true);
1094 			if (r) {
				DRM_ERROR("failed to load MES fw, r=%d\n", r);
1096 				return r;
1097 			}
1098 		}
1099 
1100 		mes_v11_0_enable(adev, true);
1101 	}
1102 
1103 	r = mes_v11_0_queue_init(adev, AMDGPU_MES_SCHED_PIPE);
1104 	if (r)
1105 		goto failure;
1106 
1107 	r = mes_v11_0_set_hw_resources(&adev->mes);
1108 	if (r)
1109 		goto failure;
1110 
1111 	r = mes_v11_0_query_sched_status(&adev->mes);
1112 	if (r) {
		DRM_ERROR("MES scheduler status query failed\n");
1114 		goto failure;
1115 	}
1116 
	/*
	 * Disable KIQ ring usage from the driver once MES is enabled.
	 * MES uses the KIQ ring exclusively, so the driver cannot access
	 * the KIQ ring while MES is enabled.
	 */
1122 	adev->gfx.kiq.ring.sched.ready = false;
1123 
1124 	return 0;
1125 
1126 failure:
1127 	mes_v11_0_hw_fini(adev);
1128 	return r;
1129 }
1130 
1131 static int mes_v11_0_hw_fini(void *handle)
1132 {
1133 	return 0;
1134 }
1135 
1136 static int mes_v11_0_suspend(void *handle)
1137 {
1138 	int r;
1139 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1140 
1141 	r = amdgpu_mes_suspend(adev);
1142 	if (r)
1143 		return r;
1144 
1145 	return mes_v11_0_hw_fini(adev);
1146 }
1147 
1148 static int mes_v11_0_resume(void *handle)
1149 {
1150 	int r;
1151 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1152 
1153 	r = mes_v11_0_hw_init(adev);
1154 	if (r)
1155 		return r;
1156 
1157 	return amdgpu_mes_resume(adev);
1158 }
1159 
1160 static int mes_v11_0_late_init(void *handle)
1161 {
1162 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1163 
1164 	amdgpu_mes_self_test(adev);
1165 
1166 	return 0;
1167 }
1168 
1169 static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
1170 	.name = "mes_v11_0",
1171 	.late_init = mes_v11_0_late_init,
1172 	.sw_init = mes_v11_0_sw_init,
1173 	.sw_fini = mes_v11_0_sw_fini,
1174 	.hw_init = mes_v11_0_hw_init,
1175 	.hw_fini = mes_v11_0_hw_fini,
1176 	.suspend = mes_v11_0_suspend,
1177 	.resume = mes_v11_0_resume,
1178 };
1179 
1180 const struct amdgpu_ip_block_version mes_v11_0_ip_block = {
1181 	.type = AMD_IP_BLOCK_TYPE_MES,
1182 	.major = 11,
1183 	.minor = 0,
1184 	.rev = 0,
1185 	.funcs = &mes_v11_0_ip_funcs,
1186 };
1187