/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

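/*
 * Each MES process gets a fixed doorbell slice: one 8-byte doorbell per
 * queue, for up to 1024 queues, rounded up to a whole page.
 */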
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

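/*
 * Allocate a free doorbell from the MES doorbell bitmap and return its
 * absolute dword index on the doorbell BAR (each 8-byte doorbell spans
 * two dwords). SDMA queues start searching at the first SDMA engine's
 * doorbell offset; all other queue types start at 0.
 */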
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

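/*
 * Set up the doorbell bitmap and reserve the first doorbells as the
 * per-priority-level aggregated doorbells.
 */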
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

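/*
 * One-time MES software initialization: set up the ID allocators and
 * locks, program the VMID/HQD masks handed to the MES firmware, allocate
 * writeback slots for the scheduler context, the query-status fence and
 * register reads, and initialize the doorbell bitmap.
 */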
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

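/*
 * Create an MES process for the given PASID: allocate and zero the
 * per-process context BO in GTT, then publish the process in the pasid
 * idr under the MES lock. The caller's VM provides the page directory.
 */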
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to reserve pasid %d in idr\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

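/*
 * Tear down an MES process: unmap every queue of every gang from the
 * hardware under the MES lock, then free the MQDs, gang contexts, process
 * context and the process itself once the lock is dropped.
 */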
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

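/*
 * Create a gang (a group of queues scheduled together) inside an existing
 * process: allocate and zero the gang context BO, then register the gang
 * in the gang idr and link it into the process under the MES lock.
 */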
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

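/*
 * Ask the MES firmware to suspend/resume every gang of every known
 * process. Failures are only reported; the walk continues.
 */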
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					 pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					 pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

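/*
 * Allocate and map a zeroed MQD (memory queue descriptor) BO for the
 * queue. The BO is left reserved on success; amdgpu_mes_queue_init_mqd()
 * fills it in and unreserves it.
 */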
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)\n", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

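/*
 * Add a hardware queue to a gang: allocate a queue id and a doorbell,
 * initialize the MQD, then hand the queue to the MES firmware via
 * mes.funcs->add_hw_queue().
 *
 * Illustrative call sequence (simplified, error handling omitted):
 *
 *	amdgpu_mes_create_process(adev, pasid, vm);
 *	amdgpu_mes_add_gang(adev, pasid, &gprops, &gang_id);
 *	amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
 */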
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

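/*
 * Remove a hardware queue: drop it from the queue idr, ask the MES
 * firmware to unmap it, then release its doorbell and MQD.
 */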
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

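/*
 * Register read/write/wait operations tunneled through the MES firmware
 * as MES_MISC_OP_* requests, for paths that cannot touch the registers
 * directly. Reads land in the read_val writeback slot allocated in
 * amdgpu_mes_init().
 */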
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

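/*
 * Forward shader debugger state (SPI debug control, TCP watch registers,
 * trap enable) to the MES firmware for the given process context. trap_en
 * is only honored from MES API version 14 onward.
 */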
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;
	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

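/*
 * Map an (engine, ring index, slot id) triple to its byte offset inside
 * struct amdgpu_mes_ctx_meta_data.
 */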
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

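/*
 * Create a software ring backed by an MES hardware queue: clone the ring
 * template of the requested type, initialize it without a scheduler, and
 * register it with the MES firmware through amdgpu_mes_add_hw_queue().
 */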
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before amdgpu_mes_add_hw_queue() */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						   enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

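/*
 * Map the context meta-data BO into the process VM at the address chosen
 * in ctx_data->meta_data_gpu_addr, update the page tables, and wait for
 * the mapping to land before returning.
 */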
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		}

		DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

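/*
 * MES self test: create a process with one gang per queue type (GFX,
 * compute, SDMA), run ring and IB tests on every queue that was added,
 * then tear everything down again.
 */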
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1 } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!\n");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the MES firmware doesn't support mapping SDMA queues. */
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

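/*
 * Fetch the MES firmware image for the given pipe (scheduler or KIQ),
 * decode the ucode/data load addresses from its header, and register the
 * images for PSP front-door loading when that load type is in use.
 */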
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[40];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}