/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}
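
/*
 * Worked example (a sketch assuming a 4 KiB PAGE_SIZE, which is not
 * guaranteed on all configurations): with AMDGPU_ONE_DOORBELL_SIZE = 8
 * bytes and 1024 queues per process, the raw slice is 8 * 1024 = 8192
 * bytes, and roundup(8192, 4096) = 8192, i.e. each process owns two
 * whole doorbell pages.
 */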

int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
				      unsigned int *doorbell_index)
{
	int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
			       adev->mes.max_doorbell_slices,
			       GFP_KERNEL);
	if (r > 0)
		*doorbell_index = r;

	return r;
}

void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
				      unsigned int doorbell_index)
{
	if (doorbell_index)
		ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
}

unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
					struct amdgpu_device *adev,
					uint32_t doorbell_index,
					unsigned int doorbell_id)
{
	return ((doorbell_index *
		amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
		doorbell_id * 2);
}
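
/*
 * Worked example (continuing the hypothetical 8192-byte slice above):
 * for process slice index 3 and doorbell_id 5, the dword offset in the
 * doorbell BAR is 3 * 8192 / 4 + 5 * 2 = 6154. The "* 2" reflects that
 * each queue doorbell is 8 bytes (two dwords) wide.
 */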

static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;

	if (ip_type == AMDGPU_RING_TYPE_SDMA) {
		offset = adev->doorbell_index.sdma_engine[0];
		found = find_next_zero_bit(process->doorbell_bitmap,
					   AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
					   offset);
	} else {
		found = find_first_zero_bit(process->doorbell_bitmap,
					    AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
	}

	if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, process->doorbell_bitmap);

	*doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
				process->doorbell_index, found);

	return 0;
}

static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, doorbell_id;

	doorbell_id = doorbell_index -
		(process->doorbell_index *
		 amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
	doorbell_id /= 2;

	old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
	WARN_ON(!old);
}
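
/*
 * This is the inverse of amdgpu_mes_get_doorbell_dw_offset_in_bar():
 * continuing the example above, dword offset 6154 in slice 3 recovers
 * doorbell_id = (6154 - 3 * 8192 / 4) / 2 = 5.
 */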

static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	size_t doorbell_start_offset;
	size_t doorbell_aperture_size;
	size_t doorbell_process_limit;
	size_t aggregated_doorbell_start;
	int i;

	aggregated_doorbell_start = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
	aggregated_doorbell_start =
		roundup(aggregated_doorbell_start, PAGE_SIZE);

	doorbell_start_offset = aggregated_doorbell_start + PAGE_SIZE;
	doorbell_start_offset =
		roundup(doorbell_start_offset,
			amdgpu_mes_doorbell_process_slice(adev));

	doorbell_aperture_size = adev->doorbell.size;
	doorbell_aperture_size =
			rounddown(doorbell_aperture_size,
				  amdgpu_mes_doorbell_process_slice(adev));

	if (doorbell_aperture_size > doorbell_start_offset)
		doorbell_process_limit =
			(doorbell_aperture_size - doorbell_start_offset) /
			amdgpu_mes_doorbell_process_slice(adev);
	else
		return -ENOSPC;

	adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
	adev->mes.max_doorbell_slices = doorbell_process_limit;

	/* allocate Qword range for aggregated doorbell */
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
		adev->mes.aggregated_doorbells[i] =
			aggregated_doorbell_start / sizeof(u32) + i * 2;

	DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
	return 0;
}
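
/*
 * Resulting doorbell BAR layout (a sketch of the math above, not an
 * ASIC-specific map):
 *
 *   [0 .. max_assignment]        kernel/legacy doorbells
 *   <page-aligned gap>
 *   aggregated_doorbell_start    one page of aggregated doorbells,
 *                                one qword per priority level
 *   doorbell_start_offset ...    max_doorbell_slices process slices,
 *                                each amdgpu_mes_doorbell_process_slice()
 *                                bytes wide
 */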

int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	process->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!process->doorbell_bitmap) {
		DRM_ERROR("failed to allocate doorbell bitmap\n");
		kfree(process);
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for pasid=%d\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		goto clean_up_ctx;
	}

	/* allocate the starting doorbell index of the process */
	r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
	if (r < 0) {
		DRM_ERROR("failed to allocate doorbell for process\n");
		goto clean_up_pasid;
	}

	DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_pasid:
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);
clean_up_ctx:
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process->doorbell_bitmap);
	kfree(process);
	return r;
}
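
/*
 * Minimal usage sketch (hypothetical caller; pasid, vm and the gang
 * properties are placeholders, not taken from a real client):
 *
 *	int gang_id;
 *	struct amdgpu_mes_gang_properties gprops = {0};
 *
 *	r = amdgpu_mes_create_process(adev, pasid, vm);
 *	if (r)
 *		return r;
 *	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
 *	r = amdgpu_mes_add_gang(adev, pasid, &gprops, &gang_id);
 *	...
 *	amdgpu_mes_destroy_process(adev, pasid);
 */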

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process->doorbell_bitmap);
	kfree(process);
}

int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					 pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					 pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	amdgpu_bo_unreserve(q->mqd_obj);
}

int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}
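
/*
 * A minimal add/remove pairing sketch (hypothetical; qprops would
 * normally be filled from a ring via amdgpu_mes_ring_to_queue_props()):
 *
 *	int queue_id;
 *	struct amdgpu_mes_queue_properties qprops = {0};
 *
 *	qprops.queue_type = AMDGPU_RING_TYPE_COMPUTE;
 *	...
 *	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
 *	if (!r)
 *		amdgpu_mes_remove_hw_queue(adev, queue_id);
 */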

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	uint32_t val = 0;
	int r;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}
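
/*
 * Usage sketch (hypothetical register and values; mmSOMEREG is a
 * placeholder, not a real offset): poll through MES until the masked
 * register value matches the reference:
 *
 *	r = amdgpu_mes_reg_wait(adev, mmSOMEREG, 0x1, 0xff);
 *	if (r)
 *		dev_warn(adev->dev, "register never reached 0x1\n");
 */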

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)
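
/*
 * For reference, DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx) expands (for
 * its first branch) to roughly:
 *
 *	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)
 *		return offsetof(struct amdgpu_mes_ctx_meta_data,
 *				gfx[ring->idx].slots[id_offs]);
 *
 * i.e. it maps a (ring, slot) pair to a byte offset inside the shared
 * MES context metadata buffer.
 */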

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped above, don't unlock again */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						   enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}
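
/*
 * Usage sketch (an assumption about how an ASIC backend might ring the
 * aggregated doorbell after a submission; "ring" is a placeholder):
 *
 *	uint32_t agg_db = amdgpu_mes_get_aggregated_doorbell_index(adev,
 *				AMDGPU_MES_PRIORITY_LEVEL_NORMAL);
 *	WDOORBELL64(agg_db, ring->wptr);
 */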

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r)
		return r;
	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}
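
/*
 * Lifecycle sketch for the context metadata helpers (mirrors the
 * sequence used by amdgpu_mes_self_test() below; error handling
 * elided):
 *
 *	struct amdgpu_mes_ctx_data ctx_data = {0};
 *
 *	amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
 *	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
 *	amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
 *	...
 *	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
 *	amdgpu_mes_ctx_free_meta_data(&ctx_data);
 */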

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);

	csa_tv.bo = &ctx_data->meta_data_obj->tbo;
	csa_tv.num_shared = 1;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
		return r;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	ttm_eu_backoff_reservation(&ticket, &list);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error:
	amdgpu_vm_bo_del(adev, bo_va);
	ttm_eu_backoff_reservation(&ticket, &list);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct dma_fence *fence = NULL;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	long r = 0;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 2;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev,
			"leaking bo va because we failed to reserve bo (%ld)\n",
			r);
		return r;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s test failed (%d)\n",
				      ring->name, r);
			return r;
		}
		DRM_INFO("ring %s test pass\n", ring->name);

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		}
		DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX,
				   AMDGPU_MES_CTX_MAX_GFX_RINGS},
				 { AMDGPU_RING_TYPE_COMPUTE,
				   AMDGPU_MES_CTX_MAX_COMPUTE_RINGS},
				 { AMDGPU_RING_TYPE_SDMA,
				   AMDGPU_MES_CTX_MAX_SDMA_RINGS } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!\n");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_fini;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/*
		 * On GFX v10.3, the firmware does not yet support mapping
		 * SDMA queues, so skip them.
		 */
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}