/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

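/**
 * amdgpu_mes_doorbell_process_slice - size of one process doorbell slice
 * @adev: amdgpu device pointer
 *
 * Each MES process gets a page-aligned slice of the doorbell BAR, large
 * enough for AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 64-bit doorbells.
 */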
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

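/**
 * amdgpu_mes_alloc_process_doorbells - reserve a doorbell slice for a process
 * @adev: amdgpu device pointer
 * @doorbell_index: filled with the allocated slice index on success
 *
 * Returns the allocated slice index (always >= 2) on success, or a negative
 * error code. Indices 0 and 1 are never handed out, so 0 can serve as the
 * "not allocated" marker.
 */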
int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
				       unsigned int *doorbell_index)
{
	int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
			       adev->mes.max_doorbell_slices,
			       GFP_KERNEL);
	if (r > 0)
		*doorbell_index = r;

	return r;
}

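/**
 * amdgpu_mes_free_process_doorbells - release a process doorbell slice
 * @adev: amdgpu device pointer
 * @doorbell_index: slice index from amdgpu_mes_alloc_process_doorbells()
 *
 * A zero index means the slice was never allocated and is ignored.
 */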
void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
				       unsigned int doorbell_index)
{
	if (doorbell_index)
		ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
}

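/**
 * amdgpu_mes_get_doorbell_dw_offset_in_bar - doorbell dword offset in the BAR
 * @adev: amdgpu device pointer
 * @doorbell_index: process doorbell slice index
 * @doorbell_id: queue doorbell id within the process slice
 *
 * Converts a (process slice, queue doorbell id) pair into the dword offset
 * of the queue's 64-bit doorbell from the start of the doorbell BAR.
 */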
unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
					struct amdgpu_device *adev,
					uint32_t doorbell_index,
					unsigned int doorbell_id)
{
	return ((doorbell_index *
		amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
		doorbell_id * 2);
}

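/*
 * Find a free doorbell id in the process slice. SDMA queues must use
 * doorbells at or above the SDMA engine doorbell offset, so start the
 * search there; all other queue types take the first free bit.
 */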
static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;

	if (ip_type == AMDGPU_RING_TYPE_SDMA) {
		offset = adev->doorbell_index.sdma_engine[0];
		found = find_next_zero_bit(process->doorbell_bitmap,
					   AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
					   offset);
	} else {
		found = find_first_zero_bit(process->doorbell_bitmap,
					    AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
	}

	if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, process->doorbell_bitmap);

	*doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
				process->doorbell_index, found);

	return 0;
}

static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, doorbell_id;

	doorbell_id = doorbell_index -
		(process->doorbell_index *
		 amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
	doorbell_id /= 2;

	old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
	WARN_ON(!old);
}

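/*
 * Carve the doorbell BAR space left over after the kernel's static doorbell
 * assignments into per-process slices and record how many slices fit.
 */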
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	size_t doorbell_start_offset;
	size_t doorbell_aperture_size;
	size_t doorbell_process_limit;

	doorbell_start_offset = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
	doorbell_start_offset =
		roundup(doorbell_start_offset,
			amdgpu_mes_doorbell_process_slice(adev));

	doorbell_aperture_size = adev->doorbell.size;
	doorbell_aperture_size =
			rounddown(doorbell_aperture_size,
				  amdgpu_mes_doorbell_process_slice(adev));

	if (doorbell_aperture_size <= doorbell_start_offset)
		return -ENOSPC;

	doorbell_process_limit =
		(doorbell_aperture_size - doorbell_start_offset) /
		amdgpu_mes_doorbell_process_slice(adev);

	adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
	adev->mes.max_doorbell_slices = doorbell_process_limit;

	DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
	return 0;
}

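/**
 * amdgpu_mes_init - initialize the MES manager
 * @adev: amdgpu device pointer
 *
 * Sets up the id allocators, the default vmid and hqd masks, the writeback
 * slots used for the scheduler context and query-status fence, and the
 * per-process doorbell layout. Returns 0 on success, negative error code
 * on failure.
 */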
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
		adev->mes.agreegated_doorbells[i] = 0xffffffff;

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_sch_ctx;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
error_sch_ctx:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

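/**
 * amdgpu_mes_fini - tear down the MES manager
 * @adev: amdgpu device pointer
 *
 * Counterpart of amdgpu_mes_init(); releases the writeback slots and
 * destroys the id allocators.
 */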
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

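/**
 * amdgpu_mes_create_process - create a MES process
 * @adev: amdgpu device pointer
 * @pasid: PASID the process is known by
 * @vm: VM the process runs in
 *
 * Allocates the process descriptor, its doorbell bitmap and doorbell slice,
 * and the GPU-visible process context buffer, then registers the process in
 * the pasid idr. Returns 0 on success, negative error code on failure.
 */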
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("failed to allocate mes process\n");
		return -ENOMEM;
	}

	process->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!process->doorbell_bitmap) {
		DRM_ERROR("failed to allocate doorbell bitmap\n");
		kfree(process);
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to add pasid %d to pasid idr\n", pasid);
		goto clean_up_unlock;
	}

	/* allocate the starting doorbell index of the process */
	r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
	if (r < 0) {
		DRM_ERROR("failed to allocate doorbell for process\n");
		goto clean_up_pasid;
	}

	DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_pasid:
	idr_remove(&adev->mes.pasid_idr, pasid);
clean_up_unlock:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process->doorbell_bitmap);
	kfree(process);
	return r;
}

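/**
 * amdgpu_mes_destroy_process - destroy a MES process
 * @adev: amdgpu device pointer
 * @pasid: PASID of the process to destroy
 *
 * Removes all of the process' queues from the MES hardware scheduler,
 * unregisters its ids, and frees every gang, queue and context buffer
 * belonging to it.
 */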
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* remove all queues from the hardware scheduler */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process->doorbell_bitmap);
	kfree(process);
}

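/**
 * amdgpu_mes_add_gang - create a gang within a MES process
 * @adev: amdgpu device pointer
 * @pasid: PASID of the owning process
 * @gprops: requested gang properties (priorities and quantum)
 * @gang_id: filled with the new gang id on success
 *
 * Returns 0 on success, negative error code on failure.
 */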
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

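/**
 * amdgpu_mes_remove_gang - remove an empty gang
 * @adev: amdgpu device pointer
 * @gang_id: id of the gang to remove
 *
 * The gang must not have any queues left; returns -EBUSY otherwise.
 */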
int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

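/**
 * amdgpu_mes_suspend - suspend all gangs known to MES
 * @adev: amdgpu device pointer
 *
 * Walks every process and asks the MES firmware to suspend each of its
 * gangs. Failures are logged but do not abort the walk.
 */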
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

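/**
 * amdgpu_mes_resume - resume all gangs known to MES
 * @adev: amdgpu device pointer
 *
 * Counterpart of amdgpu_mes_suspend().
 */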
int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	/*
	 * The mqd bo is left reserved on success; it is unreserved in
	 * amdgpu_mes_queue_init_mqd() once the mqd has been initialized.
	 */
	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	amdgpu_bo_unreserve(q->mqd_obj);
}

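/**
 * amdgpu_mes_add_hw_queue - create a hardware queue and hand it to MES
 * @adev: amdgpu device pointer
 * @gang_id: gang the queue belongs to
 * @qprops: queue properties; the allocated doorbell offset is written back
 * @queue_id: filled with the new queue id on success
 *
 * Allocates and initializes the MQD, assigns a doorbell from the process
 * slice, and asks the MES firmware to schedule the queue. Returns 0 on
 * success, negative error code on failure.
 */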
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

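/**
 * amdgpu_mes_remove_hw_queue - remove a hardware queue from MES
 * @adev: amdgpu device pointer
 * @queue_id: id of the queue to remove
 *
 * Unschedules the queue on the MES firmware side, then releases its
 * doorbell, MQD and bookkeeping.
 */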
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

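/**
 * amdgpu_mes_unmap_legacy_queue - unmap a kernel (non-MES) queue via MES
 * @adev: amdgpu device pointer
 * @ring: ring backing the legacy queue
 * @action: unmap action to take on the queue
 * @gpu_addr: GPU address for the trailing fence
 * @seq: value the trailing fence signals with
 */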
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	amdgpu_mes_lock(&adev->mes);

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	amdgpu_mes_unlock(&adev->mes);
	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

/* the macro returns directly from the enclosing function on a match */
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

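/**
 * amdgpu_mes_add_ring - create a kernel ring on top of a MES hw queue
 * @adev: amdgpu device pointer
 * @gang_id: gang the new queue joins
 * @queue_type: AMDGPU_RING_TYPE_GFX, _COMPUTE or _SDMA
 * @idx: index of the ring within the ctx metadata for its engine
 * @ctx_data: mapped MES ctx metadata providing ring and slot storage
 * @out: filled with the new ring on success
 *
 * Returns 0 on success, negative error code on failure.
 */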
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped above; don't unlock again */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

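/**
 * amdgpu_mes_ctx_alloc_meta_data - allocate the MES ctx metadata buffer
 * @adev: amdgpu device pointer
 * @ctx_data: holds the resulting BO and CPU pointer
 */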
int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj, NULL,
			    &ctx_data->meta_data_ptr);
	if (r) {
		DRM_ERROR("failed to allocate ctx meta data bo (%d)\n", r);
		return r;
	}

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj, NULL, NULL);
}

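/**
 * amdgpu_mes_ctx_map_meta_data - map the ctx metadata into a VM
 * @adev: amdgpu device pointer
 * @vm: VM to map the metadata buffer into
 * @ctx_data: metadata buffer, mapped at ctx_data->meta_data_gpu_addr
 *
 * Reserves the buffer and the VM page directory, creates the mapping,
 * updates the page tables and waits for the updates to land before
 * returning.
 */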
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);

	csa_tv.bo = &ctx_data->meta_data_obj->tbo;
	csa_tv.num_shared = 1;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
		return r;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	ttm_eu_backoff_reservation(&ticket, &list);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error:
	amdgpu_vm_bo_del(adev, bo_va);
	ttm_eu_backoff_reservation(&ticket, &list);
	amdgpu_sync_free(&sync);
	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	/* report success even on a partial add; the caller tests whatever was created */
	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s test failed (%d)\n",
				      ring->name, r);
			return r;
		}
		DRM_INFO("ring %s test pass\n", ring->name);

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		}
		DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

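/**
 * amdgpu_mes_self_test - sanity test for MES scheduling
 * @adev: amdgpu device pointer
 *
 * Creates a throw-away process and VM, adds a gang per queue type with
 * rings on top, runs the standard ring and IB tests on each, and tears
 * everything down again.
 */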
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX,
				   AMDGPU_MES_CTX_MAX_GFX_RINGS },
				 { AMDGPU_RING_TYPE_COMPUTE,
				   AMDGPU_MES_CTX_MAX_COMPUTE_RINGS },
				 { AMDGPU_RING_TYPE_SDMA,
				   AMDGPU_MES_CTX_MAX_SDMA_RINGS } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!\n");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_vm;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware does not support mapping SDMA queues yet. */
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	/* the metadata may never have been mapped if we failed early */
	if (ctx_data.meta_data_va) {
		BUG_ON(amdgpu_bo_reserve(ctx_data.meta_data_obj, true));
		amdgpu_vm_bo_del(adev, ctx_data.meta_data_va);
		amdgpu_bo_unreserve(ctx_data.meta_data_obj);
	}
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}