/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

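/*
 * Each MES process gets a page-aligned slice of the doorbell BAR:
 * AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS slots of one 64-bit (8-byte)
 * doorbell each, i.e. 8 KiB rounded up to PAGE_SIZE.
 */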
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

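/*
 * Reserve a doorbell slice for a process. Slice indices start at 2; on
 * success the positive slice index is returned and also stored in
 * *doorbell_index, so callers must treat only negative values as errors.
 */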
int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
				      unsigned int *doorbell_index)
{
	int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
			       adev->mes.max_doorbell_slices,
			       GFP_KERNEL);
	if (r > 0)
		*doorbell_index = r;

	return r;
}

void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
				      unsigned int doorbell_index)
{
	if (doorbell_index)
		ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
}

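/*
 * Convert a (process slice, doorbell id) pair into a dword offset from the
 * start of the doorbell BAR: slice base in dwords, plus two dwords per id
 * since every doorbell is 64 bits wide.
 */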
unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
					struct amdgpu_device *adev,
					uint32_t doorbell_index,
					unsigned int doorbell_id)
{
	return ((doorbell_index *
		amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
		doorbell_id * 2);
}

static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;

	if (ip_type == AMDGPU_RING_TYPE_SDMA) {
		offset = adev->doorbell_index.sdma_engine[0];
		found = find_next_zero_bit(process->doorbell_bitmap,
					   AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
					   offset);
	} else {
		found = find_first_zero_bit(process->doorbell_bitmap,
					    AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
	}

	if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, process->doorbell_bitmap);

	*doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
				process->doorbell_index, found);

	return 0;
}

static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, doorbell_id;

	doorbell_id = doorbell_index -
		(process->doorbell_index *
		 amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
	doorbell_id /= 2;

	old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
	WARN_ON(!old);
}

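/*
 * Carve the MES doorbell region out of the aperture left above the
 * kernel's statically assigned doorbell indices, and work out how many
 * whole per-process slices fit in it.
 */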
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	size_t doorbell_start_offset;
	size_t doorbell_aperture_size;
	size_t doorbell_process_limit;

	doorbell_start_offset = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
	doorbell_start_offset =
		roundup(doorbell_start_offset,
			amdgpu_mes_doorbell_process_slice(adev));

	doorbell_aperture_size = adev->doorbell.size;
	doorbell_aperture_size =
			rounddown(doorbell_aperture_size,
				  amdgpu_mes_doorbell_process_slice(adev));

	if (doorbell_aperture_size > doorbell_start_offset)
		doorbell_process_limit =
			(doorbell_aperture_size - doorbell_start_offset) /
			amdgpu_mes_doorbell_process_slice(adev);
	else
		return -ENOSPC;

	adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
	adev->mes.max_doorbell_slices = doorbell_process_limit;

	DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
	return 0;
}

int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	/* only pipe 0 is used; queue 0 is left out of the mask, presumably
	 * for the kernel gfx ring
	 */
	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
		adev->mes.agreegated_doorbells[i] = 0xffffffff;

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

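/*
 * Create a MES process for the given pasid: allocate the process struct,
 * its per-process doorbell bitmap and doorbell slice, and a GTT buffer
 * for the firmware-visible process context, then register it in the
 * pasid idr.
 */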
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	process->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!process->doorbell_bitmap) {
		DRM_ERROR("failed to allocate doorbell bitmap\n");
		kfree(process);
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to insert pasid %d into idr\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		goto clean_up_ctx;
	}

	/* allocate the starting doorbell index of the process */
	r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
	if (r < 0) {
		DRM_ERROR("failed to allocate doorbell for process\n");
		goto clean_up_pasid;
	}

	DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_pasid:
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);
clean_up_ctx:
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process->doorbell_bitmap);
	kfree(process);
	return r;
}

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process->doorbell_bitmap);
	kfree(process);
}

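/*
 * Add a gang to a process. A gang is a group of queues that the MES
 * scheduler maps onto the hardware (and preempts) as a single unit; its
 * scheduling parameters live in a firmware-visible gang context buffer.
 */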
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

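/*
 * Ask the scheduler firmware to suspend every known gang, e.g. around
 * reset or power transitions. Failures are only reported; the walk
 * continues so all gangs get a chance to be suspended.
 */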
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input = {0};
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					 pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input = {0};
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					 pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	/* on success the mqd bo stays reserved until
	 * amdgpu_mes_queue_init_mqd() unreserves it
	 */
	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	amdgpu_bo_unreserve(q->mqd_obj);
}

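/*
 * Create a hardware queue under an existing gang: allocate and initialize
 * an MQD, pick a queue id and a doorbell slot, then hand the whole
 * description to the MES firmware via the add_hw_queue backend hook.
 */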
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	/* translate the page directory address into the MC address space */
	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

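/*
 * Register access routed through MES misc ops: the firmware performs the
 * read/write on the driver's behalf (typically used when direct MMIO
 * access isn't available, e.g. under SR-IOV). A read lands in the
 * read_val writeback slot allocated in amdgpu_mes_init().
 */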
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	uint32_t val = 0;
	int r;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg wait\n");

error:
	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

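/*
 * Map (ring, id_offs) to a byte offset inside struct
 * amdgpu_mes_ctx_meta_data for the given engine: regular slot ids index
 * the per-ring slots[] array, while the special *_OFFS ids select the
 * ring buffer, ib and padding areas.
 */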
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

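/*
 * Create a software ring backed by a MES hardware queue: the ring shares
 * the funcs of the first kernel ring of the same type, lives entirely in
 * the per-context meta data buffer, and is exposed to the scheduler
 * firmware through amdgpu_mes_add_hw_queue(). See amdgpu_mes_self_test()
 * below for the full create-process/add-gang/add-ring sequence.
 */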
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before amdgpu_mes_add_hw_queue() */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r)
		return r;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

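/*
 * Map the context meta data buffer into a process VM at the GPU VA chosen
 * in ctx_data->meta_data_gpu_addr: reserve the BO together with the page
 * directory, create and map a bo_va, then wait for the page table updates
 * to land before the mapping is used.
 */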
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);

	csa_tv.bo = &ctx_data->meta_data_obj->tbo;
	csa_tv.num_shared = 1;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
		return r;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	ttm_eu_backoff_reservation(&ticket, &list);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error:
	amdgpu_vm_bo_del(adev, bo_va);
	ttm_eu_backoff_reservation(&ticket, &list);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct dma_fence *fence = NULL;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	long r = 0;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 2;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%ld)\n", r);
		return r;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s test failed (%d)\n",
				      ring->name, r);
			return r;
		}
		DRM_INFO("ring %s test pass\n", ring->name);

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		}
		DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

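/*
 * End-to-end smoke test of the MES path: allocate a pasid and VM, map the
 * context meta data, create a process plus one gang per queue type, run
 * ring and IB tests on every queue, then tear everything down again.
 */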
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX,
				   AMDGPU_MES_CTX_MAX_GFX_RINGS},
				 { AMDGPU_RING_TYPE_COMPUTE,
				   AMDGPU_MES_CTX_MAX_COMPUTE_RINGS},
				 { AMDGPU_RING_TYPE_SDMA,
				   AMDGPU_MES_CTX_MAX_SDMA_RINGS } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware doesn't support mapping SDMA
		 * queues yet.
		 */
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}