/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

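/*
 * Each process gets its own doorbell "slice": one 8-byte doorbell per
 * queue, for up to AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS queues,
 * rounded up to a whole page (e.g. 8 * 1024 = 8192 bytes, i.e. two 4K
 * pages).
 */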
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

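/*
 * Reserve a doorbell slice index for a process. Allocation starts at
 * index 2; indices 0 and 1 are skipped, presumably reserved for
 * kernel-owned doorbells. Returns a negative errno on failure; on
 * success the allocated index is stored in *doorbell_index.
 */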
int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
				      unsigned int *doorbell_index)
{
	int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
			       adev->mes.max_doorbell_slices,
			       GFP_KERNEL);
	if (r > 0)
		*doorbell_index = r;

	return r;
}

void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
				      unsigned int doorbell_index)
{
	if (doorbell_index)
		ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
}

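/*
 * Convert a (process slice index, per-process doorbell id) pair into a
 * dword offset within the doorbell BAR. Doorbells are 64-bit, hence
 * two dwords per doorbell id. E.g. with an 8K slice, slice index 2 and
 * doorbell id 3 give 2 * 8192 / 4 + 3 * 2 = 4102.
 */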
unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
					struct amdgpu_device *adev,
					uint32_t doorbell_index,
					unsigned int doorbell_id)
{
	return ((doorbell_index *
		amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
		doorbell_id * 2);
}

static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;

	if (ip_type == AMDGPU_RING_TYPE_SDMA) {
		offset = adev->doorbell_index.sdma_engine[0];
		found = find_next_zero_bit(process->doorbell_bitmap,
					   AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
					   offset);
	} else {
		found = find_first_zero_bit(process->doorbell_bitmap,
					    AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
	}

	if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, process->doorbell_bitmap);

	*doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
				process->doorbell_index, found);

	return 0;
}

static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, doorbell_id;

	doorbell_id = doorbell_index -
		(process->doorbell_index *
		 amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
	doorbell_id /= 2;

	old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
	WARN_ON(!old);
}

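/*
 * Carve up the doorbell BAR for MES use: a page-aligned block of
 * aggregated doorbells (one qword per priority level) is placed just
 * past the highest non-MES doorbell assignment, and the space above it
 * is divided into per-process slices.
 */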
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	size_t doorbell_start_offset;
	size_t doorbell_aperture_size;
	size_t doorbell_process_limit;
	size_t aggregated_doorbell_start;
	int i;

	aggregated_doorbell_start = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
	aggregated_doorbell_start =
		roundup(aggregated_doorbell_start, PAGE_SIZE);

	doorbell_start_offset = aggregated_doorbell_start + PAGE_SIZE;
	doorbell_start_offset =
		roundup(doorbell_start_offset,
			amdgpu_mes_doorbell_process_slice(adev));

	doorbell_aperture_size = adev->doorbell.size;
	doorbell_aperture_size =
		rounddown(doorbell_aperture_size,
			  amdgpu_mes_doorbell_process_slice(adev));

	if (doorbell_aperture_size > doorbell_start_offset)
		doorbell_process_limit =
			(doorbell_aperture_size - doorbell_start_offset) /
			amdgpu_mes_doorbell_process_slice(adev);
	else
		return -ENOSPC;

	adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
	adev->mes.max_doorbell_slices = doorbell_process_limit;

	/* allocate Qword range for aggregated doorbell */
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
		adev->mes.aggregated_doorbells[i] =
			aggregated_doorbell_start / sizeof(u32) + i * 2;

	DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
	return 0;
}

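/*
 * One-time MES software init: set up the id allocators and locks,
 * program the default vmid/hqd masks, allocate the writeback slots
 * used with the MES firmware (scheduler context, query-status fence,
 * register read-back), and carve out the doorbell ranges.
 */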
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		else if (adev->sdma.num_instances == 1)
			/* zero sdma_hqd_mask for non-existent engine */
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

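/*
 * Create the MES bookkeeping for a process: the per-process context BO
 * handed to the MES firmware, the doorbell bitmap and slice, and an
 * entry in the pasid idr.
 */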
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	process->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!process->doorbell_bitmap) {
		DRM_ERROR("failed to allocate doorbell bitmap\n");
		kfree(process);
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to lock pasid=%d\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		goto clean_up_ctx;
	}

	/* allocate the starting doorbell index of the process */
	r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
	if (r < 0) {
		DRM_ERROR("failed to allocate doorbell for process\n");
		goto clean_up_pasid;
	}

	DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_pasid:
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);
clean_up_ctx:
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process->doorbell_bitmap);
	kfree(process);
	return r;
}

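/*
 * Tear down a process: unmap every remaining queue from the MES
 * firmware under the MES lock, drop the idr/doorbell bookkeeping, then
 * free the gang and queue memory outside the lock.
 */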
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process->doorbell_bitmap);
	kfree(process);
}

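/*
 * Create a gang (a group of queues scheduled together) under an
 * existing process: allocate and zero the gang context BO, install it
 * in the gang idr, and take the scheduling parameters from the
 * properties, falling back to the device default gang quantum.
 */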
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

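/*
 * Remove a gang; callers must have torn down all of its queues first,
 * otherwise -EBUSY is returned. The gang context BO is freed outside
 * the MES lock.
 */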
int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	amdgpu_bo_unreserve(q->mqd_obj);
}

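/*
 * Add a hardware queue to an existing gang: allocate a queue id and a
 * doorbell, build the MQD, and hand the queue to the MES firmware via
 * the add_hw_queue op together with the process/gang scheduling
 * parameters.
 */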
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

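/*
 * Remove a hardware queue by queue id: drop it from the idr under the
 * queue_id spinlock, ask the MES firmware to unmap it, then release
 * the doorbell and MQD.
 */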
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

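/*
 * Unmap a kernel ("legacy") queue that was not created through
 * amdgpu_mes_add_hw_queue(); the trailing fence address/value appear
 * to let the firmware signal completion of the unmap.
 */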
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

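/*
 * Register access routed through the MES firmware: the read op asks
 * the firmware to copy the register value into the preallocated
 * read_val writeback slot, which is then returned (0 on failure).
 */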
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

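/*
 * Convenience wrapper used by the self test: create an amdgpu_ring
 * backed by an MES hardware queue. The ring borrows the funcs of the
 * first existing ring of the requested type and places its buffers
 * inside the MES context meta-data BO.
 */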
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before adding the hw queue */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						   enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r)
		return r;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

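/*
 * Map the context meta-data BO into a VM at the GPU address chosen by
 * the caller (ctx_data->meta_data_gpu_addr), then wait for the page
 * table updates so the mapping is usable as soon as this returns.
 */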
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);

	csa_tv.bo = &ctx_data->meta_data_obj->tbo;
	csa_tv.num_shared = 1;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
		return r;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	ttm_eu_backoff_reservation(&ticket, &list);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error:
	amdgpu_vm_bo_del(adev, bo_va);
	ttm_eu_backoff_reservation(&ticket, &list);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct dma_fence *fence = NULL;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	long r = 0;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 2;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%ld)\n", r);
		return r;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s test pass\n", ring->name);

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

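/*
 * MES self test: create a test process and VM, map the context meta
 * data, then spin up gangs with GFX, compute and SDMA rings, run ring
 * and IB tests on each, and tear everything down again.
 */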
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX,
				   AMDGPU_MES_CTX_MAX_GFX_RINGS },
				 { AMDGPU_RING_TYPE_COMPUTE,
				   AMDGPU_MES_CTX_MAX_COMPUTE_RINGS },
				 { AMDGPU_RING_TYPE_SDMA,
				   AMDGPU_MES_CTX_MAX_SDMA_RINGS } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware does not yet support mapping
		 * SDMA queues. */
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}