/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

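/*
 * Each doorbell is a 64-bit (qword) slot in the doorbell BAR, so a process
 * "slice" is the space for all of that process' queue doorbells, rounded
 * up to a whole page: 1024 queues * 8 bytes, page aligned.
 */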
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
				      unsigned int *doorbell_index)
{
	int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
			       adev->mes.max_doorbell_slices,
			       GFP_KERNEL);
	if (r > 0)
		*doorbell_index = r;

	return r;
}

void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
				      unsigned int doorbell_index)
{
	if (doorbell_index)
		ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
}

unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
					struct amdgpu_device *adev,
					uint32_t doorbell_index,
					unsigned int doorbell_id)
{
	return ((doorbell_index *
		amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
		doorbell_id * 2);
}

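/*
 * Pick a free doorbell slot within the process slice and convert it to a
 * dword offset into the doorbell BAR; the "* 2" above accounts for each
 * doorbell being two dwords wide. For SDMA queues the search starts at the
 * first SDMA engine's doorbell index, presumably so the slot falls into
 * the range the SDMA engines expect; every other queue type simply takes
 * the first free slot.
 */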
static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;

	if (ip_type == AMDGPU_RING_TYPE_SDMA) {
		offset = adev->doorbell_index.sdma_engine[0];
		found = find_next_zero_bit(process->doorbell_bitmap,
					   AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
					   offset);
	} else {
		found = find_first_zero_bit(process->doorbell_bitmap,
					    AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
	}

	if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, process->doorbell_bitmap);

	*doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
				process->doorbell_index, found);

	return 0;
}

static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, doorbell_id;

	doorbell_id = doorbell_index -
		(process->doorbell_index *
		 amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
	doorbell_id /= 2;

	old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
	WARN_ON(!old);
}

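/*
 * The doorbell BAR is carved up as follows (offsets grow upward):
 *   [0 .. max_assignment]       doorbells owned by the kernel driver
 *   aggregated_doorbell_start   one page of aggregated doorbells, one
 *                               qword per MES priority level
 *   doorbell_start_offset ...   per-process slices, handed out through
 *                               mes.doorbell_ida
 */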
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	size_t doorbell_start_offset;
	size_t doorbell_aperture_size;
	size_t doorbell_process_limit;
	size_t aggregated_doorbell_start;
	int i;

	aggregated_doorbell_start = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
	aggregated_doorbell_start =
		roundup(aggregated_doorbell_start, PAGE_SIZE);

	doorbell_start_offset = aggregated_doorbell_start + PAGE_SIZE;
	doorbell_start_offset =
		roundup(doorbell_start_offset,
			amdgpu_mes_doorbell_process_slice(adev));

	doorbell_aperture_size = adev->doorbell.size;
	doorbell_aperture_size =
			rounddown(doorbell_aperture_size,
				  amdgpu_mes_doorbell_process_slice(adev));

	if (doorbell_aperture_size <= doorbell_start_offset)
		return -ENOSPC;

	doorbell_process_limit =
		(doorbell_aperture_size - doorbell_start_offset) /
		amdgpu_mes_doorbell_process_slice(adev);

	adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
	adev->mes.max_doorbell_slices = doorbell_process_limit;

	/* allocate Qword range for aggregated doorbell */
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
		adev->mes.aggregated_doorbells[i] =
			aggregated_doorbell_start / sizeof(u32) + i * 2;

	DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
	return 0;
}

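/*
 * One-time MES software state init: ID allocators and locks, the HQD
 * scheduling masks for compute/gfx/SDMA, three writeback slots (scheduler
 * context, query-status fence and register read value) and the doorbell
 * layout described above.
 */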
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

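/*
 * A MES process shadows a VM/PASID pair: it owns a page-aligned doorbell
 * slice, a process context BO that the firmware keeps scheduling state in,
 * and a list of gangs. The context BO is allocated before the MES lock is
 * taken so that no other locks are acquired underneath it.
 */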
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	process->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!process->doorbell_bitmap) {
		DRM_ERROR("failed to allocate doorbell bitmap\n");
		kfree(process);
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate pasid=%d\n", pasid);
		/* drop the MES lock here; clean_up_ctx must not unlock */
		amdgpu_mes_unlock(&adev->mes);
		goto clean_up_ctx;
	}

	/* allocate the starting doorbell index of the process */
	r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
	if (r < 0) {
		DRM_ERROR("failed to allocate doorbell for process\n");
		goto clean_up_pasid;
	}

	DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_pasid:
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);
clean_up_ctx:
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process->doorbell_bitmap);
	kfree(process);
	return r;
}

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process->doorbell_bitmap);
	kfree(process);
}

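/*
 * A gang is a group of queues that the firmware schedules as one unit; its
 * gang context BO carries the firmware-visible state, and a zero
 * gang_quantum from the caller falls back to the device default.
 */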
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

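/*
 * Suspend and resume walk every gang of every process under the MES lock
 * and forward the request to the firmware backend one gang at a time;
 * failures are logged but do not abort the walk.
 */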
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

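/*
 * Note the asymmetry: on success the MQD BO is left reserved here and is
 * only unreserved at the end of amdgpu_mes_queue_init_mqd() once the MQD
 * contents have been written.
 */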
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	amdgpu_bo_unreserve(q->mqd_obj);
}

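/*
 * Adding a hardware queue: allocate the MQD, then under the MES lock pick
 * a queue id and a doorbell slot, initialize the MQD from the queue
 * properties, and hand the whole description to the firmware through
 * mes.funcs->add_hw_queue(). The page table base is translated into an MC
 * address before it is passed on.
 */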
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

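/*
 * Register access routed through the MES firmware, for cases where the
 * driver cannot touch the register directly (e.g. under SR-IOV). A read is
 * a misc op that has the firmware deposit the value into the read_val
 * writeback slot set up in amdgpu_mes_init().
 */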
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	uint32_t val = 0;
	int r;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;
	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

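/*
 * Helper for amdgpu_mes_ctx_get_offs(): translate a slot id into the byte
 * offset of the matching member inside struct amdgpu_mes_ctx_meta_data for
 * the given engine array, falling through so the caller can WARN on an
 * unknown id.
 */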
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before amdgpu_mes_add_hw_queue() */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	/* still holding the MES lock on this path */
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						   enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

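/*
 * Map the context meta data BO into the process VM at the GPU VA chosen by
 * the caller (ctx_data->meta_data_gpu_addr) and wait for the page table
 * updates to land before returning, so the mapping is usable immediately.
 */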
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		}

		DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

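/*
 * Self test: build a throwaway VM and MES process, create one
 * single-queue gang per queue type, run ring and IB tests on each queue,
 * then tear everything down again. Test failures are logged; the function
 * itself always returns 0.
 */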
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1} };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!\n");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the MES firmware cannot map SDMA queues yet. */
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

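/*
 * Firmware naming: GC 11+ parts use <prefix>_mes_2.bin for the scheduler
 * pipe and <prefix>_mes1.bin for the other (KIQ) pipe, with a fallback to
 * the legacy <prefix>_mes.bin for the scheduler pipe; older parts use
 * <prefix>_mes.bin and <prefix>_mes1.bin.
 */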
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[40];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}