/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;

static bool kfd_initialized;

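/**
 * amdgpu_amdkfd_init - One-time init of the amdgpu<->amdkfd interface
 *
 * Snapshots the free low system memory as the baseline for the
 * worst-case page-table reservation estimate, initializes the GPUVM
 * memory limits and brings up KFD via kgd2kfd_init(). kfd_initialized
 * records whether that succeeded so per-device probing can bail early.
 */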
int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.freeram - si.freehigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

	ret = kgd2kfd_init();
	amdgpu_amdkfd_gpuvm_init_mem_limits();
	kfd_initialized = !ret;

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kfd_initialized) {
		kgd2kfd_exit();
		kfd_initialized = false;
	}
}

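/**
 * amdgpu_amdkfd_device_probe - Register a GPU with amdkfd
 * @adev: amdgpu_device pointer
 *
 * Creates the KFD device for this adapter (if KFD came up) and, on
 * success, adds the GPU's VRAM to the global memory total used for
 * page-table reservation estimates.
 */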
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	bool vf = amdgpu_sriov_vf(adev);

	if (!kfd_initialized)
		return;

	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
				      adev->pdev, adev->asic_type, vf);

	if (adev->kfd.dev)
		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

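/**
 * amdgpu_amdkfd_device_init - Complete KFD initialization of a device
 * @adev: amdgpu_device pointer
 *
 * Collects the resources shared between amdgpu and amdkfd - compute
 * VMIDs, MEC pipe/queue topology, the GPUVM aperture size and the
 * doorbell layout - into a kgd2kfd_shared_resources struct and passes
 * it to KFD to finish bringing up the device.
 */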
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap =
				((1 << AMDGPU_NUM_VMID) - 1) -
				((1 << adev->vm_manager.first_kfd_vmid) - 1),
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev_to_drm(adev)->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.cp_queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not a compile time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.cp_queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		/* Since SOC15, BIF starts to statically use the
		 * lower 12 bits of doorbell addresses for routing
		 * based on settings in registers like
		 * SDMA0_DOORBELL_RANGE etc.
		 * In order to route a doorbell to the CP engine, the
		 * lower 12 bits of its address have to be outside the
		 * ranges set for the SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}

		adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
						adev_to_drm(adev), &gpu_resources);
	}
}

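/**
 * amdgpu_amdkfd_device_fini - Tear down the KFD side of a device
 * @adev: amdgpu_device pointer
 */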
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
		const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

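/* The suspend/resume and reset hooks below simply forward to KFD when a
 * KFD device exists; run_pm distinguishes runtime PM from system
 * suspend/resume.
 */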
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev, run_pm);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev, run_pm);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}

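/* Called back by KFD when it wants the GPU to be reset, e.g. after
 * detecting a hang.
 */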
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}

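/**
 * amdgpu_amdkfd_alloc_gtt_mem - Allocate a pinned, kernel-mapped GTT BO
 * @kgd: kfd device handle (an amdgpu_device pointer in disguise)
 * @size: allocation size in bytes
 * @mem_obj: output returning the BO handle
 * @gpu_addr: output returning the BO's GART address
 * @cpu_ptr: output returning the kernel CPU mapping
 * @cp_mqd_gfx9: true if the BO will hold a GFX9 CP MQD and needs the
 *               corresponding placement flag
 *
 * The BO is created in GTT, pinned, bound to the GART and kmapped;
 * failures unwind the partial setup in reverse order.
 */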
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool cp_mqd_gfx9)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (cp_mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

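/* Undo amdgpu_amdkfd_alloc_gtt_mem: unmap, unpin and drop the BO ref */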
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
}

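/**
 * amdgpu_amdkfd_alloc_gws - Allocate a global wave sync (GWS) BO
 * @kgd: kfd device handle
 * @size: size of the GWS allocation
 * @mem_obj: output returning the BO handle
 *
 * GWS has no CPU-accessible backing store, so the BO is created in the
 * GWS domain with CPU access disallowed.
 */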
int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
				void **mem_obj)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate gws BO for amdkfd (%d)\n", r);
		return r;
	}

	bo = &ubo->bo;
	*mem_obj = bo;
	return 0;
}

void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}

uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
				      enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	switch (type) {
	case KGD_ENGINE_PFP:
		return adev->gfx.pfp_fw_version;

	case KGD_ENGINE_ME:
		return adev->gfx.me_fw_version;

	case KGD_ENGINE_CE:
		return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	case KGD_ENGINE_RLC:
		return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;

	default:
		return 0;
	}
}

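/**
 * amdgpu_amdkfd_get_local_mem_info - Report VRAM configuration to KFD
 * @kgd: kfd device handle
 * @mem_info: output structure receiving sizes, width and max memory clock
 *
 * Splits VRAM into the CPU-visible (public) and CPU-invisible (private)
 * parts. Clocks are kept in units of 10 kHz, hence the division by 100
 * to report MHz.
 */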
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	memset(mem_info, 0, sizeof(*mem_info));

	mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
	mem_info->local_mem_size_private = adev->gmc.real_vram_size -
						adev->gmc.visible_vram_size;

	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev)) {
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	} else if (adev->pm.dpm_enabled) {
		if (amdgpu_emu_mode == 1)
			mem_info->mem_clk_max = 0;
		else
			mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	} else {
		mem_info->mem_clk_max = 100;
	}
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quanta of 10kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->pm.dpm_enabled)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

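/**
 * amdgpu_amdkfd_get_dmabuf_info - Query KFD-relevant info about a dma-buf
 * @kgd: kfd device handle of the importing device
 * @dma_buf_fd: file descriptor of the dma-buf to query
 * @dma_buf_kgd: optional output returning the exporting device
 * @bo_size: optional output returning the BO size in bytes
 * @metadata_buffer: optional buffer receiving the BO metadata
 * @buffer_size: size of @metadata_buffer
 * @metadata_size: optional output returning the metadata size
 * @flags: optional output returning KFD allocation flags for the BO
 *
 * Returns -EINVAL unless the dma-buf wraps an amdgpu-exported VRAM or
 * GTT BO.
 */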
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dma_buf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev_to_drm(adev)->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = drm_to_adev(obj->dev);
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dma_buf_kgd)
		*dma_buf_kgd = (struct kgd_dev *)adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
	}

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

	return amdgpu_vram_mgr_usage(vram_man);
}

uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.xgmi.hive_id;
}

uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->unique_id;
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{
	struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
	struct amdgpu_device *adev = (struct amdgpu_device *)dst;
	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

	if (ret < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
			adev->gmc.xgmi.physical_node_id,
			peer_adev->gmc.xgmi.physical_node_id, ret);
		ret = 0;
	}
	return (uint8_t)ret;
}

uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->rmmio_remap.bus_addr;
}

uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gds.gws_size;
}

uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->rev_id;
}

int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.noretry;
}

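/**
 * amdgpu_amdkfd_submit_ib - Submit an IB to the GPU on behalf of KFD
 * @kgd: kfd device handle
 * @engine: engine to submit to (MEC1 or one of the SDMA instances)
 * @vmid: VMID the IB executes under
 * @gpu_addr: GPU virtual address of the IB
 * @ib_cmd: CPU pointer to the IB contents
 * @ib_len: IB length in dwords
 *
 * Synchronous submission: waits for the IB's fence before returning.
 */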
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

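/* Switch the COMPUTE power profile on/off as compute work starts/stops */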
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE,
					!idle);
}

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev)
		return vmid >= adev->vm_manager.first_kfd_vmid;

	return false;
}

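/* Flush the GPU TLBs for a VMID; on Vega-family (FAMILY_AI) parts every
 * VM hub must be flushed, other ASICs only use the GFX hub here.
 */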
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->family == AMDGPU_FAMILY_AI) {
		int i;

		for (i = 0; i < adev->num_vmhubs; i++)
			amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
	} else {
		amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
	}

	return 0;
}

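/* Flush the GPU TLBs for the VMID(s) backing the given PASID */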
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	const uint32_t flush_type = 0;
	bool all_hub = false;

	if (adev->family == AMDGPU_FAMILY_AI)
		all_hub = true;

	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
}

bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->have_atomics_support;
}