// SPDX-License-Identifier: MIT
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_pcie.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu_reset.h"

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;

static bool kfd_initialized;

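/**
 * amdgpu_amdkfd_init - Initialize the KFD core and the memory-size estimate
 *
 * Seeds amdgpu_amdkfd_total_mem_size with the amount of low system memory
 * (freeram minus freehigh, scaled by mem_unit); each probed GPU's VRAM is
 * added later in amdgpu_amdkfd_device_init(). Returns the kgd2kfd_init()
 * status and remembers whether it succeeded so that fini can be a no-op.
 */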
int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.freeram - si.freehigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

	ret = kgd2kfd_init();
	kfd_initialized = !ret;

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kfd_initialized) {
		kgd2kfd_exit();
		kfd_initialized = false;
	}
}

void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	bool vf = amdgpu_sriov_vf(adev);

	if (!kfd_initialized)
		return;

	adev->kfd.dev = kgd2kfd_probe(adev, vf);
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                set up amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_kernel_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->enable_mes) {
		/*
		 * With MES enabled, we only need to initialize
		 * the base address. The size and offset are
		 * not initialized as AMDGPU manages the whole
		 * doorbell space.
		 */
		*aperture_base = adev->doorbell.base;
		*aperture_size = 0;
		*start_offset = 0;
	} else if (adev->doorbell.size > adev->doorbell.num_kernel_doorbells *
						sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_kernel_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

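/*
 * Worker scheduled by amdgpu_amdkfd_gpu_reset(): build a default reset
 * context (no preferred reset method, no full-reset request) and run GPU
 * recovery on the device embedding this work item.
 */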
static void amdgpu_amdkfd_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  kfd.reset_work);

	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

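/**
 * amdgpu_amdkfd_device_init - Hand shared hardware resources over to KFD
 *
 * @adev: amdgpu_device pointer
 *
 * Gathers the resources amdgpu shares with amdkfd on this GPU: the compute
 * VMIDs from first_kfd_vmid upward, the compute queue bitmap of the first
 * MEC, the doorbell aperture split, and the GPUVM range below the GMC hole,
 * and passes them to kgd2kfd_device_init().
 */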
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	amdgpu_amdkfd_gpuvm_init_mem_limits();

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap =
				((1 << AMDGPU_NUM_VMID) - 1) -
				((1 << adev->vm_manager.first_kfd_vmid) - 1),
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev_to_drm(adev)->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
			.enable_mes = adev->enable_mes,
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.cp_queue_bitmap,
				  adev->gfx.mec_bitmap[0].queue_bitmap,
				  KGD_MAX_QUEUES);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not a compile-time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.cp_queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		/* Since SOC15, BIF statically uses the lower 12 bits
		 * of doorbell addresses for routing, based on settings
		 * in registers like SDMA0_DOORBELL_RANGE etc.
		 * In order to route a doorbell to the CP engine, the lower
		 * 12 bits of its address have to be outside the ranges
		 * set for the SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}

		adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
							&gpu_resources);

		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;

		INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
	}
}

void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
		amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
		const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev, run_pm);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev, run_pm);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}

void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev)
{
	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_reset_domain_schedule(adev->reset_domain,
					     &adev->kfd.reset_work);
}

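/**
 * amdgpu_amdkfd_alloc_gtt_mem - Allocate a pinned, kernel-mapped GTT BO
 *
 * Creates a GTT buffer object for KFD, pins it, binds it to GART so it has
 * a stable GPU address, and kmaps it for CPU access. On success the BO
 * handle, GPU address and CPU pointer are returned through the output
 * arguments; on failure the steps are unwound in reverse order via the
 * labels below.
 */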
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool cp_mqd_gfx9)
{
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (cp_mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
	struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;

	amdgpu_bo_reserve(*bo, true);
	amdgpu_bo_kunmap(*bo);
	amdgpu_bo_unpin(*bo);
	amdgpu_bo_unreserve(*bo);
	amdgpu_bo_unref(bo);
}

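/*
 * Allocate a buffer object in the GWS (global wave sync) domain. Note the
 * NO_CPU_ACCESS flag and the absence of any pinning or mapping: the BO
 * only reserves a slice of the on-chip GWS resource for the caller.
 */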
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
				void **mem_obj)
{
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate gws BO for amdkfd (%d)\n", r);
		return r;
	}

	bo = &ubo->bo;
	*mem_obj = bo;
	return 0;
}

void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}

uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type)
{
	switch (type) {
	case KGD_ENGINE_PFP:
		return adev->gfx.pfp_fw_version;

	case KGD_ENGINE_ME:
		return adev->gfx.me_fw_version;

	case KGD_ENGINE_CE:
		return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	case KGD_ENGINE_RLC:
		return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;

	default:
		return 0;
	}

	return 0;
}

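/*
 * Report local memory (VRAM) sizes to KFD. With a partition (@xcp) given,
 * the partition's share counts as public when all VRAM is CPU-visible and
 * as private otherwise; for the whole GPU the split follows the CPU-visible
 * aperture. mem_clk_max is in MHz (amdgpu_dpm_get_mclk() reports units of
 * 10 kHz, hence the division by 100), with fallbacks of 0 in emulation and
 * 100 when DPM is disabled.
 */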
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info,
				      struct amdgpu_xcp *xcp)
{
	memset(mem_info, 0, sizeof(*mem_info));

	if (xcp) {
		if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size)
			mem_info->local_mem_size_public =
					KFD_XCP_MEMORY_SIZE(adev, xcp->id);
		else
			mem_info->local_mem_size_private =
					KFD_XCP_MEMORY_SIZE(adev, xcp->id);
	} else {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
						adev->gmc.visible_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (adev->pm.dpm_enabled) {
		if (amdgpu_emu_mode == 1)
			mem_info->mem_clk_max = 0;
		else
			mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	} else
		mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
	/* the sclk is in quanta of 10 kHz */
	if (adev->pm.dpm_enabled)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *cu_info)
{
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(cu_info->cu_bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

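/*
 * Look up an imported dma-buf for KFD. Only buffers exported by amdgpu
 * (same DRM driver, VRAM or GTT preferred domain) are accepted. Note that
 * @adev is re-pointed at the exporting device, which is what comes back in
 * @dmabuf_adev; the dma-buf reference is dropped before returning.
 */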
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags, int8_t *xcp_id)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev_to_drm(adev)->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = drm_to_adev(obj->dev);
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dmabuf_adev)
		*dmabuf_adev = adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
	}
	if (xcp_id)
		*xcp_id = bo->xcp_id;

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src)
{
	struct amdgpu_device *peer_adev = src;
	struct amdgpu_device *adev = dst;
	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

	if (ret < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
			adev->gmc.xgmi.physical_node_id,
			peer_adev->gmc.xgmi.physical_node_id, ret);
		ret = 0;
	}
	return (uint8_t)ret;
}

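/*
 * Estimate xGMI bandwidth between two devices in MB/s. Only Aldebaran is
 * handled; since its xGMI DPM is defeatured, each link is assumed to run
 * at x16 and 25 Gbps, and is_min reports the single-link lower bound.
 */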
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
					    struct amdgpu_device *src,
					    bool is_min)
{
	struct amdgpu_device *adev = dst, *peer_adev;
	int num_links;

	if (adev->asic_type != CHIP_ALDEBARAN)
		return 0;

	if (src)
		peer_adev = src;

	/* num links returns 0 for indirect peers since indirect route is unknown. */
	num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
	if (num_links < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
			adev->gmc.xgmi.physical_node_id,
			peer_adev->gmc.xgmi.physical_node_id, num_links);
		num_links = 0;
	}

	/* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
	return (num_links * 16 * 25000)/BITS_PER_BYTE;
}

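/*
 * Estimate PCIe bandwidth in MB/s from the CAIL link masks: take the lowest
 * (is_min) or highest supported lane-width and gen-speed bits, translate
 * them into a lane count and a per-lane rate in Mbps, and convert bits to
 * bytes. Link encoding overhead is not factored in.
 */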
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
{
	int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
							fls(adev->pm.pcie_mlw_mask)) - 1;
	int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
						CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
					fls(adev->pm.pcie_gen_mask &
						CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;
	uint32_t num_lanes_mask = 1 << num_lanes_shift;
	uint32_t gen_speed_mask = 1 << gen_speed_shift;
	int num_lanes_factor = 0, gen_speed_mbits_factor = 0;

	switch (num_lanes_mask) {
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
		num_lanes_factor = 1;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
		num_lanes_factor = 2;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
		num_lanes_factor = 4;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
		num_lanes_factor = 8;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
		num_lanes_factor = 12;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
		num_lanes_factor = 16;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
		num_lanes_factor = 32;
		break;
	}

	switch (gen_speed_mask) {
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
		gen_speed_mbits_factor = 2500;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
		gen_speed_mbits_factor = 5000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
		gen_speed_mbits_factor = 8000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
		gen_speed_mbits_factor = 16000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
		gen_speed_mbits_factor = 32000;
		break;
	}

	return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
}

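/*
 * Submit a raw IB on behalf of KFD, bypassing the GPU scheduler. Only the
 * first compute ring and the first two SDMA instances can be targeted. The
 * IB is scheduled directly with the caller-supplied VMID (valid for the
 * no-HWS case, per the TODO below) and the call blocks until the fence
 * signals.
 */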
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
				enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;
	job->num_ibs = 1;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);

	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	/* Drop the initial kref_init count (see drm_sched_main as example) */
	dma_fence_put(f);
	ret = dma_fence_wait(f, false);

err_ib_sched:
	amdgpu_job_free(job);
err:
	return ret;
}

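/*
 * Tell the power code whether compute work is pending: enable the COMPUTE
 * power profile while busy, and on GFX11 also keep GFXOFF in step with the
 * idle state as a workaround (see the comment below).
 */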
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
	/* Temporary workaround to fix issues observed in some
	 * compute applications when GFXOFF is enabled on GFX11.
	 */
	if (IP_VERSION_MAJ(adev->ip_versions[GC_HWIP][0]) == 11) {
		pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
		amdgpu_gfx_off_ctrl(adev, idle);
	}
	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE,
					!idle);
}

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev)
		return vmid >= adev->vm_manager.first_kfd_vmid;

	return false;
}

int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
				     uint16_t vmid)
{
	if (adev->family == AMDGPU_FAMILY_AI) {
		int i;

		for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
			amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
	} else {
		amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), 0);
	}

	return 0;
}

int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
				      uint16_t pasid,
				      enum TLB_FLUSH_TYPE flush_type,
				      uint32_t inst)
{
	bool all_hub = false;

	if (adev->family == AMDGPU_FAMILY_AI ||
	    adev->family == AMDGPU_FAMILY_RV)
		all_hub = true;

	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub, inst);
}

bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
{
	return adev->have_atomics_support;
}

void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev)
{
	amdgpu_device_flush_hdp(adev, NULL);
}

void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
{
	amdgpu_umc_poison_handler(adev, reset);
}

int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
					uint32_t *payload)
{
	int ret;

	/* Device or IH ring is not ready so bail. */
	ret = amdgpu_ih_wait_on_checkpoint_process_ts(adev, &adev->irq.ih);
	if (ret)
		return ret;

	/* Send payload to fence KFD interrupts */
	amdgpu_amdkfd_interrupt(adev, payload);

	return 0;
}

bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
{
	if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status)
		return adev->gfx.ras->query_utcl2_poison_status(adev);
	else
		return false;
}

int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
	return kgd2kfd_check_and_lock_kfd();
}

void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
{
	kgd2kfd_unlock_kfd();
}

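/*
 * Return the VRAM size available to one partition (XCP): the memory
 * partition's size divided evenly among the XCPs sharing it, rounded down
 * to a page boundary. Without memory partitioning, the full VRAM size is
 * returned.
 */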
u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
{
	u64 tmp;
	s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id);

	if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) {
		tmp = adev->gmc.mem_partitions[mem_id].size;
		do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
		return ALIGN_DOWN(tmp, PAGE_SIZE);
	} else {
		return adev->gmc.real_vram_size;
	}
}

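/*
 * Ask the KIQ to unmap a HIQ known only by its doorbell offset. A
 * throw-away compute-typed amdgpu_ring carrying that doorbell index is
 * faked up so kiq_unmap_queues() has something to address; the KIQ ring
 * is then tested to confirm the unmap packet was consumed.
 */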
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
			    u32 inst)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	struct amdgpu_ring_funcs *ring_funcs;
	struct amdgpu_ring *ring;
	int r = 0;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
	if (!ring_funcs)
		return -ENOMEM;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		r = -ENOMEM;
		goto free_ring_funcs;
	}

	ring_funcs->type = AMDGPU_RING_TYPE_COMPUTE;
	ring->doorbell_index = doorbell_off;
	ring->funcs = ring_funcs;

	spin_lock(&kiq->ring_lock);

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock(&kiq->ring_lock);
		r = -ENOMEM;
		goto free_ring;
	}

	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);

	if (kiq_ring->sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);

	spin_unlock(&kiq->ring_lock);

free_ring:
	kfree(ring);

free_ring_funcs:
	kfree(ring_funcs);

	return r;
}