/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>
/* amdkfd function table, filled in by kgd2kfd_init() */
const struct kgd2kfd_calls *kgd2kfd;

/* VMIDs 8-15 are reserved for amdkfd compute contexts */
static const unsigned int compute_vmid_bitmap = 0xFF00;

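/**
 * amdgpu_amdkfd_init - Locate amdkfd and perform one-time global init
 *
 * When amdkfd is a module (CONFIG_HSA_AMD_MODULE), its kgd2kfd_init entry
 * point is resolved at runtime through symbol_request(), which also pins
 * the module. When amdkfd is built in (CONFIG_HSA_AMD), kgd2kfd_init is
 * called directly. In both cases a successful call fills in the kgd2kfd
 * function table; on failure kgd2kfd stays NULL and all later
 * amdgpu_amdkfd_* calls become no-ops. Also initializes the KFD
 * memory-accounting limits whenever HSA support is compiled in.
 *
 * Return: 0 on success, -ENOENT if amdkfd is unavailable, or the error
 * returned by kgd2kfd_init().
 */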
int amdgpu_amdkfd_init(void)
{
	int ret;

#if defined(CONFIG_HSA_AMD_MODULE)
	int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return -ENOENT;

	ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret) {
		symbol_put(kgd2kfd_init);
		kgd2kfd = NULL;
	}

#elif defined(CONFIG_HSA_AMD)

	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;

#else
	kgd2kfd = NULL;
	ret = -ENOENT;
#endif

#if defined(CONFIG_HSA_AMD_MODULE) || defined(CONFIG_HSA_AMD)
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#endif

	return ret;
}

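/*
 * Counterpart of amdgpu_amdkfd_init(): lets amdkfd clean up and drops the
 * module reference that symbol_request() may have taken (a no-op when
 * amdkfd is built in).
 */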
void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
		symbol_put(kgd2kfd_init);
	}
}

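/**
 * amdgpu_amdkfd_device_probe - Attach a KFD instance to this GPU
 * @adev: amdgpu device
 *
 * Picks the kfd2kgd function table matching the ASIC generation (GFX7 for
 * Kaveri/Hawaii, GFX8 for the VI family, GFX9 for Vega/Raven) and hands it
 * to amdkfd's probe callback. ASICs without a table are simply not exposed
 * to KFD.
 */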
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	const struct kfd2kgd_calls *kfd2kgd;

	if (!kgd2kfd)
		return;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
		break;
	default:
		dev_info(adev->dev, "kfd not supported on this ASIC\n");
		return;
	}

	adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
				   adev->pdev, kfd2kgd);
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                set up amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning number of doorbell bytes reserved for
 *                amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes the doorbells required for its own rings and reports the setup to
 * amdkfd. amdgpu reserved doorbells are at the start of the doorbell
 * aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

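/**
 * amdgpu_amdkfd_device_init - Share GPU resources with amdkfd
 * @adev: amdgpu device
 *
 * Builds a kgd2kfd_shared_resources descriptor (compute VMIDs, the MEC
 * queues amdgpu is not using itself, the GPUVM address range, the doorbell
 * layout and the render node minor) and passes it to amdkfd's device_init
 * callback.
 */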
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = compute_vmid_bitmap,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_VA_HOLE_START),
			.drm_render_minor = adev->ddev->render->index
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
							  adev->gfx.kiq.ring.me - 1,
							  adev->gfx.kiq.ring.pipe,
							  adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not a compile-time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		if (adev->asic_type >= CHIP_VEGA10) {
			/* On SOC15 the BIF is involved in routing
			 * doorbells using the low 12 bits of the
			 * address. Communicate the assignments to
			 * KFD. KFD uses two doorbell pages per
			 * process in case of 64-bit doorbells so we
			 * can use each doorbell assignment twice.
			 */
			gpu_resources.sdma_doorbell[0][0] =
				AMDGPU_DOORBELL64_sDMA_ENGINE0;
			gpu_resources.sdma_doorbell[0][1] =
				AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
			gpu_resources.sdma_doorbell[1][0] =
				AMDGPU_DOORBELL64_sDMA_ENGINE1;
			gpu_resources.sdma_doorbell[1][1] =
				AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;

			/* Doorbells 0x0f0-0ff and 0x2f0-2ff are reserved for
			 * SDMA, IH and VCN. So don't use them for the CP.
			 */
			gpu_resources.reserved_doorbell_mask = 0x1f0;
			gpu_resources.reserved_doorbell_val  = 0x0f0;
		}

		kgd2kfd->device_init(adev->kfd, &gpu_resources);
	}
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd) {
		kgd2kfd->device_exit(adev->kfd);
		adev->kfd = NULL;
	}
}

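/*
 * The thin wrappers below forward interrupt-ring entries and power
 * management transitions to amdkfd, but only while a KFD instance is
 * attached to the device.
 */
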
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
		const void *ih_ring_entry)
{
	if (adev->kfd)
		kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd)
		kgd2kfd->suspend(adev->kfd);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->resume(adev->kfd);

	return r;
}

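/*
 * amdgpu_amdkfd_pre_reset() and amdgpu_amdkfd_post_reset() bracket a GPU
 * reset so amdkfd can quiesce its queues and processes beforehand and
 * restore them afterwards.
 */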
int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->pre_reset(adev->kfd);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->post_reset(adev->kfd);

	return r;
}

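/*
 * Entry point for amdkfd to request full GPU recovery, e.g. after
 * detecting a hang on one of its queues.
 */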
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	amdgpu_device_gpu_recover(adev, NULL, false);
}

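/**
 * alloc_gtt_mem - Allocate a pinned, kernel-mapped GTT buffer for amdkfd
 * @kgd: opaque device handle (really an amdgpu_device pointer)
 * @size: requested size in bytes
 * @mem_obj: output, opaque BO handle to hand back to free_gtt_mem()
 * @gpu_addr: output, GPU address of the buffer
 * @cpu_ptr: output, kernel CPU mapping of the buffer
 *
 * Creates a GTT BO, pins it, binds it into the GART and kmaps it, unwinding
 * each step on failure. Illustrative usage from the amdkfd side (error
 * handling omitted):
 *
 *	void *mem, *cpu;
 *	uint64_t gpu;
 *
 *	alloc_gtt_mem(kgd, PAGE_SIZE, &mem, &gpu, &cpu);
 *	... write through cpu, point the hardware at gpu ...
 *	free_gtt_mem(kgd, mem);
 *
 * Return: 0 on success or a negative errno.
 */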
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

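/*
 * free_gtt_mem - Undo alloc_gtt_mem(): unmap, unpin and drop the last
 * reference on the BO, which frees it.
 */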
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
}

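/**
 * get_local_mem_info - Report VRAM topology and memory clock to amdkfd
 * @kgd: opaque device handle (really an amdgpu_device pointer)
 * @mem_info: output structure
 *
 * VRAM that the CPU can reach through the BAR (i.e. within the device's
 * DMA mask) is reported as public memory, the remainder as private.
 * mem_clk_max ends up in MHz: the DPM framework reports clocks in 10 kHz
 * units, hence the division by 100.
 */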
void get_local_mem_info(struct kgd_dev *kgd,
			struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->powerplay.pp_funcs)
		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	else
		mem_info->mem_clk_max = 100;
}

uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quanta of 10 kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->powerplay.pp_funcs)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

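/*
 * Translate the compute-unit topology amdgpu discovered (active CU count
 * and bitmap, SE/SH geometry, SIMD and wavefront parameters, LDS size)
 * into amdkfd's kfd_cu_info layout. The early return guards against the
 * two bitmap definitions ever diverging in size.
 */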
void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

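/**
 * amdgpu_amdkfd_submit_ib - Synchronously run an IB on behalf of amdkfd
 * @kgd: opaque device handle (really an amdgpu_device pointer)
 * @engine: target engine; the first compute ring or one of the SDMA rings
 * @vmid: VMID the IB executes under (no-HWS mode, see the TODO below)
 * @gpu_addr: GPU VA of the IB
 * @ib_cmd: CPU pointer to the IB contents
 * @ib_len: IB length in dwords
 *
 * Wraps the IB in a single-use job, schedules it on the chosen ring and
 * blocks on its fence, so this is a slow path intended for occasional
 * setup/teardown work rather than regular queue submission.
 *
 * Return: 0 on success or a negative errno.
 */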
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

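/*
 * amdkfd is expected to call this with idle == false while it has runnable
 * compute queues and with idle == true once they drain, switching the
 * COMPUTE power profile on and off accordingly.
 */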
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE, !idle);
}

/* Report whether @vmid is one of the VMIDs reserved for KFD compute */
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd) {
		if ((1 << vmid) & compute_vmid_bitmap)
			return true;
	}

	return false;
}

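/*
 * Stubs for kernels built without amdkfd support (neither CONFIG_HSA_AMD
 * nor CONFIG_HSA_AMD_MODULE): they satisfy the symbols the rest of amdgpu
 * references so the driver still links.
 */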
#if !defined(CONFIG_HSA_AMD_MODULE) && !defined(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return NULL;
}
#endif