/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>

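/*
 * kgd2kfd is the call table exported by the amdkfd (KFD) driver.  It remains
 * NULL when amdkfd is not available or failed to initialize, and the entry
 * points below check for that before calling into KFD.
 */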
const struct kgd2kfd_calls *kgd2kfd;
int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

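/*
 * Resolve and call the KFD entry point.  With amdkfd built as a module
 * (CONFIG_HSA_AMD_MODULE), kgd2kfd_init is looked up at runtime via
 * symbol_request(), which also takes a reference on the amdkfd module.
 * With amdkfd built in (CONFIG_HSA_AMD), it is called directly.
 */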
int amdgpu_amdkfd_init(void)
{
	int ret;

#if defined(CONFIG_HSA_AMD_MODULE)
	int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return -ENOENT;

	ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret) {
		symbol_put(kgd2kfd_init);
		kgd2kfd = NULL;
	}

#elif defined(CONFIG_HSA_AMD)
	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;

#else
	ret = -ENOENT;
#endif

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
		symbol_put(kgd2kfd_init);
	}
}

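/*
 * Select the kfd2kgd callback table that matches this ASIC's gfx generation
 * and register the device with KFD.  adev->kfd stays NULL for unsupported
 * ASICs, which turns the remaining amdgpu_amdkfd_* calls into no-ops.
 */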
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	const struct kfd2kgd_calls *kfd2kgd;

	if (!kgd2kfd)
		return;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	default:
		dev_info(adev->dev, "kfd not supported on this ASIC\n");
		return;
	}

	adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
				   adev->pdev, kfd2kgd);
}

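/*
 * Hand KFD its share of the hardware resources: VMIDs 8-15
 * (compute_vmid_bitmap = 0xFF00), the first-MEC compute queues that amdgpu
 * does not use itself (the KIQ queue is excluded as well), and the KFD
 * portion of the doorbell aperture.
 */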
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = 0xFF00,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
							  adev->gfx.kiq.ring.me - 1,
							  adev->gfx.kiq.ring.pipe,
							  adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not a compile-time constant */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		kgd2kfd->device_init(adev->kfd, &gpu_resources);
	}
}

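/* Tear down the KFD side of this device. */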
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd) {
		kgd2kfd->device_exit(adev->kfd);
		adev->kfd = NULL;
	}
}

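/*
 * The wrappers below simply forward interrupt and power-management events
 * to KFD; they are safe to call even when no KFD device was created.
 */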
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
		const void *ih_ring_entry)
{
	if (adev->kfd)
		kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd)
		kgd2kfd->suspend(adev->kfd);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->resume(adev->kfd);

	return r;
}

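/*
 * Allocate a GTT buffer object for KFD, pin it and map it into the kernel
 * address space.  KFD typically uses this for small GART-resident buffers
 * (scheduler structures such as MQDs).
 */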
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
	int r;

	BUG_ON(kgd == NULL);
	BUG_ON(gpu_addr == NULL);
	BUG_ON(cpu_ptr == NULL);

	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if ((*mem) == NULL)
		return -ENOMEM;

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0,
			     &(*mem)->bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		kfree(*mem);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve((*mem)->bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
				&(*mem)->gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}
	*gpu_addr = (*mem)->gpu_addr;

	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}
	*cpu_ptr = (*mem)->cpu_ptr;

	amdgpu_bo_unreserve((*mem)->bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin((*mem)->bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve((*mem)->bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&(*mem)->bo);
	kfree(*mem);

	return r;
}

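/* Undo alloc_gtt_mem(): unmap, unpin and release the buffer object. */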
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;

	BUG_ON(mem == NULL);

	amdgpu_bo_reserve(mem->bo, true);
	amdgpu_bo_kunmap(mem->bo);
	amdgpu_bo_unpin(mem->bo);
	amdgpu_bo_unreserve(mem->bo);
	amdgpu_bo_unref(&(mem->bo));
	kfree(mem);
}

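/* Report the amount of VRAM on this device. */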
uint64_t get_vmem_size(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)kgd;

	BUG_ON(kgd == NULL);

	return adev->mc.real_vram_size;
}

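/* Read the GPU's free-running clock counter, if the gfx block provides one. */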
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

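/* Maximum shader engine clock on AC power, in MHz. */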
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* The sclk is stored in units of 10 kHz */
	return adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
}