/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>

const struct kfd2kgd_calls *kfd2kgd;
const struct kgd2kfd_calls *kgd2kfd;
int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

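/*
 * Bind the kgd2kfd interface exported by amdkfd.  When amdkfd is built as
 * a module, grab a reference to its kgd2kfd_init symbol (dropped again if
 * initialization fails); when it is built in, call kgd2kfd_init() directly.
 * Returns -ENOENT if amdkfd is not available at all.
 */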
int amdgpu_amdkfd_init(void)
{
	int ret;

#if defined(CONFIG_HSA_AMD_MODULE)
	int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return -ENOENT;

	ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret) {
		symbol_put(kgd2kfd_init);
		kgd2kfd = NULL;
	}

#elif defined(CONFIG_HSA_AMD)
	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;

#else
	ret = -ENOENT;
#endif

	return ret;
}

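/*
 * Select the kfd2kgd function table that matches this ASIC generation.
 * Returns false for ASICs that have no KFD support wired up here.
 */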
bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	default:
		return false;
	}

	return true;
}

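/*
 * Tear down the kgd2kfd interface and drop the module reference taken in
 * amdgpu_amdkfd_init().
 */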
void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
		symbol_put(kgd2kfd_init);
	}
}

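/* Register this GPU with amdkfd; adev->kfd is only set if probing succeeds. */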
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	if (kgd2kfd)
		adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
					adev->pdev, kfd2kgd);
}

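/*
 * Hand amdkfd its share of this GPU's resources: the compute VMIDs, the
 * MEC queues left over once amdgpu's own compute rings and the KIQ are
 * masked out, and the slice of the doorbell aperture set aside for amdkfd.
 */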
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = 0xFF00,
			.num_mec = adev->gfx.mec.num_mec,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
							  adev->gfx.kiq.ring.me - 1,
							  adev->gfx.kiq.ring.pipe,
							  adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not a compile time constant
		 */
		last_valid_bit = adev->gfx.mec.num_mec
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		kgd2kfd->device_init(adev->kfd, &gpu_resources);
	}
}

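/* Detach this GPU from amdkfd and forget the handle. */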
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd) {
		kgd2kfd->device_exit(adev->kfd);
		adev->kfd = NULL;
	}
}

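/* Forward an interrupt-handler ring entry to amdkfd. */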
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
		const void *ih_ring_entry)
{
	if (adev->kfd)
		kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
}

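/* Tell amdkfd the GPU is about to be suspended. */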
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd)
		kgd2kfd->suspend(adev->kfd);
}

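/* Tell amdkfd the GPU has resumed; returns amdkfd's resume result. */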
int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->resume(adev->kfd);

	return r;
}

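/*
 * Allocate a pinned, CPU-mapped buffer in GTT on behalf of amdkfd.  On
 * success *mem_obj carries an opaque handle for free_gtt_mem(), and the
 * GPU and kernel CPU addresses are returned through gpu_addr and cpu_ptr.
 */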
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
	int r;

	BUG_ON(kgd == NULL);
	BUG_ON(gpu_addr == NULL);
	BUG_ON(cpu_ptr == NULL);

	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if ((*mem) == NULL)
		return -ENOMEM;

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		/* free the kgd_mem wrapper we just allocated */
		kfree(*mem);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve((*mem)->bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
				&(*mem)->gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}
	*gpu_addr = (*mem)->gpu_addr;

	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}
	*cpu_ptr = (*mem)->cpu_ptr;

	amdgpu_bo_unreserve((*mem)->bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin((*mem)->bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve((*mem)->bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&(*mem)->bo);
	/* also release the kgd_mem wrapper on the error paths */
	kfree(*mem);

	return r;
}

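/* Undo alloc_gtt_mem(): unmap, unpin and free the buffer and its wrapper. */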
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;

	BUG_ON(mem == NULL);

	amdgpu_bo_reserve(mem->bo, true);
	amdgpu_bo_kunmap(mem->bo);
	amdgpu_bo_unpin(mem->bo);
	amdgpu_bo_unreserve(mem->bo);
	amdgpu_bo_unref(&(mem->bo));
	kfree(mem);
}

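/* Report the total amount of VRAM on this GPU to amdkfd. */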
uint64_t get_vmem_size(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)kgd;

	BUG_ON(kgd == NULL);

	return adev->mc.real_vram_size;
}

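/*
 * Sample the free-running GPU clock counter, or return 0 if this ASIC
 * does not expose one.
 */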
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

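/*
 * Maximum engine (shader) clock in MHz.  The DPM table stores sclk in
 * units of 10 kHz, hence the division by 100.
 */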
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* The sclk is in units of 10 kHz */
	return adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
}