1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 
23 #include "amdgpu_amdkfd.h"
24 #include "amd_shared.h"
25 #include <drm/drmP.h>
26 #include "amdgpu.h"
27 #include "amdgpu_gfx.h"
28 #include <linux/module.h>
29 
/* Per-ASIC function table amdgpu exposes to amdkfd. */
const struct kfd2kgd_calls *kfd2kgd;
/* Function table amdkfd exposes to amdgpu; NULL when amdkfd is absent. */
const struct kgd2kfd_calls *kgd2kfd;
/*
 * kgd2kfd_init() returns an int error code (0 or -errno), not bool —
 * see its use in amdgpu_amdkfd_init() where the result is tested as an
 * errno. Declare the pointer with the matching return type.
 */
int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
33 
/*
 * amdgpu_amdkfd_init - bind amdgpu to the amdkfd driver.
 *
 * Calls kgd2kfd_init() to obtain amdkfd's kgd2kfd_calls table, storing it
 * in the file-scope @kgd2kfd pointer. How the call is made depends on how
 * amdkfd was built (module, built-in, or not at all).
 *
 * Return: 0 on success, -ENOENT if amdkfd is unavailable, or the error
 * returned by kgd2kfd_init(). On any failure @kgd2kfd is left NULL.
 */
int amdgpu_amdkfd_init(void)
{
	int ret;

#if defined(CONFIG_HSA_AMD_MODULE)
	/*
	 * amdkfd is a module: resolve kgd2kfd_init at runtime via
	 * symbol_request(), which also pins the amdkfd module.
	 * NOTE(review): this local shadows the file-scope kgd2kfd_init_p,
	 * which is declared with a mismatched (bool) return type and appears
	 * unused — confirm and consider removing the global.
	 */
	int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return -ENOENT;

	ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret) {
		/* Init failed: drop the module reference taken above and
		 * clear the table so later calls treat amdkfd as absent. */
		symbol_put(kgd2kfd_init);
		kgd2kfd = NULL;
	}

#elif defined(CONFIG_HSA_AMD)
	/* amdkfd is built in: call it directly. */
	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;

#else
	/* amdkfd support was not configured into this kernel. */
	ret = -ENOENT;
#endif

	return ret;
}
63 
64 bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev)
65 {
66 	switch (adev->asic_type) {
67 #ifdef CONFIG_DRM_AMDGPU_CIK
68 	case CHIP_KAVERI:
69 		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
70 		break;
71 #endif
72 	case CHIP_CARRIZO:
73 		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
74 		break;
75 	default:
76 		return false;
77 	}
78 
79 	return true;
80 }
81 
82 void amdgpu_amdkfd_fini(void)
83 {
84 	if (kgd2kfd) {
85 		kgd2kfd->exit();
86 		symbol_put(kgd2kfd_init);
87 	}
88 }
89 
90 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
91 {
92 	if (kgd2kfd)
93 		adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
94 					adev->pdev, kfd2kgd);
95 }
96 
/*
 * amdgpu_amdkfd_device_init - hand shared GPU resources over to amdkfd.
 *
 * Builds a kgd2kfd_shared_resources describing which VMIDs, compute
 * queues and doorbell range amdkfd may use on @adev, then calls
 * kgd2kfd->device_init(). No-op when amdkfd did not probe this device.
 */
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;
	if (adev->kfd) {
		/* amdkfd gets VMIDs 8-15 (upper half of the 16-bit bitmap);
		 * amdgpu keeps the lower eight. */
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = 0xFF00,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
		};

		/* amdkfd may use every MEC queue amdgpu did NOT claim, so
		 * start from the complement of amdgpu's queue bitmap.
		 * this is going to have a few of the MSBs set that we need to
		 * clear */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
							  adev->gfx.kiq.ring.me - 1,
							  adev->gfx.kiq.ring.pipe,
							  adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* Clear the complement's spurious high bits: everything at or
		 * beyond the last real queue index is invalid.
		 * According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		/* Tell amdkfd where its doorbell aperture lives. */
		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		kgd2kfd->device_init(adev->kfd, &gpu_resources);
	}
}
138 
139 void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
140 {
141 	if (adev->kfd) {
142 		kgd2kfd->device_exit(adev->kfd);
143 		adev->kfd = NULL;
144 	}
145 }
146 
147 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
148 		const void *ih_ring_entry)
149 {
150 	if (adev->kfd)
151 		kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
152 }
153 
154 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
155 {
156 	if (adev->kfd)
157 		kgd2kfd->suspend(adev->kfd);
158 }
159 
160 int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
161 {
162 	int r = 0;
163 
164 	if (adev->kfd)
165 		r = kgd2kfd->resume(adev->kfd);
166 
167 	return r;
168 }
169 
170 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
171 			void **mem_obj, uint64_t *gpu_addr,
172 			void **cpu_ptr)
173 {
174 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
175 	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
176 	int r;
177 
178 	BUG_ON(kgd == NULL);
179 	BUG_ON(gpu_addr == NULL);
180 	BUG_ON(cpu_ptr == NULL);
181 
182 	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
183 	if ((*mem) == NULL)
184 		return -ENOMEM;
185 
186 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
187 			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
188 	if (r) {
189 		dev_err(adev->dev,
190 			"failed to allocate BO for amdkfd (%d)\n", r);
191 		return r;
192 	}
193 
194 	/* map the buffer */
195 	r = amdgpu_bo_reserve((*mem)->bo, true);
196 	if (r) {
197 		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
198 		goto allocate_mem_reserve_bo_failed;
199 	}
200 
201 	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
202 				&(*mem)->gpu_addr);
203 	if (r) {
204 		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
205 		goto allocate_mem_pin_bo_failed;
206 	}
207 	*gpu_addr = (*mem)->gpu_addr;
208 
209 	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
210 	if (r) {
211 		dev_err(adev->dev,
212 			"(%d) failed to map bo to kernel for amdkfd\n", r);
213 		goto allocate_mem_kmap_bo_failed;
214 	}
215 	*cpu_ptr = (*mem)->cpu_ptr;
216 
217 	amdgpu_bo_unreserve((*mem)->bo);
218 
219 	return 0;
220 
221 allocate_mem_kmap_bo_failed:
222 	amdgpu_bo_unpin((*mem)->bo);
223 allocate_mem_pin_bo_failed:
224 	amdgpu_bo_unreserve((*mem)->bo);
225 allocate_mem_reserve_bo_failed:
226 	amdgpu_bo_unref(&(*mem)->bo);
227 
228 	return r;
229 }
230 
231 void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
232 {
233 	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
234 
235 	BUG_ON(mem == NULL);
236 
237 	amdgpu_bo_reserve(mem->bo, true);
238 	amdgpu_bo_kunmap(mem->bo);
239 	amdgpu_bo_unpin(mem->bo);
240 	amdgpu_bo_unreserve(mem->bo);
241 	amdgpu_bo_unref(&(mem->bo));
242 	kfree(mem);
243 }
244 
245 uint64_t get_vmem_size(struct kgd_dev *kgd)
246 {
247 	struct amdgpu_device *adev =
248 		(struct amdgpu_device *)kgd;
249 
250 	BUG_ON(kgd == NULL);
251 
252 	return adev->mc.real_vram_size;
253 }
254 
255 uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
256 {
257 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
258 
259 	if (adev->gfx.funcs->get_gpu_clock_counter)
260 		return adev->gfx.funcs->get_gpu_clock_counter(adev);
261 	return 0;
262 }
263 
264 uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
265 {
266 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
267 
268 	/* The sclk is in quantas of 10kHz */
269 	return adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
270 }
271