/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include <linux/module.h>

const struct kfd2kgd_calls *kfd2kgd;
const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

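/*
 * Bind to the amdkfd (KFD) interface.  When amdkfd is built as a module,
 * take a reference on its kgd2kfd_init symbol at runtime; when it is built
 * in, call it directly.  On success kgd2kfd points at the KFD callback
 * table; on failure it stays NULL and -ENOENT (or the KFD error) is
 * returned.
 */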
int amdgpu_amdkfd_init(void)
{
	int ret;

#if defined(CONFIG_HSA_AMD_MODULE)
	int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return -ENOENT;

	ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret) {
		symbol_put(kgd2kfd_init);
		kgd2kfd = NULL;
	}

#elif defined(CONFIG_HSA_AMD)
	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;

#else
	ret = -ENOENT;
#endif

	return ret;
}

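/*
 * Select the ASIC-specific kfd2kgd function table for this GPU: gfx v7
 * callbacks for Kaveri (when CIK support is compiled in) and gfx v8
 * callbacks for Carrizo.  Returns false for ASICs amdkfd does not support.
 */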
bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
{
	switch (rdev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	default:
		return false;
	}

	return true;
}

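/*
 * Tear down the interface set up in amdgpu_amdkfd_init() and, when amdkfd
 * was loaded as a module, drop the reference taken on kgd2kfd_init.
 */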
void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
		symbol_put(kgd2kfd_init);
	}
}

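/*
 * Register this GPU with amdkfd, handing it the device's pci_dev and the
 * kfd2kgd callbacks selected in amdgpu_amdkfd_load_interface().
 */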
void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
{
	if (kgd2kfd)
		rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
					rdev->pdev, kfd2kgd);
}

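/*
 * Hand the shared hardware resources to amdkfd: VMIDs 8-15 (bitmap 0xFF00)
 * are reserved for KFD compute work, the first compute pipe stays with
 * amdgpu while pipes 1-3 are given to KFD, and the doorbell aperture
 * layout is queried from the device.
 */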
void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
{
	if (rdev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = 0xFF00,

			.first_compute_pipe = 1,
			.compute_pipe_count = 4 - 1,
		};

		amdgpu_doorbell_get_kfd_info(rdev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		kgd2kfd->device_init(rdev->kfd, &gpu_resources);
	}
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
{
	if (rdev->kfd) {
		kgd2kfd->device_exit(rdev->kfd);
		rdev->kfd = NULL;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
		const void *ih_ring_entry)
{
	if (rdev->kfd)
		kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
{
	if (rdev->kfd)
		kgd2kfd->suspend(rdev->kfd);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
{
	int r = 0;

	if (rdev->kfd)
		r = kgd2kfd->resume(rdev->kfd);

	return r;
}

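/* Map a KFD memory pool onto the amdgpu GEM domain that backs it. */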
u32 pool_to_domain(enum kgd_memory_pool p)
{
	switch (p) {
	case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
	default: return AMDGPU_GEM_DOMAIN_GTT;
	}
}

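/*
 * Allocate a GTT buffer object for amdkfd, pin it and map it into the
 * kernel address space, returning both its GPU and CPU addresses.  The
 * opaque handle stored in *mem_obj is later passed back to free_gtt_mem().
 */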
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr)
{
	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
	int r;

	BUG_ON(kgd == NULL);
	BUG_ON(gpu_addr == NULL);
	BUG_ON(cpu_ptr == NULL);

	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if ((*mem) == NULL)
		return -ENOMEM;

	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
	if (r) {
		dev_err(rdev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		kfree(*mem);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve((*mem)->bo, true);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
				&(*mem)->gpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}
	*gpu_addr = (*mem)->gpu_addr;

	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
	if (r) {
		dev_err(rdev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}
	*cpu_ptr = (*mem)->cpu_ptr;

	amdgpu_bo_unreserve((*mem)->bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin((*mem)->bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve((*mem)->bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&(*mem)->bo);
	/* don't leak the bookkeeping struct on any error path */
	kfree(*mem);

	return r;
}

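/*
 * Undo alloc_gtt_mem(): unmap, unpin and release the buffer object, then
 * free the bookkeeping structure.
 */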
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;

	BUG_ON(mem == NULL);

	amdgpu_bo_reserve(mem->bo, true);
	amdgpu_bo_kunmap(mem->bo);
	amdgpu_bo_unpin(mem->bo);
	amdgpu_bo_unreserve(mem->bo);
	amdgpu_bo_unref(&(mem->bo));
	kfree(mem);
}

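/* Report the amount of VRAM present on this GPU. */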
uint64_t get_vmem_size(struct kgd_dev *kgd)
{
	struct amdgpu_device *rdev =
		(struct amdgpu_device *)kgd;

	BUG_ON(kgd == NULL);

	return rdev->mc.real_vram_size;
}

uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;

	if (rdev->gfx.funcs->get_gpu_clock_counter)
		return rdev->gfx.funcs->get_gpu_clock_counter(rdev);
	return 0;
}

uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;

	/* The sclk is reported in units of 10 kHz; divide by 100 to get MHz */
	return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
}