1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  *
23  */
24 #include <linux/list.h>
25 #include "amdgpu.h"
26 #include "amdgpu_xgmi.h"
27 #include "amdgpu_smu.h"
28 
29 
/* Serializes all access to the global hive table below (xgmi_hives/hive_count). */
static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_HIVE			8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

/*
 * Global table of known hives. Slots are handed out sequentially by
 * amdgpu_get_xgmi_hive() and are never compacted; hive_count only grows.
 */
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
37 
38 void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
39 {
40 	return &hive->device_list;
41 }
42 
43 static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
44 		struct device_attribute *attr, char *buf)
45 {
46 	struct amdgpu_hive_info *hive =
47 			container_of(attr, struct amdgpu_hive_info, dev_attr);
48 
49 	return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
50 }
51 
52 static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
53 				    struct amdgpu_hive_info *hive)
54 {
55 	int ret = 0;
56 
57 	if (WARN_ON(hive->kobj))
58 		return -EINVAL;
59 
60 	hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
61 	if (!hive->kobj) {
62 		dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n");
63 		return -EINVAL;
64 	}
65 
66 	hive->dev_attr = (struct device_attribute) {
67 		.attr = {
68 			.name = "xgmi_hive_id",
69 			.mode = S_IRUGO,
70 
71 		},
72 		.show = amdgpu_xgmi_show_hive_id,
73 	};
74 
75 	ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr);
76 	if (ret) {
77 		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n");
78 		kobject_del(hive->kobj);
79 		kobject_put(hive->kobj);
80 		hive->kobj = NULL;
81 	}
82 
83 	return ret;
84 }
85 
86 static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev,
87 				    struct amdgpu_hive_info *hive)
88 {
89 	sysfs_remove_file(hive->kobj, &hive->dev_attr.attr);
90 	kobject_del(hive->kobj);
91 	kobject_put(hive->kobj);
92 	hive->kobj = NULL;
93 }
94 
95 static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
96 				     struct device_attribute *attr,
97 				     char *buf)
98 {
99 	struct drm_device *ddev = dev_get_drvdata(dev);
100 	struct amdgpu_device *adev = ddev->dev_private;
101 
102 	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
103 
104 }
105 
106 
/* Read-only per-device attribute exposing this GPU's XGMI node id. */
static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
108 
109 
110 static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
111 					 struct amdgpu_hive_info *hive)
112 {
113 	int ret = 0;
114 	char node[10] = { 0 };
115 
116 	/* Create xgmi device id file */
117 	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
118 	if (ret) {
119 		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
120 		return ret;
121 	}
122 
123 	/* Create sysfs link to hive info folder on the first device */
124 	if (adev != hive->adev) {
125 		ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
126 					"xgmi_hive_info");
127 		if (ret) {
128 			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
129 			goto remove_file;
130 		}
131 	}
132 
133 	sprintf(node, "node%d", hive->number_devices);
134 	/* Create sysfs link form the hive folder to yourself */
135 	ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node);
136 	if (ret) {
137 		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
138 		goto remove_link;
139 	}
140 
141 	goto success;
142 
143 
144 remove_link:
145 	sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
146 
147 remove_file:
148 	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
149 
150 success:
151 	return ret;
152 }
153 
154 static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
155 					  struct amdgpu_hive_info *hive)
156 {
157 	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
158 	sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
159 	sysfs_remove_link(hive->kobj, adev->ddev->unique);
160 }
161 
162 
163 
164 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
165 {
166 	int i;
167 	struct amdgpu_hive_info *tmp;
168 
169 	if (!adev->gmc.xgmi.hive_id)
170 		return NULL;
171 
172 	mutex_lock(&xgmi_mutex);
173 
174 	for (i = 0 ; i < hive_count; ++i) {
175 		tmp = &xgmi_hives[i];
176 		if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
177 			if (lock)
178 				mutex_lock(&tmp->hive_lock);
179 			mutex_unlock(&xgmi_mutex);
180 			return tmp;
181 		}
182 	}
183 	if (i >= AMDGPU_MAX_XGMI_HIVE) {
184 		mutex_unlock(&xgmi_mutex);
185 		return NULL;
186 	}
187 
188 	/* initialize new hive if not exist */
189 	tmp = &xgmi_hives[hive_count++];
190 
191 	if (amdgpu_xgmi_sysfs_create(adev, tmp)) {
192 		mutex_unlock(&xgmi_mutex);
193 		return NULL;
194 	}
195 
196 	tmp->adev = adev;
197 	tmp->hive_id = adev->gmc.xgmi.hive_id;
198 	INIT_LIST_HEAD(&tmp->device_list);
199 	mutex_init(&tmp->hive_lock);
200 	mutex_init(&tmp->reset_lock);
201 
202 	if (lock)
203 		mutex_lock(&tmp->hive_lock);
204 	tmp->pstate = -1;
205 	mutex_unlock(&xgmi_mutex);
206 
207 	return tmp;
208 }
209 
210 int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
211 {
212 	int ret = 0;
213 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
214 
215 	if (!hive)
216 		return 0;
217 
218 	if (hive->pstate == pstate)
219 		return 0;
220 
221 	dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
222 
223 	if (is_support_sw_smu(adev))
224 		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
225 	if (ret)
226 		dev_err(adev->dev,
227 			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
228 			adev->gmc.xgmi.node_id,
229 			adev->gmc.xgmi.hive_id, ret);
230 
231 	return ret;
232 }
233 
234 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
235 {
236 	int ret = -EINVAL;
237 
238 	/* Each psp need to set the latest topology */
239 	ret = psp_xgmi_set_topology_info(&adev->psp,
240 					 hive->number_devices,
241 					 &hive->topology_info);
242 	if (ret)
243 		dev_err(adev->dev,
244 			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
245 			adev->gmc.xgmi.node_id,
246 			adev->gmc.xgmi.hive_id, ret);
247 
248 	return ret;
249 }
250 
251 int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
252 {
253 	struct psp_xgmi_topology_info *hive_topology;
254 	struct amdgpu_hive_info *hive;
255 	struct amdgpu_xgmi	*entry;
256 	struct amdgpu_device *tmp_adev = NULL;
257 
258 	int count = 0, ret = -EINVAL;
259 
260 	if (!adev->gmc.xgmi.supported)
261 		return 0;
262 
263 	ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
264 	if (ret) {
265 		dev_err(adev->dev,
266 			"XGMI: Failed to get node id\n");
267 		return ret;
268 	}
269 
270 	ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
271 	if (ret) {
272 		dev_err(adev->dev,
273 			"XGMI: Failed to get hive id\n");
274 		return ret;
275 	}
276 
277 	hive = amdgpu_get_xgmi_hive(adev, 1);
278 	if (!hive) {
279 		ret = -EINVAL;
280 		dev_err(adev->dev,
281 			"XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
282 			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
283 		goto exit;
284 	}
285 
286 	hive_topology = &hive->topology_info;
287 
288 	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
289 	list_for_each_entry(entry, &hive->device_list, head)
290 		hive_topology->nodes[count++].node_id = entry->node_id;
291 	hive->number_devices = count;
292 
293 	/* Each psp need to get the latest topology */
294 	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
295 		ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology);
296 		if (ret) {
297 			dev_err(tmp_adev->dev,
298 				"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
299 				tmp_adev->gmc.xgmi.node_id,
300 				tmp_adev->gmc.xgmi.hive_id, ret);
301 			/* To do : continue with some node failed or disable the whole hive */
302 			break;
303 		}
304 	}
305 
306 	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
307 		ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
308 		if (ret)
309 			break;
310 	}
311 
312 	if (!ret)
313 		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
314 
315 	if (!ret)
316 		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
317 			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
318 	else
319 		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
320 			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
321 			ret);
322 
323 
324 	mutex_unlock(&hive->hive_lock);
325 exit:
326 	return ret;
327 }
328 
/*
 * Unregister a device from its hive at teardown; the hive's sysfs
 * directory and locks are destroyed once the device count runs out.
 */
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;

	if (!adev->gmc.xgmi.supported)
		return;

	/* Takes hive->hive_lock (lock == 1). */
	hive = amdgpu_get_xgmi_hive(adev, 1);
	if (!hive)
		return;

	/*
	 * Post-decrement: the destroy branch only runs when number_devices
	 * was already 0 on entry, i.e. one call *after* the last device was
	 * removed — NOTE(review): looks off by one; confirm intended.
	 * NOTE(review): the device is never list_del()'d from
	 * hive->device_list here, and mutex_destroy(&hive->hive_lock) runs
	 * on a mutex this function still holds (taken above) — both look
	 * like latent bugs; verify against callers before changing.
	 */
	if (!(hive->number_devices--)) {
		amdgpu_xgmi_sysfs_destroy(adev, hive);
		mutex_destroy(&hive->hive_lock);
		mutex_destroy(&hive->reset_lock);
	} else {
		amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
		mutex_unlock(&hive->hive_lock);
	}
}
349