/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"

static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_HIVE			8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count;

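/*
 * Hand back a pointer that is unique per hive (the head of its device list),
 * which callers can use as an opaque per-hive token.
 */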
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
	return &hive->device_list;
}

static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_hive_info *hive =
			container_of(attr, struct amdgpu_hive_info, dev_attr);

	return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
}

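/*
 * Create the per-hive "xgmi_hive_info" kobject under the registering device
 * and expose the hive id through the "xgmi_hive_id" attribute.
 */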
static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
				    struct amdgpu_hive_info *hive)
{
	int ret = 0;

	if (WARN_ON(hive->kobj))
		return -EINVAL;

	hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
	if (!hive->kobj) {
		dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n");
		return -EINVAL;
	}

	hive->dev_attr = (struct device_attribute) {
		.attr = {
			.name = "xgmi_hive_id",
			.mode = S_IRUGO,
		},
		.show = amdgpu_xgmi_show_hive_id,
	};

	ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n");
		kobject_del(hive->kobj);
		kobject_put(hive->kobj);
		hive->kobj = NULL;
	}

	return ret;
}

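/*
 * Tear down the hive sysfs entry: drop the hive id attribute and release the
 * "xgmi_hive_info" kobject.
 */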
static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev,
				    struct amdgpu_hive_info *hive)
{
	sysfs_remove_file(hive->kobj, &hive->dev_attr.attr);
	kobject_del(hive->kobj);
	kobject_put(hive->kobj);
	hive->kobj = NULL;
}

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
}

static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);

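/*
 * Publish this device's XGMI information in sysfs: an "xgmi_device_id"
 * attribute on the device, a link from the device back to the hive folder,
 * and a "node<N>" link from the hive folder to the device.
 */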
static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
					 struct amdgpu_hive_info *hive)
{
	int ret = 0;
	char node[10] = { 0 };

	/* Create xgmi device id file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
		return ret;
	}

	/* Create a sysfs link to the hive info folder on every device except
	 * the first one, which already owns the folder.
	 */
	if (adev != hive->adev) {
		ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
					"xgmi_hive_info");
		if (ret) {
			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
			goto remove_file;
		}
	}

	sprintf(node, "node%d", hive->number_devices);
	/* Create a sysfs link from the hive folder back to this device */
	ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
		goto remove_link;
	}

	goto success;

remove_link:
	/* the hive info link only exists on devices other than the first */
	if (adev != hive->adev)
		sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);

success:
	return ret;
}

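/*
 * Undo amdgpu_xgmi_sysfs_add_dev_info(): remove the device id attribute and
 * the sysfs links between the device and the hive folder.
 */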
static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
{
	char node[10] = { 0 };

	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	/* the hive info link only exists on devices other than the first */
	if (adev != hive->adev)
		sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");
	sprintf(node, "node%d", hive->number_devices);
	sysfs_remove_link(hive->kobj, node);
}

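/*
 * Look up the hive that matches adev's hive id, creating a new entry in the
 * static hive table if none exists yet.  When @lock is non-zero the hive is
 * returned with its hive_lock held.  Returns NULL when the device reports no
 * hive id, the table is full, or the hive sysfs entry cannot be created.
 */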
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
{
	int i;
	struct amdgpu_hive_info *tmp;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	mutex_lock(&xgmi_mutex);

	for (i = 0; i < hive_count; ++i) {
		tmp = &xgmi_hives[i];
		if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
			if (lock)
				mutex_lock(&tmp->hive_lock);
			mutex_unlock(&xgmi_mutex);
			return tmp;
		}
	}
	if (i >= AMDGPU_MAX_XGMI_HIVE) {
		mutex_unlock(&xgmi_mutex);
		return NULL;
	}

	/* initialize a new hive if it doesn't exist yet */
	tmp = &xgmi_hives[hive_count++];

	if (amdgpu_xgmi_sysfs_create(adev, tmp)) {
		mutex_unlock(&xgmi_mutex);
		return NULL;
	}

	tmp->adev = adev;
	tmp->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&tmp->device_list);
	mutex_init(&tmp->hive_lock);
	mutex_init(&tmp->reset_lock);

	if (lock)
		mutex_lock(&tmp->hive_lock);
	tmp->pstate = -1;
	mutex_unlock(&xgmi_mutex);

	return tmp;
}

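/*
 * Request a new XGMI link pstate for the hive.  Currently a stub: it only
 * checks whether the hive is already at the requested pstate; the SMU
 * message is still to be implemented.
 */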
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
	int ret = 0;
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);

	if (!hive)
		return 0;

	if (hive->pstate == pstate)
		return 0;
	/* TODO: send a message to the SMU to request the pstate change */
	return ret;
}

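/*
 * Push the hive's current topology information to this device's PSP so that
 * the firmware on every node agrees on the XGMI layout.
 */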
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret = -EINVAL;

	/* Each PSP needs to set the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 hive->number_devices,
					 &hive->topology_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}

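/*
 * Register a device with its XGMI hive: query the node and hive ids from the
 * PSP, join the hive's device list, rebuild the topology information and
 * propagate it to every node, then publish the sysfs entries.
 */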
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *hive_topology;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi	*entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = -EINVAL;

	if (!adev->gmc.xgmi.supported)
		return 0;

	ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
	if (ret) {
		dev_err(adev->dev,
			"XGMI: Failed to get node id\n");
		return ret;
	}

	ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
	if (ret) {
		dev_err(adev->dev,
			"XGMI: Failed to get hive id\n");
		return ret;
	}

	hive = amdgpu_get_xgmi_hive(adev, 1);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, cannot match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}

	hive_topology = &hive->topology_info;

	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		hive_topology->nodes[count++].node_id = entry->node_id;
	hive->number_devices = count;

	/* Each PSP needs to get the latest topology */
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology);
		if (ret) {
			dev_err(tmp_adev->dev,
				"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
				tmp_adev->gmc.xgmi.node_id,
				tmp_adev->gmc.xgmi.hive_id, ret);
			/* TODO: either continue when a node fails or disable the whole hive */
			break;
		}
	}

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
		if (ret)
			break;
	}

	if (!ret)
		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);

	if (!ret)
		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
	else
		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
			ret);

	mutex_unlock(&hive->hive_lock);
exit:
	return ret;
}

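/*
 * Remove a device from its XGMI hive: drop its sysfs entries and list entry,
 * and tear down the hive itself once the last device has left.
 */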
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;

	if (!adev->gmc.xgmi.supported)
		return;

	hive = amdgpu_get_xgmi_hive(adev, 1);
	if (!hive)
		return;

	amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
	list_del(&adev->gmc.xgmi.head);
	mutex_unlock(&hive->hive_lock);

	/* tear down the hive once the last device has left */
	if (!(--hive->number_devices)) {
		amdgpu_xgmi_sysfs_destroy(adev, hive);
		mutex_destroy(&hive->hive_lock);
		mutex_destroy(&hive->reset_lock);
	}
}