/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"

static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_HIVE			8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;

static const int xgmi_pcs_err_status_reg_vg20[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
};

static const int wafl_pcs_err_status_reg_vg20[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi_pcs_err_status_reg_arct[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
};

/* same as vg20 */
static const int wafl_pcs_err_status_reg_arct[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
	{"XGMI PCS DataLossErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
	{"XGMI PCS TrainingErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
	{"XGMI PCS CRCErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI PCS BERExceededErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI PCS TxMetaDataErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"XGMI PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI PCS DataParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"XGMI PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"XGMI PCS DeskewErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
	{"XGMI PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"XGMI PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"XGMI PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"XGMI PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"XGMI PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"XGMI PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"XGMI PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
	{"WAFL PCS DataLossErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
	{"WAFL PCS TrainingErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
	{"WAFL PCS CRCErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
	{"WAFL PCS BERExceededErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
	{"WAFL PCS TxMetaDataErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"WAFL PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"WAFL PCS DataParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
	{"WAFL PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"WAFL PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"WAFL PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"WAFL PCS DeskewErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
	{"WAFL PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"WAFL PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"WAFL PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"WAFL PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"WAFL PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"WAFL PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"WAFL PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
	return &hive->device_list;
}

/**
 * DOC: AMDGPU XGMI Support
 *
 * XGMI is a high speed interconnect that joins multiple GPU cards
 * into a homogeneous memory space that is organized by a collective
 * hive ID and individual node IDs, both of which are 64-bit numbers.
 *
 * The file xgmi_device_id contains the unique per GPU device ID and
 * is stored in the /sys/class/drm/card${cardno}/device/ directory.
 *
 * Inside the device directory a sub-directory 'xgmi_hive_info' is
 * created which contains the hive ID and the list of nodes.
 *
 * The hive ID is stored in:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
 *
 * The node information is stored in numbered directories:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
 *
 * Each device has its own xgmi_hive_info directory (a symlink on all but
 * the first device in the hive) with a mirrored set of node
 * sub-directories.
 *
 * The XGMI memory space is built by contiguously adding the power of
 * two padded VRAM space from each node to each other.  For example, with
 * a node segment size of 16 GB, physical node N exposes its VRAM starting
 * at offset N * 16 GB inside the hive-wide address space.
 *
 */


static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_hive_info *hive =
			container_of(attr, struct amdgpu_hive_info, dev_attr);

	return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
}

static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
				    struct amdgpu_hive_info *hive)
{
	int ret = 0;

	if (WARN_ON(hive->kobj))
		return -EINVAL;

	hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
	if (!hive->kobj) {
		dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n");
		return -EINVAL;
	}

	hive->dev_attr = (struct device_attribute) {
		.attr = {
			.name = "xgmi_hive_id",
			.mode = S_IRUGO,
		},
		.show = amdgpu_xgmi_show_hive_id,
	};

	ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n");
		kobject_del(hive->kobj);
		kobject_put(hive->kobj);
		hive->kobj = NULL;
	}

	return ret;
}

static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev,
				    struct amdgpu_hive_info *hive)
{
	sysfs_remove_file(hive->kobj, &hive->dev_attr.attr);
	kobject_del(hive->kobj);
	kobject_put(hive->kobj);
	hive->kobj = NULL;
}

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
}

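/*
 * AMDGPU_XGMI_SET_FICAA() builds the control word for the data fabric
 * indirect register access (FICAA) used by the xgmi_error sysfs file below.
 * Note: the exact meaning of the 0x456801 constant is an assumption inferred
 * from the get_fica()/set_fica() usage; it is not documented in this file.
 */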
#define AMDGPU_XGMI_SET_FICAA(o)	((o) | 0x456801)
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
	uint64_t fica_out;
	unsigned int error_count = 0;

	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
	if (fica_out != 0x1f)
		pr_err("xGMI error counters not enabled!\n");

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);

	if ((fica_out & 0xffff) == 2)
		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);

	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);

	return snprintf(buf, PAGE_SIZE, "%d\n", error_count);
}


static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);

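/*
 * Expose the per-device XGMI sysfs entries: create the xgmi_device_id and
 * xgmi_error files for this device and wire the device into the hive's
 * xgmi_hive_info folder via symlinks (see the DOC section above).
 */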
static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
					 struct amdgpu_hive_info *hive)
{
	int ret = 0;
	char node[10] = { 0 };

	/* Create xgmi device id file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
		return ret;
	}

	/* Create xgmi error file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
	if (ret)
		pr_err("failed to create xgmi_error\n");


	/* Every device except the first one in the hive gets a sysfs link
	 * to the hive info folder, which lives on the first device.
	 */
	if (adev != hive->adev) {
		ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
					"xgmi_hive_info");
		if (ret) {
			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
			goto remove_file;
		}
	}

	sprintf(node, "node%d", hive->number_devices);
	/* Create a sysfs link from the hive folder back to this device */
	ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
		goto remove_link;
	}

	goto success;


remove_link:
	sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);

success:
	return ret;
}

static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
{
	char node[10];
	memset(node, 0, sizeof(node));

	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);

	if (adev != hive->adev)
		sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

	sprintf(node, "node%d", hive->number_devices);
	sysfs_remove_link(hive->kobj, node);
}


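/*
 * Look up the hive matching adev's hive_id under xgmi_mutex, or claim the
 * next free slot in xgmi_hives[] and initialize it (sysfs entry, device
 * list, locks, task barrier) if no match exists yet.  When @lock is
 * non-zero the hive_lock is returned held and the caller must drop it.
 */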
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
{
	int i;
	struct amdgpu_hive_info *tmp;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	mutex_lock(&xgmi_mutex);

	for (i = 0 ; i < hive_count; ++i) {
		tmp = &xgmi_hives[i];
		if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
			if (lock)
				mutex_lock(&tmp->hive_lock);
			mutex_unlock(&xgmi_mutex);
			return tmp;
		}
	}
	if (i >= AMDGPU_MAX_XGMI_HIVE) {
		mutex_unlock(&xgmi_mutex);
		return NULL;
	}

	/* initialize a new hive entry if this hive_id is not known yet */
	tmp = &xgmi_hives[hive_count++];

	if (amdgpu_xgmi_sysfs_create(adev, tmp)) {
		mutex_unlock(&xgmi_mutex);
		return NULL;
	}

	tmp->adev = adev;
	tmp->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&tmp->device_list);
	mutex_init(&tmp->hive_lock);
	mutex_init(&tmp->reset_lock);
	task_barrier_init(&tmp->tb);

	if (lock)
		mutex_lock(&tmp->hive_lock);
	tmp->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
	tmp->hi_req_gpu = NULL;
	/*
	 * The hive pstate on boot is high in vega20, so we have to go to
	 * low pstate after boot.
	 */
	tmp->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
	mutex_unlock(&xgmi_mutex);

	return tmp;
}

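/*
 * Request an XGMI link pstate for the hive.  A single peer asking for high
 * pstate raises the whole hive, while the hive only drops back to low once
 * every peer has requested low again, which is what hi_req_count tracks.
 * Note: the early "return 0" below intentionally disables the switching
 * because of a firmware bug, so the remainder of the function is currently
 * not executed.
 */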
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
	int ret = 0;
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
	struct amdgpu_device *request_adev = hive->hi_req_gpu ?
						hive->hi_req_gpu : adev;
	bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
	bool init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;

	/* fw bug so temporarily disable pstate switching */
	return 0;

	if (!hive || adev->asic_type != CHIP_VEGA20)
		return 0;

	mutex_lock(&hive->hive_lock);

	if (is_hi_req)
		hive->hi_req_count++;
	else
		hive->hi_req_count--;

	/*
	 * Vega20 only needs single peer to request pstate high for the hive to
	 * go high but all peers must request pstate low for the hive to go low
	 */
	if (hive->pstate == pstate ||
			(!is_hi_req && hive->hi_req_count && !init_low))
		goto out;

	dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);

	ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
	if (ret) {
		dev_err(request_adev->dev,
			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
			request_adev->gmc.xgmi.node_id,
			request_adev->gmc.xgmi.hive_id, ret);
		goto out;
	}

	if (init_low)
		hive->pstate = hive->hi_req_count ?
					hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
	else {
		hive->pstate = pstate;
		hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
							adev : NULL;
	}
out:
	mutex_unlock(&hive->hive_lock);
	return ret;
}

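/*
 * Push the hive's current node list to the PSP of this device so the
 * firmware topology stays in sync whenever a device joins the hive.
 */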
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret;

	/* Each psp needs to set the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 hive->number_devices,
					 &adev->psp.xgmi_context.top_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}

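/*
 * Return the number of XGMI hops between adev and peer_adev as reported by
 * the PSP topology info, or -EINVAL if the peer is not found in the table.
 */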
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
		struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	for (i = 0 ; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_hops;
	return -EINVAL;
}

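/*
 * Register this device with its XGMI hive: query hive/node IDs from the PSP
 * (or fall back to fixed IDs when no PSP block is present), add the device
 * to the hive list, refresh the topology on every peer, and create the
 * sysfs entries for the new node.
 */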
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *top_info;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi	*entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = 0;

	if (!adev->gmc.xgmi.supported)
		return 0;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		ret = psp_xgmi_initialize(&adev->psp);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to initialize xgmi session\n");
			return ret;
		}

		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get hive id\n");
			return ret;
		}

		ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get node id\n");
			return ret;
		}
	} else {
		adev->gmc.xgmi.hive_id = 16;
		adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
	}

	hive = amdgpu_get_xgmi_hive(adev, 1);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}

	top_info = &adev->psp.xgmi_context.top_info;

	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		top_info->nodes[count++].node_id = entry->node_id;
	top_info->num_nodes = count;
	hive->number_devices = count;

	task_barrier_add_task(&hive->tb);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* update the node list for the other devices in the hive */
			if (tmp_adev != adev) {
				top_info = &tmp_adev->psp.xgmi_context.top_info;
				top_info->nodes[count - 1].node_id =
					adev->gmc.xgmi.node_id;
				top_info->num_nodes = count;
			}
			ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
			if (ret)
				goto exit;
		}

		/* get the latest topology info for each device from psp */
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
					&tmp_adev->psp.xgmi_context.top_info);
			if (ret) {
				dev_err(tmp_adev->dev,
					"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
					tmp_adev->gmc.xgmi.node_id,
					tmp_adev->gmc.xgmi.hive_id, ret);
				/* TODO: continue with some node failed or disable the whole hive */
				goto exit;
			}
		}
	}

	if (!ret)
		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);

	mutex_unlock(&hive->hive_lock);
exit:
	if (!ret)
		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
	else
		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
			ret);

	return ret;
}

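/*
 * Detach this device from its hive, remove its sysfs entries and, when the
 * last device leaves, tear down the hive's sysfs folder and locks before
 * terminating the PSP XGMI session.
 */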
int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;

	if (!adev->gmc.xgmi.supported)
		return -EINVAL;

	hive = amdgpu_get_xgmi_hive(adev, 1);
	if (!hive)
		return -EINVAL;

	task_barrier_rem_task(&hive->tb);
	amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
	mutex_unlock(&hive->hive_lock);

	if (!(--hive->number_devices)) {
		amdgpu_xgmi_sysfs_destroy(adev, hive);
		mutex_destroy(&hive->hive_lock);
		mutex_destroy(&hive->reset_lock);
	}

	return psp_xgmi_terminate(&adev->psp);
}

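/*
 * Late RAS init for the XGMI/WAFL block: clear any stale PCS error status,
 * allocate the ras_if descriptor on first use and register the
 * xgmi_wafl_err_count sysfs node through the common RAS core.
 */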
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_ih_if ih_info = {
		.cb = NULL,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "xgmi_wafl_err_count",
	};

	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;

	amdgpu_xgmi_reset_ras_error_count(adev);

	if (!adev->gmc.xgmi.ras_if) {
		adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gmc.xgmi.ras_if)
			return -ENOMEM;
		adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
		adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gmc.xgmi.ras_if->sub_block_index = 0;
		strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
	}
	ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
	r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
				 &fs_info, &ih_info);
	if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
		kfree(adev->gmc.xgmi.ras_if);
		adev->gmc.xgmi.ras_if = NULL;
	}

	return r;
}

void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
			adev->gmc.xgmi.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
		struct ras_ih_if ih_info = {
			.cb = NULL,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

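/*
 * Translate a node-local physical address into the hive-wide address space
 * by adding this node's segment offset (physical_node_id * node_segment_size),
 * matching the memory layout described in the DOC section above.
 */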
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
					   uint64_t addr)
{
	struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;

	return addr + xgmi->physical_node_id * xgmi->node_segment_size;
}

static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
{
	WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
	WREG32_PCIE(pcs_status_reg, 0);
}

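/*
 * Clear the per-link PCS error status registers for the current ASIC so the
 * next query starts from a clean state; pcs_clear_status() above performs
 * the write-ones-then-zero clear sequence on each register.
 */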
void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_arct[i]);
		break;
	case CHIP_VEGA20:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_vg20[i]);
		break;
	default:
		break;
	}
}

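/*
 * Decode one PCS error status value against the xgmi or wafl field table
 * and accumulate the hits into *ue_count.  Only uncorrectable errors are
 * reported by these registers, so *ce_count is left untouched.
 */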
static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
					      uint32_t value,
					      uint32_t *ue_count,
					      uint32_t *ce_count,
					      bool is_xgmi_pcs)
{
	int i;
	int ue_cnt;

	if (is_xgmi_pcs) {
		/* query xgmi pcs error status,
		 * only ue is supported
		 */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i++) {
			ue_cnt = (value &
				  xgmi_pcs_ras_fields[i].pcs_err_mask) >>
				  xgmi_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 xgmi_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	} else {
		/* query wafl pcs error status,
		 * only ue is supported
		 */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
			ue_cnt = (value &
				  wafl_pcs_ras_fields[i].pcs_err_mask) >>
				  wafl_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 wafl_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	}

	return 0;
}

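/*
 * RAS callback that reads every XGMI and WAFL PCS error status register for
 * the current ASIC, folds the decoded counts into ras_error_status and then
 * clears the hardware status registers again.
 */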
int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
				      void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i;
	uint32_t data;
	uint32_t ue_cnt = 0, ce_cnt = 0;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
		return -EINVAL;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	case CHIP_VEGA20:
	default:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	}

	amdgpu_xgmi_reset_ras_error_count(adev);

	err_data->ue_count += ue_cnt;
	err_data->ce_count += ce_cnt;

	return 0;
}