xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c (revision 5e18b9737004ef6f34862f6fb39d3c9027a4044a)
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  *
23  */
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
31 
32 #include "amdgpu.h"
33 #include "amdgpu_ras.h"
34 #include "amdgpu_atomfirmware.h"
35 #include "amdgpu_xgmi.h"
36 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
37 #include "atom.h"
38 #ifdef CONFIG_X86_MCE_AMD
39 #include <asm/mce.h>
40 
41 static bool notifier_registered;
42 #endif
43 static const char *RAS_FS_NAME = "ras";
44 
45 const char *ras_error_string[] = {
46 	"none",
47 	"parity",
48 	"single_correctable",
49 	"multi_uncorrectable",
50 	"poison",
51 };
52 
53 const char *ras_block_string[] = {
54 	"umc",
55 	"sdma",
56 	"gfx",
57 	"mmhub",
58 	"athub",
59 	"pcie_bif",
60 	"hdp",
61 	"xgmi_wafl",
62 	"df",
63 	"smn",
64 	"sem",
65 	"mp0",
66 	"mp1",
67 	"fuse",
68 	"mca",
69 };
70 
71 const char *ras_mca_block_string[] = {
72 	"mca_mp0",
73 	"mca_mp1",
74 	"mca_mpio",
75 	"mca_iohc",
76 };
77 
78 const char *get_ras_block_str(struct ras_common_if *ras_block)
79 {
80 	if (!ras_block)
81 		return "NULL";
82 
83 	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
84 		return "OUT OF RANGE";
85 
86 	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
87 		return ras_mca_block_string[ras_block->sub_block_index];
88 
89 	return ras_block_string[ras_block->block];
90 }
91 
92 #define ras_err_str(i) (ras_error_string[ffs(i)])
93 
94 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
95 
96 /* inject address is 52 bits */
97 #define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)
98 
99 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
100 #define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
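/* An illustrative calculation (not code from this file): at the typical
 * rate above, a board with 16 GB of VRAM corresponds to roughly
 * 16 * 1024 MB / 100 MB ~= 163 expected bad pages.
 */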
101 
102 enum amdgpu_ras_retire_page_reservation {
103 	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
104 	AMDGPU_RAS_RETIRE_PAGE_PENDING,
105 	AMDGPU_RAS_RETIRE_PAGE_FAULT,
106 };
107 
108 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
109 
110 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
111 				uint64_t addr);
112 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
113 				uint64_t addr);
114 #ifdef CONFIG_X86_MCE_AMD
115 static void amdgpu_register_bad_pages_mca_notifier(void);
116 #endif
117 
118 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
119 {
120 	if (adev && amdgpu_ras_get_context(adev))
121 		amdgpu_ras_get_context(adev)->error_query_ready = ready;
122 }
123 
124 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
125 {
126 	if (adev && amdgpu_ras_get_context(adev))
127 		return amdgpu_ras_get_context(adev)->error_query_ready;
128 
129 	return false;
130 }
131 
132 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
133 {
134 	struct ras_err_data err_data = {0, 0, 0, NULL};
135 	struct eeprom_table_record err_rec;
136 
137 	if ((address >= adev->gmc.mc_vram_size) ||
138 	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
139 		dev_warn(adev->dev,
140 		         "RAS WARN: input address 0x%llx is invalid.\n",
141 		         address);
142 		return -EINVAL;
143 	}
144 
145 	if (amdgpu_ras_check_bad_page(adev, address)) {
146 		dev_warn(adev->dev,
147 			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
148 			 address);
149 		return 0;
150 	}
151 
152 	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
153 
154 	err_rec.address = address;
155 	err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT;
156 	err_rec.ts = (uint64_t)ktime_get_real_seconds();
157 	err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
158 
159 	err_data.err_addr = &err_rec;
160 	err_data.err_addr_cnt = 1;
161 
162 	if (amdgpu_bad_page_threshold != 0) {
163 		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
164 					 err_data.err_addr_cnt);
165 		amdgpu_ras_save_bad_pages(adev);
166 	}
167 
168 	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
169 	dev_warn(adev->dev, "Clear EEPROM:\n");
170 	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
171 
172 	return 0;
173 }
174 
175 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
176 					size_t size, loff_t *pos)
177 {
178 	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
179 	struct ras_query_if info = {
180 		.head = obj->head,
181 	};
182 	ssize_t s;
183 	char val[128];
184 
185 	if (amdgpu_ras_query_error_status(obj->adev, &info))
186 		return -EINVAL;
187 
188 	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
189 			"ue", info.ue_count,
190 			"ce", info.ce_count);
191 	if (*pos >= s)
192 		return 0;
193 
194 	s -= *pos;
195 	s = min_t(u64, s, size);
196 
197 
198 	if (copy_to_user(buf, &val[*pos], s))
199 		return -EINVAL;
200 
201 	*pos += s;
202 
203 	return s;
204 }
205 
206 static const struct file_operations amdgpu_ras_debugfs_ops = {
207 	.owner = THIS_MODULE,
208 	.read = amdgpu_ras_debugfs_read,
209 	.write = NULL,
210 	.llseek = default_llseek
211 };
212 
213 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
214 {
215 	int i;
216 
217 	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
218 		*block_id = i;
219 		if (strcmp(name, ras_block_string[i]) == 0)
220 			return 0;
221 	}
222 	return -EINVAL;
223 }
224 
225 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
226 		const char __user *buf, size_t size,
227 		loff_t *pos, struct ras_debug_if *data)
228 {
229 	ssize_t s = min_t(u64, 64, size);
230 	char str[65];
231 	char block_name[33];
232 	char err[9] = "ue";
233 	int op = -1;
234 	int block_id;
235 	uint32_t sub_block;
236 	u64 address, value;
237 
238 	if (*pos)
239 		return -EINVAL;
240 	*pos = size;
241 
242 	memset(str, 0, sizeof(str));
243 	memset(data, 0, sizeof(*data));
244 
245 	if (copy_from_user(str, buf, s))
246 		return -EINVAL;
247 
248 	if (sscanf(str, "disable %32s", block_name) == 1)
249 		op = 0;
250 	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
251 		op = 1;
252 	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
253 		op = 2;
254 	else if (strstr(str, "retire_page") != NULL)
255 		op = 3;
256 	else if (str[0] && str[1] && str[2] && str[3])
257 		/* ascii string, but commands are not matched. */
258 		return -EINVAL;
259 
260 	if (op != -1) {
261 		if (op == 3) {
262 			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
263 			    sscanf(str, "%*s %llu", &address) != 1)
264 				return -EINVAL;
265 
266 			data->op = op;
267 			data->inject.address = address;
268 
269 			return 0;
270 		}
271 
272 		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
273 			return -EINVAL;
274 
275 		data->head.block = block_id;
276 		/* only ue and ce errors are supported */
277 		if (!memcmp("ue", err, 2))
278 			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
279 		else if (!memcmp("ce", err, 2))
280 			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
281 		else
282 			return -EINVAL;
283 
284 		data->op = op;
285 
286 		if (op == 2) {
287 			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
288 				   &sub_block, &address, &value) != 3 &&
289 			    sscanf(str, "%*s %*s %*s %u %llu %llu",
290 				   &sub_block, &address, &value) != 3)
291 				return -EINVAL;
292 			data->head.sub_block_index = sub_block;
293 			data->inject.address = address;
294 			data->inject.value = value;
295 		}
296 	} else {
297 		if (size < sizeof(*data))
298 			return -EINVAL;
299 
300 		if (copy_from_user(data, buf, sizeof(*data)))
301 			return -EINVAL;
302 	}
303 
304 	return 0;
305 }
306 
307 /**
308  * DOC: AMDGPU RAS debugfs control interface
309  *
310  * The control interface accepts struct ras_debug_if which has two members.
311  *
312  * The first member: ras_debug_if::head or ras_debug_if::inject.
313  *
314  * head is used to indicate which IP block will be under control.
315  *
316  * head has four members: block, type, sub_block_index, and name.
317  * block: which IP will be under control.
318  * type: what kind of error will be enabled/disabled/injected.
319  * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
320  * name: the name of the IP.
321  *
322  * inject has two more members than head: address and value.
323  * As their names indicate, the inject operation will write the
324  * value to the address.
325  *
326  * The second member: struct ras_debug_if::op.
327  * It has three kinds of operations.
328  *
329  * - 0: disable RAS on the block. Take ::head as its data.
330  * - 1: enable RAS on the block. Take ::head as its data.
331  * - 2: inject errors on the block. Take ::inject as its data.
332  *
333  * How to use the interface?
334  *
335  * In a program
336  *
337  * Copy the struct ras_debug_if in your code and initialize it.
338  * Write the struct to the control interface.
339  *
340  * From shell
341  *
342  * .. code-block:: bash
343  *
344  *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
345  *	echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
346  *	echo "inject  <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
347  *
348  * Where <N> is the card you want to affect.
349  *
350  * "disable" requires only the block.
351  * "enable" requires the block and error type.
352  * "inject" requires the block, error type, address, and value.
353  *
354  * The block is one of: umc, sdma, gfx, etc.
355  *	see ras_block_string[] for details
356  *
357  * The error type is one of: ue, ce, where,
358  *	ue is multi-uncorrectable
359  *	ce is single-correctable
360  *
361  * The sub-block is the sub-block index; pass 0 if there is no sub-block.
362  * The address and value are hexadecimal numbers; the leading 0x is optional.
363  *
364  * For instance,
365  *
366  * .. code-block:: bash
367  *
368  *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
369  *	echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
370  *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
371  *
372  * How to check the result of the operation?
373  *
374  * To check disable/enable, see "ras" features at,
375  * /sys/class/drm/card[0/1/2...]/device/ras/features
376  *
377  * To check inject, see the corresponding error count at,
378  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
379  *
380  * .. note::
381  *	Operations are only allowed on blocks which are supported.
382  *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
383  *	to see which blocks support RAS on a particular asic.
384  *
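 * For completeness, below is a minimal userspace sketch of the shell flow
 * above (an illustrative example, not part of the driver; it assumes
 * debugfs is mounted at /sys/kernel/debug and that card 0 supports RAS
 * on the umc block):
 *
 * .. code-block:: c
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char cmd[] = "inject umc ue 0x0 0x0 0x0";
 *		int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return 1;
 *		n = write(fd, cmd, strlen(cmd));	// one write() per command
 *		close(fd);
 *		return n == (ssize_t)strlen(cmd) ? 0 : 1;
 *	}
 *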
385  */
386 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
387 					     const char __user *buf,
388 					     size_t size, loff_t *pos)
389 {
390 	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
391 	struct ras_debug_if data;
392 	int ret = 0;
393 
394 	if (!amdgpu_ras_get_error_query_ready(adev)) {
395 		dev_warn(adev->dev, "RAS WARN: error injection "
396 				"currently inaccessible\n");
397 		return size;
398 	}
399 
400 	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
401 	if (ret)
402 		return ret;
403 
404 	if (data.op == 3) {
405 		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
406 		if (!ret)
407 			return size;
408 		else
409 			return ret;
410 	}
411 
412 	if (!amdgpu_ras_is_supported(adev, data.head.block))
413 		return -EINVAL;
414 
415 	switch (data.op) {
416 	case 0:
417 		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
418 		break;
419 	case 1:
420 		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
421 		break;
422 	case 2:
423 		if ((data.inject.address >= adev->gmc.mc_vram_size) ||
424 		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
425 			dev_warn(adev->dev, "RAS WARN: input address "
426 					"0x%llx is invalid.",
427 					data.inject.address);
428 			ret = -EINVAL;
429 			break;
430 		}
431 
432 		/* umc ce/ue error injection for a bad page is not allowed */
433 		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
434 		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
435 			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
436 				 "already been marked as bad!\n",
437 				 data.inject.address);
438 			break;
439 		}
440 
441 		/* data.inject.address is an offset rather than an absolute gpu address */
442 		ret = amdgpu_ras_error_inject(adev, &data.inject);
443 		break;
444 	default:
445 		ret = -EINVAL;
446 		break;
447 	}
448 
449 	if (ret)
450 		return -EINVAL;
451 
452 	return size;
453 }
454 
455 /**
456  * DOC: AMDGPU RAS debugfs EEPROM table reset interface
457  *
458  * Some boards contain an EEPROM which is used to persistently store a list of
459  * bad pages which experience ECC errors in VRAM.  This interface provides
460  * a way to reset the EEPROM, e.g., after testing error injection.
461  *
462  * Usage:
463  *
464  * .. code-block:: bash
465  *
466  *	echo 1 > ../ras/ras_eeprom_reset
467  *
468  * will reset EEPROM table to 0 entries.
469  *
470  */
471 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
472 					       const char __user *buf,
473 					       size_t size, loff_t *pos)
474 {
475 	struct amdgpu_device *adev =
476 		(struct amdgpu_device *)file_inode(f)->i_private;
477 	int ret;
478 
479 	ret = amdgpu_ras_eeprom_reset_table(
480 		&(amdgpu_ras_get_context(adev)->eeprom_control));
481 
482 	if (!ret) {
483 		/* Something was written to EEPROM.
484 		 */
485 		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
486 		return size;
487 	} else {
488 		return ret;
489 	}
490 }
491 
492 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
493 	.owner = THIS_MODULE,
494 	.read = NULL,
495 	.write = amdgpu_ras_debugfs_ctrl_write,
496 	.llseek = default_llseek
497 };
498 
499 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
500 	.owner = THIS_MODULE,
501 	.read = NULL,
502 	.write = amdgpu_ras_debugfs_eeprom_write,
503 	.llseek = default_llseek
504 };
505 
506 /**
507  * DOC: AMDGPU RAS sysfs Error Count Interface
508  *
509  * It allows the user to read the error count for each IP block on the gpu through
510  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
511  *
512  * It outputs multiple lines which report the uncorrected (ue) and corrected
513  * (ce) error counts.
514  *
515  * The format of one line is below,
516  *
517  * [ce|ue]: count
518  *
519  * Example:
520  *
521  * .. code-block:: bash
522  *
523  *	ue: 0
524  *	ce: 1
525  *
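 * A small userspace sketch of reading and parsing that format (an
 * illustrative example; the card index and the umc block name are
 * assumptions):
 *
 * .. code-block:: c
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long ue, ce;
 *		FILE *fp = fopen("/sys/class/drm/card0/device/ras/umc_err_count", "r");
 *
 *		if (!fp)
 *			return 1;
 *		if (fscanf(fp, "ue: %lu ce: %lu", &ue, &ce) == 2)
 *			printf("ue=%lu ce=%lu\n", ue, ce);
 *		fclose(fp);
 *		return 0;
 *	}
 *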
526  */
527 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
528 		struct device_attribute *attr, char *buf)
529 {
530 	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
531 	struct ras_query_if info = {
532 		.head = obj->head,
533 	};
534 
535 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
536 		return sysfs_emit(buf, "Query currently inaccessible\n");
537 
538 	if (amdgpu_ras_query_error_status(obj->adev, &info))
539 		return -EINVAL;
540 
541 	if (obj->adev->asic_type == CHIP_ALDEBARAN) {
542 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
543 			DRM_WARN("Failed to reset error counter and error status");
544 	}
545 
546 	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
547 			  "ce", info.ce_count);
548 }
549 
550 /* obj begin */
551 
552 #define get_obj(obj) do { (obj)->use++; } while (0)
553 #define alive_obj(obj) ((obj)->use)
554 
555 static inline void put_obj(struct ras_manager *obj)
556 {
557 	if (obj && (--obj->use == 0))
558 		list_del(&obj->node);
559 	if (obj && (obj->use < 0))
560 		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
561 }
562 
563 /* make one obj and return it. */
564 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
565 		struct ras_common_if *head)
566 {
567 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
568 	struct ras_manager *obj;
569 
570 	if (!adev->ras_enabled || !con)
571 		return NULL;
572 
573 	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
574 		return NULL;
575 
576 	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
577 		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
578 			return NULL;
579 
580 		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
581 	} else
582 		obj = &con->objs[head->block];
583 
584 	/* already exists. return obj? */
585 	if (alive_obj(obj))
586 		return NULL;
587 
588 	obj->head = *head;
589 	obj->adev = adev;
590 	list_add(&obj->node, &con->head);
591 	get_obj(obj);
592 
593 	return obj;
594 }
595 
596 /* return an obj equal to head, or the first when head is NULL */
597 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
598 		struct ras_common_if *head)
599 {
600 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
601 	struct ras_manager *obj;
602 	int i;
603 
604 	if (!adev->ras_enabled || !con)
605 		return NULL;
606 
607 	if (head) {
608 		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
609 			return NULL;
610 
611 		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
612 			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
613 				return NULL;
614 
615 			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
616 		} else
617 			obj = &con->objs[head->block];
618 
619 		if (alive_obj(obj))
620 			return obj;
621 	} else {
622 		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
623 			obj = &con->objs[i];
624 			if (alive_obj(obj))
625 				return obj;
626 		}
627 	}
628 
629 	return NULL;
630 }
631 /* obj end */
632 
633 /* feature ctl begin */
634 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
635 					 struct ras_common_if *head)
636 {
637 	return adev->ras_hw_enabled & BIT(head->block);
638 }
639 
640 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
641 		struct ras_common_if *head)
642 {
643 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
644 
645 	return con->features & BIT(head->block);
646 }
647 
648 /*
649  * if obj is not created, then create one.
650  * set feature enable flag.
651  */
652 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
653 		struct ras_common_if *head, int enable)
654 {
655 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
656 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
657 
658 	/* If the hardware does not support ras, do not create the obj.
659 	 * But if the hardware does support ras, we can create the obj.
660 	 * The ras framework checks con->hw_supported to see if it needs to
661 	 * do the corresponding initialization.
662 	 * Each IP checks con->support to see if it needs to disable ras.
663 	 */
664 	if (!amdgpu_ras_is_feature_allowed(adev, head))
665 		return 0;
666 
667 	if (enable) {
668 		if (!obj) {
669 			obj = amdgpu_ras_create_obj(adev, head);
670 			if (!obj)
671 				return -EINVAL;
672 		} else {
673 			/* In case we create obj somewhere else */
674 			get_obj(obj);
675 		}
676 		con->features |= BIT(head->block);
677 	} else {
678 		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
679 			con->features &= ~BIT(head->block);
680 			put_obj(obj);
681 		}
682 	}
683 
684 	return 0;
685 }
686 
687 /* wrapper of psp_ras_enable_features */
688 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
689 		struct ras_common_if *head, bool enable)
690 {
691 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
692 	union ta_ras_cmd_input *info;
693 	int ret;
694 
695 	if (!con)
696 		return -EINVAL;
697 
698 	info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
699 	if (!info)
700 		return -ENOMEM;
701 
702 	if (!enable) {
703 		info->disable_features = (struct ta_ras_disable_features_input) {
704 			.block_id =  amdgpu_ras_block_to_ta(head->block),
705 			.error_type = amdgpu_ras_error_to_ta(head->type),
706 		};
707 	} else {
708 		info->enable_features = (struct ta_ras_enable_features_input) {
709 			.block_id =  amdgpu_ras_block_to_ta(head->block),
710 			.error_type = amdgpu_ras_error_to_ta(head->type),
711 		};
712 	}
713 
714 	/* Do not enable if it is not allowed. */
715 	WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
716 
717 	if (!amdgpu_ras_intr_triggered()) {
718 		ret = psp_ras_enable_features(&adev->psp, info, enable);
719 		if (ret) {
720 			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
721 				enable ? "enable":"disable",
722 				get_ras_block_str(head),
723 				amdgpu_ras_is_poison_mode_supported(adev), ret);
724 			goto out;
725 		}
726 	}
727 
728 	/* setup the obj */
729 	__amdgpu_ras_feature_enable(adev, head, enable);
730 	ret = 0;
731 out:
732 	kfree(info);
733 	return ret;
734 }
735 
736 /* Only used in device probe stage and called only once. */
737 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
738 		struct ras_common_if *head, bool enable)
739 {
740 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
741 	int ret;
742 
743 	if (!con)
744 		return -EINVAL;
745 
746 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
747 		if (enable) {
748 			/* There is no harm in issuing a ras TA cmd regardless of
749 			 * the current ras state.
750 			 * If current state == target state, it will do nothing.
751 			 * But sometimes it requests the driver to reset and repost
752 			 * with error code -EAGAIN.
753 			 */
754 			ret = amdgpu_ras_feature_enable(adev, head, 1);
755 			/* With an old ras TA, we might fail to enable ras.
756 			 * Log it and just set up the object.
757 			 * TODO: remove this WA in the future.
758 			 */
759 			if (ret == -EINVAL) {
760 				ret = __amdgpu_ras_feature_enable(adev, head, 1);
761 				if (!ret)
762 					dev_info(adev->dev,
763 						"RAS INFO: %s setup object\n",
764 						get_ras_block_str(head));
765 			}
766 		} else {
767 			/* setup the object then issue a ras TA disable cmd. */
768 			ret = __amdgpu_ras_feature_enable(adev, head, 1);
769 			if (ret)
770 				return ret;
771 
772 			/* the gfx block ras disable cmd must be sent to the ras TA */
773 			if (head->block == AMDGPU_RAS_BLOCK__GFX)
774 				con->features |= BIT(head->block);
775 
776 			ret = amdgpu_ras_feature_enable(adev, head, 0);
777 
778 			/* clean gfx block ras features flag */
779 			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
780 				con->features &= ~BIT(head->block);
781 		}
782 	} else
783 		ret = amdgpu_ras_feature_enable(adev, head, enable);
784 
785 	return ret;
786 }
787 
788 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
789 		bool bypass)
790 {
791 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
792 	struct ras_manager *obj, *tmp;
793 
794 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
795 		/* bypass psp.
796 		 * aka just release the obj and corresponding flags
797 		 */
798 		if (bypass) {
799 			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
800 				break;
801 		} else {
802 			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
803 				break;
804 		}
805 	}
806 
807 	return con->features;
808 }
809 
810 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
811 		bool bypass)
812 {
813 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
814 	int i;
815 	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
816 
817 	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
818 		struct ras_common_if head = {
819 			.block = i,
820 			.type = default_ras_type,
821 			.sub_block_index = 0,
822 		};
823 
824 		if (i == AMDGPU_RAS_BLOCK__MCA)
825 			continue;
826 
827 		if (bypass) {
828 			/*
829 			 * bypass psp; vbios has enabled ras for us,
830 			 * so just create the obj.
831 			 */
832 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
833 				break;
834 		} else {
835 			if (amdgpu_ras_feature_enable(adev, &head, 1))
836 				break;
837 		}
838 	}
839 
840 	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
841 		struct ras_common_if head = {
842 			.block = AMDGPU_RAS_BLOCK__MCA,
843 			.type = default_ras_type,
844 			.sub_block_index = i,
845 		};
846 
847 		if (bypass) {
848 			/*
849 			 * bypass psp; vbios has enabled ras for us,
850 			 * so just create the obj.
851 			 */
852 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
853 				break;
854 		} else {
855 			if (amdgpu_ras_feature_enable(adev, &head, 1))
856 				break;
857 		}
858 	}
859 
860 	return con->features;
861 }
862 /* feature ctl end */
863 
864 
865 void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
866 				       struct ras_common_if *ras_block,
867 				       struct ras_err_data  *err_data)
868 {
869 	switch (ras_block->sub_block_index) {
870 	case AMDGPU_RAS_MCA_BLOCK__MP0:
871 		if (adev->mca.mp0.ras_funcs &&
872 		    adev->mca.mp0.ras_funcs->query_ras_error_count)
873 			adev->mca.mp0.ras_funcs->query_ras_error_count(adev, err_data);
874 		break;
875 	case AMDGPU_RAS_MCA_BLOCK__MP1:
876 		if (adev->mca.mp1.ras_funcs &&
877 		    adev->mca.mp1.ras_funcs->query_ras_error_count)
878 			adev->mca.mp1.ras_funcs->query_ras_error_count(adev, err_data);
879 		break;
880 	case AMDGPU_RAS_MCA_BLOCK__MPIO:
881 		if (adev->mca.mpio.ras_funcs &&
882 		    adev->mca.mpio.ras_funcs->query_ras_error_count)
883 			adev->mca.mpio.ras_funcs->query_ras_error_count(adev, err_data);
884 		break;
885 	default:
886 		break;
887 	}
888 }
889 
890 /* query/inject/cure begin */
891 int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
892 				  struct ras_query_if *info)
893 {
894 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
895 	struct ras_err_data err_data = {0, 0, 0, NULL};
896 	int i;
897 
898 	if (!obj)
899 		return -EINVAL;
900 
901 	switch (info->head.block) {
902 	case AMDGPU_RAS_BLOCK__UMC:
903 		if (adev->umc.ras_funcs &&
904 		    adev->umc.ras_funcs->query_ras_error_count)
905 			adev->umc.ras_funcs->query_ras_error_count(adev, &err_data);
906 		/* umc query_ras_error_address is also responsible for clearing
907 		 * error status
908 		 */
909 		if (adev->umc.ras_funcs &&
910 		    adev->umc.ras_funcs->query_ras_error_address)
911 			adev->umc.ras_funcs->query_ras_error_address(adev, &err_data);
912 		break;
913 	case AMDGPU_RAS_BLOCK__SDMA:
914 		if (adev->sdma.funcs->query_ras_error_count) {
915 			for (i = 0; i < adev->sdma.num_instances; i++)
916 				adev->sdma.funcs->query_ras_error_count(adev, i,
917 									&err_data);
918 		}
919 		break;
920 	case AMDGPU_RAS_BLOCK__GFX:
921 		if (adev->gfx.ras_funcs &&
922 		    adev->gfx.ras_funcs->query_ras_error_count)
923 			adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data);
924 
925 		if (adev->gfx.ras_funcs &&
926 		    adev->gfx.ras_funcs->query_ras_error_status)
927 			adev->gfx.ras_funcs->query_ras_error_status(adev);
928 		break;
929 	case AMDGPU_RAS_BLOCK__MMHUB:
930 		if (adev->mmhub.ras_funcs &&
931 		    adev->mmhub.ras_funcs->query_ras_error_count)
932 			adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data);
933 
934 		if (adev->mmhub.ras_funcs &&
935 		    adev->mmhub.ras_funcs->query_ras_error_status)
936 			adev->mmhub.ras_funcs->query_ras_error_status(adev);
937 		break;
938 	case AMDGPU_RAS_BLOCK__PCIE_BIF:
939 		if (adev->nbio.ras_funcs &&
940 		    adev->nbio.ras_funcs->query_ras_error_count)
941 			adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data);
942 		break;
943 	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
944 		if (adev->gmc.xgmi.ras_funcs &&
945 		    adev->gmc.xgmi.ras_funcs->query_ras_error_count)
946 			adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data);
947 		break;
948 	case AMDGPU_RAS_BLOCK__HDP:
949 		if (adev->hdp.ras_funcs &&
950 		    adev->hdp.ras_funcs->query_ras_error_count)
951 			adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data);
952 		break;
953 	case AMDGPU_RAS_BLOCK__MCA:
954 		amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data);
955 		break;
956 	default:
957 		break;
958 	}
959 
960 	obj->err_data.ue_count += err_data.ue_count;
961 	obj->err_data.ce_count += err_data.ce_count;
962 
963 	info->ue_count = obj->err_data.ue_count;
964 	info->ce_count = obj->err_data.ce_count;
965 
966 	if (err_data.ce_count) {
967 		if (adev->smuio.funcs &&
968 		    adev->smuio.funcs->get_socket_id &&
969 		    adev->smuio.funcs->get_die_id) {
970 			dev_info(adev->dev, "socket: %d, die: %d "
971 					"%ld correctable hardware errors "
972 					"detected in %s block, no user "
973 					"action is needed.\n",
974 					adev->smuio.funcs->get_socket_id(adev),
975 					adev->smuio.funcs->get_die_id(adev),
976 					obj->err_data.ce_count,
977 					get_ras_block_str(&info->head));
978 		} else {
979 			dev_info(adev->dev, "%ld correctable hardware errors "
980 					"detected in %s block, no user "
981 					"action is needed.\n",
982 					obj->err_data.ce_count,
983 					get_ras_block_str(&info->head));
984 		}
985 	}
986 	if (err_data.ue_count) {
987 		if (adev->smuio.funcs &&
988 		    adev->smuio.funcs->get_socket_id &&
989 		    adev->smuio.funcs->get_die_id) {
990 			dev_info(adev->dev, "socket: %d, die: %d "
991 					"%ld uncorrectable hardware errors "
992 					"detected in %s block\n",
993 					adev->smuio.funcs->get_socket_id(adev),
994 					adev->smuio.funcs->get_die_id(adev),
995 					obj->err_data.ue_count,
996 					get_ras_block_str(&info->head));
997 		} else {
998 			dev_info(adev->dev, "%ld uncorrectable hardware errors "
999 					"detected in %s block\n",
1000 					obj->err_data.ue_count,
1001 					get_ras_block_str(&info->head));
1002 		}
1003 	}
1004 
1005 	if (!amdgpu_persistent_edc_harvesting_supported(adev))
1006 		amdgpu_ras_reset_error_status(adev, info->head.block);
1007 
1008 	return 0;
1009 }
1010 
1011 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1012 		enum amdgpu_ras_block block)
1013 {
1014 	if (!amdgpu_ras_is_supported(adev, block))
1015 		return -EINVAL;
1016 
1017 	switch (block) {
1018 	case AMDGPU_RAS_BLOCK__GFX:
1019 		if (adev->gfx.ras_funcs &&
1020 		    adev->gfx.ras_funcs->reset_ras_error_count)
1021 			adev->gfx.ras_funcs->reset_ras_error_count(adev);
1022 
1023 		if (adev->gfx.ras_funcs &&
1024 		    adev->gfx.ras_funcs->reset_ras_error_status)
1025 			adev->gfx.ras_funcs->reset_ras_error_status(adev);
1026 		break;
1027 	case AMDGPU_RAS_BLOCK__MMHUB:
1028 		if (adev->mmhub.ras_funcs &&
1029 		    adev->mmhub.ras_funcs->reset_ras_error_count)
1030 			adev->mmhub.ras_funcs->reset_ras_error_count(adev);
1031 
1032 		if (adev->mmhub.ras_funcs &&
1033 		    adev->mmhub.ras_funcs->reset_ras_error_status)
1034 			adev->mmhub.ras_funcs->reset_ras_error_status(adev);
1035 		break;
1036 	case AMDGPU_RAS_BLOCK__SDMA:
1037 		if (adev->sdma.funcs->reset_ras_error_count)
1038 			adev->sdma.funcs->reset_ras_error_count(adev);
1039 		break;
1040 	case AMDGPU_RAS_BLOCK__HDP:
1041 		if (adev->hdp.ras_funcs &&
1042 		    adev->hdp.ras_funcs->reset_ras_error_count)
1043 			adev->hdp.ras_funcs->reset_ras_error_count(adev);
1044 		break;
1045 	default:
1046 		break;
1047 	}
1048 
1049 	return 0;
1050 }
1051 
1052 /* Trigger XGMI/WAFL error */
1053 static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
1054 				 struct ta_ras_trigger_error_input *block_info)
1055 {
1056 	int ret;
1057 
1058 	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
1059 		dev_warn(adev->dev, "Failed to disallow df cstate");
1060 
1061 	if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
1062 		dev_warn(adev->dev, "Failed to disallow XGMI power down");
1063 
1064 	ret = psp_ras_trigger_error(&adev->psp, block_info);
1065 
1066 	if (amdgpu_ras_intr_triggered())
1067 		return ret;
1068 
1069 	if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
1070 		dev_warn(adev->dev, "Failed to allow XGMI power down");
1071 
1072 	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
1073 		dev_warn(adev->dev, "Failed to allow df cstate");
1074 
1075 	return ret;
1076 }
1077 
1078 /* wrapper of psp_ras_trigger_error */
1079 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1080 		struct ras_inject_if *info)
1081 {
1082 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1083 	struct ta_ras_trigger_error_input block_info = {
1084 		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
1085 		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1086 		.sub_block_index = info->head.sub_block_index,
1087 		.address = info->address,
1088 		.value = info->value,
1089 	};
1090 	int ret = 0;
1091 
1092 	if (!obj)
1093 		return -EINVAL;
1094 
1095 	/* Calculate XGMI relative offset */
1096 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
1097 		block_info.address =
1098 			amdgpu_xgmi_get_relative_phy_addr(adev,
1099 							  block_info.address);
1100 	}
1101 
1102 	switch (info->head.block) {
1103 	case AMDGPU_RAS_BLOCK__GFX:
1104 		if (adev->gfx.ras_funcs &&
1105 		    adev->gfx.ras_funcs->ras_error_inject)
1106 			ret = adev->gfx.ras_funcs->ras_error_inject(adev, info);
1107 		else
1108 			ret = -EINVAL;
1109 		break;
1110 	case AMDGPU_RAS_BLOCK__UMC:
1111 	case AMDGPU_RAS_BLOCK__SDMA:
1112 	case AMDGPU_RAS_BLOCK__MMHUB:
1113 	case AMDGPU_RAS_BLOCK__PCIE_BIF:
1114 	case AMDGPU_RAS_BLOCK__MCA:
1115 		ret = psp_ras_trigger_error(&adev->psp, &block_info);
1116 		break;
1117 	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
1118 		ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
1119 		break;
1120 	default:
1121 		dev_info(adev->dev, "%s error injection is not supported yet\n",
1122 			 get_ras_block_str(&info->head));
1123 		ret = -EINVAL;
1124 	}
1125 
1126 	if (ret)
1127 		dev_err(adev->dev, "ras inject %s failed %d\n",
1128 			get_ras_block_str(&info->head), ret);
1129 
1130 	return ret;
1131 }
1132 
1133 /**
1134  * amdgpu_ras_query_error_count -- Get error counts of all IPs
1135  * @adev: pointer to AMD GPU device
1136  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1137  * @ue_count: pointer to an integer to be set to the count of uncorrectable
1138  * errors.
1139  *
1140  * If @ce_count or @ue_count is set, count and return the corresponding
1141  * error counts via those integer pointers. Return 0 if the device
1142  * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1143  */
1144 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1145 				 unsigned long *ce_count,
1146 				 unsigned long *ue_count)
1147 {
1148 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1149 	struct ras_manager *obj;
1150 	unsigned long ce, ue;
1151 
1152 	if (!adev->ras_enabled || !con)
1153 		return -EOPNOTSUPP;
1154 
1155 	/* Don't count since no reporting.
1156 	 */
1157 	if (!ce_count && !ue_count)
1158 		return 0;
1159 
1160 	ce = 0;
1161 	ue = 0;
1162 	list_for_each_entry(obj, &con->head, node) {
1163 		struct ras_query_if info = {
1164 			.head = obj->head,
1165 		};
1166 		int res;
1167 
1168 		res = amdgpu_ras_query_error_status(adev, &info);
1169 		if (res)
1170 			return res;
1171 
1172 		ce += info.ce_count;
1173 		ue += info.ue_count;
1174 	}
1175 
1176 	if (ce_count)
1177 		*ce_count = ce;
1178 
1179 	if (ue_count)
1180 		*ue_count = ue;
1181 
1182 	return 0;
1183 }
1184 /* query/inject/cure end */
1185 
1186 
1187 /* sysfs begin */
1188 
1189 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1190 		struct ras_badpage **bps, unsigned int *count);
1191 
1192 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1193 {
1194 	switch (flags) {
1195 	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1196 		return "R";
1197 	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1198 		return "P";
1199 	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1200 	default:
1201 		return "F";
1202 	}
1203 }
1204 
1205 /**
1206  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1207  *
1208  * It allows the user to read the bad pages of vram on the gpu through
1209  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1210  *
1211  * It outputs multiple lines, and each line stands for one gpu page.
1212  *
1213  * The format of one line is below,
1214  * gpu pfn : gpu page size : flags
1215  *
1216  * gpu pfn and gpu page size are printed in hex format.
1217  * flags can be one of the following characters:
1218  *
1219  * R: reserved, this gpu page is reserved and cannot be used.
1220  *
1221  * P: pending for reserve, this gpu page is marked as bad and will be
1222  * reserved in the next window of page_reserve.
1223  *
1224  * F: unable to reserve. this gpu page cannot be reserved for some reason.
1225  *
1226  * Examples:
1227  *
1228  * .. code-block:: bash
1229  *
1230  *	0x00000001 : 0x00001000 : R
1231  *	0x00000002 : 0x00001000 : P
1232  *
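 * A short userspace sketch of parsing one such line (illustrative only;
 * the input string is the first example line above):
 *
 * .. code-block:: c
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		const char line[] = "0x00000001 : 0x00001000 : R";
 *		unsigned int pfn, size;
 *		char flag;
 *
 *		if (sscanf(line, "0x%x : 0x%x : %c", &pfn, &size, &flag) == 3)
 *			printf("pfn=0x%x size=0x%x flag=%c\n", pfn, size, flag);
 *		return 0;
 *	}
 *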
1233  */
1234 
1235 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1236 		struct kobject *kobj, struct bin_attribute *attr,
1237 		char *buf, loff_t ppos, size_t count)
1238 {
1239 	struct amdgpu_ras *con =
1240 		container_of(attr, struct amdgpu_ras, badpages_attr);
1241 	struct amdgpu_device *adev = con->adev;
1242 	const unsigned int element_size =
1243 		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1244 	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1245 	unsigned int end = div64_ul(ppos + count - 1, element_size);
1246 	ssize_t s = 0;
1247 	struct ras_badpage *bps = NULL;
1248 	unsigned int bps_count = 0;
1249 
1250 	memset(buf, 0, count);
1251 
1252 	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1253 		return 0;
1254 
1255 	for (; start < end && start < bps_count; start++)
1256 		s += scnprintf(&buf[s], element_size + 1,
1257 				"0x%08x : 0x%08x : %1s\n",
1258 				bps[start].bp,
1259 				bps[start].size,
1260 				amdgpu_ras_badpage_flags_str(bps[start].flags));
1261 
1262 	kfree(bps);
1263 
1264 	return s;
1265 }
1266 
1267 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1268 		struct device_attribute *attr, char *buf)
1269 {
1270 	struct amdgpu_ras *con =
1271 		container_of(attr, struct amdgpu_ras, features_attr);
1272 
1273 	return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
1274 }
1275 
1276 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1277 {
1278 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1279 
1280 	sysfs_remove_file_from_group(&adev->dev->kobj,
1281 				&con->badpages_attr.attr,
1282 				RAS_FS_NAME);
1283 }
1284 
1285 static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
1286 {
1287 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1288 	struct attribute *attrs[] = {
1289 		&con->features_attr.attr,
1290 		NULL
1291 	};
1292 	struct attribute_group group = {
1293 		.name = RAS_FS_NAME,
1294 		.attrs = attrs,
1295 	};
1296 
1297 	sysfs_remove_group(&adev->dev->kobj, &group);
1298 
1299 	return 0;
1300 }
1301 
1302 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1303 		struct ras_fs_if *head)
1304 {
1305 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1306 
1307 	if (!obj || obj->attr_inuse)
1308 		return -EINVAL;
1309 
1310 	get_obj(obj);
1311 
1312 	memcpy(obj->fs_data.sysfs_name,
1313 			head->sysfs_name,
1314 			sizeof(obj->fs_data.sysfs_name));
1315 
1316 	obj->sysfs_attr = (struct device_attribute){
1317 		.attr = {
1318 			.name = obj->fs_data.sysfs_name,
1319 			.mode = S_IRUGO,
1320 		},
1321 			.show = amdgpu_ras_sysfs_read,
1322 	};
1323 	sysfs_attr_init(&obj->sysfs_attr.attr);
1324 
1325 	if (sysfs_add_file_to_group(&adev->dev->kobj,
1326 				&obj->sysfs_attr.attr,
1327 				RAS_FS_NAME)) {
1328 		put_obj(obj);
1329 		return -EINVAL;
1330 	}
1331 
1332 	obj->attr_inuse = 1;
1333 
1334 	return 0;
1335 }
1336 
1337 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1338 		struct ras_common_if *head)
1339 {
1340 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1341 
1342 	if (!obj || !obj->attr_inuse)
1343 		return -EINVAL;
1344 
1345 	sysfs_remove_file_from_group(&adev->dev->kobj,
1346 				&obj->sysfs_attr.attr,
1347 				RAS_FS_NAME);
1348 	obj->attr_inuse = 0;
1349 	put_obj(obj);
1350 
1351 	return 0;
1352 }
1353 
1354 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1355 {
1356 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1357 	struct ras_manager *obj, *tmp;
1358 
1359 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
1360 		amdgpu_ras_sysfs_remove(adev, &obj->head);
1361 	}
1362 
1363 	if (amdgpu_bad_page_threshold != 0)
1364 		amdgpu_ras_sysfs_remove_bad_page_node(adev);
1365 
1366 	amdgpu_ras_sysfs_remove_feature_node(adev);
1367 
1368 	return 0;
1369 }
1370 /* sysfs end */
1371 
1372 /**
1373  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1374  *
1375  * Normally when there is an uncorrectable error, the driver will reset
1376  * the GPU to recover.  However, in the event of an unrecoverable error,
1377  * the driver provides an interface to reboot the system automatically
1378  * instead.
1379  *
1380  * The following file in debugfs provides that interface:
1381  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1382  *
1383  * Usage:
1384  *
1385  * .. code-block:: bash
1386  *
1387  *	echo true > .../ras/auto_reboot
1388  *
1389  */
1390 /* debugfs begin */
1391 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1392 {
1393 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1394 	struct drm_minor  *minor = adev_to_drm(adev)->primary;
1395 	struct dentry     *dir;
1396 
1397 	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1398 	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1399 			    &amdgpu_ras_debugfs_ctrl_ops);
1400 	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1401 			    &amdgpu_ras_debugfs_eeprom_ops);
1402 	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1403 			   &con->bad_page_cnt_threshold);
1404 	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1405 	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1406 	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1407 			    &amdgpu_ras_debugfs_eeprom_size_ops);
1408 	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1409 						       S_IRUGO, dir, adev,
1410 						       &amdgpu_ras_debugfs_eeprom_table_ops);
1411 	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1412 
1413 	/*
1414 	 * After an uncorrectable error happens, GPU recovery will usually
1415 	 * be scheduled. But GPU recovery is known to sometimes fail to
1416 	 * bring the GPU back, so the interface below gives the user a
1417 	 * direct way to reboot the system automatically when an
1418 	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
1419 	 * GPU recovery routine will never be called.
1420 	 */
1421 	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1422 
1423 	/*
1424 	 * The user can set this so that the hardware error count registers
1425 	 * of the RAS IPs are not cleaned up during ras recovery.
1426 	 */
1427 	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1428 			    &con->disable_ras_err_cnt_harvest);
1429 	return dir;
1430 }
1431 
1432 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1433 				      struct ras_fs_if *head,
1434 				      struct dentry *dir)
1435 {
1436 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1437 
1438 	if (!obj || !dir)
1439 		return;
1440 
1441 	get_obj(obj);
1442 
1443 	memcpy(obj->fs_data.debugfs_name,
1444 			head->debugfs_name,
1445 			sizeof(obj->fs_data.debugfs_name));
1446 
1447 	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1448 			    obj, &amdgpu_ras_debugfs_ops);
1449 }
1450 
1451 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1452 {
1453 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1454 	struct dentry *dir;
1455 	struct ras_manager *obj;
1456 	struct ras_fs_if fs_info;
1457 
1458 	/*
1459 	 * this won't be called in the resume path, so there is no need to
1460 	 * check the suspend and gpu reset status
1461 	 */
1462 	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1463 		return;
1464 
1465 	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1466 
1467 	list_for_each_entry(obj, &con->head, node) {
1468 		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1469 			(obj->attr_inuse == 1)) {
1470 			sprintf(fs_info.debugfs_name, "%s_err_inject",
1471 					get_ras_block_str(&obj->head));
1472 			fs_info.head = obj->head;
1473 			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1474 		}
1475 	}
1476 }
1477 
1478 /* debugfs end */
1479 
1480 /* ras fs */
1481 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1482 		amdgpu_ras_sysfs_badpages_read, NULL, 0);
1483 static DEVICE_ATTR(features, S_IRUGO,
1484 		amdgpu_ras_sysfs_features_read, NULL);
1485 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1486 {
1487 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1488 	struct attribute_group group = {
1489 		.name = RAS_FS_NAME,
1490 	};
1491 	struct attribute *attrs[] = {
1492 		&con->features_attr.attr,
1493 		NULL
1494 	};
1495 	struct bin_attribute *bin_attrs[] = {
1496 		NULL,
1497 		NULL,
1498 	};
1499 	int r;
1500 
1501 	/* add features entry */
1502 	con->features_attr = dev_attr_features;
1503 	group.attrs = attrs;
1504 	sysfs_attr_init(attrs[0]);
1505 
1506 	if (amdgpu_bad_page_threshold != 0) {
1507 		/* add bad_page_features entry */
1508 		bin_attr_gpu_vram_bad_pages.private = NULL;
1509 		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1510 		bin_attrs[0] = &con->badpages_attr;
1511 		group.bin_attrs = bin_attrs;
1512 		sysfs_bin_attr_init(bin_attrs[0]);
1513 	}
1514 
1515 	r = sysfs_create_group(&adev->dev->kobj, &group);
1516 	if (r)
1517 		dev_err(adev->dev, "Failed to create RAS sysfs group!");
1518 
1519 	return 0;
1520 }
1521 
1522 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
1523 {
1524 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1525 	struct ras_manager *con_obj, *ip_obj, *tmp;
1526 
1527 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1528 		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
1529 			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
1530 			if (ip_obj)
1531 				put_obj(ip_obj);
1532 		}
1533 	}
1534 
1535 	amdgpu_ras_sysfs_remove_all(adev);
1536 	return 0;
1537 }
1538 /* ras fs end */
1539 
1540 /* ih begin */
1541 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
1542 {
1543 	struct ras_ih_data *data = &obj->ih_data;
1544 	struct amdgpu_iv_entry entry;
1545 	int ret;
1546 	struct ras_err_data err_data = {0, 0, 0, NULL};
1547 
1548 	while (data->rptr != data->wptr) {
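		/* make sure the ring entry is read only after wptr is observed */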
1549 		rmb();
1550 		memcpy(&entry, &data->ring[data->rptr],
1551 				data->element_size);
1552 
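		/* ensure the entry was copied out before publishing the new rptr */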
1553 		wmb();
1554 		data->rptr = (data->aligned_element_size +
1555 				data->rptr) % data->ring_size;
1556 
1557 		if (data->cb) {
1558 			if (amdgpu_ras_is_poison_mode_supported(obj->adev) &&
1559 			    obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1560 				dev_info(obj->adev->dev,
1561 						"Poison is created, no user action is needed.\n");
1562 			else {
1563 				/* Let the IP handle its data; maybe we need to get the output
1564 				 * from the callback to update the error type/count, etc.
1565 				 */
1566 				ret = data->cb(obj->adev, &err_data, &entry);
1567 				/* A ue will trigger an interrupt, and in that case
1568 				 * we need to do a reset to recover the whole system.
1569 				 * But we leave that recovery to the IP; here we just
1570 				 * dispatch the error.
1571 				 */
1572 				if (ret == AMDGPU_RAS_SUCCESS) {
1573 					/* these counts could be left as 0 if
1574 					 * some blocks do not count error number
1575 					 */
1576 					obj->err_data.ue_count += err_data.ue_count;
1577 					obj->err_data.ce_count += err_data.ce_count;
1578 				}
1579 			}
1580 		}
1581 	}
1582 }
1583 
1584 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
1585 {
1586 	struct ras_ih_data *data =
1587 		container_of(work, struct ras_ih_data, ih_work);
1588 	struct ras_manager *obj =
1589 		container_of(data, struct ras_manager, ih_data);
1590 
1591 	amdgpu_ras_interrupt_handler(obj);
1592 }
1593 
1594 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1595 		struct ras_dispatch_if *info)
1596 {
1597 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1598 	struct ras_ih_data *data;
1599 
1600 	if (!obj)
1601 		return -EINVAL;
1602 	data = &obj->ih_data;
1603 	if (data->inuse == 0)
1604 		return 0;
1605 
1606 	/* The ring might overflow; new entries may overwrite unread ones. */
1607 	memcpy(&data->ring[data->wptr], info->entry,
1608 			data->element_size);
1609 
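	/* make sure the new entry is visible before publishing the new wptr */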
1610 	wmb();
1611 	data->wptr = (data->aligned_element_size +
1612 			data->wptr) % data->ring_size;
1613 
1614 	schedule_work(&data->ih_work);
1615 
1616 	return 0;
1617 }
1618 
1619 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1620 		struct ras_ih_if *info)
1621 {
1622 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1623 	struct ras_ih_data *data;
1624 
1625 	if (!obj)
1626 		return -EINVAL;
1627 
1628 	data = &obj->ih_data;
1629 	if (data->inuse == 0)
1630 		return 0;
1631 
1632 	cancel_work_sync(&data->ih_work);
1633 
1634 	kfree(data->ring);
1635 	memset(data, 0, sizeof(*data));
1636 	put_obj(obj);
1637 
1638 	return 0;
1639 }
1640 
1641 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1642 		struct ras_ih_if *info)
1643 {
1644 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1645 	struct ras_ih_data *data;
1646 
1647 	if (!obj) {
1648 		/* in case we register the IH before enabling the ras feature */
1649 		obj = amdgpu_ras_create_obj(adev, &info->head);
1650 		if (!obj)
1651 			return -EINVAL;
1652 	} else
1653 		get_obj(obj);
1654 
1655 	data = &obj->ih_data;
1656 	/* add the callback, etc. */
1657 	*data = (struct ras_ih_data) {
1658 		.inuse = 0,
1659 		.cb = info->cb,
1660 		.element_size = sizeof(struct amdgpu_iv_entry),
1661 		.rptr = 0,
1662 		.wptr = 0,
1663 	};
1664 
1665 	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1666 
1667 	data->aligned_element_size = ALIGN(data->element_size, 8);
1668 	/* the ring can store 64 iv entries. */
1669 	data->ring_size = 64 * data->aligned_element_size;
1670 	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1671 	if (!data->ring) {
1672 		put_obj(obj);
1673 		return -ENOMEM;
1674 	}
1675 
1676 	/* IH is ready */
1677 	data->inuse = 1;
1678 
1679 	return 0;
1680 }
1681 
1682 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1683 {
1684 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1685 	struct ras_manager *obj, *tmp;
1686 
1687 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
1688 		struct ras_ih_if info = {
1689 			.head = obj->head,
1690 		};
1691 		amdgpu_ras_interrupt_remove_handler(adev, &info);
1692 	}
1693 
1694 	return 0;
1695 }
1696 /* ih end */
1697 
1698 /* traverse all IPs except NBIO to query error counters */
1699 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1700 {
1701 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1702 	struct ras_manager *obj;
1703 
1704 	if (!adev->ras_enabled || !con)
1705 		return;
1706 
1707 	list_for_each_entry(obj, &con->head, node) {
1708 		struct ras_query_if info = {
1709 			.head = obj->head,
1710 		};
1711 
1712 		/*
1713 		 * The PCIE_BIF IP has a separate isr for the ras controller
1714 		 * interrupt, and the specific ras counter query will be
1715 		 * done in that isr. So skip such blocks in the common
1716 		 * sync flood interrupt isr.
1717 		 */
1718 		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1719 			continue;
1720 
1721 		amdgpu_ras_query_error_status(adev, &info);
1722 	}
1723 }
1724 
1725 /* Parse RdRspStatus and WrRspStatus */
1726 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1727 					  struct ras_query_if *info)
1728 {
1729 	/*
1730 	 * Only two blocks need to query the read/write
1731 	 * RspStatus in the current state.
1732 	 */
1733 	switch (info->head.block) {
1734 	case AMDGPU_RAS_BLOCK__GFX:
1735 		if (adev->gfx.ras_funcs &&
1736 		    adev->gfx.ras_funcs->query_ras_error_status)
1737 			adev->gfx.ras_funcs->query_ras_error_status(adev);
1738 		break;
1739 	case AMDGPU_RAS_BLOCK__MMHUB:
1740 		if (adev->mmhub.ras_funcs &&
1741 		    adev->mmhub.ras_funcs->query_ras_error_status)
1742 			adev->mmhub.ras_funcs->query_ras_error_status(adev);
1743 		break;
1744 	default:
1745 		break;
1746 	}
1747 }
1748 
1749 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1750 {
1751 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1752 	struct ras_manager *obj;
1753 
1754 	if (!adev->ras_enabled || !con)
1755 		return;
1756 
1757 	list_for_each_entry(obj, &con->head, node) {
1758 		struct ras_query_if info = {
1759 			.head = obj->head,
1760 		};
1761 
1762 		amdgpu_ras_error_status_query(adev, &info);
1763 	}
1764 }
1765 
1766 /* recovery begin */
1767 
1768 /* return 0 on success.
1769  * the caller needs to free bps.
1770  */
1771 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1772 		struct ras_badpage **bps, unsigned int *count)
1773 {
1774 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1775 	struct ras_err_handler_data *data;
1776 	int i = 0;
1777 	int ret = 0, status;
1778 
1779 	if (!con || !con->eh_data || !bps || !count)
1780 		return -EINVAL;
1781 
1782 	mutex_lock(&con->recovery_lock);
1783 	data = con->eh_data;
1784 	if (!data || data->count == 0) {
1785 		*bps = NULL;
1786 		ret = -EINVAL;
1787 		goto out;
1788 	}
1789 
1790 	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
1791 	if (!*bps) {
1792 		ret = -ENOMEM;
1793 		goto out;
1794 	}
1795 
1796 	for (; i < data->count; i++) {
1797 		(*bps)[i] = (struct ras_badpage){
1798 			.bp = data->bps[i].retired_page,
1799 			.size = AMDGPU_GPU_PAGE_SIZE,
1800 			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
1801 		};
1802 		status = amdgpu_vram_mgr_query_page_status(
1803 				ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
1804 				data->bps[i].retired_page);
1805 		if (status == -EBUSY)
1806 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
1807 		else if (status == -ENOENT)
1808 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
1809 	}
1810 
1811 	*count = data->count;
1812 out:
1813 	mutex_unlock(&con->recovery_lock);
1814 	return ret;
1815 }
1816 
1817 static void amdgpu_ras_do_recovery(struct work_struct *work)
1818 {
1819 	struct amdgpu_ras *ras =
1820 		container_of(work, struct amdgpu_ras, recovery_work);
1821 	struct amdgpu_device *remote_adev = NULL;
1822 	struct amdgpu_device *adev = ras->adev;
1823 	struct list_head device_list, *device_list_handle =  NULL;
1824 
1825 	if (!ras->disable_ras_err_cnt_harvest) {
1826 		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
1827 
1828 		/* Build list of devices to query RAS related errors */
1829 		if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
1830 			device_list_handle = &hive->device_list;
1831 		} else {
1832 			INIT_LIST_HEAD(&device_list);
1833 			list_add_tail(&adev->gmc.xgmi.head, &device_list);
1834 			device_list_handle = &device_list;
1835 		}
1836 
1837 		list_for_each_entry(remote_adev,
1838 				device_list_handle, gmc.xgmi.head) {
1839 			amdgpu_ras_query_err_status(remote_adev);
1840 			amdgpu_ras_log_on_err_counter(remote_adev);
1841 		}
1842 
1843 		amdgpu_put_xgmi_hive(hive);
1844 	}
1845 
1846 	if (amdgpu_device_should_recover_gpu(ras->adev))
1847 		amdgpu_device_gpu_recover(ras->adev, NULL);
1848 	atomic_set(&ras->in_recovery, 0);
1849 }
1850 
1851 /* alloc/realloc bps array */
1852 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
1853 		struct ras_err_handler_data *data, int pages)
1854 {
1855 	unsigned int old_space = data->count + data->space_left;
1856 	unsigned int new_space = old_space + pages;
1857 	unsigned int align_space = ALIGN(new_space, 512);
1858 	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
1859 
	if (!bps)
		return -ENOMEM;
1864 
1865 	if (data->bps) {
1866 		memcpy(bps, data->bps,
1867 				data->count * sizeof(*data->bps));
1868 		kfree(data->bps);
1869 	}
1870 
1871 	data->bps = bps;
1872 	data->space_left += align_space - old_space;
1873 	return 0;
1874 }
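/*
 * A worked example of the growth policy above (numbers only, no extra
 * logic): with count + space_left == 0 and pages == 256, new_space is
 * 256 and align_space rounds it up to 512, so room for 512 records is
 * allocated and space_left grows by 512; the next 512 retired pages can
 * then be added without reallocating.
 */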
1875 
/* It deals with vram only. */
1877 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
1878 		struct eeprom_table_record *bps, int pages)
1879 {
1880 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1881 	struct ras_err_handler_data *data;
1882 	int ret = 0;
1883 	uint32_t i;
1884 
1885 	if (!con || !con->eh_data || !bps || pages <= 0)
1886 		return 0;
1887 
1888 	mutex_lock(&con->recovery_lock);
1889 	data = con->eh_data;
1890 	if (!data)
1891 		goto out;
1892 
1893 	for (i = 0; i < pages; i++) {
1894 		if (amdgpu_ras_check_bad_page_unlock(con,
1895 			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
1896 			continue;
1897 
1898 		if (!data->space_left &&
1899 			amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
1900 			ret = -ENOMEM;
1901 			goto out;
1902 		}
1903 
1904 		amdgpu_vram_mgr_reserve_range(
1905 			ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
1906 			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
1907 			AMDGPU_GPU_PAGE_SIZE);
1908 
1909 		memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
1910 		data->count++;
1911 		data->space_left--;
1912 	}
1913 out:
1914 	mutex_unlock(&con->recovery_lock);
1915 
1916 	return ret;
1917 }
1918 
1919 /*
 * Write the error record array to eeprom; the caller should
 * hold recovery_lock.
1922  */
1923 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
1924 {
1925 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1926 	struct ras_err_handler_data *data;
1927 	struct amdgpu_ras_eeprom_control *control;
1928 	int save_count;
1929 
1930 	if (!con || !con->eh_data)
1931 		return 0;
1932 
1933 	control = &con->eeprom_control;
1934 	data = con->eh_data;
1935 	save_count = data->count - control->ras_num_recs;
1936 	/* only new entries are saved */
1937 	if (save_count > 0) {
1938 		if (amdgpu_ras_eeprom_append(control,
1939 					     &data->bps[control->ras_num_recs],
1940 					     save_count)) {
			dev_err(adev->dev, "Failed to save EEPROM table data!\n");
1942 			return -EIO;
1943 		}
1944 
1945 		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
1946 	}
1947 
1948 	return 0;
1949 }
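/*
 * A hedged sketch of the expected pairing of the two helpers above,
 * mirroring the MCE notifier at the bottom of this file (error
 * handling elided):
 *
 *	amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
 *				 err_data.err_addr_cnt);
 *	amdgpu_ras_save_bad_pages(adev);
 *
 * add_bad_pages() dedups against the cached records and reserves the
 * vram pages; save_bad_pages() then appends only the entries beyond
 * eeprom_control.ras_num_recs to the eeprom table.
 */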
1950 
1951 /*
1952  * read error record array in eeprom and reserve enough space for
1953  * storing new bad pages
1954  */
1955 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
1956 {
1957 	struct amdgpu_ras_eeprom_control *control =
1958 		&adev->psp.ras_context.ras->eeprom_control;
1959 	struct eeprom_table_record *bps;
1960 	int ret;
1961 
1962 	/* no bad page record, skip eeprom access */
1963 	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
1964 		return 0;
1965 
1966 	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
1967 	if (!bps)
1968 		return -ENOMEM;
1969 
1970 	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
1971 	if (ret)
		dev_err(adev->dev, "Failed to load EEPROM table records!\n");
1973 	else
1974 		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
1975 
1976 	kfree(bps);
1977 	return ret;
1978 }
1979 
1980 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
1981 				uint64_t addr)
1982 {
1983 	struct ras_err_handler_data *data = con->eh_data;
1984 	int i;
1985 
1986 	addr >>= AMDGPU_GPU_PAGE_SHIFT;
1987 	for (i = 0; i < data->count; i++)
1988 		if (addr == data->bps[i].retired_page)
1989 			return true;
1990 
1991 	return false;
1992 }
1993 
1994 /*
1995  * check if an address belongs to bad page
1996  *
1997  * Note: this check is only for umc block
1998  */
1999 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2000 				uint64_t addr)
2001 {
2002 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2003 	bool ret = false;
2004 
2005 	if (!con || !con->eh_data)
2006 		return ret;
2007 
2008 	mutex_lock(&con->recovery_lock);
2009 	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2010 	mutex_unlock(&con->recovery_lock);
2011 	return ret;
2012 }
2013 
2014 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2015 					  uint32_t max_count)
2016 {
2017 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2018 
2019 	/*
	 * Justification of the bad_page_cnt_threshold value in the ras
	 * structure.
	 *
	 * Generally -1 <= amdgpu_bad_page_threshold <= max record length
	 * in eeprom, which gives two scenarios:
	 *
	 * Bad page retirement enabled:
	 *    - If amdgpu_bad_page_threshold = -1, derive
	 *      bad_page_cnt_threshold from the typical bad page rate.
	 *
	 *    - If the user supplies 0 < amdgpu_bad_page_threshold <
	 *      max record length in eeprom, use that value directly.
	 *
	 * Bad page retirement disabled:
	 *    - If amdgpu_bad_page_threshold = 0, bad page retirement
	 *      is disabled and bad_page_cnt_threshold has no effect.
2036 	 */
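	/*
	 * Worked example with illustrative numbers: for 32 GiB of vram
	 * and RAS_BAD_PAGE_COVER = 100 MiB, the default (-1) path below
	 * computes 32768 MiB / 100 MiB ~= 327 pages, capped at max_count.
	 */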
2037 
2038 	if (amdgpu_bad_page_threshold < 0) {
2039 		u64 val = adev->gmc.mc_vram_size;
2040 
2041 		do_div(val, RAS_BAD_PAGE_COVER);
2042 		con->bad_page_cnt_threshold = min(lower_32_bits(val),
2043 						  max_count);
2044 	} else {
2045 		con->bad_page_cnt_threshold = min_t(int, max_count,
2046 						    amdgpu_bad_page_threshold);
2047 	}
2048 }
2049 
2050 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2051 {
2052 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2053 	struct ras_err_handler_data **data;
	u32 max_eeprom_records_count = 0;
2055 	bool exc_err_limit = false;
2056 	int ret;
2057 
2058 	if (!con)
2059 		return 0;
2060 
2061 	/* Allow access to RAS EEPROM via debugfs, when the ASIC
2062 	 * supports RAS and debugfs is enabled, but when
2063 	 * adev->ras_enabled is unset, i.e. when "ras_enable"
	 * adev->ras_enabled is unset, i.e. when the "ras_enable"
	 * module parameter is set to 0.
2066 	con->adev = adev;
2067 
2068 	if (!adev->ras_enabled)
2069 		return 0;
2070 
2071 	data = &con->eh_data;
	*data = kzalloc(sizeof(**data), GFP_KERNEL);
2073 	if (!*data) {
2074 		ret = -ENOMEM;
2075 		goto out;
2076 	}
2077 
2078 	mutex_init(&con->recovery_lock);
2079 	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2080 	atomic_set(&con->in_recovery, 0);
2081 
2082 	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
2083 	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2084 
	/* Todo: during testing the SMU might fail to read the eeprom
	 * through I2C when the GPU is pending an XGMI reset at probe time
	 * (mostly after a second bus reset); skip it for now.
	 */
2089 	if (adev->gmc.xgmi.pending_reset)
2090 		return 0;
2091 	ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2092 	/*
	 * This call is treated as a failure when exc_err_limit is
	 * true or ret != 0.
2095 	 */
2096 	if (exc_err_limit || ret)
2097 		goto free;
2098 
2099 	if (con->eeprom_control.ras_num_recs) {
2100 		ret = amdgpu_ras_load_bad_pages(adev);
2101 		if (ret)
2102 			goto free;
2103 
2104 		if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
2105 			adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
2106 	}
2107 
2108 #ifdef CONFIG_X86_MCE_AMD
2109 	if ((adev->asic_type == CHIP_ALDEBARAN) &&
2110 	    (adev->gmc.xgmi.connected_to_cpu))
2111 		amdgpu_register_bad_pages_mca_notifier();
2112 #endif
2113 	return 0;
2114 
2115 free:
2116 	kfree((*data)->bps);
2117 	kfree(*data);
2118 	con->eh_data = NULL;
2119 out:
2120 	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2121 
2122 	/*
	 * Except for the error-threshold-exceeded case, other failures in
	 * this function do not fail amdgpu driver init.
2125 	 */
2126 	if (!exc_err_limit)
2127 		ret = 0;
2128 	else
2129 		ret = -EINVAL;
2130 
2131 	return ret;
2132 }
2133 
2134 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2135 {
2136 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2137 	struct ras_err_handler_data *data = con->eh_data;
2138 
	/* recovery_init failed to initialize it, so fini has nothing to do */
2140 	if (!data)
2141 		return 0;
2142 
2143 	cancel_work_sync(&con->recovery_work);
2144 
2145 	mutex_lock(&con->recovery_lock);
2146 	con->eh_data = NULL;
2147 	kfree(data->bps);
2148 	kfree(data);
2149 	mutex_unlock(&con->recovery_lock);
2150 
2151 	return 0;
2152 }
2153 /* recovery end */
2154 
2155 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2156 {
2157 	return adev->asic_type == CHIP_VEGA10 ||
2158 		adev->asic_type == CHIP_VEGA20 ||
2159 		adev->asic_type == CHIP_ARCTURUS ||
2160 		adev->asic_type == CHIP_ALDEBARAN ||
2161 		adev->asic_type == CHIP_SIENNA_CICHLID;
2162 }
2163 
2164 /*
 * This is a workaround for the vega20 workstation sku:
 * force enable gfx ras and ignore the vbios gfx ras flag,
 * because GC EDC cannot be written.
2168  */
2169 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2170 {
2171 	struct atom_context *ctx = adev->mode_info.atom_context;
2172 
2173 	if (!ctx)
2174 		return;
2175 
2176 	if (strnstr(ctx->vbios_version, "D16406",
2177 		    sizeof(ctx->vbios_version)) ||
2178 		strnstr(ctx->vbios_version, "D36002",
2179 			sizeof(ctx->vbios_version)))
2180 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2181 }
2182 
2183 /*
 * Check the hardware's ras ability, which is saved in hw_supported.
 * If the hardware does not support ras, we can skip some ras
 * initialization and forbid some ras operations from IP blocks.
 * If software itself (say, a boot parameter) limits the ras ability,
 * we still need to allow IP blocks to do some limited operations,
 * like disable. In that case we have to initialize ras as normal,
 * but each function needs to check whether the operation is allowed.
2191  */
2192 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2193 {
2194 	adev->ras_hw_enabled = adev->ras_enabled = 0;
2195 
2196 	if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
2197 	    !amdgpu_ras_asic_supported(adev))
2198 		return;
2199 
2200 	if (!adev->gmc.xgmi.connected_to_cpu) {
2201 		if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2202 			dev_info(adev->dev, "MEM ECC is active.\n");
2203 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2204 						   1 << AMDGPU_RAS_BLOCK__DF);
2205 		} else {
			dev_info(adev->dev, "MEM ECC is not present.\n");
2207 		}
2208 
2209 		if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2210 			dev_info(adev->dev, "SRAM ECC is active.\n");
2211 			adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2212 						    1 << AMDGPU_RAS_BLOCK__DF);
2213 		} else {
			dev_info(adev->dev, "SRAM ECC is not present.\n");
2215 		}
2216 	} else {
		/* The driver only manages the ras features of a few IP
		 * blocks when the GPU is connected to the CPU through XGMI */
2219 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2220 					   1 << AMDGPU_RAS_BLOCK__SDMA |
2221 					   1 << AMDGPU_RAS_BLOCK__MMHUB);
2222 	}
2223 
2224 	amdgpu_ras_get_quirks(adev);
2225 
2226 	/* hw_supported needs to be aligned with RAS block mask. */
2227 	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2228 
2229 	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2230 		adev->ras_hw_enabled & amdgpu_ras_mask;
2231 }
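/*
 * An illustrative reading of the masks above (assuming umc is bit 0 of
 * the block enum): booting with amdgpu.ras_mask=0x1 keeps only the umc
 * bit of ras_hw_enabled in ras_enabled, while amdgpu.ras_enable=0
 * clears ras_enabled entirely even though ras_hw_enabled still reports
 * the hardware ability.
 */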
2232 
2233 static void amdgpu_ras_counte_dw(struct work_struct *work)
2234 {
2235 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2236 					      ras_counte_delay_work.work);
2237 	struct amdgpu_device *adev = con->adev;
2238 	struct drm_device *dev = adev_to_drm(adev);
2239 	unsigned long ce_count, ue_count;
2240 	int res;
2241 
2242 	res = pm_runtime_get_sync(dev->dev);
2243 	if (res < 0)
2244 		goto Out;
2245 
	/* Cache new values. */
2248 	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
2249 		atomic_set(&con->ras_ce_count, ce_count);
2250 		atomic_set(&con->ras_ue_count, ue_count);
2251 	}
2252 
2253 	pm_runtime_mark_last_busy(dev->dev);
2254 Out:
2255 	pm_runtime_put_autosuspend(dev->dev);
2256 }
2257 
2258 int amdgpu_ras_init(struct amdgpu_device *adev)
2259 {
2260 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2261 	int r;
2262 	bool df_poison, umc_poison;
2263 
2264 	if (con)
2265 		return 0;
2266 
	con = kzalloc(sizeof(struct amdgpu_ras) +
			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
			GFP_KERNEL);
2271 	if (!con)
2272 		return -ENOMEM;
2273 
2274 	con->adev = adev;
2275 	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
2276 	atomic_set(&con->ras_ce_count, 0);
2277 	atomic_set(&con->ras_ue_count, 0);
2278 
2279 	con->objs = (struct ras_manager *)(con + 1);
2280 
2281 	amdgpu_ras_set_context(adev, con);
2282 
2283 	amdgpu_ras_check_supported(adev);
2284 
2285 	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
		/* Set the gfx block ras context feature for VEGA20 Gaming;
		 * send a ras disable cmd to the ras ta during ras late init.
2288 		 */
2289 		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
2290 			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2291 
2292 			return 0;
2293 		}
2294 
2295 		r = 0;
2296 		goto release_con;
2297 	}
2298 
2299 	con->features = 0;
2300 	INIT_LIST_HEAD(&con->head);
	/* We might need to get this flag from the vbios. */
2302 	con->flags = RAS_DEFAULT_FLAGS;
2303 
	/* Initialize the nbio ras function ahead of any other
	 * ras functions so the hardware fatal error interrupt
	 * can be enabled as early as possible. */
2307 	switch (adev->asic_type) {
2308 	case CHIP_VEGA20:
2309 	case CHIP_ARCTURUS:
2310 	case CHIP_ALDEBARAN:
2311 		if (!adev->gmc.xgmi.connected_to_cpu)
2312 			adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
2313 		break;
2314 	default:
2315 		/* nbio ras is not available */
2316 		break;
2317 	}
2318 
2319 	if (adev->nbio.ras_funcs &&
2320 	    adev->nbio.ras_funcs->init_ras_controller_interrupt) {
2321 		r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev);
2322 		if (r)
2323 			goto release_con;
2324 	}
2325 
2326 	if (adev->nbio.ras_funcs &&
2327 	    adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) {
2328 		r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev);
2329 		if (r)
2330 			goto release_con;
2331 	}
2332 
2333 	/* Init poison supported flag, the default value is false */
2334 	if (adev->df.funcs &&
2335 	    adev->df.funcs->query_ras_poison_mode &&
2336 	    adev->umc.ras_funcs &&
2337 	    adev->umc.ras_funcs->query_ras_poison_mode) {
2338 		df_poison =
2339 			adev->df.funcs->query_ras_poison_mode(adev);
2340 		umc_poison =
2341 			adev->umc.ras_funcs->query_ras_poison_mode(adev);
		/* Only if poison is set in both DF and UMC can we support it */
2343 		if (df_poison && umc_poison)
2344 			con->poison_supported = true;
2345 		else if (df_poison != umc_poison)
2346 			dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2347 					df_poison, umc_poison);
2348 	}
2349 
2350 	if (amdgpu_ras_fs_init(adev)) {
2351 		r = -EINVAL;
2352 		goto release_con;
2353 	}
2354 
2355 	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
2356 		 "hardware ability[%x] ras_mask[%x]\n",
2357 		 adev->ras_hw_enabled, adev->ras_enabled);
2358 
2359 	return 0;
2360 release_con:
2361 	amdgpu_ras_set_context(adev, NULL);
2362 	kfree(con);
2363 
2364 	return r;
2365 }
2366 
2367 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
2368 {
2369 	if (adev->gmc.xgmi.connected_to_cpu)
2370 		return 1;
2371 	return 0;
2372 }
2373 
2374 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2375 					struct ras_common_if *ras_block)
2376 {
2377 	struct ras_query_if info = {
2378 		.head = *ras_block,
2379 	};
2380 
2381 	if (!amdgpu_persistent_edc_harvesting_supported(adev))
2382 		return 0;
2383 
2384 	if (amdgpu_ras_query_error_status(adev, &info) != 0)
2385 		DRM_WARN("RAS init harvest failure");
2386 
2387 	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
2388 		DRM_WARN("RAS init harvest reset failure");
2389 
2390 	return 0;
2391 }
2392 
2393 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
2394 {
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return false;

	return con->poison_supported;
2401 }
2402 
2403 /* helper function to handle common stuff in ip late init phase */
2404 int amdgpu_ras_late_init(struct amdgpu_device *adev,
2405 			 struct ras_common_if *ras_block,
2406 			 struct ras_fs_if *fs_info,
2407 			 struct ras_ih_if *ih_info)
2408 {
2409 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2410 	unsigned long ue_count, ce_count;
2411 	int r;
2412 
2413 	/* disable RAS feature per IP block if it is not supported */
2414 	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2415 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2416 		return 0;
2417 	}
2418 
2419 	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
2420 	if (r) {
2421 		if (adev->in_suspend || amdgpu_in_reset(adev)) {
			/* in the resume phase, if we fail to enable ras,
			 * clean up all ras fs nodes and disable ras */
2424 			goto cleanup;
2425 		} else
2426 			return r;
2427 	}
2428 
	/* check for errors on warm reset for persistent-edc-supported ASICs */
2430 	amdgpu_persistent_edc_harvesting(adev, ras_block);
2431 
2432 	/* in resume phase, no need to create ras fs node */
2433 	if (adev->in_suspend || amdgpu_in_reset(adev))
2434 		return 0;
2435 
2436 	if (ih_info->cb) {
2437 		r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
2438 		if (r)
2439 			goto interrupt;
2440 	}
2441 
2442 	r = amdgpu_ras_sysfs_create(adev, fs_info);
2443 	if (r)
2444 		goto sysfs;
2445 
	/* Those are the cached values at init. */
2448 	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
2449 		atomic_set(&con->ras_ce_count, ce_count);
2450 		atomic_set(&con->ras_ue_count, ue_count);
2451 	}
2452 
2453 	return 0;
2454 cleanup:
2455 	amdgpu_ras_sysfs_remove(adev, ras_block);
2456 sysfs:
2457 	if (ih_info->cb)
2458 		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
2459 interrupt:
2460 	amdgpu_ras_feature_enable(adev, ras_block, 0);
2461 	return r;
2462 }
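/*
 * A hedged sketch of how an IP block is expected to call the helper
 * above; the names here are illustrative, see the per-IP
 * *_ras_late_init implementations for the real wiring:
 *
 *	struct ras_fs_if fs_info = {
 *		.sysfs_name = "gfx_err_count",
 *	};
 *	struct ras_ih_if ih_info = {
 *		.cb = gfx_process_ras_data_cb,
 *	};
 *
 *	r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
 *				 &fs_info, &ih_info);
 */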
2463 
2464 /* helper function to remove ras fs node and interrupt handler */
2465 void amdgpu_ras_late_fini(struct amdgpu_device *adev,
2466 			  struct ras_common_if *ras_block,
2467 			  struct ras_ih_if *ih_info)
2468 {
2469 	if (!ras_block || !ih_info)
2470 		return;
2471 
2472 	amdgpu_ras_sysfs_remove(adev, ras_block);
2473 	if (ih_info->cb)
2474 		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
2475 	amdgpu_ras_feature_enable(adev, ras_block, 0);
2476 }
2477 
/* Do some init work after IP late init, as a dependence.
 * It runs in the resume/gpu-reset/boot-up cases.
2480  */
2481 void amdgpu_ras_resume(struct amdgpu_device *adev)
2482 {
2483 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2484 	struct ras_manager *obj, *tmp;
2485 
2486 	if (!adev->ras_enabled || !con) {
		/* clean the ras context for VEGA20 Gaming after sending the ras disable cmd */
2488 		amdgpu_release_ras_context(adev);
2489 
2490 		return;
2491 	}
2492 
2493 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		/* Set up all other IPs which are not implemented. One
		 * tricky thing: an IP's actual ras error type should be
		 * MULTI_UNCORRECTABLE, but since the driver does not handle
		 * it, ERROR_NONE makes sense anyway.
2498 		 */
2499 		amdgpu_ras_enable_all_features(adev, 1);
2500 
		/* We enable ras on all hw_supported blocks, but a boot
		 * parameter might disable some of them, and one or more IPs
		 * may not be implemented yet. So we disable those on their
		 * behalf.
2504 		 */
2505 		list_for_each_entry_safe(obj, tmp, &con->head, node) {
2506 			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
2507 				amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should not be any reference. */
2509 				WARN_ON(alive_obj(obj));
2510 			}
2511 		}
2512 	}
2513 }
2514 
2515 void amdgpu_ras_suspend(struct amdgpu_device *adev)
2516 {
2517 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2518 
2519 	if (!adev->ras_enabled || !con)
2520 		return;
2521 
2522 	amdgpu_ras_disable_all_features(adev, 0);
2523 	/* Make sure all ras objects are disabled. */
2524 	if (con->features)
2525 		amdgpu_ras_disable_all_features(adev, 1);
2526 }
2527 
2528 /* do some fini work before IP fini as dependence */
2529 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
2530 {
2531 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2532 
2533 	if (!adev->ras_enabled || !con)
2534 		return 0;

	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
2538 	amdgpu_ras_disable_all_features(adev, 0);
2539 	amdgpu_ras_recovery_fini(adev);
2540 	return 0;
2541 }
2542 
2543 int amdgpu_ras_fini(struct amdgpu_device *adev)
2544 {
2545 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2546 
2547 	if (!adev->ras_enabled || !con)
2548 		return 0;
2549 
2550 	amdgpu_ras_fs_fini(adev);
2551 	amdgpu_ras_interrupt_remove_all(adev);
2552 
2553 	WARN(con->features, "Feature mask is not cleared");
2554 
2555 	if (con->features)
2556 		amdgpu_ras_disable_all_features(adev, 1);
2557 
2558 	cancel_delayed_work_sync(&con->ras_counte_delay_work);
2559 
2560 	amdgpu_ras_set_context(adev, NULL);
2561 	kfree(con);
2562 
2563 	return 0;
2564 }
2565 
2566 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
2567 {
2568 	amdgpu_ras_check_supported(adev);
2569 	if (!adev->ras_hw_enabled)
2570 		return;
2571 
2572 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
		dev_info(adev->dev, "uncorrectable hardware error "
			 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
2575 
2576 		amdgpu_ras_reset_gpu(adev);
2577 	}
2578 }
2579 
2580 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
2581 {
2582 	if (adev->asic_type == CHIP_VEGA20 &&
2583 	    adev->pm.fw_version <= 0x283400) {
2584 		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
2585 				amdgpu_ras_intr_triggered();
2586 	}
2587 
2588 	return false;
2589 }
2590 
2591 void amdgpu_release_ras_context(struct amdgpu_device *adev)
2592 {
2593 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2594 
2595 	if (!con)
2596 		return;
2597 
2598 	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
2599 		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
2600 		amdgpu_ras_set_context(adev, NULL);
2601 		kfree(con);
2602 	}
2603 }
2604 
2605 #ifdef CONFIG_X86_MCE_AMD
2606 static struct amdgpu_device *find_adev(uint32_t node_id)
2607 {
2608 	struct amdgpu_gpu_instance *gpu_instance;
2609 	int i;
2610 	struct amdgpu_device *adev = NULL;
2611 
2612 	mutex_lock(&mgpu_info.mutex);
2613 
2614 	for (i = 0; i < mgpu_info.num_gpu; i++) {
2615 		gpu_instance = &(mgpu_info.gpu_ins[i]);
2616 		adev = gpu_instance->adev;
2617 
2618 		if (adev->gmc.xgmi.connected_to_cpu &&
2619 		    adev->gmc.xgmi.physical_node_id == node_id)
2620 			break;
2621 		adev = NULL;
2622 	}
2623 
2624 	mutex_unlock(&mgpu_info.mutex);
2625 
2626 	return adev;
2627 }
2628 
2629 #define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
2630 #define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
2631 #define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
2632 #define GPU_ID_OFFSET		8
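/*
 * Field layout decoded by the macros above: GET_MCA_IPID_GPUID() reads
 * bits [47:44] of MCA_IPID (GPU_ID_OFFSET, 8, is then subtracted to get
 * the xgmi physical node id), GET_UMC_INST() reads bits [23:21], and
 * GET_CHAN_INDEX() combines bits [13:12] with bit 20 to form a 3-bit
 * channel index.
 */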
2633 
2634 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
2635 				    unsigned long val, void *data)
2636 {
2637 	struct mce *m = (struct mce *)data;
2638 	struct amdgpu_device *adev = NULL;
2639 	uint32_t gpu_id = 0;
2640 	uint32_t umc_inst = 0;
2641 	uint32_t ch_inst, channel_index = 0;
2642 	struct ras_err_data err_data = {0, 0, 0, NULL};
2643 	struct eeprom_table_record err_rec;
2644 	uint64_t retired_page;
2645 
2646 	/*
	 * Only process the error if it was generated in UMC_V2, which
	 * belongs to the GPU UMCs, and occurred in DramECC (extended
	 * error code = 0); otherwise bail out.
2650 	 */
2651 	if (!m || !((smca_get_bank_type(m->bank) == SMCA_UMC_V2) &&
2652 		    (XEC(m->status, 0x3f) == 0x0)))
2653 		return NOTIFY_DONE;
2654 
	/* If it is a correctable error, return. */
2658 	if (mce_is_correctable(m))
2659 		return NOTIFY_OK;
2660 
2661 	/*
2662 	 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
2663 	 */
2664 	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
2665 
2666 	adev = find_adev(gpu_id);
2667 	if (!adev) {
2668 		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
2669 								gpu_id);
2670 		return NOTIFY_DONE;
2671 	}
2672 
2673 	/*
	 * If it is an uncorrectable error, find out the UMC instance
	 * and channel index.
2676 	 */
2677 	umc_inst = GET_UMC_INST(m->ipid);
2678 	ch_inst = GET_CHAN_INDEX(m->ipid);
2679 
2680 	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
2681 			     umc_inst, ch_inst);
2682 
2683 	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
2684 
2685 	/*
	 * Translate the UMC channel address to a physical address
2687 	 */
2688 	channel_index =
2689 		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num
2690 					  + ch_inst];
2691 
2692 	retired_page = ADDR_OF_8KB_BLOCK(m->addr) |
2693 			ADDR_OF_256B_BLOCK(channel_index) |
2694 			OFFSET_IN_256B_BLOCK(m->addr);
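	/*
	 * I.e., assuming the usual umc helper definitions: keep the 8 KiB
	 * block base of the reported address, splice the channel index
	 * into the 256 B block bits, and keep the offset within the
	 * 256 B block, yielding the soc physical address retired below.
	 */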
2695 
2696 	err_rec.address = m->addr;
2697 	err_rec.retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
2698 	err_rec.ts = (uint64_t)ktime_get_real_seconds();
2699 	err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
2700 	err_rec.cu = 0;
2701 	err_rec.mem_channel = channel_index;
2702 	err_rec.mcumc_id = umc_inst;
2703 
2704 	err_data.err_addr = &err_rec;
2705 	err_data.err_addr_cnt = 1;
2706 
2707 	if (amdgpu_bad_page_threshold != 0) {
2708 		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
2709 						err_data.err_addr_cnt);
2710 		amdgpu_ras_save_bad_pages(adev);
2711 	}
2712 
2713 	return NOTIFY_OK;
2714 }
2715 
2716 static struct notifier_block amdgpu_bad_page_nb = {
2717 	.notifier_call  = amdgpu_bad_page_notifier,
2718 	.priority       = MCE_PRIO_UC,
2719 };
2720 
2721 static void amdgpu_register_bad_pages_mca_notifier(void)
2722 {
2723 	/*
	 * Register the x86 notifier with the MCE
	 * subsystem only once.
2726 	 */
	if (!notifier_registered) {
2728 		mce_register_decode_chain(&amdgpu_bad_page_nb);
2729 		notifier_registered = true;
2730 	}
2731 }
2732 #endif
2733