/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};

struct amdgpu_ras_block_list {
	/* ras block link */
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};
const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER	(100 * 1024 * 1024ULL)

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
	int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data = {0, 0, 0, NULL};
	struct eeprom_table_record err_rec;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "	echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;
	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
	    obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_string[i]) == 0)
			return 0;
	}
	return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;
	/* default value is 0 if the mask is not set by user */
	u32 instance_mask = 0;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue and ce errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
			data->inject.instance_mask = instance_mask;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}

static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
				struct ras_debug_if *data)
{
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t mask, inst_mask = data->inject.instance_mask;

	/* no need to set instance mask if there is only one instance */
	if (num_xcc <= 1 && inst_mask) {
		data->inject.instance_mask = 0;
		dev_dbg(adev->dev,
346 "RAS inject mask(0x%x) isn't supported and force it to 0.\n",
347 inst_mask);

		return;
	}

	switch (data->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		mask = GENMASK(num_xcc - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		mask = GENMASK(adev->sdma.num_instances - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__VCN:
	case AMDGPU_RAS_BLOCK__JPEG:
		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
		break;
	default:
		mask = inst_mask;
		break;
	}

	/* remove invalid bits in instance mask */
	data->inject.instance_mask &= mask;
	if (inst_mask != data->inject.instance_mask)
		dev_dbg(adev->dev,
			"Adjust RAS inject mask 0x%x to 0x%x\n",
			inst_mask, data->inject.instance_mask);
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index, and name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head: address, value and mask.
 * As their names indicate, the inject operation writes the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Takes ::head as its data.
 * - 1: enable RAS on the block. Takes ::head as its data.
 * - 2: inject errors on the block. Takes ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
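 *
 * A minimal user-space sketch is shown below. It assumes the program
 * replicates the struct ras_debug_if layout and the RAS enum values from
 * this driver (they are not exported as UAPI), and the card index is
 * illustrative:
 *
 * .. code-block:: c
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	struct ras_debug_if data = { 0 };
 *	int fd;
 *
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	data.op = 1;	/* 1 == enable RAS on the block */
 *
 *	fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, &data, sizeof(data));
 *		close(fd);
 *	}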
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *	see ras_block_string[] for details
 *
 * The error type is one of: ue, ce, where,
 *	ue is multi-uncorrectable
 *	ce is single-correctable
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional, default value is 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev, "RAS WARN: error injection "
				"currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size &&
		    adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev, "RAS WARN: input address "
					"0x%llx is invalid.",
					data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
					"already been marked as bad!\n",
					data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experience ECC errors in vram. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
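 *
 * From the shell the file can be read directly; the card index and block
 * name below are illustrative and vary by system:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count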
 *
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
	    obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0))
		list_del(&obj->node);
	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else
		obj = &con->objs[head->block];

	/* already exists. return obj? */
	if (alive_obj(obj))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else
			obj = &con->objs[head->block];

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * The IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we created the obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	/* Do not enable ras feature if it is not allowed */
	if (enable &&
	    head->block != AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	/* Only enable gfx ras feature from host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}

		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			kfree(info);
			return ret;
		}

		kfree(info);
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);

	return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						"RAS INFO: %s setup object\n",
						get_ras_block_str(head));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd.*/
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* gfx block ras disable cmd must be sent to ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp.
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int i;
	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};

		if (i == AMDGPU_RAS_BLOCK__MCA)
			continue;

		if (bypass) {
			/*
			 * bypass psp. vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.type = default_ras_type,
			.sub_block_index = i,
		};

		if (bypass) {
			/*
			 * bypass psp. vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
		enum amdgpu_ras_block block)
{
	if (!block_obj)
		return -EINVAL;

	if (block_obj->ras_comm.block == block)
		return 0;

	return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
					enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;

	if (block >= AMDGPU_RAS_BLOCK__LAST)
		return NULL;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_block_match) {
			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
				return obj;
		} else {
			if (amdgpu_ras_block_match_default(obj, block) == 0)
				return obj;
		}
	}

	return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choose the right query method according to
	 * whether smu supports querying error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
				  struct ras_query_if *info)
{
	struct amdgpu_ras_block_object *block_obj = NULL;
	struct ras_manager *obj;
	struct ras_err_data err_data = {0, 0, 0, NULL};

	/* validate info before dereferencing it for the obj lookup */
	if (!info || info->head.block == AMDGPU_RAS_BLOCK_COUNT)
		return -EINVAL;

	obj = amdgpu_ras_find_obj(adev, &info->head);
	if (!obj)
		return -EINVAL;

	if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
		amdgpu_ras_get_ecc_info(adev, &err_data);
	} else {
		block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
		if (!block_obj || !block_obj->hw_ops) {
			dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
				     get_ras_block_str(&info->head));
			return -EINVAL;
		}

		if (block_obj->hw_ops->query_ras_error_count)
			block_obj->hw_ops->query_ras_error_count(adev, &err_data);

		if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
		    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
		    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
			if (block_obj->hw_ops->query_ras_error_status)
				block_obj->hw_ops->query_ras_error_status(adev);
		}
	}

	obj->err_data.ue_count += err_data.ue_count;
	obj->err_data.ce_count += err_data.ce_count;

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;

	if (err_data.ce_count) {
		if (!adev->aid_mask &&
		    adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id &&
		    adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld correctable hardware errors "
				 "detected in %s block, no user "
				 "action is needed.\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 obj->err_data.ce_count,
				 get_ras_block_str(&info->head));
		} else {
			dev_info(adev->dev, "%ld correctable hardware errors "
				 "detected in %s block, no user "
				 "action is needed.\n",
				 obj->err_data.ce_count,
				 get_ras_block_str(&info->head));
		}
	}
	if (err_data.ue_count) {
		if (!adev->aid_mask &&
		    adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id &&
		    adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 obj->err_data.ue_count,
				 get_ras_block_str(&info->head));
		} else {
			dev_info(adev->dev, "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 obj->err_data.ue_count,
				 get_ras_block_str(&info->head));
		}
	}

	return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

	if (!amdgpu_ras_is_supported(adev, block))
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     ras_block_str(block));
		return -EINVAL;
	}

	if (block_obj->hw_ops->reset_ras_error_count)
		block_obj->hw_ops->reset_ras_error_count(adev);

	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
		if (block_obj->hw_ops->reset_ras_error_status)
			block_obj->hw_ops->reset_ras_error_status(adev);
	}

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);

	/* inject on guest isn't allowed, return success directly */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!obj)
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (block_obj->hw_ops->ras_error_inject) {
		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
		else /* Special ras_error_inject is defined (e.g: xgmi) */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
						info->instance_mask);
	} else {
		/* default path */
		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 on query success, or when @query_info is NULL and there is
 * nothing to do; otherwise return an error on failure.
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear;
	 * no need to explicitly reset the err status after the query call */
	if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
	    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for
 * a specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support query of ras error counters/status
 *
 * If set, @ce_count or @ue_count, count and return the corresponding
 * error counts in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
				 unsigned long *ce_count,
				 unsigned long *ue_count,
				 struct ras_query_if *query_info)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	unsigned long ce, ue;
	int ret;

	if (!adev->ras_enabled || !con)
		return -EOPNOTSUPP;

	/* Don't count since no reporting.
	 */
	if (!ce_count && !ue_count)
		return 0;

	ce = 0;
	ue = 0;
	if (!query_info) {
		/* query all the ip blocks that support ras query interface */
		list_for_each_entry(obj, &con->head, node) {
			struct ras_query_if info = {
				.head = obj->head,
			};

			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
		}
	} else {
		/* query specific ip block */
		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
	}

	if (ret)
		return ret;

	if (ce_count)
		*ce_count = ce;

	if (ue_count)
		*ue_count = ue;

	return 0;
}
/* query/inject/cure end */


/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not able to be used.
 *
 * P: pending for reserve, this gpu page is marked as bad and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. This gpu page can't be reserved.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
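 *
 * As with the other RAS sysfs files, the list can simply be read from the
 * shell; the card index below is illustrative:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages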
 *
 */

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
			       "0x%08x : 0x%08x : %1s\n",
			       bps[start].bp,
			       bps[start].size,
			       amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
				&con->badpages_attr.attr,
				RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	if (adev->dev->kobj.sd)
		sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
		 "%s_err_count", head->name);

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_feature_node(adev);

	return 0;
}
/* sysfs end */

/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover. However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
 *
 */
/* debugfs begin */
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *dir;

	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_ops);
	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
			   &con->bad_page_cnt_threshold);
	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_size_ops);
	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
						       S_IRUGO, dir, adev,
						       &amdgpu_ras_debugfs_eeprom_table_ops);
	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);

	/*
	 * After an uncorrectable error happens, GPU recovery will usually be
	 * scheduled. But due to a known problem where GPU recovery can fail
	 * to bring the GPU back, the interface below provides a direct way
	 * for the user to reboot the system automatically when an
	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
	 * GPU recovery routine will never be called.
	 */
	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);

	/*
	 * User could set this not to clean up hardware's error count register
	 * of RAS IPs during ras recovery.
	 */
	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
			    &con->disable_ras_err_cnt_harvest);
	return dir;
}

static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
				      struct ras_fs_if *head,
				      struct dentry *dir)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || !dir)
		return;

	get_obj(obj);

	memcpy(obj->fs_data.debugfs_name,
			head->debugfs_name,
			sizeof(obj->fs_data.debugfs_name));

	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
			    obj, &amdgpu_ras_debugfs_ops);
}

void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct dentry *dir;
	struct ras_manager *obj;
	struct ras_fs_if fs_info;

	/*
	 * it won't be called in resume path, no need to check
	 * suspend and gpu reset status
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
		return;

	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);

	list_for_each_entry(obj, &con->head, node) {
		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
		    (obj->attr_inuse == 1)) {
			sprintf(fs_info.debugfs_name, "%s_err_inject",
				get_ras_block_str(&obj->head));
			fs_info.head = obj->head;
			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
		}
	}
}

/* debugfs end */

/* ras fs */
static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
		amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
		amdgpu_ras_sysfs_features_read, NULL);
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute_group group = {
		.name = RAS_FS_NAME,
	};
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct bin_attribute *bin_attrs[] = {
		NULL,
		NULL,
	};
	int r;

	/* add features entry */
	con->features_attr = dev_attr_features;
	group.attrs = attrs;
	sysfs_attr_init(attrs[0]);

	if (amdgpu_bad_page_threshold != 0) {
		/* add bad_page_features entry */
		bin_attr_gpu_vram_bad_pages.private = NULL;
		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
		bin_attrs[0] = &con->badpages_attr;
		group.bin_attrs = bin_attrs;
		sysfs_bin_attr_init(bin_attrs[0]);
	}

	r = sysfs_create_group(&adev->dev->kobj, &group);
	if (r)
		dev_err(adev->dev, "Failed to create RAS sysfs group!");

	return 0;
}

static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *con_obj, *ip_obj, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
			if (ip_obj)
				put_obj(ip_obj);
		}
	}

	amdgpu_ras_sysfs_remove_all(adev);
	return 0;
}
/* ras fs end */

/* ih begin */

/* For the hardware that cannot enable the bif ring for both ras_controller_irq
 * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
 * register to check whether the interrupt is triggered or not, and properly
 * ack the interrupt if it is there
 */
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
	/* Fatal error events are handled on host side */
	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
}

static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	bool poison_stat = false;
	struct amdgpu_device *adev = obj->adev;
	struct amdgpu_ras_block_object *block_obj =
		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);

	if (!block_obj)
		return;

	/* both query_poison_status and handle_poison_consumption are optional,
	 * but at least one of them should be implemented if we need the poison
	 * consumption handler
	 */
	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
		poison_stat = block_obj->hw_ops->query_poison_status(adev);
		if (!poison_stat) {
			/* Not a poison consumption interrupt, no need to handle it */
			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
					block_obj->ras_comm.name);

			return;
		}
	}

	amdgpu_umc_poison_handler(adev, false);

	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);

	/* gpu reset is fallback for failed and default cases */
	if (poison_stat) {
		dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
				block_obj->ras_comm.name);
		amdgpu_ras_reset_gpu(adev);
	} else {
		amdgpu_gfx_poison_consumption_handler(adev, entry);
	}
}

static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	dev_info(obj->adev->dev,
		"Poison is created, no user action is needed.\n");
}

static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct ras_err_data err_data = {0, 0, 0, NULL};
	int ret;

	if (!data->cb)
		return;

	/* Let the IP handle its data; maybe we need to get the output
	 * from the callback to update the error type/count, etc.
	 */
	ret = data->cb(obj->adev, &err_data, entry);
	/* ue will trigger an interrupt, and in that case
	 * we need to do a reset to recover the whole system.
	 * But leave the IP to do that recovery; here we just dispatch
	 * the error.
	 */
	if (ret == AMDGPU_RAS_SUCCESS) {
		/* these counts could be left as 0 if
		 * some blocks do not count error number
		 */
		obj->err_data.ue_count += err_data.ue_count;
		obj->err_data.ce_count += err_data.ce_count;
	}
}

static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct amdgpu_iv_entry entry;

	while (data->rptr != data->wptr) {
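		/* rmb() pairs with the wmb() in amdgpu_ras_interrupt_dispatch():
		 * once the new wptr has been observed, the ring entry written
		 * by the producer must be visible before we copy it out.
		 */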
1747 rmb();
1748 memcpy(&entry, &data->ring[data->rptr],
1749 data->element_size);
1750
1751 wmb();
1752 data->rptr = (data->aligned_element_size +
1753 data->rptr) % data->ring_size;
1754
1755 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
1756 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1757 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
1758 else
1759 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
1760 } else {
1761 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1762 amdgpu_ras_interrupt_umc_handler(obj, &entry);
1763 else
1764 dev_warn(obj->adev->dev,
1765 "No RAS interrupt handler for non-UMC block with poison disabled.\n");
1766 }
1767 }
1768 }
1769
1770 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
1771 {
1772 struct ras_ih_data *data =
1773 container_of(work, struct ras_ih_data, ih_work);
1774 struct ras_manager *obj =
1775 container_of(data, struct ras_manager, ih_data);
1776
1777 amdgpu_ras_interrupt_handler(obj);
1778 }
1779
1780 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1781 struct ras_dispatch_if *info)
1782 {
1783 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1784 struct ras_ih_data *data;
1785
1786 if (!obj)
1787 return -EINVAL;
1788 data = &obj->ih_data;
1789 if (data->inuse == 0)
1790 return 0;
1791
1792 /* The ring might overflow... */
1793 memcpy(&data->ring[data->wptr], info->entry,
1794 data->element_size);
1795
1796 wmb();
1797 data->wptr = (data->aligned_element_size +
1798 data->wptr) % data->ring_size;
1799
1800 schedule_work(&data->ih_work);
1801
1802 return 0;
1803 }
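
/*
 * Ring indexing sketch: rptr and wptr advance in units of
 * aligned_element_size and wrap via the modulo above. With the sizing
 * done in amdgpu_ras_interrupt_add_handler() below, and assuming for
 * illustration an aligned element of 64 bytes, the ring holds 64
 * entries in 4096 bytes and wptr walks 0, 64, 128, ..., 4032, 0, ...
 * Nothing guards against overflow (hence the note above): if 64
 * entries arrive before the worker drains any, unread entries are
 * overwritten.
 */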
1804
1805 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1806 struct ras_common_if *head)
1807 {
1808 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1809 struct ras_ih_data *data;
1810
1811 if (!obj)
1812 return -EINVAL;
1813
1814 data = &obj->ih_data;
1815 if (data->inuse == 0)
1816 return 0;
1817
1818 cancel_work_sync(&data->ih_work);
1819
1820 kfree(data->ring);
1821 memset(data, 0, sizeof(*data));
1822 put_obj(obj);
1823
1824 return 0;
1825 }
1826
1827 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1828 struct ras_common_if *head)
1829 {
1830 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1831 struct ras_ih_data *data;
1832 struct amdgpu_ras_block_object *ras_obj;
1833
1834 if (!obj) {
1835 /* in case we register the IH before enabling the ras feature */
1836 obj = amdgpu_ras_create_obj(adev, head);
1837 if (!obj)
1838 return -EINVAL;
1839 } else
1840 get_obj(obj);
1841
1842 ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
1843
1844 data = &obj->ih_data;
1845 /* add the callback, etc. */
1846 *data = (struct ras_ih_data) {
1847 .inuse = 0,
1848 .cb = ras_obj->ras_cb,
1849 .element_size = sizeof(struct amdgpu_iv_entry),
1850 .rptr = 0,
1851 .wptr = 0,
1852 };
1853
1854 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1855
1856 data->aligned_element_size = ALIGN(data->element_size, 8);
1857 /* the ring can store 64 iv entries. */
1858 data->ring_size = 64 * data->aligned_element_size;
1859 data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1860 if (!data->ring) {
1861 put_obj(obj);
1862 return -ENOMEM;
1863 }
1864
1865 /* IH is ready */
1866 data->inuse = 1;
1867
1868 return 0;
1869 }
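
/*
 * Typical usage (sketch): amdgpu_ras_block_late_init() below calls this
 * helper for every block that provides ras_cb or one of the poison
 * hw_ops, so IP code rarely invokes it directly. A block doing so by
 * hand (the my_block pointer is hypothetical) would look like:
 *
 *	r = amdgpu_ras_interrupt_add_handler(adev, &my_block->ras_comm);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_ras_interrupt_remove_handler(adev, &my_block->ras_comm);
 */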
1870
1871 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1872 {
1873 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1874 struct ras_manager *obj, *tmp;
1875
1876 list_for_each_entry_safe(obj, tmp, &con->head, node) {
1877 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
1878 }
1879
1880 return 0;
1881 }
1882 /* ih end */
1883
1884 /* traverse all IPs except NBIO to query error counters */
1885 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1886 {
1887 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1888 struct ras_manager *obj;
1889
1890 if (!adev->ras_enabled || !con)
1891 return;
1892
1893 list_for_each_entry(obj, &con->head, node) {
1894 struct ras_query_if info = {
1895 .head = obj->head,
1896 };
1897
1898 /*
1899 * the PCIE_BIF IP has a separate isr for the ras controller
1900 * interrupt; the specific ras counter query will be
1901 * done in that isr. So skip such blocks in the common
1902 * sync flood interrupt isr.
1903 */
1904 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1905 continue;
1906
1907 /*
1908 * this is a workaround for aldebaran: skip sending a message to
1909 * the smu to get the ecc_info table, since the smu currently
1910 * fails to handle that request.
1911 * it should be removed once the smu fix for ecc_info handling lands.
1912 */
1913 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
1914 (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
1915 continue;
1916
1917 amdgpu_ras_query_error_status(adev, &info);
1918
1919 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1920 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
1921 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
1922 if (amdgpu_ras_reset_error_status(adev, info.head.block))
1923 dev_warn(adev->dev, "Failed to reset error counter and error status");
1924 }
1925 }
1926 }
1927
1928 /* Parse RdRspStatus and WrRspStatus */
1929 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1930 struct ras_query_if *info)
1931 {
1932 struct amdgpu_ras_block_object *block_obj;
1933 /*
1934 * Only two blocks need to query the read/write
1935 * RspStatus at the current state
1936 */
1937 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
1938 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
1939 return;
1940
1941 block_obj = amdgpu_ras_get_ras_block(adev,
1942 info->head.block,
1943 info->head.sub_block_index);
1944
1945 if (!block_obj || !block_obj->hw_ops) {
1946 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1947 get_ras_block_str(&info->head));
1948 return;
1949 }
1950
1951 if (block_obj->hw_ops->query_ras_error_status)
1952 block_obj->hw_ops->query_ras_error_status(adev);
1954 }
1955
1956 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1957 {
1958 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1959 struct ras_manager *obj;
1960
1961 if (!adev->ras_enabled || !con)
1962 return;
1963
1964 list_for_each_entry(obj, &con->head, node) {
1965 struct ras_query_if info = {
1966 .head = obj->head,
1967 };
1968
1969 amdgpu_ras_error_status_query(adev, &info);
1970 }
1971 }
1972
1973 /* recovery begin */
1974
1975 /* return 0 on success.
1976 * the caller needs to free bps.
1977 */
1978 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1979 struct ras_badpage **bps, unsigned int *count)
1980 {
1981 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1982 struct ras_err_handler_data *data;
1983 int i = 0;
1984 int ret = 0, status;
1985
1986 if (!con || !con->eh_data || !bps || !count)
1987 return -EINVAL;
1988
1989 mutex_lock(&con->recovery_lock);
1990 data = con->eh_data;
1991 if (!data || data->count == 0) {
1992 *bps = NULL;
1993 ret = -EINVAL;
1994 goto out;
1995 }
1996
1997 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
1998 if (!*bps) {
1999 ret = -ENOMEM;
2000 goto out;
2001 }
2002
2003 for (; i < data->count; i++) {
2004 (*bps)[i] = (struct ras_badpage){
2005 .bp = data->bps[i].retired_page,
2006 .size = AMDGPU_GPU_PAGE_SIZE,
2007 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2008 };
2009 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2010 data->bps[i].retired_page);
2011 if (status == -EBUSY)
2012 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2013 else if (status == -ENOENT)
2014 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2015 }
2016
2017 *count = data->count;
2018 out:
2019 mutex_unlock(&con->recovery_lock);
2020 return ret;
2021 }
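
/*
 * Caller-side sketch (the caller shown is hypothetical): a consumer of
 * this helper, such as the badpages sysfs read path, is expected to use
 * it roughly as
 *
 *	struct ras_badpage *bps = NULL;
 *	unsigned int count = 0;
 *
 *	if (!amdgpu_ras_badpages_read(adev, &bps, &count)) {
 *		// format count entries from bps[], then release them
 *		kfree(bps);
 *	}
 */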
2022
2023 static void amdgpu_ras_do_recovery(struct work_struct *work)
2024 {
2025 struct amdgpu_ras *ras =
2026 container_of(work, struct amdgpu_ras, recovery_work);
2027 struct amdgpu_device *remote_adev = NULL;
2028 struct amdgpu_device *adev = ras->adev;
2029 struct list_head device_list, *device_list_handle = NULL;
2030
2031 if (!ras->disable_ras_err_cnt_harvest) {
2032 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2033
2034 /* Build list of devices to query RAS related errors */
2035 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2036 device_list_handle = &hive->device_list;
2037 } else {
2038 INIT_LIST_HEAD(&device_list);
2039 list_add_tail(&adev->gmc.xgmi.head, &device_list);
2040 device_list_handle = &device_list;
2041 }
2042
2043 list_for_each_entry(remote_adev,
2044 device_list_handle, gmc.xgmi.head) {
2045 amdgpu_ras_query_err_status(remote_adev);
2046 amdgpu_ras_log_on_err_counter(remote_adev);
2047 }
2048
2049 amdgpu_put_xgmi_hive(hive);
2050 }
2051
2052 if (amdgpu_device_should_recover_gpu(ras->adev)) {
2053 struct amdgpu_reset_context reset_context;
2054 memset(&reset_context, 0, sizeof(reset_context));
2055
2056 reset_context.method = AMD_RESET_METHOD_NONE;
2057 reset_context.reset_req_dev = adev;
2058
2059 /* Perform full reset in fatal error mode */
2060 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2061 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2062 else {
2063 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2064
2065 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2066 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2067 reset_context.method = AMD_RESET_METHOD_MODE2;
2068 }
2069
2070 /* If a fatal error occurs in poison mode, mode1 reset is used to
2071 * recover the gpu.
2072 */
2073 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2074 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2075 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2076
2077 psp_fatal_error_recovery_quirk(&adev->psp);
2078 }
2079 }
2080
2081 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2082 }
2083 atomic_set(&ras->in_recovery, 0);
2084 }
2085
2086 /* alloc/realloc bps array */
2087 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2088 struct ras_err_handler_data *data, int pages)
2089 {
2090 unsigned int old_space = data->count + data->space_left;
2091 unsigned int new_space = old_space + pages;
2092 unsigned int align_space = ALIGN(new_space, 512);
2093 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2094
2095 if (!bps) {
2096 return -ENOMEM;
2097 }
2098
2099 if (data->bps) {
2100 memcpy(bps, data->bps,
2101 data->count * sizeof(*data->bps));
2102 kfree(data->bps);
2103 }
2104
2105 data->bps = bps;
2106 data->space_left += align_space - old_space;
2107 return 0;
2108 }
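
/*
 * Growth example: with data->count = 300, data->space_left = 0 and
 * pages = 256, old_space = 300, new_space = 556 and
 * align_space = ALIGN(556, 512) = 1024, so the array grows to 1024
 * records and space_left becomes 724. Rounding up to multiples of 512
 * records keeps reallocations rare while retired pages trickle in.
 */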
2109
2110 /* it deals with vram only. */
2111 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2112 struct eeprom_table_record *bps, int pages)
2113 {
2114 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2115 struct ras_err_handler_data *data;
2116 int ret = 0;
2117 uint32_t i;
2118
2119 if (!con || !con->eh_data || !bps || pages <= 0)
2120 return 0;
2121
2122 mutex_lock(&con->recovery_lock);
2123 data = con->eh_data;
2124 if (!data)
2125 goto out;
2126
2127 for (i = 0; i < pages; i++) {
2128 if (amdgpu_ras_check_bad_page_unlock(con,
2129 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2130 continue;
2131
2132 if (!data->space_left &&
2133 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2134 ret = -ENOMEM;
2135 goto out;
2136 }
2137
2138 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
2139 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2140 AMDGPU_GPU_PAGE_SIZE);
2141
2142 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2143 data->count++;
2144 data->space_left--;
2145 }
2146 out:
2147 mutex_unlock(&con->recovery_lock);
2148
2149 return ret;
2150 }
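
/*
 * Usage sketch (the address is hypothetical): retiring a single page
 * and persisting it could look like
 *
 *	struct eeprom_table_record bp = {
 *		.retired_page = 0x5000 >> AMDGPU_GPU_PAGE_SHIFT,
 *	};
 *
 *	amdgpu_ras_add_bad_pages(adev, &bp, 1);
 *	amdgpu_ras_save_bad_pages(adev, NULL);
 *
 * where amdgpu_ras_save_bad_pages() below appends only the records not
 * yet present in EEPROM.
 */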
2151
2152 /*
2153 * write the error record array to eeprom; the function should be
2154 * protected by recovery_lock
2155 * new_cnt: newly added UE count, excluding reserved bad pages; can be NULL
2156 */
2157 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2158 unsigned long *new_cnt)
2159 {
2160 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2161 struct ras_err_handler_data *data;
2162 struct amdgpu_ras_eeprom_control *control;
2163 int save_count;
2164
2165 if (!con || !con->eh_data) {
2166 if (new_cnt)
2167 *new_cnt = 0;
2168
2169 return 0;
2170 }
2171
2172 mutex_lock(&con->recovery_lock);
2173 control = &con->eeprom_control;
2174 data = con->eh_data;
2175 save_count = data->count - control->ras_num_recs;
2176 mutex_unlock(&con->recovery_lock);
2177
2178 if (new_cnt)
2179 *new_cnt = save_count / adev->umc.retire_unit;
2180
2181 /* only new entries are saved */
2182 if (save_count > 0) {
2183 if (amdgpu_ras_eeprom_append(control,
2184 &data->bps[control->ras_num_recs],
2185 save_count)) {
2186 dev_err(adev->dev, "Failed to save EEPROM table data!");
2187 return -EIO;
2188 }
2189
2190 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2191 }
2192
2193 return 0;
2194 }
2195
2196 /*
2197 * read error record array in eeprom and reserve enough space for
2198 * storing new bad pages
2199 */
2200 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2201 {
2202 struct amdgpu_ras_eeprom_control *control =
2203 &adev->psp.ras_context.ras->eeprom_control;
2204 struct eeprom_table_record *bps;
2205 int ret;
2206
2207 /* no bad page record, skip eeprom access */
2208 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2209 return 0;
2210
2211 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2212 if (!bps)
2213 return -ENOMEM;
2214
2215 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2216 if (ret)
2217 dev_err(adev->dev, "Failed to load EEPROM table records!");
2218 else
2219 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2220
2221 kfree(bps);
2222 return ret;
2223 }
2224
2225 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2226 uint64_t addr)
2227 {
2228 struct ras_err_handler_data *data = con->eh_data;
2229 int i;
2230
2231 addr >>= AMDGPU_GPU_PAGE_SHIFT;
2232 for (i = 0; i < data->count; i++)
2233 if (addr == data->bps[i].retired_page)
2234 return true;
2235
2236 return false;
2237 }
2238
2239 /*
2240 * check if an address belongs to bad page
2241 *
2242 * Note: this check is only for umc block
2243 */
2244 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2245 uint64_t addr)
2246 {
2247 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2248 bool ret = false;
2249
2250 if (!con || !con->eh_data)
2251 return ret;
2252
2253 mutex_lock(&con->recovery_lock);
2254 ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2255 mutex_unlock(&con->recovery_lock);
2256 return ret;
2257 }
2258
2259 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2260 uint32_t max_count)
2261 {
2262 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2263
2264 /*
2265 * Justification of value bad_page_cnt_threshold in ras structure
2266 *
2267 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2268 * in eeprom, or amdgpu_bad_page_threshold == -2; this introduces
2269 * two scenarios:
2270 *
2271 * Bad page retirement enablement:
2272 * - If amdgpu_bad_page_threshold = -2,
2273 * bad_page_cnt_threshold = the typical value given by the formula.
2274 *
2275 * - When the value from user is 0 < amdgpu_bad_page_threshold <
2276 * max record length in eeprom, use it directly.
2277 *
2278 * Bad page retirement disablement:
2279 * - If amdgpu_bad_page_threshold = 0, bad page retirement
2280 * functionality is disabled, and bad_page_cnt_threshold will
2281 * have no effect.
2282 */
2283
2284 if (amdgpu_bad_page_threshold < 0) {
2285 u64 val = adev->gmc.mc_vram_size;
2286
2287 do_div(val, RAS_BAD_PAGE_COVER);
2288 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2289 max_count);
2290 } else {
2291 con->bad_page_cnt_threshold = min_t(int, max_count,
2292 amdgpu_bad_page_threshold);
2293 }
2294 }
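
/*
 * Worked example for the default path (amdgpu_bad_page_threshold == -2):
 * a 16 GiB board gives val = 17179869184 / RAS_BAD_PAGE_COVER = 163,
 * so bad page retirement keeps working until min(163, max_count) pages
 * have been retired.
 */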
2295
2296 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2297 {
2298 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2299 struct ras_err_handler_data **data;
2300 u32 max_eeprom_records_count = 0;
2301 bool exc_err_limit = false;
2302 int ret;
2303
2304 if (!con || amdgpu_sriov_vf(adev))
2305 return 0;
2306
2307 /* Allow access to RAS EEPROM via debugfs, when the ASIC
2308 * supports RAS and debugfs is enabled, but when
2309 * adev->ras_enabled is unset, i.e. when "ras_enable"
2310 * module parameter is set to 0.
2311 */
2312 con->adev = adev;
2313
2314 if (!adev->ras_enabled)
2315 return 0;
2316
2317 data = &con->eh_data;
2318 *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
2319 if (!*data) {
2320 ret = -ENOMEM;
2321 goto out;
2322 }
2323
2324 mutex_init(&con->recovery_lock);
2325 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2326 atomic_set(&con->in_recovery, 0);
2327 con->eeprom_control.bad_channel_bitmap = 0;
2328
2329 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
2330 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2331
2332 /* Todo: during testing the SMU might fail to read the eeprom through I2C
2333 * when the GPU is pending an XGMI reset at probe time
2334 * (mostly after a second bus reset); skip it for now
2335 */
2336 if (adev->gmc.xgmi.pending_reset)
2337 return 0;
2338 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2339 /*
2340 * This call fails when exc_err_limit is true or
2341 * ret != 0.
2342 */
2343 if (exc_err_limit || ret)
2344 goto free;
2345
2346 if (con->eeprom_control.ras_num_recs) {
2347 ret = amdgpu_ras_load_bad_pages(adev);
2348 if (ret)
2349 goto free;
2350
2351 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2352
2353 if (con->update_channel_flag) {
2354 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2355 con->update_channel_flag = false;
2356 }
2357 }
2358
2359 #ifdef CONFIG_X86_MCE_AMD
2360 if ((adev->asic_type == CHIP_ALDEBARAN) &&
2361 (adev->gmc.xgmi.connected_to_cpu))
2362 amdgpu_register_bad_pages_mca_notifier(adev);
2363 #endif
2364 return 0;
2365
2366 free:
2367 kfree((*data)->bps);
2368 kfree(*data);
2369 con->eh_data = NULL;
2370 out:
2371 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2372
2373 /*
2374 * Except error threshold exceeding case, other failure cases in this
2375 * function would not fail amdgpu driver init.
2376 */
2377 if (!exc_err_limit)
2378 ret = 0;
2379 else
2380 ret = -EINVAL;
2381
2382 return ret;
2383 }
2384
2385 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2386 {
2387 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2388 struct ras_err_handler_data *data = con->eh_data;
2389
2390 /* if recovery_init failed to init it, fini is useless */
2391 if (!data)
2392 return 0;
2393
2394 cancel_work_sync(&con->recovery_work);
2395
2396 mutex_lock(&con->recovery_lock);
2397 con->eh_data = NULL;
2398 kfree(data->bps);
2399 kfree(data);
2400 mutex_unlock(&con->recovery_lock);
2401
2402 return 0;
2403 }
2404 /* recovery end */
2405
2406 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2407 {
2408 if (amdgpu_sriov_vf(adev)) {
2409 switch (adev->ip_versions[MP0_HWIP][0]) {
2410 case IP_VERSION(13, 0, 2):
2411 case IP_VERSION(13, 0, 6):
2412 return true;
2413 default:
2414 return false;
2415 }
2416 }
2417
2418 if (adev->asic_type == CHIP_IP_DISCOVERY) {
2419 switch (adev->ip_versions[MP0_HWIP][0]) {
2420 case IP_VERSION(13, 0, 0):
2421 case IP_VERSION(13, 0, 6):
2422 case IP_VERSION(13, 0, 10):
2423 return true;
2424 default:
2425 return false;
2426 }
2427 }
2428
2429 return adev->asic_type == CHIP_VEGA10 ||
2430 adev->asic_type == CHIP_VEGA20 ||
2431 adev->asic_type == CHIP_ARCTURUS ||
2432 adev->asic_type == CHIP_ALDEBARAN ||
2433 adev->asic_type == CHIP_SIENNA_CICHLID;
2434 }
2435
2436 /*
2437 * this is a workaround for the vega20 workstation sku:
2438 * force enable gfx ras and ignore the vbios gfx ras flag,
2439 * since GC EDC can not be written
2440 */
2441 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2442 {
2443 struct atom_context *ctx = adev->mode_info.atom_context;
2444
2445 if (!ctx)
2446 return;
2447
2448 if (strnstr(ctx->vbios_pn, "D16406",
2449 sizeof(ctx->vbios_pn)) ||
2450 strnstr(ctx->vbios_pn, "D36002",
2451 sizeof(ctx->vbios_pn)))
2452 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2453 }
2454
2455 /*
2456 * check the hardware's ras ability, which will be saved in hw_supported.
2457 * if the hardware does not support ras, we can skip some ras initialization
2458 * and forbid some ras operations from the IPs.
2459 * if software itself, say a boot parameter, limits the ras ability, we still
2460 * need to allow the IPs to do some limited operations, like disable. In such
2461 * a case, we have to initialize ras as normal, but need to check whether an
2462 * operation is allowed in each function.
2463 */
2464 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2465 {
2466 adev->ras_hw_enabled = adev->ras_enabled = 0;
2467
2468 if (!amdgpu_ras_asic_supported(adev))
2469 return;
2470
2471 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
2472 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2473 dev_info(adev->dev, "MEM ECC is active.\n");
2474 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2475 1 << AMDGPU_RAS_BLOCK__DF);
2476 } else {
2477 dev_info(adev->dev, "MEM ECC is not presented.\n");
2478 }
2479
2480 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2481 dev_info(adev->dev, "SRAM ECC is active.\n");
2482 if (!amdgpu_sriov_vf(adev))
2483 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2484 1 << AMDGPU_RAS_BLOCK__DF);
2485 else
2486 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
2487 1 << AMDGPU_RAS_BLOCK__SDMA |
2488 1 << AMDGPU_RAS_BLOCK__GFX);
2489
2490 /* VCN/JPEG RAS can be supported in both bare metal and
2491 * SRIOV environments
2492 */
2493 if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
2494 adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
2495 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2496 1 << AMDGPU_RAS_BLOCK__JPEG);
2497 else
2498 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2499 1 << AMDGPU_RAS_BLOCK__JPEG);
2500
2501 /*
2502 * XGMI RAS is not supported if xgmi num physical nodes
2503 * is zero
2504 */
2505 if (!adev->gmc.xgmi.num_physical_nodes)
2506 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
2507 } else {
2508 dev_info(adev->dev, "SRAM ECC is not presented.\n");
2509 }
2510 } else {
2511 /* the driver only manages a few IP blocks' RAS features
2512 * when the GPU is connected to the CPU through XGMI */
2513 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2514 1 << AMDGPU_RAS_BLOCK__SDMA |
2515 1 << AMDGPU_RAS_BLOCK__MMHUB);
2516 }
2517
2518 amdgpu_ras_get_quirks(adev);
2519
2520 /* hw_supported needs to be aligned with RAS block mask. */
2521 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2522
2524 /*
2525 * Disable ras feature for aqua vanjaram
2526 * by default on apu platform.
2527 */
2528 if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6) &&
2529 adev->gmc.is_app_apu)
2530 adev->ras_enabled = amdgpu_ras_enable != 1 ? 0 :
2531 adev->ras_hw_enabled & amdgpu_ras_mask;
2532 else
2533 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2534 adev->ras_hw_enabled & amdgpu_ras_mask;
2535 }
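
/*
 * Both masks are plain per-block bitmaps indexed by enum
 * amdgpu_ras_block, so a quick check can be open-coded (illustrative
 * only):
 *
 *	bool gfx_ras = !!(adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX));
 *
 * Prefer amdgpu_ras_is_supported() below, which also covers the
 * poison-mode special case.
 */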
2536
2537 static void amdgpu_ras_counte_dw(struct work_struct *work)
2538 {
2539 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2540 ras_counte_delay_work.work);
2541 struct amdgpu_device *adev = con->adev;
2542 struct drm_device *dev = adev_to_drm(adev);
2543 unsigned long ce_count, ue_count;
2544 int res;
2545
2546 res = pm_runtime_get_sync(dev->dev);
2547 if (res < 0)
2548 goto Out;
2549
2550 /* Cache new values.
2551 */
2552 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
2553 atomic_set(&con->ras_ce_count, ce_count);
2554 atomic_set(&con->ras_ue_count, ue_count);
2555 }
2556
2557 pm_runtime_mark_last_busy(dev->dev);
2558 Out:
2559 pm_runtime_put_autosuspend(dev->dev);
2560 }
2561
2562 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
2563 {
2564 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2565 bool df_poison, umc_poison;
2566
2567 /* poison setting is useless on SRIOV guest */
2568 if (amdgpu_sriov_vf(adev) || !con)
2569 return;
2570
2571 /* Init poison supported flag, the default value is false */
2572 if (adev->gmc.xgmi.connected_to_cpu) {
2573 /* enabled by default when GPU is connected to CPU */
2574 con->poison_supported = true;
2575 } else if (adev->df.funcs &&
2576 adev->df.funcs->query_ras_poison_mode &&
2577 adev->umc.ras &&
2578 adev->umc.ras->query_ras_poison_mode) {
2579 df_poison =
2580 adev->df.funcs->query_ras_poison_mode(adev);
2581 umc_poison =
2582 adev->umc.ras->query_ras_poison_mode(adev);
2583
2584 /* Only if poison is set in both DF and UMC can we support it */
2585 if (df_poison && umc_poison)
2586 con->poison_supported = true;
2587 else if (df_poison != umc_poison)
2588 dev_warn(adev->dev,
2589 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2590 df_poison, umc_poison);
2591 }
2592 }
2593
2594 int amdgpu_ras_init(struct amdgpu_device *adev)
2595 {
2596 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2597 int r;
2598
2599 if (con)
2600 return 0;
2601
2602 con = kmalloc(sizeof(struct amdgpu_ras) +
2603 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
2604 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
2605 GFP_KERNEL|__GFP_ZERO);
2606 if (!con)
2607 return -ENOMEM;
2608
2609 con->adev = adev;
2610 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
2611 atomic_set(&con->ras_ce_count, 0);
2612 atomic_set(&con->ras_ue_count, 0);
2613
2614 con->objs = (struct ras_manager *)(con + 1);
2615
2616 amdgpu_ras_set_context(adev, con);
2617
2618 amdgpu_ras_check_supported(adev);
2619
2620 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
2621 /* set the gfx block ras context feature for VEGA20 Gaming;
2622 * send a ras disable cmd to the ras ta during ras late init.
2623 */
2624 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
2625 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2626
2627 return 0;
2628 }
2629
2630 r = 0;
2631 goto release_con;
2632 }
2633
2634 con->update_channel_flag = false;
2635 con->features = 0;
2636 INIT_LIST_HEAD(&con->head);
2637 /* Might need to get this flag from vbios. */
2638 con->flags = RAS_DEFAULT_FLAGS;
2639
2640 /* initialize nbio ras function ahead of any other
2641 * ras functions so hardware fatal error interrupt
2642 * can be enabled as early as possible */
2643 switch (adev->ip_versions[NBIO_HWIP][0]) {
2644 case IP_VERSION(7, 4, 0):
2645 case IP_VERSION(7, 4, 1):
2646 case IP_VERSION(7, 4, 4):
2647 if (!adev->gmc.xgmi.connected_to_cpu)
2648 adev->nbio.ras = &nbio_v7_4_ras;
2649 break;
2650 case IP_VERSION(4, 3, 0):
2651 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
2652 /* unlike other generations of nbio ras,
2653 * nbio v4_3 only supports the fatal error interrupt
2654 * to inform software that DF is frozen due to a
2655 * system fatal error event. the driver should not
2656 * enable nbio ras in such a case. Instead,
2657 * check DF RAS */
2658 adev->nbio.ras = &nbio_v4_3_ras;
2659 break;
2660 case IP_VERSION(7, 9, 0):
2661 if (!adev->gmc.is_app_apu)
2662 adev->nbio.ras = &nbio_v7_9_ras;
2663 break;
2664 default:
2665 /* nbio ras is not available */
2666 break;
2667 }
2668
2669 /* the nbio ras block needs to be enabled ahead of other ras blocks
2670 * to handle fatal errors */
2671 r = amdgpu_nbio_ras_sw_init(adev);
2672 if (r)
2673 return r;
2674
2675 if (adev->nbio.ras &&
2676 adev->nbio.ras->init_ras_controller_interrupt) {
2677 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
2678 if (r)
2679 goto release_con;
2680 }
2681
2682 if (adev->nbio.ras &&
2683 adev->nbio.ras->init_ras_err_event_athub_interrupt) {
2684 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
2685 if (r)
2686 goto release_con;
2687 }
2688
2689 amdgpu_ras_query_poison_mode(adev);
2690
2691 if (amdgpu_ras_fs_init(adev)) {
2692 r = -EINVAL;
2693 goto release_con;
2694 }
2695
2696 dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
2697 "hardware ability[%x] ras_mask[%x]\n",
2698 adev->ras_hw_enabled, adev->ras_enabled);
2699
2700 return 0;
2701 release_con:
2702 amdgpu_ras_set_context(adev, NULL);
2703 kfree(con);
2704
2705 return r;
2706 }
2707
2708 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
2709 {
2710 if (adev->gmc.xgmi.connected_to_cpu ||
2711 adev->gmc.is_app_apu)
2712 return 1;
2713 return 0;
2714 }
2715
2716 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2717 struct ras_common_if *ras_block)
2718 {
2719 struct ras_query_if info = {
2720 .head = *ras_block,
2721 };
2722
2723 if (!amdgpu_persistent_edc_harvesting_supported(adev))
2724 return 0;
2725
2726 if (amdgpu_ras_query_error_status(adev, &info) != 0)
2727 DRM_WARN("RAS init harvest failure");
2728
2729 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
2730 DRM_WARN("RAS init harvest reset failure");
2731
2732 return 0;
2733 }
2734
2735 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
2736 {
2737 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2738
2739 if (!con)
2740 return false;
2741
2742 return con->poison_supported;
2743 }
2744
2745 /* helper function to handle common stuff in ip late init phase */
2746 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
2747 struct ras_common_if *ras_block)
2748 {
2749 struct amdgpu_ras_block_object *ras_obj = NULL;
2750 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2751 struct ras_query_if *query_info;
2752 unsigned long ue_count, ce_count;
2753 int r;
2754
2755 /* disable RAS feature per IP block if it is not supported */
2756 if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2757 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2758 return 0;
2759 }
2760
2761 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
2762 if (r) {
2763 if (adev->in_suspend || amdgpu_in_reset(adev)) {
2764 /* in the resume phase, if we fail to enable ras,
2765 * clean up all ras fs nodes and disable ras */
2766 goto cleanup;
2767 } else
2768 return r;
2769 }
2770
2771 /* check for errors on warm reset for ASICs with persistent EDC support */
2772 amdgpu_persistent_edc_harvesting(adev, ras_block);
2773
2774 /* in resume phase, no need to create ras fs node */
2775 if (adev->in_suspend || amdgpu_in_reset(adev))
2776 return 0;
2777
2778 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2779 if (ras_obj->ras_cb || (ras_obj->hw_ops &&
2780 (ras_obj->hw_ops->query_poison_status ||
2781 ras_obj->hw_ops->handle_poison_consumption))) {
2782 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
2783 if (r)
2784 goto cleanup;
2785 }
2786
2787 if (ras_obj->hw_ops &&
2788 (ras_obj->hw_ops->query_ras_error_count ||
2789 ras_obj->hw_ops->query_ras_error_status)) {
2790 r = amdgpu_ras_sysfs_create(adev, ras_block);
2791 if (r)
2792 goto interrupt;
2793
2794 /* Those are the cached values at init.
2795 */
2796 query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
2797 if (!query_info)
2798 return -ENOMEM;
2799 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
2800
2801 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
2802 atomic_set(&con->ras_ce_count, ce_count);
2803 atomic_set(&con->ras_ue_count, ue_count);
2804 }
2805
2806 kfree(query_info);
2807 }
2808
2809 return 0;
2810
2811 interrupt:
2812 if (ras_obj->ras_cb)
2813 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2814 cleanup:
2815 amdgpu_ras_feature_enable(adev, ras_block, 0);
2816 return r;
2817 }
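
/*
 * Sketch of how an IP block hooks into this helper (the my_block_*
 * names are hypothetical): leave .ras_late_init NULL to get the default
 * path in amdgpu_ras_late_init(), or wrap the helper for extra setup:
 *
 *	static int my_block_ras_late_init(struct amdgpu_device *adev,
 *					  struct ras_common_if *ras_block)
 *	{
 *		int r = amdgpu_ras_block_late_init(adev, ras_block);
 *
 *		if (r)
 *			return r;
 *		// block-specific interrupt enablement would go here
 *		return 0;
 *	}
 */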
2818
2819 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
2820 struct ras_common_if *ras_block)
2821 {
2822 return amdgpu_ras_block_late_init(adev, ras_block);
2823 }
2824
2825 /* helper function to remove ras fs node and interrupt handler */
2826 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
2827 struct ras_common_if *ras_block)
2828 {
2829 struct amdgpu_ras_block_object *ras_obj;
2830 if (!ras_block)
2831 return;
2832
2833 amdgpu_ras_sysfs_remove(adev, ras_block);
2834
2835 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2836 if (ras_obj->ras_cb)
2837 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2838 }
2839
2840 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
2841 struct ras_common_if *ras_block)
2842 {
2843 return amdgpu_ras_block_late_fini(adev, ras_block);
2844 }
2845
2846 /* do some init work after IP late init as a dependency.
2847 * it runs in the resume/gpu reset/boot-up cases.
2848 */
2849 void amdgpu_ras_resume(struct amdgpu_device *adev)
2850 {
2851 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2852 struct ras_manager *obj, *tmp;
2853
2854 if (!adev->ras_enabled || !con) {
2855 /* clean ras context for VEGA20 Gaming after send ras disable cmd */
2856 amdgpu_release_ras_context(adev);
2857
2858 return;
2859 }
2860
2861 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
2862 /* Set up all other IPs which are not implemented. There is a
2863 * tricky point: an IP's actual ras error type should be
2864 * MULTI_UNCORRECTABLE, but since the driver does not handle it,
2865 * ERROR_NONE makes sense anyway.
2866 */
2867 amdgpu_ras_enable_all_features(adev, 1);
2868
2869 /* We enable ras on all hw_supported blocks, but the boot
2870 * parameter might disable some of them, and one or more IPs
2871 * may not be implemented yet. So we disable those on their behalf.
2872 */
2873 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2874 if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
2875 amdgpu_ras_feature_enable(adev, &obj->head, 0);
2876 /* there should not be any reference. */
2877 WARN_ON(alive_obj(obj));
2878 }
2879 }
2880 }
2881 }
2882
2883 void amdgpu_ras_suspend(struct amdgpu_device *adev)
2884 {
2885 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2886
2887 if (!adev->ras_enabled || !con)
2888 return;
2889
2890 amdgpu_ras_disable_all_features(adev, 0);
2891 /* Make sure all ras objects are disabled. */
2892 if (con->features)
2893 amdgpu_ras_disable_all_features(adev, 1);
2894 }
2895
2896 int amdgpu_ras_late_init(struct amdgpu_device *adev)
2897 {
2898 struct amdgpu_ras_block_list *node, *tmp;
2899 struct amdgpu_ras_block_object *obj;
2900 int r;
2901
2902 /* The guest side doesn't need to init the ras feature */
2903 if (amdgpu_sriov_vf(adev))
2904 return 0;
2905
2906 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
2907 if (!node->ras_obj) {
2908 dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
2909 continue;
2910 }
2911
2912 obj = node->ras_obj;
2913 if (obj->ras_late_init) {
2914 r = obj->ras_late_init(adev, &obj->ras_comm);
2915 if (r) {
2916 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
2917 obj->ras_comm.name, r);
2918 return r;
2919 }
2920 } else
2921 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
2922 }
2923
2924 return 0;
2925 }
2926
2927 /* do some fini work before IP fini as a dependency */
2928 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
2929 {
2930 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2931
2932 if (!adev->ras_enabled || !con)
2933 return 0;
2934
2936 /* Need to disable ras on all IPs here before IP [hw/sw]fini */
2937 if (con->features)
2938 amdgpu_ras_disable_all_features(adev, 0);
2939 amdgpu_ras_recovery_fini(adev);
2940 return 0;
2941 }
2942
2943 int amdgpu_ras_fini(struct amdgpu_device *adev)
2944 {
2945 struct amdgpu_ras_block_list *ras_node, *tmp;
2946 struct amdgpu_ras_block_object *obj = NULL;
2947 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2948
2949 if (!adev->ras_enabled || !con)
2950 return 0;
2951
2952 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
2953 if (ras_node->ras_obj) {
2954 obj = ras_node->ras_obj;
2955 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
2956 obj->ras_fini)
2957 obj->ras_fini(adev, &obj->ras_comm);
2958 else
2959 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
2960 }
2961
2962 /* Clear ras blocks from ras_list and free ras block list node */
2963 list_del(&ras_node->node);
2964 kfree(ras_node);
2965 }
2966
2967 amdgpu_ras_fs_fini(adev);
2968 amdgpu_ras_interrupt_remove_all(adev);
2969
2970 WARN(con->features, "Feature mask is not cleared");
2971
2972 if (con->features)
2973 amdgpu_ras_disable_all_features(adev, 1);
2974
2975 cancel_delayed_work_sync(&con->ras_counte_delay_work);
2976
2977 amdgpu_ras_set_context(adev, NULL);
2978 kfree(con);
2979
2980 return 0;
2981 }
2982
2983 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
2984 {
2985 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
2986 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2987
2988 dev_info(adev->dev, "uncorrectable hardware error"
2989 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
2990
2991 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2992 amdgpu_ras_reset_gpu(adev);
2993 }
2994 }
2995
2996 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
2997 {
2998 if (adev->asic_type == CHIP_VEGA20 &&
2999 adev->pm.fw_version <= 0x283400) {
3000 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
3001 amdgpu_ras_intr_triggered();
3002 }
3003
3004 return false;
3005 }
3006
3007 void amdgpu_release_ras_context(struct amdgpu_device *adev)
3008 {
3009 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3010
3011 if (!con)
3012 return;
3013
3014 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
3015 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
3016 amdgpu_ras_set_context(adev, NULL);
3017 kfree(con);
3018 }
3019 }
3020
3021 #ifdef CONFIG_X86_MCE_AMD
3022 static struct amdgpu_device *find_adev(uint32_t node_id)
3023 {
3024 int i;
3025 struct amdgpu_device *adev = NULL;
3026
3027 for (i = 0; i < mce_adev_list.num_gpu; i++) {
3028 adev = mce_adev_list.devs[i];
3029
3030 if (adev && adev->gmc.xgmi.connected_to_cpu &&
3031 adev->gmc.xgmi.physical_node_id == node_id)
3032 break;
3033 adev = NULL;
3034 }
3035
3036 return adev;
3037 }
3038
3039 #define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF)
3040 #define GET_UMC_INST(m) (((m) >> 21) & 0x7)
3041 #define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
3042 #define GPU_ID_OFFSET 8
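
/*
 * Decode example (the IPID value is made up): for m->ipid =
 * 0x900000B02000, GET_MCA_IPID_GPUID() reads bits [47:44] = 0x9, so
 * gpu_id = 0x9 - GPU_ID_OFFSET = 1; GET_UMC_INST() reads bits
 * [23:21] = 5; GET_CHAN_INDEX() splices bit 20 (as bit 2) with bits
 * [13:12], giving 0x4 | 0x2 = 6.
 */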
3043
3044 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
3045 unsigned long val, void *data)
3046 {
3047 struct mce *m = (struct mce *)data;
3048 struct amdgpu_device *adev = NULL;
3049 uint32_t gpu_id = 0;
3050 uint32_t umc_inst = 0, ch_inst = 0;
3051
3052 /*
3053 * Only process the error if it was generated in UMC_V2, which
3054 * belongs to the GPU UMCs, and occurred in DramECC (extended
3055 * error code = 0); otherwise bail out.
3056 */
3057 if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
3058 (XEC(m->status, 0x3f) == 0x0)))
3059 return NOTIFY_DONE;
3060
3061 /*
3062 * If it is a correctable error, return.
3063 */
3064 if (mce_is_correctable(m))
3065 return NOTIFY_OK;
3066
3067 /*
3068 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
3069 */
3070 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
3071
3072 adev = find_adev(gpu_id);
3073 if (!adev) {
3074 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
3075 gpu_id);
3076 return NOTIFY_DONE;
3077 }
3078
3079 /*
3080 * If it is an uncorrectable error, then find out the UMC instance
3081 * and channel index.
3082 */
3083 umc_inst = GET_UMC_INST(m->ipid);
3084 ch_inst = GET_CHAN_INDEX(m->ipid);
3085
3086 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
3087 umc_inst, ch_inst);
3088
3089 if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
3090 return NOTIFY_OK;
3091 else
3092 return NOTIFY_DONE;
3093 }
3094
3095 static struct notifier_block amdgpu_bad_page_nb = {
3096 .notifier_call = amdgpu_bad_page_notifier,
3097 .priority = MCE_PRIO_UC,
3098 };
3099
3100 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
3101 {
3102 /*
3103 * Add the adev to the mce_adev_list.
3104 * During mode2 reset, amdgpu device is temporarily
3105 * removed from the mgpu_info list which can cause
3106 * page retirement to fail.
3107 * Use this list instead of mgpu_info to find the amdgpu
3108 * device on which the UMC error was reported.
3109 */
3110 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
3111
3112 /*
3113 * Register the x86 notifier only once
3114 * with MCE subsystem.
3115 */
3116 if (!notifier_registered) {
3117 mce_register_decode_chain(&amdgpu_bad_page_nb);
3118 notifier_registered = true;
3119 }
3120 }
3121 #endif
3122
3123 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
3124 {
3125 if (!adev)
3126 return NULL;
3127
3128 return adev->psp.ras_context.ras;
3129 }
3130
3131 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
3132 {
3133 if (!adev)
3134 return -EINVAL;
3135
3136 adev->psp.ras_context.ras = ras_con;
3137 return 0;
3138 }
3139
3140 /* check if ras is supported on block, say, sdma, gfx */
3141 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
3142 unsigned int block)
3143 {
3144 int ret = 0;
3145 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3146
3147 if (block >= AMDGPU_RAS_BLOCK_COUNT)
3148 return 0;
3149
3150 ret = ras && (adev->ras_enabled & (1 << block));
3151
3152 /* For the special asics with mem ecc enabled but sram ecc
3153 * not enabled: even if the ras block is not marked in
3154 * .ras_enabled, if the asic supports poison mode and the
3155 * ras block has a ras configuration, the ras block can be
3156 * considered to support the ras function.
3157 */
3158 if (!ret &&
3159 (block == AMDGPU_RAS_BLOCK__GFX ||
3160 block == AMDGPU_RAS_BLOCK__SDMA ||
3161 block == AMDGPU_RAS_BLOCK__VCN ||
3162 block == AMDGPU_RAS_BLOCK__JPEG) &&
3163 amdgpu_ras_is_poison_mode_supported(adev) &&
3164 amdgpu_ras_get_ras_block(adev, block, 0))
3165 ret = 1;
3166
3167 return ret;
3168 }
3169
3170 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
3171 {
3172 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3173
3174 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
3175 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
3176 return 0;
3177 }
3178
3180 /* Register each ip ras block into amdgpu ras */
3181 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
3182 struct amdgpu_ras_block_object *ras_block_obj)
3183 {
3184 struct amdgpu_ras_block_list *ras_node;
3185 if (!adev || !ras_block_obj)
3186 return -EINVAL;
3187
3188 ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
3189 if (!ras_node)
3190 return -ENOMEM;
3191
3192 INIT_LIST_HEAD(&ras_node->node);
3193 ras_node->ras_obj = ras_block_obj;
3194 list_add_tail(&ras_node->node, &adev->ras_list);
3195
3196 return 0;
3197 }
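
/*
 * Usage sketch (the block object is hypothetical): each IP registers
 * its RAS object once from its sw_init, e.g.
 *
 *	static struct amdgpu_ras_block_object my_block = {
 *		.ras_comm = {
 *			.name = "sdma",
 *			.block = AMDGPU_RAS_BLOCK__SDMA,
 *			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *		},
 *	};
 *
 *	amdgpu_ras_register_ras_block(adev, &my_block);
 *
 * amdgpu_ras_late_init() above then walks adev->ras_list and runs each
 * block's late init.
 */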
3198
3199 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
3200 {
3201 if (!err_type_name)
3202 return;
3203
3204 switch (err_type) {
3205 case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
3206 sprintf(err_type_name, "correctable");
3207 break;
3208 case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
3209 sprintf(err_type_name, "uncorrectable");
3210 break;
3211 default:
3212 sprintf(err_type_name, "unknown");
3213 break;
3214 }
3215 }
3216
3217 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
3218 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3219 uint32_t instance,
3220 uint32_t *memory_id)
3221 {
3222 uint32_t err_status_lo_data, err_status_lo_offset;
3223
3224 if (!reg_entry)
3225 return false;
3226
3227 err_status_lo_offset =
3228 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3229 reg_entry->seg_lo, reg_entry->reg_lo);
3230 err_status_lo_data = RREG32(err_status_lo_offset);
3231
3232 if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
3233 !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
3234 return false;
3235
3236 *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
3237
3238 return true;
3239 }
3240
3241 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
3242 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3243 uint32_t instance,
3244 unsigned long *err_cnt)
3245 {
3246 uint32_t err_status_hi_data, err_status_hi_offset;
3247
3248 if (!reg_entry)
3249 return false;
3250
3251 err_status_hi_offset =
3252 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3253 reg_entry->seg_hi, reg_entry->reg_hi);
3254 err_status_hi_data = RREG32(err_status_hi_offset);
3255
3256 if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
3257 !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
3258 /* keep the check here in case we need to refer to the result later */
3259 dev_dbg(adev->dev, "Invalid err_info field\n");
3260
3261 /* read err count */
3262 *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
3263
3264 return true;
3265 }
3266
3267 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
3268 const struct amdgpu_ras_err_status_reg_entry *reg_list,
3269 uint32_t reg_list_size,
3270 const struct amdgpu_ras_memory_id_entry *mem_list,
3271 uint32_t mem_list_size,
3272 uint32_t instance,
3273 uint32_t err_type,
3274 unsigned long *err_count)
3275 {
3276 uint32_t memory_id;
3277 unsigned long err_cnt;
3278 char err_type_name[16];
3279 uint32_t i, j;
3280
3281 for (i = 0; i < reg_list_size; i++) {
3282 /* query memory_id from err_status_lo */
3283 if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
3284 instance, &memory_id))
3285 continue;
3286
3287 /* query err_cnt from err_status_hi */
3288 if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
3289 instance, &err_cnt) ||
3290 !err_cnt)
3291 continue;
3292
3293 *err_count += err_cnt;
3294
3295 /* log the errors */
3296 amdgpu_ras_get_error_type_name(err_type, err_type_name);
3297 if (!mem_list) {
3298 /* memory_list is not supported */
3299 dev_info(adev->dev,
3300 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
3301 err_cnt, err_type_name,
3302 reg_list[i].block_name,
3303 instance, memory_id);
3304 } else {
3305 for (j = 0; j < mem_list_size; j++) {
3306 if (memory_id == mem_list[j].memory_id) {
3307 dev_info(adev->dev,
3308 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
3309 err_cnt, err_type_name,
3310 reg_list[i].block_name,
3311 instance, mem_list[j].name);
3312 break;
3313 }
3314 }
3315 }
3316 }
3317 }
3318
3319 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
3320 const struct amdgpu_ras_err_status_reg_entry *reg_list,
3321 uint32_t reg_list_size,
3322 uint32_t instance)
3323 {
3324 uint32_t err_status_lo_offset, err_status_hi_offset;
3325 uint32_t i;
3326
3327 for (i = 0; i < reg_list_size; i++) {
3328 err_status_lo_offset =
3329 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3330 reg_list[i].seg_lo, reg_list[i].reg_lo);
3331 err_status_hi_offset =
3332 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3333 reg_list[i].seg_hi, reg_list[i].reg_hi);
3334 WREG32(err_status_lo_offset, 0);
3335 WREG32(err_status_hi_offset, 0);
3336 }
3337 }
3338