1 /* 2 * Copyright 2018 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * 23 */ 24 #include <linux/debugfs.h> 25 #include <linux/list.h> 26 #include <linux/module.h> 27 #include <linux/uaccess.h> 28 #include <linux/reboot.h> 29 #include <linux/syscalls.h> 30 #include <linux/pm_runtime.h> 31 32 #include "amdgpu.h" 33 #include "amdgpu_ras.h" 34 #include "amdgpu_atomfirmware.h" 35 #include "amdgpu_xgmi.h" 36 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" 37 #include "atom.h" 38 #include "amdgpu_reset.h" 39 40 #ifdef CONFIG_X86_MCE_AMD 41 #include <asm/mce.h> 42 43 static bool notifier_registered; 44 #endif 45 static const char *RAS_FS_NAME = "ras"; 46 47 const char *ras_error_string[] = { 48 "none", 49 "parity", 50 "single_correctable", 51 "multi_uncorrectable", 52 "poison", 53 }; 54 55 const char *ras_block_string[] = { 56 "umc", 57 "sdma", 58 "gfx", 59 "mmhub", 60 "athub", 61 "pcie_bif", 62 "hdp", 63 "xgmi_wafl", 64 "df", 65 "smn", 66 "sem", 67 "mp0", 68 "mp1", 69 "fuse", 70 "mca", 71 "vcn", 72 "jpeg", 73 }; 74 75 const char *ras_mca_block_string[] = { 76 "mca_mp0", 77 "mca_mp1", 78 "mca_mpio", 79 "mca_iohc", 80 }; 81 82 struct amdgpu_ras_block_list { 83 /* ras block link */ 84 struct list_head node; 85 86 struct amdgpu_ras_block_object *ras_obj; 87 }; 88 89 const char *get_ras_block_str(struct ras_common_if *ras_block) 90 { 91 if (!ras_block) 92 return "NULL"; 93 94 if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT) 95 return "OUT OF RANGE"; 96 97 if (ras_block->block == AMDGPU_RAS_BLOCK__MCA) 98 return ras_mca_block_string[ras_block->sub_block_index]; 99 100 return ras_block_string[ras_block->block]; 101 } 102 103 #define ras_block_str(_BLOCK_) \ 104 (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range") 105 106 #define ras_err_str(i) (ras_error_string[ffs(i)]) 107 108 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) 109 110 /* inject address is 52 bits */ 111 #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) 112 113 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */ 114 #define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL) 115 116 enum amdgpu_ras_retire_page_reservation { 117 AMDGPU_RAS_RETIRE_PAGE_RESERVED, 118 AMDGPU_RAS_RETIRE_PAGE_PENDING, 119 AMDGPU_RAS_RETIRE_PAGE_FAULT, 120 }; 121 122 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); 123 124 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, 125 uint64_t addr); 126 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, 127 uint64_t addr); 128 #ifdef CONFIG_X86_MCE_AMD 129 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev); 130 struct mce_notifier_adev_list { 131 struct amdgpu_device *devs[MAX_GPU_INSTANCE]; 132 int num_gpu; 133 }; 134 static struct mce_notifier_adev_list mce_adev_list; 135 #endif 136 137 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready) 138 { 139 if (adev && amdgpu_ras_get_context(adev)) 140 amdgpu_ras_get_context(adev)->error_query_ready = ready; 141 } 142 143 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev) 144 { 145 if (adev && amdgpu_ras_get_context(adev)) 146 return amdgpu_ras_get_context(adev)->error_query_ready; 147 148 return false; 149 } 150 151 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address) 152 { 153 struct ras_err_data err_data = {0, 0, 0, NULL}; 154 struct eeprom_table_record err_rec; 155 156 if ((address >= adev->gmc.mc_vram_size) || 157 (address >= RAS_UMC_INJECT_ADDR_LIMIT)) { 158 dev_warn(adev->dev, 159 "RAS WARN: input address 0x%llx is invalid.\n", 160 address); 161 return -EINVAL; 162 } 163 164 if (amdgpu_ras_check_bad_page(adev, address)) { 165 dev_warn(adev->dev, 166 "RAS WARN: 0x%llx has already been marked as bad page!\n", 167 address); 168 return 0; 169 } 170 171 memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); 172 err_data.err_addr = &err_rec; 173 amdgpu_umc_fill_error_record(&err_data, address, 174 (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0); 175 176 if (amdgpu_bad_page_threshold != 0) { 177 amdgpu_ras_add_bad_pages(adev, err_data.err_addr, 178 err_data.err_addr_cnt); 179 amdgpu_ras_save_bad_pages(adev); 180 } 181 182 dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n"); 183 dev_warn(adev->dev, "Clear EEPROM:\n"); 184 dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n"); 185 186 return 0; 187 } 188 189 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, 190 size_t size, loff_t *pos) 191 { 192 struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private; 193 struct ras_query_if info = { 194 .head = obj->head, 195 }; 196 ssize_t s; 197 char val[128]; 198 199 if (amdgpu_ras_query_error_status(obj->adev, &info)) 200 return -EINVAL; 201 202 /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */ 203 if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 204 obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { 205 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) 206 dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); 207 } 208 209 s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", 210 "ue", info.ue_count, 211 "ce", info.ce_count); 212 if (*pos >= s) 213 return 0; 214 215 s -= *pos; 216 s = min_t(u64, s, size); 217 218 219 if (copy_to_user(buf, &val[*pos], s)) 220 return -EINVAL; 221 222 *pos += s; 223 224 return s; 225 } 226 227 static const struct file_operations amdgpu_ras_debugfs_ops = { 228 .owner = THIS_MODULE, 229 .read = amdgpu_ras_debugfs_read, 230 .write = NULL, 231 .llseek = default_llseek 232 }; 233 234 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id) 235 { 236 int i; 237 238 for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) { 239 *block_id = i; 240 if (strcmp(name, ras_block_string[i]) == 0) 241 return 0; 242 } 243 return -EINVAL; 244 } 245 246 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, 247 const char __user *buf, size_t size, 248 loff_t *pos, struct ras_debug_if *data) 249 { 250 ssize_t s = min_t(u64, 64, size); 251 char str[65]; 252 char block_name[33]; 253 char err[9] = "ue"; 254 int op = -1; 255 int block_id; 256 uint32_t sub_block; 257 u64 address, value; 258 259 if (*pos) 260 return -EINVAL; 261 *pos = size; 262 263 memset(str, 0, sizeof(str)); 264 memset(data, 0, sizeof(*data)); 265 266 if (copy_from_user(str, buf, s)) 267 return -EINVAL; 268 269 if (sscanf(str, "disable %32s", block_name) == 1) 270 op = 0; 271 else if (sscanf(str, "enable %32s %8s", block_name, err) == 2) 272 op = 1; 273 else if (sscanf(str, "inject %32s %8s", block_name, err) == 2) 274 op = 2; 275 else if (strstr(str, "retire_page") != NULL) 276 op = 3; 277 else if (str[0] && str[1] && str[2] && str[3]) 278 /* ascii string, but commands are not matched. */ 279 return -EINVAL; 280 281 if (op != -1) { 282 if (op == 3) { 283 if (sscanf(str, "%*s 0x%llx", &address) != 1 && 284 sscanf(str, "%*s %llu", &address) != 1) 285 return -EINVAL; 286 287 data->op = op; 288 data->inject.address = address; 289 290 return 0; 291 } 292 293 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id)) 294 return -EINVAL; 295 296 data->head.block = block_id; 297 /* only ue and ce errors are supported */ 298 if (!memcmp("ue", err, 2)) 299 data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; 300 else if (!memcmp("ce", err, 2)) 301 data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE; 302 else 303 return -EINVAL; 304 305 data->op = op; 306 307 if (op == 2) { 308 if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", 309 &sub_block, &address, &value) != 3 && 310 sscanf(str, "%*s %*s %*s %u %llu %llu", 311 &sub_block, &address, &value) != 3) 312 return -EINVAL; 313 data->head.sub_block_index = sub_block; 314 data->inject.address = address; 315 data->inject.value = value; 316 } 317 } else { 318 if (size < sizeof(*data)) 319 return -EINVAL; 320 321 if (copy_from_user(data, buf, sizeof(*data))) 322 return -EINVAL; 323 } 324 325 return 0; 326 } 327 328 /** 329 * DOC: AMDGPU RAS debugfs control interface 330 * 331 * The control interface accepts struct ras_debug_if which has two members. 332 * 333 * First member: ras_debug_if::head or ras_debug_if::inject. 334 * 335 * head is used to indicate which IP block will be under control. 336 * 337 * head has four members, they are block, type, sub_block_index, name. 338 * block: which IP will be under control. 339 * type: what kind of error will be enabled/disabled/injected. 340 * sub_block_index: some IPs have subcomponets. say, GFX, sDMA. 341 * name: the name of IP. 342 * 343 * inject has two more members than head, they are address, value. 344 * As their names indicate, inject operation will write the 345 * value to the address. 346 * 347 * The second member: struct ras_debug_if::op. 348 * It has three kinds of operations. 349 * 350 * - 0: disable RAS on the block. Take ::head as its data. 351 * - 1: enable RAS on the block. Take ::head as its data. 352 * - 2: inject errors on the block. Take ::inject as its data. 353 * 354 * How to use the interface? 355 * 356 * In a program 357 * 358 * Copy the struct ras_debug_if in your code and initialize it. 359 * Write the struct to the control interface. 360 * 361 * From shell 362 * 363 * .. code-block:: bash 364 * 365 * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl 366 * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl 367 * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl 368 * 369 * Where N, is the card which you want to affect. 370 * 371 * "disable" requires only the block. 372 * "enable" requires the block and error type. 373 * "inject" requires the block, error type, address, and value. 374 * 375 * The block is one of: umc, sdma, gfx, etc. 376 * see ras_block_string[] for details 377 * 378 * The error type is one of: ue, ce, where, 379 * ue is multi-uncorrectable 380 * ce is single-correctable 381 * 382 * The sub-block is a the sub-block index, pass 0 if there is no sub-block. 383 * The address and value are hexadecimal numbers, leading 0x is optional. 384 * 385 * For instance, 386 * 387 * .. code-block:: bash 388 * 389 * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl 390 * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl 391 * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl 392 * 393 * How to check the result of the operation? 394 * 395 * To check disable/enable, see "ras" features at, 396 * /sys/class/drm/card[0/1/2...]/device/ras/features 397 * 398 * To check inject, see the corresponding error count at, 399 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count 400 * 401 * .. note:: 402 * Operations are only allowed on blocks which are supported. 403 * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask 404 * to see which blocks support RAS on a particular asic. 405 * 406 */ 407 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, 408 const char __user *buf, 409 size_t size, loff_t *pos) 410 { 411 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; 412 struct ras_debug_if data; 413 int ret = 0; 414 415 if (!amdgpu_ras_get_error_query_ready(adev)) { 416 dev_warn(adev->dev, "RAS WARN: error injection " 417 "currently inaccessible\n"); 418 return size; 419 } 420 421 ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data); 422 if (ret) 423 return ret; 424 425 if (data.op == 3) { 426 ret = amdgpu_reserve_page_direct(adev, data.inject.address); 427 if (!ret) 428 return size; 429 else 430 return ret; 431 } 432 433 if (!amdgpu_ras_is_supported(adev, data.head.block)) 434 return -EINVAL; 435 436 switch (data.op) { 437 case 0: 438 ret = amdgpu_ras_feature_enable(adev, &data.head, 0); 439 break; 440 case 1: 441 ret = amdgpu_ras_feature_enable(adev, &data.head, 1); 442 break; 443 case 2: 444 if ((data.inject.address >= adev->gmc.mc_vram_size) || 445 (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) { 446 dev_warn(adev->dev, "RAS WARN: input address " 447 "0x%llx is invalid.", 448 data.inject.address); 449 ret = -EINVAL; 450 break; 451 } 452 453 /* umc ce/ue error injection for a bad page is not allowed */ 454 if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && 455 amdgpu_ras_check_bad_page(adev, data.inject.address)) { 456 dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has " 457 "already been marked as bad!\n", 458 data.inject.address); 459 break; 460 } 461 462 /* data.inject.address is offset instead of absolute gpu address */ 463 ret = amdgpu_ras_error_inject(adev, &data.inject); 464 break; 465 default: 466 ret = -EINVAL; 467 break; 468 } 469 470 if (ret) 471 return ret; 472 473 return size; 474 } 475 476 /** 477 * DOC: AMDGPU RAS debugfs EEPROM table reset interface 478 * 479 * Some boards contain an EEPROM which is used to persistently store a list of 480 * bad pages which experiences ECC errors in vram. This interface provides 481 * a way to reset the EEPROM, e.g., after testing error injection. 482 * 483 * Usage: 484 * 485 * .. code-block:: bash 486 * 487 * echo 1 > ../ras/ras_eeprom_reset 488 * 489 * will reset EEPROM table to 0 entries. 490 * 491 */ 492 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, 493 const char __user *buf, 494 size_t size, loff_t *pos) 495 { 496 struct amdgpu_device *adev = 497 (struct amdgpu_device *)file_inode(f)->i_private; 498 int ret; 499 500 ret = amdgpu_ras_eeprom_reset_table( 501 &(amdgpu_ras_get_context(adev)->eeprom_control)); 502 503 if (!ret) { 504 /* Something was written to EEPROM. 505 */ 506 amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS; 507 return size; 508 } else { 509 return ret; 510 } 511 } 512 513 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { 514 .owner = THIS_MODULE, 515 .read = NULL, 516 .write = amdgpu_ras_debugfs_ctrl_write, 517 .llseek = default_llseek 518 }; 519 520 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = { 521 .owner = THIS_MODULE, 522 .read = NULL, 523 .write = amdgpu_ras_debugfs_eeprom_write, 524 .llseek = default_llseek 525 }; 526 527 /** 528 * DOC: AMDGPU RAS sysfs Error Count Interface 529 * 530 * It allows the user to read the error count for each IP block on the gpu through 531 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count 532 * 533 * It outputs the multiple lines which report the uncorrected (ue) and corrected 534 * (ce) error counts. 535 * 536 * The format of one line is below, 537 * 538 * [ce|ue]: count 539 * 540 * Example: 541 * 542 * .. code-block:: bash 543 * 544 * ue: 0 545 * ce: 1 546 * 547 */ 548 static ssize_t amdgpu_ras_sysfs_read(struct device *dev, 549 struct device_attribute *attr, char *buf) 550 { 551 struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr); 552 struct ras_query_if info = { 553 .head = obj->head, 554 }; 555 556 if (!amdgpu_ras_get_error_query_ready(obj->adev)) 557 return sysfs_emit(buf, "Query currently inaccessible\n"); 558 559 if (amdgpu_ras_query_error_status(obj->adev, &info)) 560 return -EINVAL; 561 562 if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 563 obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { 564 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) 565 dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); 566 } 567 568 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, 569 "ce", info.ce_count); 570 } 571 572 /* obj begin */ 573 574 #define get_obj(obj) do { (obj)->use++; } while (0) 575 #define alive_obj(obj) ((obj)->use) 576 577 static inline void put_obj(struct ras_manager *obj) 578 { 579 if (obj && (--obj->use == 0)) 580 list_del(&obj->node); 581 if (obj && (obj->use < 0)) 582 DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head)); 583 } 584 585 /* make one obj and return it. */ 586 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, 587 struct ras_common_if *head) 588 { 589 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 590 struct ras_manager *obj; 591 592 if (!adev->ras_enabled || !con) 593 return NULL; 594 595 if (head->block >= AMDGPU_RAS_BLOCK_COUNT) 596 return NULL; 597 598 if (head->block == AMDGPU_RAS_BLOCK__MCA) { 599 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) 600 return NULL; 601 602 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; 603 } else 604 obj = &con->objs[head->block]; 605 606 /* already exist. return obj? */ 607 if (alive_obj(obj)) 608 return NULL; 609 610 obj->head = *head; 611 obj->adev = adev; 612 list_add(&obj->node, &con->head); 613 get_obj(obj); 614 615 return obj; 616 } 617 618 /* return an obj equal to head, or the first when head is NULL */ 619 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, 620 struct ras_common_if *head) 621 { 622 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 623 struct ras_manager *obj; 624 int i; 625 626 if (!adev->ras_enabled || !con) 627 return NULL; 628 629 if (head) { 630 if (head->block >= AMDGPU_RAS_BLOCK_COUNT) 631 return NULL; 632 633 if (head->block == AMDGPU_RAS_BLOCK__MCA) { 634 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) 635 return NULL; 636 637 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; 638 } else 639 obj = &con->objs[head->block]; 640 641 if (alive_obj(obj)) 642 return obj; 643 } else { 644 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { 645 obj = &con->objs[i]; 646 if (alive_obj(obj)) 647 return obj; 648 } 649 } 650 651 return NULL; 652 } 653 /* obj end */ 654 655 /* feature ctl begin */ 656 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev, 657 struct ras_common_if *head) 658 { 659 return adev->ras_hw_enabled & BIT(head->block); 660 } 661 662 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev, 663 struct ras_common_if *head) 664 { 665 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 666 667 return con->features & BIT(head->block); 668 } 669 670 /* 671 * if obj is not created, then create one. 672 * set feature enable flag. 673 */ 674 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, 675 struct ras_common_if *head, int enable) 676 { 677 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 678 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 679 680 /* If hardware does not support ras, then do not create obj. 681 * But if hardware support ras, we can create the obj. 682 * Ras framework checks con->hw_supported to see if it need do 683 * corresponding initialization. 684 * IP checks con->support to see if it need disable ras. 685 */ 686 if (!amdgpu_ras_is_feature_allowed(adev, head)) 687 return 0; 688 689 if (enable) { 690 if (!obj) { 691 obj = amdgpu_ras_create_obj(adev, head); 692 if (!obj) 693 return -EINVAL; 694 } else { 695 /* In case we create obj somewhere else */ 696 get_obj(obj); 697 } 698 con->features |= BIT(head->block); 699 } else { 700 if (obj && amdgpu_ras_is_feature_enabled(adev, head)) { 701 con->features &= ~BIT(head->block); 702 put_obj(obj); 703 } 704 } 705 706 return 0; 707 } 708 709 static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev, 710 struct ras_common_if *head) 711 { 712 if (amdgpu_ras_is_feature_allowed(adev, head) || 713 amdgpu_ras_is_poison_mode_supported(adev)) 714 return 1; 715 else 716 return 0; 717 } 718 719 /* wrapper of psp_ras_enable_features */ 720 int amdgpu_ras_feature_enable(struct amdgpu_device *adev, 721 struct ras_common_if *head, bool enable) 722 { 723 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 724 union ta_ras_cmd_input *info; 725 int ret = 0; 726 727 if (!con) 728 return -EINVAL; 729 730 if (head->block == AMDGPU_RAS_BLOCK__GFX) { 731 info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL); 732 if (!info) 733 return -ENOMEM; 734 735 if (!enable) { 736 info->disable_features = (struct ta_ras_disable_features_input) { 737 .block_id = amdgpu_ras_block_to_ta(head->block), 738 .error_type = amdgpu_ras_error_to_ta(head->type), 739 }; 740 } else { 741 info->enable_features = (struct ta_ras_enable_features_input) { 742 .block_id = amdgpu_ras_block_to_ta(head->block), 743 .error_type = amdgpu_ras_error_to_ta(head->type), 744 }; 745 } 746 } 747 748 /* Do not enable if it is not allowed. */ 749 if (enable && !amdgpu_ras_check_feature_allowed(adev, head)) 750 goto out; 751 752 /* Only enable ras feature operation handle on host side */ 753 if (head->block == AMDGPU_RAS_BLOCK__GFX && 754 !amdgpu_sriov_vf(adev) && 755 !amdgpu_ras_intr_triggered()) { 756 ret = psp_ras_enable_features(&adev->psp, info, enable); 757 if (ret) { 758 dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n", 759 enable ? "enable":"disable", 760 get_ras_block_str(head), 761 amdgpu_ras_is_poison_mode_supported(adev), ret); 762 goto out; 763 } 764 } 765 766 /* setup the obj */ 767 __amdgpu_ras_feature_enable(adev, head, enable); 768 out: 769 if (head->block == AMDGPU_RAS_BLOCK__GFX) 770 kfree(info); 771 return ret; 772 } 773 774 /* Only used in device probe stage and called only once. */ 775 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, 776 struct ras_common_if *head, bool enable) 777 { 778 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 779 int ret; 780 781 if (!con) 782 return -EINVAL; 783 784 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { 785 if (enable) { 786 /* There is no harm to issue a ras TA cmd regardless of 787 * the currecnt ras state. 788 * If current state == target state, it will do nothing 789 * But sometimes it requests driver to reset and repost 790 * with error code -EAGAIN. 791 */ 792 ret = amdgpu_ras_feature_enable(adev, head, 1); 793 /* With old ras TA, we might fail to enable ras. 794 * Log it and just setup the object. 795 * TODO need remove this WA in the future. 796 */ 797 if (ret == -EINVAL) { 798 ret = __amdgpu_ras_feature_enable(adev, head, 1); 799 if (!ret) 800 dev_info(adev->dev, 801 "RAS INFO: %s setup object\n", 802 get_ras_block_str(head)); 803 } 804 } else { 805 /* setup the object then issue a ras TA disable cmd.*/ 806 ret = __amdgpu_ras_feature_enable(adev, head, 1); 807 if (ret) 808 return ret; 809 810 /* gfx block ras dsiable cmd must send to ras-ta */ 811 if (head->block == AMDGPU_RAS_BLOCK__GFX) 812 con->features |= BIT(head->block); 813 814 ret = amdgpu_ras_feature_enable(adev, head, 0); 815 816 /* clean gfx block ras features flag */ 817 if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX) 818 con->features &= ~BIT(head->block); 819 } 820 } else 821 ret = amdgpu_ras_feature_enable(adev, head, enable); 822 823 return ret; 824 } 825 826 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev, 827 bool bypass) 828 { 829 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 830 struct ras_manager *obj, *tmp; 831 832 list_for_each_entry_safe(obj, tmp, &con->head, node) { 833 /* bypass psp. 834 * aka just release the obj and corresponding flags 835 */ 836 if (bypass) { 837 if (__amdgpu_ras_feature_enable(adev, &obj->head, 0)) 838 break; 839 } else { 840 if (amdgpu_ras_feature_enable(adev, &obj->head, 0)) 841 break; 842 } 843 } 844 845 return con->features; 846 } 847 848 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, 849 bool bypass) 850 { 851 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 852 int i; 853 const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE; 854 855 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { 856 struct ras_common_if head = { 857 .block = i, 858 .type = default_ras_type, 859 .sub_block_index = 0, 860 }; 861 862 if (i == AMDGPU_RAS_BLOCK__MCA) 863 continue; 864 865 if (bypass) { 866 /* 867 * bypass psp. vbios enable ras for us. 868 * so just create the obj 869 */ 870 if (__amdgpu_ras_feature_enable(adev, &head, 1)) 871 break; 872 } else { 873 if (amdgpu_ras_feature_enable(adev, &head, 1)) 874 break; 875 } 876 } 877 878 for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { 879 struct ras_common_if head = { 880 .block = AMDGPU_RAS_BLOCK__MCA, 881 .type = default_ras_type, 882 .sub_block_index = i, 883 }; 884 885 if (bypass) { 886 /* 887 * bypass psp. vbios enable ras for us. 888 * so just create the obj 889 */ 890 if (__amdgpu_ras_feature_enable(adev, &head, 1)) 891 break; 892 } else { 893 if (amdgpu_ras_feature_enable(adev, &head, 1)) 894 break; 895 } 896 } 897 898 return con->features; 899 } 900 /* feature ctl end */ 901 902 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj, 903 enum amdgpu_ras_block block) 904 { 905 if (!block_obj) 906 return -EINVAL; 907 908 if (block_obj->ras_comm.block == block) 909 return 0; 910 911 return -EINVAL; 912 } 913 914 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev, 915 enum amdgpu_ras_block block, uint32_t sub_block_index) 916 { 917 struct amdgpu_ras_block_list *node, *tmp; 918 struct amdgpu_ras_block_object *obj; 919 920 if (block >= AMDGPU_RAS_BLOCK__LAST) 921 return NULL; 922 923 if (!amdgpu_ras_is_supported(adev, block)) 924 return NULL; 925 926 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { 927 if (!node->ras_obj) { 928 dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); 929 continue; 930 } 931 932 obj = node->ras_obj; 933 if (obj->ras_block_match) { 934 if (obj->ras_block_match(obj, block, sub_block_index) == 0) 935 return obj; 936 } else { 937 if (amdgpu_ras_block_match_default(obj, block) == 0) 938 return obj; 939 } 940 } 941 942 return NULL; 943 } 944 945 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data) 946 { 947 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 948 int ret = 0; 949 950 /* 951 * choosing right query method according to 952 * whether smu support query error information 953 */ 954 ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc)); 955 if (ret == -EOPNOTSUPP) { 956 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && 957 adev->umc.ras->ras_block.hw_ops->query_ras_error_count) 958 adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data); 959 960 /* umc query_ras_error_address is also responsible for clearing 961 * error status 962 */ 963 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && 964 adev->umc.ras->ras_block.hw_ops->query_ras_error_address) 965 adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data); 966 } else if (!ret) { 967 if (adev->umc.ras && 968 adev->umc.ras->ecc_info_query_ras_error_count) 969 adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data); 970 971 if (adev->umc.ras && 972 adev->umc.ras->ecc_info_query_ras_error_address) 973 adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data); 974 } 975 } 976 977 /* query/inject/cure begin */ 978 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, 979 struct ras_query_if *info) 980 { 981 struct amdgpu_ras_block_object *block_obj = NULL; 982 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 983 struct ras_err_data err_data = {0, 0, 0, NULL}; 984 985 if (!obj) 986 return -EINVAL; 987 988 if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { 989 amdgpu_ras_get_ecc_info(adev, &err_data); 990 } else { 991 block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); 992 if (!block_obj || !block_obj->hw_ops) { 993 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 994 get_ras_block_str(&info->head)); 995 return -EINVAL; 996 } 997 998 if (block_obj->hw_ops->query_ras_error_count) 999 block_obj->hw_ops->query_ras_error_count(adev, &err_data); 1000 1001 if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) || 1002 (info->head.block == AMDGPU_RAS_BLOCK__GFX) || 1003 (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) { 1004 if (block_obj->hw_ops->query_ras_error_status) 1005 block_obj->hw_ops->query_ras_error_status(adev); 1006 } 1007 } 1008 1009 obj->err_data.ue_count += err_data.ue_count; 1010 obj->err_data.ce_count += err_data.ce_count; 1011 1012 info->ue_count = obj->err_data.ue_count; 1013 info->ce_count = obj->err_data.ce_count; 1014 1015 if (err_data.ce_count) { 1016 if (adev->smuio.funcs && 1017 adev->smuio.funcs->get_socket_id && 1018 adev->smuio.funcs->get_die_id) { 1019 dev_info(adev->dev, "socket: %d, die: %d " 1020 "%ld correctable hardware errors " 1021 "detected in %s block, no user " 1022 "action is needed.\n", 1023 adev->smuio.funcs->get_socket_id(adev), 1024 adev->smuio.funcs->get_die_id(adev), 1025 obj->err_data.ce_count, 1026 get_ras_block_str(&info->head)); 1027 } else { 1028 dev_info(adev->dev, "%ld correctable hardware errors " 1029 "detected in %s block, no user " 1030 "action is needed.\n", 1031 obj->err_data.ce_count, 1032 get_ras_block_str(&info->head)); 1033 } 1034 } 1035 if (err_data.ue_count) { 1036 if (adev->smuio.funcs && 1037 adev->smuio.funcs->get_socket_id && 1038 adev->smuio.funcs->get_die_id) { 1039 dev_info(adev->dev, "socket: %d, die: %d " 1040 "%ld uncorrectable hardware errors " 1041 "detected in %s block\n", 1042 adev->smuio.funcs->get_socket_id(adev), 1043 adev->smuio.funcs->get_die_id(adev), 1044 obj->err_data.ue_count, 1045 get_ras_block_str(&info->head)); 1046 } else { 1047 dev_info(adev->dev, "%ld uncorrectable hardware errors " 1048 "detected in %s block\n", 1049 obj->err_data.ue_count, 1050 get_ras_block_str(&info->head)); 1051 } 1052 } 1053 1054 return 0; 1055 } 1056 1057 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, 1058 enum amdgpu_ras_block block) 1059 { 1060 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); 1061 1062 if (!amdgpu_ras_is_supported(adev, block)) 1063 return -EINVAL; 1064 1065 if (!block_obj || !block_obj->hw_ops) { 1066 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 1067 ras_block_str(block)); 1068 return -EINVAL; 1069 } 1070 1071 if (block_obj->hw_ops->reset_ras_error_count) 1072 block_obj->hw_ops->reset_ras_error_count(adev); 1073 1074 if ((block == AMDGPU_RAS_BLOCK__GFX) || 1075 (block == AMDGPU_RAS_BLOCK__MMHUB)) { 1076 if (block_obj->hw_ops->reset_ras_error_status) 1077 block_obj->hw_ops->reset_ras_error_status(adev); 1078 } 1079 1080 return 0; 1081 } 1082 1083 /* wrapper of psp_ras_trigger_error */ 1084 int amdgpu_ras_error_inject(struct amdgpu_device *adev, 1085 struct ras_inject_if *info) 1086 { 1087 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 1088 struct ta_ras_trigger_error_input block_info = { 1089 .block_id = amdgpu_ras_block_to_ta(info->head.block), 1090 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type), 1091 .sub_block_index = info->head.sub_block_index, 1092 .address = info->address, 1093 .value = info->value, 1094 }; 1095 int ret = -EINVAL; 1096 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, 1097 info->head.block, 1098 info->head.sub_block_index); 1099 1100 /* inject on guest isn't allowed, return success directly */ 1101 if (amdgpu_sriov_vf(adev)) 1102 return 0; 1103 1104 if (!obj) 1105 return -EINVAL; 1106 1107 if (!block_obj || !block_obj->hw_ops) { 1108 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 1109 get_ras_block_str(&info->head)); 1110 return -EINVAL; 1111 } 1112 1113 /* Calculate XGMI relative offset */ 1114 if (adev->gmc.xgmi.num_physical_nodes > 1) { 1115 block_info.address = 1116 amdgpu_xgmi_get_relative_phy_addr(adev, 1117 block_info.address); 1118 } 1119 1120 if (info->head.block == AMDGPU_RAS_BLOCK__GFX) { 1121 if (block_obj->hw_ops->ras_error_inject) 1122 ret = block_obj->hw_ops->ras_error_inject(adev, info); 1123 } else { 1124 /* If defined special ras_error_inject(e.g: xgmi), implement special ras_error_inject */ 1125 if (block_obj->hw_ops->ras_error_inject) 1126 ret = block_obj->hw_ops->ras_error_inject(adev, &block_info); 1127 else /*If not defined .ras_error_inject, use default ras_error_inject*/ 1128 ret = psp_ras_trigger_error(&adev->psp, &block_info); 1129 } 1130 1131 if (ret) 1132 dev_err(adev->dev, "ras inject %s failed %d\n", 1133 get_ras_block_str(&info->head), ret); 1134 1135 return ret; 1136 } 1137 1138 /** 1139 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP 1140 * @adev: pointer to AMD GPU device 1141 * @ce_count: pointer to an integer to be set to the count of correctible errors. 1142 * @ue_count: pointer to an integer to be set to the count of uncorrectible errors. 1143 * @query_info: pointer to ras_query_if 1144 * 1145 * Return 0 for query success or do nothing, otherwise return an error 1146 * on failures 1147 */ 1148 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev, 1149 unsigned long *ce_count, 1150 unsigned long *ue_count, 1151 struct ras_query_if *query_info) 1152 { 1153 int ret; 1154 1155 if (!query_info) 1156 /* do nothing if query_info is not specified */ 1157 return 0; 1158 1159 ret = amdgpu_ras_query_error_status(adev, query_info); 1160 if (ret) 1161 return ret; 1162 1163 *ce_count += query_info->ce_count; 1164 *ue_count += query_info->ue_count; 1165 1166 /* some hardware/IP supports read to clear 1167 * no need to explictly reset the err status after the query call */ 1168 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 1169 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { 1170 if (amdgpu_ras_reset_error_status(adev, query_info->head.block)) 1171 dev_warn(adev->dev, 1172 "Failed to reset error counter and error status\n"); 1173 } 1174 1175 return 0; 1176 } 1177 1178 /** 1179 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP 1180 * @adev: pointer to AMD GPU device 1181 * @ce_count: pointer to an integer to be set to the count of correctible errors. 1182 * @ue_count: pointer to an integer to be set to the count of uncorrectible 1183 * errors. 1184 * @query_info: pointer to ras_query_if if the query request is only for 1185 * specific ip block; if info is NULL, then the qurey request is for 1186 * all the ip blocks that support query ras error counters/status 1187 * 1188 * If set, @ce_count or @ue_count, count and return the corresponding 1189 * error counts in those integer pointers. Return 0 if the device 1190 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS. 1191 */ 1192 int amdgpu_ras_query_error_count(struct amdgpu_device *adev, 1193 unsigned long *ce_count, 1194 unsigned long *ue_count, 1195 struct ras_query_if *query_info) 1196 { 1197 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1198 struct ras_manager *obj; 1199 unsigned long ce, ue; 1200 int ret; 1201 1202 if (!adev->ras_enabled || !con) 1203 return -EOPNOTSUPP; 1204 1205 /* Don't count since no reporting. 1206 */ 1207 if (!ce_count && !ue_count) 1208 return 0; 1209 1210 ce = 0; 1211 ue = 0; 1212 if (!query_info) { 1213 /* query all the ip blocks that support ras query interface */ 1214 list_for_each_entry(obj, &con->head, node) { 1215 struct ras_query_if info = { 1216 .head = obj->head, 1217 }; 1218 1219 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info); 1220 } 1221 } else { 1222 /* query specific ip block */ 1223 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info); 1224 } 1225 1226 if (ret) 1227 return ret; 1228 1229 if (ce_count) 1230 *ce_count = ce; 1231 1232 if (ue_count) 1233 *ue_count = ue; 1234 1235 return 0; 1236 } 1237 /* query/inject/cure end */ 1238 1239 1240 /* sysfs begin */ 1241 1242 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, 1243 struct ras_badpage **bps, unsigned int *count); 1244 1245 static char *amdgpu_ras_badpage_flags_str(unsigned int flags) 1246 { 1247 switch (flags) { 1248 case AMDGPU_RAS_RETIRE_PAGE_RESERVED: 1249 return "R"; 1250 case AMDGPU_RAS_RETIRE_PAGE_PENDING: 1251 return "P"; 1252 case AMDGPU_RAS_RETIRE_PAGE_FAULT: 1253 default: 1254 return "F"; 1255 } 1256 } 1257 1258 /** 1259 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface 1260 * 1261 * It allows user to read the bad pages of vram on the gpu through 1262 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages 1263 * 1264 * It outputs multiple lines, and each line stands for one gpu page. 1265 * 1266 * The format of one line is below, 1267 * gpu pfn : gpu page size : flags 1268 * 1269 * gpu pfn and gpu page size are printed in hex format. 1270 * flags can be one of below character, 1271 * 1272 * R: reserved, this gpu page is reserved and not able to use. 1273 * 1274 * P: pending for reserve, this gpu page is marked as bad, will be reserved 1275 * in next window of page_reserve. 1276 * 1277 * F: unable to reserve. this gpu page can't be reserved due to some reasons. 1278 * 1279 * Examples: 1280 * 1281 * .. code-block:: bash 1282 * 1283 * 0x00000001 : 0x00001000 : R 1284 * 0x00000002 : 0x00001000 : P 1285 * 1286 */ 1287 1288 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, 1289 struct kobject *kobj, struct bin_attribute *attr, 1290 char *buf, loff_t ppos, size_t count) 1291 { 1292 struct amdgpu_ras *con = 1293 container_of(attr, struct amdgpu_ras, badpages_attr); 1294 struct amdgpu_device *adev = con->adev; 1295 const unsigned int element_size = 1296 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1; 1297 unsigned int start = div64_ul(ppos + element_size - 1, element_size); 1298 unsigned int end = div64_ul(ppos + count - 1, element_size); 1299 ssize_t s = 0; 1300 struct ras_badpage *bps = NULL; 1301 unsigned int bps_count = 0; 1302 1303 memset(buf, 0, count); 1304 1305 if (amdgpu_ras_badpages_read(adev, &bps, &bps_count)) 1306 return 0; 1307 1308 for (; start < end && start < bps_count; start++) 1309 s += scnprintf(&buf[s], element_size + 1, 1310 "0x%08x : 0x%08x : %1s\n", 1311 bps[start].bp, 1312 bps[start].size, 1313 amdgpu_ras_badpage_flags_str(bps[start].flags)); 1314 1315 kfree(bps); 1316 1317 return s; 1318 } 1319 1320 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, 1321 struct device_attribute *attr, char *buf) 1322 { 1323 struct amdgpu_ras *con = 1324 container_of(attr, struct amdgpu_ras, features_attr); 1325 1326 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); 1327 } 1328 1329 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) 1330 { 1331 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1332 1333 sysfs_remove_file_from_group(&adev->dev->kobj, 1334 &con->badpages_attr.attr, 1335 RAS_FS_NAME); 1336 } 1337 1338 static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) 1339 { 1340 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1341 struct attribute *attrs[] = { 1342 &con->features_attr.attr, 1343 NULL 1344 }; 1345 struct attribute_group group = { 1346 .name = RAS_FS_NAME, 1347 .attrs = attrs, 1348 }; 1349 1350 sysfs_remove_group(&adev->dev->kobj, &group); 1351 1352 return 0; 1353 } 1354 1355 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, 1356 struct ras_common_if *head) 1357 { 1358 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1359 1360 if (!obj || obj->attr_inuse) 1361 return -EINVAL; 1362 1363 get_obj(obj); 1364 1365 snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name), 1366 "%s_err_count", head->name); 1367 1368 obj->sysfs_attr = (struct device_attribute){ 1369 .attr = { 1370 .name = obj->fs_data.sysfs_name, 1371 .mode = S_IRUGO, 1372 }, 1373 .show = amdgpu_ras_sysfs_read, 1374 }; 1375 sysfs_attr_init(&obj->sysfs_attr.attr); 1376 1377 if (sysfs_add_file_to_group(&adev->dev->kobj, 1378 &obj->sysfs_attr.attr, 1379 RAS_FS_NAME)) { 1380 put_obj(obj); 1381 return -EINVAL; 1382 } 1383 1384 obj->attr_inuse = 1; 1385 1386 return 0; 1387 } 1388 1389 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, 1390 struct ras_common_if *head) 1391 { 1392 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1393 1394 if (!obj || !obj->attr_inuse) 1395 return -EINVAL; 1396 1397 sysfs_remove_file_from_group(&adev->dev->kobj, 1398 &obj->sysfs_attr.attr, 1399 RAS_FS_NAME); 1400 obj->attr_inuse = 0; 1401 put_obj(obj); 1402 1403 return 0; 1404 } 1405 1406 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) 1407 { 1408 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1409 struct ras_manager *obj, *tmp; 1410 1411 list_for_each_entry_safe(obj, tmp, &con->head, node) { 1412 amdgpu_ras_sysfs_remove(adev, &obj->head); 1413 } 1414 1415 if (amdgpu_bad_page_threshold != 0) 1416 amdgpu_ras_sysfs_remove_bad_page_node(adev); 1417 1418 amdgpu_ras_sysfs_remove_feature_node(adev); 1419 1420 return 0; 1421 } 1422 /* sysfs end */ 1423 1424 /** 1425 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors 1426 * 1427 * Normally when there is an uncorrectable error, the driver will reset 1428 * the GPU to recover. However, in the event of an unrecoverable error, 1429 * the driver provides an interface to reboot the system automatically 1430 * in that event. 1431 * 1432 * The following file in debugfs provides that interface: 1433 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot 1434 * 1435 * Usage: 1436 * 1437 * .. code-block:: bash 1438 * 1439 * echo true > .../ras/auto_reboot 1440 * 1441 */ 1442 /* debugfs begin */ 1443 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) 1444 { 1445 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1446 struct drm_minor *minor = adev_to_drm(adev)->primary; 1447 struct dentry *dir; 1448 1449 dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root); 1450 debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev, 1451 &amdgpu_ras_debugfs_ctrl_ops); 1452 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev, 1453 &amdgpu_ras_debugfs_eeprom_ops); 1454 debugfs_create_u32("bad_page_cnt_threshold", 0444, dir, 1455 &con->bad_page_cnt_threshold); 1456 debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled); 1457 debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled); 1458 debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev, 1459 &amdgpu_ras_debugfs_eeprom_size_ops); 1460 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", 1461 S_IRUGO, dir, adev, 1462 &amdgpu_ras_debugfs_eeprom_table_ops); 1463 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); 1464 1465 /* 1466 * After one uncorrectable error happens, usually GPU recovery will 1467 * be scheduled. But due to the known problem in GPU recovery failing 1468 * to bring GPU back, below interface provides one direct way to 1469 * user to reboot system automatically in such case within 1470 * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine 1471 * will never be called. 1472 */ 1473 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); 1474 1475 /* 1476 * User could set this not to clean up hardware's error count register 1477 * of RAS IPs during ras recovery. 1478 */ 1479 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir, 1480 &con->disable_ras_err_cnt_harvest); 1481 return dir; 1482 } 1483 1484 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, 1485 struct ras_fs_if *head, 1486 struct dentry *dir) 1487 { 1488 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); 1489 1490 if (!obj || !dir) 1491 return; 1492 1493 get_obj(obj); 1494 1495 memcpy(obj->fs_data.debugfs_name, 1496 head->debugfs_name, 1497 sizeof(obj->fs_data.debugfs_name)); 1498 1499 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir, 1500 obj, &amdgpu_ras_debugfs_ops); 1501 } 1502 1503 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) 1504 { 1505 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1506 struct dentry *dir; 1507 struct ras_manager *obj; 1508 struct ras_fs_if fs_info; 1509 1510 /* 1511 * it won't be called in resume path, no need to check 1512 * suspend and gpu reset status 1513 */ 1514 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) 1515 return; 1516 1517 dir = amdgpu_ras_debugfs_create_ctrl_node(adev); 1518 1519 list_for_each_entry(obj, &con->head, node) { 1520 if (amdgpu_ras_is_supported(adev, obj->head.block) && 1521 (obj->attr_inuse == 1)) { 1522 sprintf(fs_info.debugfs_name, "%s_err_inject", 1523 get_ras_block_str(&obj->head)); 1524 fs_info.head = obj->head; 1525 amdgpu_ras_debugfs_create(adev, &fs_info, dir); 1526 } 1527 } 1528 } 1529 1530 /* debugfs end */ 1531 1532 /* ras fs */ 1533 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO, 1534 amdgpu_ras_sysfs_badpages_read, NULL, 0); 1535 static DEVICE_ATTR(features, S_IRUGO, 1536 amdgpu_ras_sysfs_features_read, NULL); 1537 static int amdgpu_ras_fs_init(struct amdgpu_device *adev) 1538 { 1539 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1540 struct attribute_group group = { 1541 .name = RAS_FS_NAME, 1542 }; 1543 struct attribute *attrs[] = { 1544 &con->features_attr.attr, 1545 NULL 1546 }; 1547 struct bin_attribute *bin_attrs[] = { 1548 NULL, 1549 NULL, 1550 }; 1551 int r; 1552 1553 /* add features entry */ 1554 con->features_attr = dev_attr_features; 1555 group.attrs = attrs; 1556 sysfs_attr_init(attrs[0]); 1557 1558 if (amdgpu_bad_page_threshold != 0) { 1559 /* add bad_page_features entry */ 1560 bin_attr_gpu_vram_bad_pages.private = NULL; 1561 con->badpages_attr = bin_attr_gpu_vram_bad_pages; 1562 bin_attrs[0] = &con->badpages_attr; 1563 group.bin_attrs = bin_attrs; 1564 sysfs_bin_attr_init(bin_attrs[0]); 1565 } 1566 1567 r = sysfs_create_group(&adev->dev->kobj, &group); 1568 if (r) 1569 dev_err(adev->dev, "Failed to create RAS sysfs group!"); 1570 1571 return 0; 1572 } 1573 1574 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) 1575 { 1576 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1577 struct ras_manager *con_obj, *ip_obj, *tmp; 1578 1579 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 1580 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { 1581 ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head); 1582 if (ip_obj) 1583 put_obj(ip_obj); 1584 } 1585 } 1586 1587 amdgpu_ras_sysfs_remove_all(adev); 1588 return 0; 1589 } 1590 /* ras fs end */ 1591 1592 /* ih begin */ 1593 1594 /* For the hardware that cannot enable bif ring for both ras_controller_irq 1595 * and ras_err_evnet_athub_irq ih cookies, the driver has to poll status 1596 * register to check whether the interrupt is triggered or not, and properly 1597 * ack the interrupt if it is there 1598 */ 1599 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev) 1600 { 1601 /* Fatal error events are handled on host side */ 1602 if (amdgpu_sriov_vf(adev) || 1603 !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) 1604 return; 1605 1606 if (adev->nbio.ras && 1607 adev->nbio.ras->handle_ras_controller_intr_no_bifring) 1608 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev); 1609 1610 if (adev->nbio.ras && 1611 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring) 1612 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev); 1613 } 1614 1615 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj, 1616 struct amdgpu_iv_entry *entry) 1617 { 1618 bool poison_stat = false; 1619 struct amdgpu_device *adev = obj->adev; 1620 struct amdgpu_ras_block_object *block_obj = 1621 amdgpu_ras_get_ras_block(adev, obj->head.block, 0); 1622 1623 if (!block_obj || !block_obj->hw_ops) 1624 return; 1625 1626 /* both query_poison_status and handle_poison_consumption are optional, 1627 * but at least one of them should be implemented if we need poison 1628 * consumption handler 1629 */ 1630 if (block_obj->hw_ops->query_poison_status) { 1631 poison_stat = block_obj->hw_ops->query_poison_status(adev); 1632 if (!poison_stat) { 1633 /* Not poison consumption interrupt, no need to handle it */ 1634 dev_info(adev->dev, "No RAS poison status in %s poison IH.\n", 1635 block_obj->ras_comm.name); 1636 1637 return; 1638 } 1639 } 1640 1641 if (!adev->gmc.xgmi.connected_to_cpu) 1642 amdgpu_umc_poison_handler(adev, false); 1643 1644 if (block_obj->hw_ops->handle_poison_consumption) 1645 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev); 1646 1647 /* gpu reset is fallback for failed and default cases */ 1648 if (poison_stat) { 1649 dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n", 1650 block_obj->ras_comm.name); 1651 amdgpu_ras_reset_gpu(adev); 1652 } 1653 } 1654 1655 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj, 1656 struct amdgpu_iv_entry *entry) 1657 { 1658 dev_info(obj->adev->dev, 1659 "Poison is created, no user action is needed.\n"); 1660 } 1661 1662 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj, 1663 struct amdgpu_iv_entry *entry) 1664 { 1665 struct ras_ih_data *data = &obj->ih_data; 1666 struct ras_err_data err_data = {0, 0, 0, NULL}; 1667 int ret; 1668 1669 if (!data->cb) 1670 return; 1671 1672 /* Let IP handle its data, maybe we need get the output 1673 * from the callback to update the error type/count, etc 1674 */ 1675 ret = data->cb(obj->adev, &err_data, entry); 1676 /* ue will trigger an interrupt, and in that case 1677 * we need do a reset to recovery the whole system. 1678 * But leave IP do that recovery, here we just dispatch 1679 * the error. 1680 */ 1681 if (ret == AMDGPU_RAS_SUCCESS) { 1682 /* these counts could be left as 0 if 1683 * some blocks do not count error number 1684 */ 1685 obj->err_data.ue_count += err_data.ue_count; 1686 obj->err_data.ce_count += err_data.ce_count; 1687 } 1688 } 1689 1690 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) 1691 { 1692 struct ras_ih_data *data = &obj->ih_data; 1693 struct amdgpu_iv_entry entry; 1694 1695 while (data->rptr != data->wptr) { 1696 rmb(); 1697 memcpy(&entry, &data->ring[data->rptr], 1698 data->element_size); 1699 1700 wmb(); 1701 data->rptr = (data->aligned_element_size + 1702 data->rptr) % data->ring_size; 1703 1704 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) { 1705 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC) 1706 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry); 1707 else 1708 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry); 1709 } else { 1710 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC) 1711 amdgpu_ras_interrupt_umc_handler(obj, &entry); 1712 else 1713 dev_warn(obj->adev->dev, 1714 "No RAS interrupt handler for non-UMC block with poison disabled.\n"); 1715 } 1716 } 1717 } 1718 1719 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) 1720 { 1721 struct ras_ih_data *data = 1722 container_of(work, struct ras_ih_data, ih_work); 1723 struct ras_manager *obj = 1724 container_of(data, struct ras_manager, ih_data); 1725 1726 amdgpu_ras_interrupt_handler(obj); 1727 } 1728 1729 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, 1730 struct ras_dispatch_if *info) 1731 { 1732 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 1733 struct ras_ih_data *data = &obj->ih_data; 1734 1735 if (!obj) 1736 return -EINVAL; 1737 1738 if (data->inuse == 0) 1739 return 0; 1740 1741 /* Might be overflow... */ 1742 memcpy(&data->ring[data->wptr], info->entry, 1743 data->element_size); 1744 1745 wmb(); 1746 data->wptr = (data->aligned_element_size + 1747 data->wptr) % data->ring_size; 1748 1749 schedule_work(&data->ih_work); 1750 1751 return 0; 1752 } 1753 1754 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, 1755 struct ras_common_if *head) 1756 { 1757 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1758 struct ras_ih_data *data; 1759 1760 if (!obj) 1761 return -EINVAL; 1762 1763 data = &obj->ih_data; 1764 if (data->inuse == 0) 1765 return 0; 1766 1767 cancel_work_sync(&data->ih_work); 1768 1769 kfree(data->ring); 1770 memset(data, 0, sizeof(*data)); 1771 put_obj(obj); 1772 1773 return 0; 1774 } 1775 1776 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev, 1777 struct ras_common_if *head) 1778 { 1779 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1780 struct ras_ih_data *data; 1781 struct amdgpu_ras_block_object *ras_obj; 1782 1783 if (!obj) { 1784 /* in case we registe the IH before enable ras feature */ 1785 obj = amdgpu_ras_create_obj(adev, head); 1786 if (!obj) 1787 return -EINVAL; 1788 } else 1789 get_obj(obj); 1790 1791 ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm); 1792 1793 data = &obj->ih_data; 1794 /* add the callback.etc */ 1795 *data = (struct ras_ih_data) { 1796 .inuse = 0, 1797 .cb = ras_obj->ras_cb, 1798 .element_size = sizeof(struct amdgpu_iv_entry), 1799 .rptr = 0, 1800 .wptr = 0, 1801 }; 1802 1803 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler); 1804 1805 data->aligned_element_size = ALIGN(data->element_size, 8); 1806 /* the ring can store 64 iv entries. */ 1807 data->ring_size = 64 * data->aligned_element_size; 1808 data->ring = kmalloc(data->ring_size, GFP_KERNEL); 1809 if (!data->ring) { 1810 put_obj(obj); 1811 return -ENOMEM; 1812 } 1813 1814 /* IH is ready */ 1815 data->inuse = 1; 1816 1817 return 0; 1818 } 1819 1820 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev) 1821 { 1822 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1823 struct ras_manager *obj, *tmp; 1824 1825 list_for_each_entry_safe(obj, tmp, &con->head, node) { 1826 amdgpu_ras_interrupt_remove_handler(adev, &obj->head); 1827 } 1828 1829 return 0; 1830 } 1831 /* ih end */ 1832 1833 /* traversal all IPs except NBIO to query error counter */ 1834 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) 1835 { 1836 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1837 struct ras_manager *obj; 1838 1839 if (!adev->ras_enabled || !con) 1840 return; 1841 1842 list_for_each_entry(obj, &con->head, node) { 1843 struct ras_query_if info = { 1844 .head = obj->head, 1845 }; 1846 1847 /* 1848 * PCIE_BIF IP has one different isr by ras controller 1849 * interrupt, the specific ras counter query will be 1850 * done in that isr. So skip such block from common 1851 * sync flood interrupt isr calling. 1852 */ 1853 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF) 1854 continue; 1855 1856 /* 1857 * this is a workaround for aldebaran, skip send msg to 1858 * smu to get ecc_info table due to smu handle get ecc 1859 * info table failed temporarily. 1860 * should be removed until smu fix handle ecc_info table. 1861 */ 1862 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) && 1863 (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))) 1864 continue; 1865 1866 amdgpu_ras_query_error_status(adev, &info); 1867 1868 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 1869 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) && 1870 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) { 1871 if (amdgpu_ras_reset_error_status(adev, info.head.block)) 1872 dev_warn(adev->dev, "Failed to reset error counter and error status"); 1873 } 1874 } 1875 } 1876 1877 /* Parse RdRspStatus and WrRspStatus */ 1878 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, 1879 struct ras_query_if *info) 1880 { 1881 struct amdgpu_ras_block_object *block_obj; 1882 /* 1883 * Only two block need to query read/write 1884 * RspStatus at current state 1885 */ 1886 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) && 1887 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB)) 1888 return; 1889 1890 block_obj = amdgpu_ras_get_ras_block(adev, 1891 info->head.block, 1892 info->head.sub_block_index); 1893 1894 if (!block_obj || !block_obj->hw_ops) { 1895 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 1896 get_ras_block_str(&info->head)); 1897 return; 1898 } 1899 1900 if (block_obj->hw_ops->query_ras_error_status) 1901 block_obj->hw_ops->query_ras_error_status(adev); 1902 1903 } 1904 1905 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) 1906 { 1907 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1908 struct ras_manager *obj; 1909 1910 if (!adev->ras_enabled || !con) 1911 return; 1912 1913 list_for_each_entry(obj, &con->head, node) { 1914 struct ras_query_if info = { 1915 .head = obj->head, 1916 }; 1917 1918 amdgpu_ras_error_status_query(adev, &info); 1919 } 1920 } 1921 1922 /* recovery begin */ 1923 1924 /* return 0 on success. 1925 * caller need free bps. 1926 */ 1927 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, 1928 struct ras_badpage **bps, unsigned int *count) 1929 { 1930 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1931 struct ras_err_handler_data *data; 1932 int i = 0; 1933 int ret = 0, status; 1934 1935 if (!con || !con->eh_data || !bps || !count) 1936 return -EINVAL; 1937 1938 mutex_lock(&con->recovery_lock); 1939 data = con->eh_data; 1940 if (!data || data->count == 0) { 1941 *bps = NULL; 1942 ret = -EINVAL; 1943 goto out; 1944 } 1945 1946 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL); 1947 if (!*bps) { 1948 ret = -ENOMEM; 1949 goto out; 1950 } 1951 1952 for (; i < data->count; i++) { 1953 (*bps)[i] = (struct ras_badpage){ 1954 .bp = data->bps[i].retired_page, 1955 .size = AMDGPU_GPU_PAGE_SIZE, 1956 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, 1957 }; 1958 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, 1959 data->bps[i].retired_page); 1960 if (status == -EBUSY) 1961 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; 1962 else if (status == -ENOENT) 1963 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; 1964 } 1965 1966 *count = data->count; 1967 out: 1968 mutex_unlock(&con->recovery_lock); 1969 return ret; 1970 } 1971 1972 static void amdgpu_ras_do_recovery(struct work_struct *work) 1973 { 1974 struct amdgpu_ras *ras = 1975 container_of(work, struct amdgpu_ras, recovery_work); 1976 struct amdgpu_device *remote_adev = NULL; 1977 struct amdgpu_device *adev = ras->adev; 1978 struct list_head device_list, *device_list_handle = NULL; 1979 1980 if (!ras->disable_ras_err_cnt_harvest) { 1981 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 1982 1983 /* Build list of devices to query RAS related errors */ 1984 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { 1985 device_list_handle = &hive->device_list; 1986 } else { 1987 INIT_LIST_HEAD(&device_list); 1988 list_add_tail(&adev->gmc.xgmi.head, &device_list); 1989 device_list_handle = &device_list; 1990 } 1991 1992 list_for_each_entry(remote_adev, 1993 device_list_handle, gmc.xgmi.head) { 1994 amdgpu_ras_query_err_status(remote_adev); 1995 amdgpu_ras_log_on_err_counter(remote_adev); 1996 } 1997 1998 amdgpu_put_xgmi_hive(hive); 1999 } 2000 2001 if (amdgpu_device_should_recover_gpu(ras->adev)) { 2002 struct amdgpu_reset_context reset_context; 2003 memset(&reset_context, 0, sizeof(reset_context)); 2004 2005 reset_context.method = AMD_RESET_METHOD_NONE; 2006 reset_context.reset_req_dev = adev; 2007 2008 /* Perform full reset in fatal error mode */ 2009 if (!amdgpu_ras_is_poison_mode_supported(ras->adev)) 2010 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 2011 else 2012 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 2013 2014 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context); 2015 } 2016 atomic_set(&ras->in_recovery, 0); 2017 } 2018 2019 /* alloc/realloc bps array */ 2020 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, 2021 struct ras_err_handler_data *data, int pages) 2022 { 2023 unsigned int old_space = data->count + data->space_left; 2024 unsigned int new_space = old_space + pages; 2025 unsigned int align_space = ALIGN(new_space, 512); 2026 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); 2027 2028 if (!bps) { 2029 return -ENOMEM; 2030 } 2031 2032 if (data->bps) { 2033 memcpy(bps, data->bps, 2034 data->count * sizeof(*data->bps)); 2035 kfree(data->bps); 2036 } 2037 2038 data->bps = bps; 2039 data->space_left += align_space - old_space; 2040 return 0; 2041 } 2042 2043 /* it deal with vram only. */ 2044 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, 2045 struct eeprom_table_record *bps, int pages) 2046 { 2047 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2048 struct ras_err_handler_data *data; 2049 int ret = 0; 2050 uint32_t i; 2051 2052 if (!con || !con->eh_data || !bps || pages <= 0) 2053 return 0; 2054 2055 mutex_lock(&con->recovery_lock); 2056 data = con->eh_data; 2057 if (!data) 2058 goto out; 2059 2060 for (i = 0; i < pages; i++) { 2061 if (amdgpu_ras_check_bad_page_unlock(con, 2062 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) 2063 continue; 2064 2065 if (!data->space_left && 2066 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) { 2067 ret = -ENOMEM; 2068 goto out; 2069 } 2070 2071 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr, 2072 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT, 2073 AMDGPU_GPU_PAGE_SIZE); 2074 2075 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps)); 2076 data->count++; 2077 data->space_left--; 2078 } 2079 out: 2080 mutex_unlock(&con->recovery_lock); 2081 2082 return ret; 2083 } 2084 2085 /* 2086 * write error record array to eeprom, the function should be 2087 * protected by recovery_lock 2088 */ 2089 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) 2090 { 2091 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2092 struct ras_err_handler_data *data; 2093 struct amdgpu_ras_eeprom_control *control; 2094 int save_count; 2095 2096 if (!con || !con->eh_data) 2097 return 0; 2098 2099 mutex_lock(&con->recovery_lock); 2100 control = &con->eeprom_control; 2101 data = con->eh_data; 2102 save_count = data->count - control->ras_num_recs; 2103 mutex_unlock(&con->recovery_lock); 2104 /* only new entries are saved */ 2105 if (save_count > 0) { 2106 if (amdgpu_ras_eeprom_append(control, 2107 &data->bps[control->ras_num_recs], 2108 save_count)) { 2109 dev_err(adev->dev, "Failed to save EEPROM table data!"); 2110 return -EIO; 2111 } 2112 2113 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count); 2114 } 2115 2116 return 0; 2117 } 2118 2119 /* 2120 * read error record array in eeprom and reserve enough space for 2121 * storing new bad pages 2122 */ 2123 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) 2124 { 2125 struct amdgpu_ras_eeprom_control *control = 2126 &adev->psp.ras_context.ras->eeprom_control; 2127 struct eeprom_table_record *bps; 2128 int ret; 2129 2130 /* no bad page record, skip eeprom access */ 2131 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0) 2132 return 0; 2133 2134 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL); 2135 if (!bps) 2136 return -ENOMEM; 2137 2138 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs); 2139 if (ret) 2140 dev_err(adev->dev, "Failed to load EEPROM table records!"); 2141 else 2142 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs); 2143 2144 kfree(bps); 2145 return ret; 2146 } 2147 2148 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, 2149 uint64_t addr) 2150 { 2151 struct ras_err_handler_data *data = con->eh_data; 2152 int i; 2153 2154 addr >>= AMDGPU_GPU_PAGE_SHIFT; 2155 for (i = 0; i < data->count; i++) 2156 if (addr == data->bps[i].retired_page) 2157 return true; 2158 2159 return false; 2160 } 2161 2162 /* 2163 * check if an address belongs to bad page 2164 * 2165 * Note: this check is only for umc block 2166 */ 2167 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, 2168 uint64_t addr) 2169 { 2170 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2171 bool ret = false; 2172 2173 if (!con || !con->eh_data) 2174 return ret; 2175 2176 mutex_lock(&con->recovery_lock); 2177 ret = amdgpu_ras_check_bad_page_unlock(con, addr); 2178 mutex_unlock(&con->recovery_lock); 2179 return ret; 2180 } 2181 2182 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev, 2183 uint32_t max_count) 2184 { 2185 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2186 2187 /* 2188 * Justification of value bad_page_cnt_threshold in ras structure 2189 * 2190 * Generally, -1 <= amdgpu_bad_page_threshold <= max record length 2191 * in eeprom, and introduce two scenarios accordingly. 2192 * 2193 * Bad page retirement enablement: 2194 * - If amdgpu_bad_page_threshold = -1, 2195 * bad_page_cnt_threshold = typical value by formula. 2196 * 2197 * - When the value from user is 0 < amdgpu_bad_page_threshold < 2198 * max record length in eeprom, use it directly. 2199 * 2200 * Bad page retirement disablement: 2201 * - If amdgpu_bad_page_threshold = 0, bad page retirement 2202 * functionality is disabled, and bad_page_cnt_threshold will 2203 * take no effect. 2204 */ 2205 2206 if (amdgpu_bad_page_threshold < 0) { 2207 u64 val = adev->gmc.mc_vram_size; 2208 2209 do_div(val, RAS_BAD_PAGE_COVER); 2210 con->bad_page_cnt_threshold = min(lower_32_bits(val), 2211 max_count); 2212 } else { 2213 con->bad_page_cnt_threshold = min_t(int, max_count, 2214 amdgpu_bad_page_threshold); 2215 } 2216 } 2217 2218 int amdgpu_ras_recovery_init(struct amdgpu_device *adev) 2219 { 2220 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2221 struct ras_err_handler_data **data; 2222 u32 max_eeprom_records_count = 0; 2223 bool exc_err_limit = false; 2224 int ret; 2225 2226 if (!con || amdgpu_sriov_vf(adev)) 2227 return 0; 2228 2229 /* Allow access to RAS EEPROM via debugfs, when the ASIC 2230 * supports RAS and debugfs is enabled, but when 2231 * adev->ras_enabled is unset, i.e. when "ras_enable" 2232 * module parameter is set to 0. 2233 */ 2234 con->adev = adev; 2235 2236 if (!adev->ras_enabled) 2237 return 0; 2238 2239 data = &con->eh_data; 2240 *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO); 2241 if (!*data) { 2242 ret = -ENOMEM; 2243 goto out; 2244 } 2245 2246 mutex_init(&con->recovery_lock); 2247 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); 2248 atomic_set(&con->in_recovery, 0); 2249 con->eeprom_control.bad_channel_bitmap = 0; 2250 2251 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(); 2252 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count); 2253 2254 /* Todo: During test the SMU might fail to read the eeprom through I2C 2255 * when the GPU is pending on XGMI reset during probe time 2256 * (Mostly after second bus reset), skip it now 2257 */ 2258 if (adev->gmc.xgmi.pending_reset) 2259 return 0; 2260 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit); 2261 /* 2262 * This calling fails when exc_err_limit is true or 2263 * ret != 0. 2264 */ 2265 if (exc_err_limit || ret) 2266 goto free; 2267 2268 if (con->eeprom_control.ras_num_recs) { 2269 ret = amdgpu_ras_load_bad_pages(adev); 2270 if (ret) 2271 goto free; 2272 2273 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); 2274 2275 if (con->update_channel_flag == true) { 2276 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap); 2277 con->update_channel_flag = false; 2278 } 2279 } 2280 2281 #ifdef CONFIG_X86_MCE_AMD 2282 if ((adev->asic_type == CHIP_ALDEBARAN) && 2283 (adev->gmc.xgmi.connected_to_cpu)) 2284 amdgpu_register_bad_pages_mca_notifier(adev); 2285 #endif 2286 return 0; 2287 2288 free: 2289 kfree((*data)->bps); 2290 kfree(*data); 2291 con->eh_data = NULL; 2292 out: 2293 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret); 2294 2295 /* 2296 * Except error threshold exceeding case, other failure cases in this 2297 * function would not fail amdgpu driver init. 2298 */ 2299 if (!exc_err_limit) 2300 ret = 0; 2301 else 2302 ret = -EINVAL; 2303 2304 return ret; 2305 } 2306 2307 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) 2308 { 2309 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2310 struct ras_err_handler_data *data = con->eh_data; 2311 2312 /* recovery_init failed to init it, fini is useless */ 2313 if (!data) 2314 return 0; 2315 2316 cancel_work_sync(&con->recovery_work); 2317 2318 mutex_lock(&con->recovery_lock); 2319 con->eh_data = NULL; 2320 kfree(data->bps); 2321 kfree(data); 2322 mutex_unlock(&con->recovery_lock); 2323 2324 return 0; 2325 } 2326 /* recovery end */ 2327 2328 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) 2329 { 2330 if (amdgpu_sriov_vf(adev)) { 2331 switch (adev->ip_versions[MP0_HWIP][0]) { 2332 case IP_VERSION(13, 0, 2): 2333 return true; 2334 default: 2335 return false; 2336 } 2337 } 2338 2339 if (adev->asic_type == CHIP_IP_DISCOVERY) { 2340 switch (adev->ip_versions[MP0_HWIP][0]) { 2341 case IP_VERSION(13, 0, 0): 2342 case IP_VERSION(13, 0, 10): 2343 return true; 2344 default: 2345 return false; 2346 } 2347 } 2348 2349 return adev->asic_type == CHIP_VEGA10 || 2350 adev->asic_type == CHIP_VEGA20 || 2351 adev->asic_type == CHIP_ARCTURUS || 2352 adev->asic_type == CHIP_ALDEBARAN || 2353 adev->asic_type == CHIP_SIENNA_CICHLID; 2354 } 2355 2356 /* 2357 * this is workaround for vega20 workstation sku, 2358 * force enable gfx ras, ignore vbios gfx ras flag 2359 * due to GC EDC can not write 2360 */ 2361 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev) 2362 { 2363 struct atom_context *ctx = adev->mode_info.atom_context; 2364 2365 if (!ctx) 2366 return; 2367 2368 if (strnstr(ctx->vbios_version, "D16406", 2369 sizeof(ctx->vbios_version)) || 2370 strnstr(ctx->vbios_version, "D36002", 2371 sizeof(ctx->vbios_version))) 2372 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX); 2373 } 2374 2375 /* 2376 * check hardware's ras ability which will be saved in hw_supported. 2377 * if hardware does not support ras, we can skip some ras initializtion and 2378 * forbid some ras operations from IP. 2379 * if software itself, say boot parameter, limit the ras ability. We still 2380 * need allow IP do some limited operations, like disable. In such case, 2381 * we have to initialize ras as normal. but need check if operation is 2382 * allowed or not in each function. 2383 */ 2384 static void amdgpu_ras_check_supported(struct amdgpu_device *adev) 2385 { 2386 adev->ras_hw_enabled = adev->ras_enabled = 0; 2387 2388 if (!adev->is_atom_fw || 2389 !amdgpu_ras_asic_supported(adev)) 2390 return; 2391 2392 if (!adev->gmc.xgmi.connected_to_cpu) { 2393 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { 2394 dev_info(adev->dev, "MEM ECC is active.\n"); 2395 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC | 2396 1 << AMDGPU_RAS_BLOCK__DF); 2397 } else { 2398 dev_info(adev->dev, "MEM ECC is not presented.\n"); 2399 } 2400 2401 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { 2402 dev_info(adev->dev, "SRAM ECC is active.\n"); 2403 if (!amdgpu_sriov_vf(adev)) 2404 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC | 2405 1 << AMDGPU_RAS_BLOCK__DF); 2406 else 2407 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF | 2408 1 << AMDGPU_RAS_BLOCK__SDMA | 2409 1 << AMDGPU_RAS_BLOCK__GFX); 2410 2411 /* VCN/JPEG RAS can be supported on both bare metal and 2412 * SRIOV environment 2413 */ 2414 if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) || 2415 adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0)) 2416 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | 2417 1 << AMDGPU_RAS_BLOCK__JPEG); 2418 else 2419 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN | 2420 1 << AMDGPU_RAS_BLOCK__JPEG); 2421 } else { 2422 dev_info(adev->dev, "SRAM ECC is not presented.\n"); 2423 } 2424 } else { 2425 /* driver only manages a few IP blocks RAS feature 2426 * when GPU is connected cpu through XGMI */ 2427 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX | 2428 1 << AMDGPU_RAS_BLOCK__SDMA | 2429 1 << AMDGPU_RAS_BLOCK__MMHUB); 2430 } 2431 2432 amdgpu_ras_get_quirks(adev); 2433 2434 /* hw_supported needs to be aligned with RAS block mask. */ 2435 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; 2436 2437 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 : 2438 adev->ras_hw_enabled & amdgpu_ras_mask; 2439 } 2440 2441 static void amdgpu_ras_counte_dw(struct work_struct *work) 2442 { 2443 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, 2444 ras_counte_delay_work.work); 2445 struct amdgpu_device *adev = con->adev; 2446 struct drm_device *dev = adev_to_drm(adev); 2447 unsigned long ce_count, ue_count; 2448 int res; 2449 2450 res = pm_runtime_get_sync(dev->dev); 2451 if (res < 0) 2452 goto Out; 2453 2454 /* Cache new values. 2455 */ 2456 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) { 2457 atomic_set(&con->ras_ce_count, ce_count); 2458 atomic_set(&con->ras_ue_count, ue_count); 2459 } 2460 2461 pm_runtime_mark_last_busy(dev->dev); 2462 Out: 2463 pm_runtime_put_autosuspend(dev->dev); 2464 } 2465 2466 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev) 2467 { 2468 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2469 bool df_poison, umc_poison; 2470 2471 /* poison setting is useless on SRIOV guest */ 2472 if (amdgpu_sriov_vf(adev) || !con) 2473 return; 2474 2475 /* Init poison supported flag, the default value is false */ 2476 if (adev->gmc.xgmi.connected_to_cpu) { 2477 /* enabled by default when GPU is connected to CPU */ 2478 con->poison_supported = true; 2479 } else if (adev->df.funcs && 2480 adev->df.funcs->query_ras_poison_mode && 2481 adev->umc.ras && 2482 adev->umc.ras->query_ras_poison_mode) { 2483 df_poison = 2484 adev->df.funcs->query_ras_poison_mode(adev); 2485 umc_poison = 2486 adev->umc.ras->query_ras_poison_mode(adev); 2487 2488 /* Only poison is set in both DF and UMC, we can support it */ 2489 if (df_poison && umc_poison) 2490 con->poison_supported = true; 2491 else if (df_poison != umc_poison) 2492 dev_warn(adev->dev, 2493 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n", 2494 df_poison, umc_poison); 2495 } 2496 } 2497 2498 int amdgpu_ras_init(struct amdgpu_device *adev) 2499 { 2500 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2501 int r; 2502 2503 if (con) 2504 return 0; 2505 2506 con = kmalloc(sizeof(struct amdgpu_ras) + 2507 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT + 2508 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT, 2509 GFP_KERNEL|__GFP_ZERO); 2510 if (!con) 2511 return -ENOMEM; 2512 2513 con->adev = adev; 2514 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); 2515 atomic_set(&con->ras_ce_count, 0); 2516 atomic_set(&con->ras_ue_count, 0); 2517 2518 con->objs = (struct ras_manager *)(con + 1); 2519 2520 amdgpu_ras_set_context(adev, con); 2521 2522 amdgpu_ras_check_supported(adev); 2523 2524 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) { 2525 /* set gfx block ras context feature for VEGA20 Gaming 2526 * send ras disable cmd to ras ta during ras late init. 2527 */ 2528 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) { 2529 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); 2530 2531 return 0; 2532 } 2533 2534 r = 0; 2535 goto release_con; 2536 } 2537 2538 con->update_channel_flag = false; 2539 con->features = 0; 2540 INIT_LIST_HEAD(&con->head); 2541 /* Might need get this flag from vbios. */ 2542 con->flags = RAS_DEFAULT_FLAGS; 2543 2544 /* initialize nbio ras function ahead of any other 2545 * ras functions so hardware fatal error interrupt 2546 * can be enabled as early as possible */ 2547 switch (adev->asic_type) { 2548 case CHIP_VEGA20: 2549 case CHIP_ARCTURUS: 2550 case CHIP_ALDEBARAN: 2551 if (!adev->gmc.xgmi.connected_to_cpu) { 2552 adev->nbio.ras = &nbio_v7_4_ras; 2553 amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block); 2554 adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm; 2555 } 2556 break; 2557 default: 2558 /* nbio ras is not available */ 2559 break; 2560 } 2561 2562 if (adev->nbio.ras && 2563 adev->nbio.ras->init_ras_controller_interrupt) { 2564 r = adev->nbio.ras->init_ras_controller_interrupt(adev); 2565 if (r) 2566 goto release_con; 2567 } 2568 2569 if (adev->nbio.ras && 2570 adev->nbio.ras->init_ras_err_event_athub_interrupt) { 2571 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev); 2572 if (r) 2573 goto release_con; 2574 } 2575 2576 amdgpu_ras_query_poison_mode(adev); 2577 2578 if (amdgpu_ras_fs_init(adev)) { 2579 r = -EINVAL; 2580 goto release_con; 2581 } 2582 2583 dev_info(adev->dev, "RAS INFO: ras initialized successfully, " 2584 "hardware ability[%x] ras_mask[%x]\n", 2585 adev->ras_hw_enabled, adev->ras_enabled); 2586 2587 return 0; 2588 release_con: 2589 amdgpu_ras_set_context(adev, NULL); 2590 kfree(con); 2591 2592 return r; 2593 } 2594 2595 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev) 2596 { 2597 if (adev->gmc.xgmi.connected_to_cpu) 2598 return 1; 2599 return 0; 2600 } 2601 2602 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev, 2603 struct ras_common_if *ras_block) 2604 { 2605 struct ras_query_if info = { 2606 .head = *ras_block, 2607 }; 2608 2609 if (!amdgpu_persistent_edc_harvesting_supported(adev)) 2610 return 0; 2611 2612 if (amdgpu_ras_query_error_status(adev, &info) != 0) 2613 DRM_WARN("RAS init harvest failure"); 2614 2615 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0) 2616 DRM_WARN("RAS init harvest reset failure"); 2617 2618 return 0; 2619 } 2620 2621 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev) 2622 { 2623 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2624 2625 if (!con) 2626 return false; 2627 2628 return con->poison_supported; 2629 } 2630 2631 /* helper function to handle common stuff in ip late init phase */ 2632 int amdgpu_ras_block_late_init(struct amdgpu_device *adev, 2633 struct ras_common_if *ras_block) 2634 { 2635 struct amdgpu_ras_block_object *ras_obj = NULL; 2636 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2637 struct ras_query_if *query_info; 2638 unsigned long ue_count, ce_count; 2639 int r; 2640 2641 /* disable RAS feature per IP block if it is not supported */ 2642 if (!amdgpu_ras_is_supported(adev, ras_block->block)) { 2643 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); 2644 return 0; 2645 } 2646 2647 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); 2648 if (r) { 2649 if (adev->in_suspend || amdgpu_in_reset(adev)) { 2650 /* in resume phase, if fail to enable ras, 2651 * clean up all ras fs nodes, and disable ras */ 2652 goto cleanup; 2653 } else 2654 return r; 2655 } 2656 2657 /* check for errors on warm reset edc persisant supported ASIC */ 2658 amdgpu_persistent_edc_harvesting(adev, ras_block); 2659 2660 /* in resume phase, no need to create ras fs node */ 2661 if (adev->in_suspend || amdgpu_in_reset(adev)) 2662 return 0; 2663 2664 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm); 2665 if (ras_obj->ras_cb || (ras_obj->hw_ops && 2666 (ras_obj->hw_ops->query_poison_status || 2667 ras_obj->hw_ops->handle_poison_consumption))) { 2668 r = amdgpu_ras_interrupt_add_handler(adev, ras_block); 2669 if (r) 2670 goto cleanup; 2671 } 2672 2673 r = amdgpu_ras_sysfs_create(adev, ras_block); 2674 if (r) 2675 goto interrupt; 2676 2677 /* Those are the cached values at init. 2678 */ 2679 query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL); 2680 if (!query_info) 2681 return -ENOMEM; 2682 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if)); 2683 2684 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) { 2685 atomic_set(&con->ras_ce_count, ce_count); 2686 atomic_set(&con->ras_ue_count, ue_count); 2687 } 2688 2689 kfree(query_info); 2690 return 0; 2691 2692 interrupt: 2693 if (ras_obj->ras_cb) 2694 amdgpu_ras_interrupt_remove_handler(adev, ras_block); 2695 cleanup: 2696 amdgpu_ras_feature_enable(adev, ras_block, 0); 2697 return r; 2698 } 2699 2700 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev, 2701 struct ras_common_if *ras_block) 2702 { 2703 return amdgpu_ras_block_late_init(adev, ras_block); 2704 } 2705 2706 /* helper function to remove ras fs node and interrupt handler */ 2707 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev, 2708 struct ras_common_if *ras_block) 2709 { 2710 struct amdgpu_ras_block_object *ras_obj; 2711 if (!ras_block) 2712 return; 2713 2714 amdgpu_ras_sysfs_remove(adev, ras_block); 2715 2716 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm); 2717 if (ras_obj->ras_cb) 2718 amdgpu_ras_interrupt_remove_handler(adev, ras_block); 2719 } 2720 2721 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev, 2722 struct ras_common_if *ras_block) 2723 { 2724 return amdgpu_ras_block_late_fini(adev, ras_block); 2725 } 2726 2727 /* do some init work after IP late init as dependence. 2728 * and it runs in resume/gpu reset/booting up cases. 2729 */ 2730 void amdgpu_ras_resume(struct amdgpu_device *adev) 2731 { 2732 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2733 struct ras_manager *obj, *tmp; 2734 2735 if (!adev->ras_enabled || !con) { 2736 /* clean ras context for VEGA20 Gaming after send ras disable cmd */ 2737 amdgpu_release_ras_context(adev); 2738 2739 return; 2740 } 2741 2742 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { 2743 /* Set up all other IPs which are not implemented. There is a 2744 * tricky thing that IP's actual ras error type should be 2745 * MULTI_UNCORRECTABLE, but as driver does not handle it, so 2746 * ERROR_NONE make sense anyway. 2747 */ 2748 amdgpu_ras_enable_all_features(adev, 1); 2749 2750 /* We enable ras on all hw_supported block, but as boot 2751 * parameter might disable some of them and one or more IP has 2752 * not implemented yet. So we disable them on behalf. 2753 */ 2754 list_for_each_entry_safe(obj, tmp, &con->head, node) { 2755 if (!amdgpu_ras_is_supported(adev, obj->head.block)) { 2756 amdgpu_ras_feature_enable(adev, &obj->head, 0); 2757 /* there should be no any reference. */ 2758 WARN_ON(alive_obj(obj)); 2759 } 2760 } 2761 } 2762 } 2763 2764 void amdgpu_ras_suspend(struct amdgpu_device *adev) 2765 { 2766 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2767 2768 if (!adev->ras_enabled || !con) 2769 return; 2770 2771 amdgpu_ras_disable_all_features(adev, 0); 2772 /* Make sure all ras objects are disabled. */ 2773 if (con->features) 2774 amdgpu_ras_disable_all_features(adev, 1); 2775 } 2776 2777 int amdgpu_ras_late_init(struct amdgpu_device *adev) 2778 { 2779 struct amdgpu_ras_block_list *node, *tmp; 2780 struct amdgpu_ras_block_object *obj; 2781 int r; 2782 2783 /* Guest side doesn't need init ras feature */ 2784 if (amdgpu_sriov_vf(adev)) 2785 return 0; 2786 2787 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { 2788 if (!node->ras_obj) { 2789 dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); 2790 continue; 2791 } 2792 2793 obj = node->ras_obj; 2794 if (obj->ras_late_init) { 2795 r = obj->ras_late_init(adev, &obj->ras_comm); 2796 if (r) { 2797 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n", 2798 obj->ras_comm.name, r); 2799 return r; 2800 } 2801 } else 2802 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm); 2803 } 2804 2805 return 0; 2806 } 2807 2808 /* do some fini work before IP fini as dependence */ 2809 int amdgpu_ras_pre_fini(struct amdgpu_device *adev) 2810 { 2811 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2812 2813 if (!adev->ras_enabled || !con) 2814 return 0; 2815 2816 2817 /* Need disable ras on all IPs here before ip [hw/sw]fini */ 2818 if (con->features) 2819 amdgpu_ras_disable_all_features(adev, 0); 2820 amdgpu_ras_recovery_fini(adev); 2821 return 0; 2822 } 2823 2824 int amdgpu_ras_fini(struct amdgpu_device *adev) 2825 { 2826 struct amdgpu_ras_block_list *ras_node, *tmp; 2827 struct amdgpu_ras_block_object *obj = NULL; 2828 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2829 2830 if (!adev->ras_enabled || !con) 2831 return 0; 2832 2833 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) { 2834 if (ras_node->ras_obj) { 2835 obj = ras_node->ras_obj; 2836 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) && 2837 obj->ras_fini) 2838 obj->ras_fini(adev, &obj->ras_comm); 2839 else 2840 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm); 2841 } 2842 2843 /* Clear ras blocks from ras_list and free ras block list node */ 2844 list_del(&ras_node->node); 2845 kfree(ras_node); 2846 } 2847 2848 amdgpu_ras_fs_fini(adev); 2849 amdgpu_ras_interrupt_remove_all(adev); 2850 2851 WARN(con->features, "Feature mask is not cleared"); 2852 2853 if (con->features) 2854 amdgpu_ras_disable_all_features(adev, 1); 2855 2856 cancel_delayed_work_sync(&con->ras_counte_delay_work); 2857 2858 amdgpu_ras_set_context(adev, NULL); 2859 kfree(con); 2860 2861 return 0; 2862 } 2863 2864 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) 2865 { 2866 amdgpu_ras_check_supported(adev); 2867 if (!adev->ras_hw_enabled) 2868 return; 2869 2870 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { 2871 dev_info(adev->dev, "uncorrectable hardware error" 2872 "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); 2873 2874 amdgpu_ras_reset_gpu(adev); 2875 } 2876 } 2877 2878 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev) 2879 { 2880 if (adev->asic_type == CHIP_VEGA20 && 2881 adev->pm.fw_version <= 0x283400) { 2882 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) && 2883 amdgpu_ras_intr_triggered(); 2884 } 2885 2886 return false; 2887 } 2888 2889 void amdgpu_release_ras_context(struct amdgpu_device *adev) 2890 { 2891 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2892 2893 if (!con) 2894 return; 2895 2896 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { 2897 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); 2898 amdgpu_ras_set_context(adev, NULL); 2899 kfree(con); 2900 } 2901 } 2902 2903 #ifdef CONFIG_X86_MCE_AMD 2904 static struct amdgpu_device *find_adev(uint32_t node_id) 2905 { 2906 int i; 2907 struct amdgpu_device *adev = NULL; 2908 2909 for (i = 0; i < mce_adev_list.num_gpu; i++) { 2910 adev = mce_adev_list.devs[i]; 2911 2912 if (adev && adev->gmc.xgmi.connected_to_cpu && 2913 adev->gmc.xgmi.physical_node_id == node_id) 2914 break; 2915 adev = NULL; 2916 } 2917 2918 return adev; 2919 } 2920 2921 #define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF) 2922 #define GET_UMC_INST(m) (((m) >> 21) & 0x7) 2923 #define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4)) 2924 #define GPU_ID_OFFSET 8 2925 2926 static int amdgpu_bad_page_notifier(struct notifier_block *nb, 2927 unsigned long val, void *data) 2928 { 2929 struct mce *m = (struct mce *)data; 2930 struct amdgpu_device *adev = NULL; 2931 uint32_t gpu_id = 0; 2932 uint32_t umc_inst = 0, ch_inst = 0; 2933 2934 /* 2935 * If the error was generated in UMC_V2, which belongs to GPU UMCs, 2936 * and error occurred in DramECC (Extended error code = 0) then only 2937 * process the error, else bail out. 2938 */ 2939 if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) && 2940 (XEC(m->status, 0x3f) == 0x0))) 2941 return NOTIFY_DONE; 2942 2943 /* 2944 * If it is correctable error, return. 2945 */ 2946 if (mce_is_correctable(m)) 2947 return NOTIFY_OK; 2948 2949 /* 2950 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register. 2951 */ 2952 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET; 2953 2954 adev = find_adev(gpu_id); 2955 if (!adev) { 2956 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__, 2957 gpu_id); 2958 return NOTIFY_DONE; 2959 } 2960 2961 /* 2962 * If it is uncorrectable error, then find out UMC instance and 2963 * channel index. 2964 */ 2965 umc_inst = GET_UMC_INST(m->ipid); 2966 ch_inst = GET_CHAN_INDEX(m->ipid); 2967 2968 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d", 2969 umc_inst, ch_inst); 2970 2971 if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst)) 2972 return NOTIFY_OK; 2973 else 2974 return NOTIFY_DONE; 2975 } 2976 2977 static struct notifier_block amdgpu_bad_page_nb = { 2978 .notifier_call = amdgpu_bad_page_notifier, 2979 .priority = MCE_PRIO_UC, 2980 }; 2981 2982 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) 2983 { 2984 /* 2985 * Add the adev to the mce_adev_list. 2986 * During mode2 reset, amdgpu device is temporarily 2987 * removed from the mgpu_info list which can cause 2988 * page retirement to fail. 2989 * Use this list instead of mgpu_info to find the amdgpu 2990 * device on which the UMC error was reported. 2991 */ 2992 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev; 2993 2994 /* 2995 * Register the x86 notifier only once 2996 * with MCE subsystem. 2997 */ 2998 if (notifier_registered == false) { 2999 mce_register_decode_chain(&amdgpu_bad_page_nb); 3000 notifier_registered = true; 3001 } 3002 } 3003 #endif 3004 3005 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev) 3006 { 3007 if (!adev) 3008 return NULL; 3009 3010 return adev->psp.ras_context.ras; 3011 } 3012 3013 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con) 3014 { 3015 if (!adev) 3016 return -EINVAL; 3017 3018 adev->psp.ras_context.ras = ras_con; 3019 return 0; 3020 } 3021 3022 /* check if ras is supported on block, say, sdma, gfx */ 3023 int amdgpu_ras_is_supported(struct amdgpu_device *adev, 3024 unsigned int block) 3025 { 3026 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 3027 3028 if (block >= AMDGPU_RAS_BLOCK_COUNT) 3029 return 0; 3030 return ras && (adev->ras_enabled & (1 << block)); 3031 } 3032 3033 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) 3034 { 3035 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 3036 3037 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) 3038 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work); 3039 return 0; 3040 } 3041 3042 3043 /* Register each ip ras block into amdgpu ras */ 3044 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, 3045 struct amdgpu_ras_block_object *ras_block_obj) 3046 { 3047 struct amdgpu_ras_block_list *ras_node; 3048 if (!adev || !ras_block_obj) 3049 return -EINVAL; 3050 3051 if (!amdgpu_ras_asic_supported(adev)) 3052 return 0; 3053 3054 ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL); 3055 if (!ras_node) 3056 return -ENOMEM; 3057 3058 INIT_LIST_HEAD(&ras_node->node); 3059 ras_node->ras_obj = ras_block_obj; 3060 list_add_tail(&ras_node->node, &adev->ras_list); 3061 3062 return 0; 3063 } 3064