1 /* 2 * Copyright 2018 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * 23 */ 24 #include <linux/debugfs.h> 25 #include <linux/list.h> 26 #include <linux/module.h> 27 #include <linux/uaccess.h> 28 #include <linux/reboot.h> 29 #include <linux/syscalls.h> 30 31 #include "amdgpu.h" 32 #include "amdgpu_ras.h" 33 #include "amdgpu_atomfirmware.h" 34 #include "amdgpu_xgmi.h" 35 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" 36 #include "atom.h" 37 38 static const char *RAS_FS_NAME = "ras"; 39 40 const char *ras_error_string[] = { 41 "none", 42 "parity", 43 "single_correctable", 44 "multi_uncorrectable", 45 "poison", 46 }; 47 48 const char *ras_block_string[] = { 49 "umc", 50 "sdma", 51 "gfx", 52 "mmhub", 53 "athub", 54 "pcie_bif", 55 "hdp", 56 "xgmi_wafl", 57 "df", 58 "smn", 59 "sem", 60 "mp0", 61 "mp1", 62 "fuse", 63 }; 64 65 #define ras_err_str(i) (ras_error_string[ffs(i)]) 66 #define ras_block_str(i) (ras_block_string[i]) 67 68 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) 69 70 /* inject address is 52 bits */ 71 #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) 72 73 /* typical ECC bad page rate(1 bad page per 100MB VRAM) */ 74 #define RAS_BAD_PAGE_RATE (100 * 1024 * 1024ULL) 75 76 enum amdgpu_ras_retire_page_reservation { 77 AMDGPU_RAS_RETIRE_PAGE_RESERVED, 78 AMDGPU_RAS_RETIRE_PAGE_PENDING, 79 AMDGPU_RAS_RETIRE_PAGE_FAULT, 80 }; 81 82 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); 83 84 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, 85 uint64_t addr); 86 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, 87 uint64_t addr); 88 89 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready) 90 { 91 if (adev && amdgpu_ras_get_context(adev)) 92 amdgpu_ras_get_context(adev)->error_query_ready = ready; 93 } 94 95 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev) 96 { 97 if (adev && amdgpu_ras_get_context(adev)) 98 return amdgpu_ras_get_context(adev)->error_query_ready; 99 100 return false; 101 } 102 103 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address) 104 { 105 struct ras_err_data err_data = {0, 0, 0, NULL}; 106 struct eeprom_table_record err_rec; 107 108 if ((address >= adev->gmc.mc_vram_size) || 109 (address >= RAS_UMC_INJECT_ADDR_LIMIT)) { 110 dev_warn(adev->dev, 111 "RAS WARN: input address 0x%llx is invalid.\n", 112 address); 113 return -EINVAL; 114 } 115 116 if (amdgpu_ras_check_bad_page(adev, address)) { 117 dev_warn(adev->dev, 118 "RAS WARN: 0x%llx has already been marked as bad page!\n", 119 address); 120 return 0; 121 } 122 123 memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); 124 125 err_rec.address = address; 126 err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT; 127 err_rec.ts = (uint64_t)ktime_get_real_seconds(); 128 err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; 129 130 err_data.err_addr = &err_rec; 131 err_data.err_addr_cnt = 1; 132 133 if (amdgpu_bad_page_threshold != 0) { 134 amdgpu_ras_add_bad_pages(adev, err_data.err_addr, 135 err_data.err_addr_cnt); 136 amdgpu_ras_save_bad_pages(adev); 137 } 138 139 dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n"); 140 dev_warn(adev->dev, "Clear EEPROM:\n"); 141 dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n"); 142 143 return 0; 144 } 145 146 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, 147 size_t size, loff_t *pos) 148 { 149 struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private; 150 struct ras_query_if info = { 151 .head = obj->head, 152 }; 153 ssize_t s; 154 char val[128]; 155 156 if (amdgpu_ras_query_error_status(obj->adev, &info)) 157 return -EINVAL; 158 159 s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", 160 "ue", info.ue_count, 161 "ce", info.ce_count); 162 if (*pos >= s) 163 return 0; 164 165 s -= *pos; 166 s = min_t(u64, s, size); 167 168 169 if (copy_to_user(buf, &val[*pos], s)) 170 return -EINVAL; 171 172 *pos += s; 173 174 return s; 175 } 176 177 static const struct file_operations amdgpu_ras_debugfs_ops = { 178 .owner = THIS_MODULE, 179 .read = amdgpu_ras_debugfs_read, 180 .write = NULL, 181 .llseek = default_llseek 182 }; 183 184 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id) 185 { 186 int i; 187 188 for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) { 189 *block_id = i; 190 if (strcmp(name, ras_block_str(i)) == 0) 191 return 0; 192 } 193 return -EINVAL; 194 } 195 196 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, 197 const char __user *buf, size_t size, 198 loff_t *pos, struct ras_debug_if *data) 199 { 200 ssize_t s = min_t(u64, 64, size); 201 char str[65]; 202 char block_name[33]; 203 char err[9] = "ue"; 204 int op = -1; 205 int block_id; 206 uint32_t sub_block; 207 u64 address, value; 208 209 if (*pos) 210 return -EINVAL; 211 *pos = size; 212 213 memset(str, 0, sizeof(str)); 214 memset(data, 0, sizeof(*data)); 215 216 if (copy_from_user(str, buf, s)) 217 return -EINVAL; 218 219 if (sscanf(str, "disable %32s", block_name) == 1) 220 op = 0; 221 else if (sscanf(str, "enable %32s %8s", block_name, err) == 2) 222 op = 1; 223 else if (sscanf(str, "inject %32s %8s", block_name, err) == 2) 224 op = 2; 225 else if (strstr(str, "retire_page") != NULL) 226 op = 3; 227 else if (str[0] && str[1] && str[2] && str[3]) 228 /* ascii string, but commands are not matched. */ 229 return -EINVAL; 230 231 if (op != -1) { 232 if (op == 3) { 233 if (sscanf(str, "%*s 0x%llx", &address) != 1 && 234 sscanf(str, "%*s %llu", &address) != 1) 235 return -EINVAL; 236 237 data->op = op; 238 data->inject.address = address; 239 240 return 0; 241 } 242 243 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id)) 244 return -EINVAL; 245 246 data->head.block = block_id; 247 /* only ue and ce errors are supported */ 248 if (!memcmp("ue", err, 2)) 249 data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; 250 else if (!memcmp("ce", err, 2)) 251 data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE; 252 else 253 return -EINVAL; 254 255 data->op = op; 256 257 if (op == 2) { 258 if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", 259 &sub_block, &address, &value) != 3 && 260 sscanf(str, "%*s %*s %*s %u %llu %llu", 261 &sub_block, &address, &value) != 3) 262 return -EINVAL; 263 data->head.sub_block_index = sub_block; 264 data->inject.address = address; 265 data->inject.value = value; 266 } 267 } else { 268 if (size < sizeof(*data)) 269 return -EINVAL; 270 271 if (copy_from_user(data, buf, sizeof(*data))) 272 return -EINVAL; 273 } 274 275 return 0; 276 } 277 278 /** 279 * DOC: AMDGPU RAS debugfs control interface 280 * 281 * The control interface accepts struct ras_debug_if which has two members. 282 * 283 * First member: ras_debug_if::head or ras_debug_if::inject. 284 * 285 * head is used to indicate which IP block will be under control. 286 * 287 * head has four members, they are block, type, sub_block_index, name. 288 * block: which IP will be under control. 289 * type: what kind of error will be enabled/disabled/injected. 290 * sub_block_index: some IPs have subcomponets. say, GFX, sDMA. 291 * name: the name of IP. 292 * 293 * inject has two more members than head, they are address, value. 294 * As their names indicate, inject operation will write the 295 * value to the address. 296 * 297 * The second member: struct ras_debug_if::op. 298 * It has three kinds of operations. 299 * 300 * - 0: disable RAS on the block. Take ::head as its data. 301 * - 1: enable RAS on the block. Take ::head as its data. 302 * - 2: inject errors on the block. Take ::inject as its data. 303 * 304 * How to use the interface? 305 * 306 * In a program 307 * 308 * Copy the struct ras_debug_if in your code and initialize it. 309 * Write the struct to the control interface. 310 * 311 * From shell 312 * 313 * .. code-block:: bash 314 * 315 * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl 316 * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl 317 * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl 318 * 319 * Where N, is the card which you want to affect. 320 * 321 * "disable" requires only the block. 322 * "enable" requires the block and error type. 323 * "inject" requires the block, error type, address, and value. 324 * The block is one of: umc, sdma, gfx, etc. 325 * see ras_block_string[] for details 326 * The error type is one of: ue, ce, where, 327 * ue is multi-uncorrectable 328 * ce is single-correctable 329 * The sub-block is a the sub-block index, pass 0 if there is no sub-block. 330 * The address and value are hexadecimal numbers, leading 0x is optional. 331 * 332 * For instance, 333 * 334 * .. code-block:: bash 335 * 336 * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl 337 * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl 338 * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl 339 * 340 * How to check the result of the operation? 341 * 342 * To check disable/enable, see "ras" features at, 343 * /sys/class/drm/card[0/1/2...]/device/ras/features 344 * 345 * To check inject, see the corresponding error count at, 346 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count 347 * 348 * .. note:: 349 * Operations are only allowed on blocks which are supported. 350 * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask 351 * to see which blocks support RAS on a particular asic. 352 * 353 */ 354 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf, 355 size_t size, loff_t *pos) 356 { 357 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; 358 struct ras_debug_if data; 359 int ret = 0; 360 361 if (!amdgpu_ras_get_error_query_ready(adev)) { 362 dev_warn(adev->dev, "RAS WARN: error injection " 363 "currently inaccessible\n"); 364 return size; 365 } 366 367 ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data); 368 if (ret) 369 return -EINVAL; 370 371 if (data.op == 3) { 372 ret = amdgpu_reserve_page_direct(adev, data.inject.address); 373 if (!ret) 374 return size; 375 else 376 return ret; 377 } 378 379 if (!amdgpu_ras_is_supported(adev, data.head.block)) 380 return -EINVAL; 381 382 switch (data.op) { 383 case 0: 384 ret = amdgpu_ras_feature_enable(adev, &data.head, 0); 385 break; 386 case 1: 387 ret = amdgpu_ras_feature_enable(adev, &data.head, 1); 388 break; 389 case 2: 390 if ((data.inject.address >= adev->gmc.mc_vram_size) || 391 (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) { 392 dev_warn(adev->dev, "RAS WARN: input address " 393 "0x%llx is invalid.", 394 data.inject.address); 395 ret = -EINVAL; 396 break; 397 } 398 399 /* umc ce/ue error injection for a bad page is not allowed */ 400 if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && 401 amdgpu_ras_check_bad_page(adev, data.inject.address)) { 402 dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked " 403 "as bad before error injection!\n", 404 data.inject.address); 405 break; 406 } 407 408 /* data.inject.address is offset instead of absolute gpu address */ 409 ret = amdgpu_ras_error_inject(adev, &data.inject); 410 break; 411 default: 412 ret = -EINVAL; 413 break; 414 } 415 416 if (ret) 417 return -EINVAL; 418 419 return size; 420 } 421 422 /** 423 * DOC: AMDGPU RAS debugfs EEPROM table reset interface 424 * 425 * Some boards contain an EEPROM which is used to persistently store a list of 426 * bad pages which experiences ECC errors in vram. This interface provides 427 * a way to reset the EEPROM, e.g., after testing error injection. 428 * 429 * Usage: 430 * 431 * .. code-block:: bash 432 * 433 * echo 1 > ../ras/ras_eeprom_reset 434 * 435 * will reset EEPROM table to 0 entries. 436 * 437 */ 438 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf, 439 size_t size, loff_t *pos) 440 { 441 struct amdgpu_device *adev = 442 (struct amdgpu_device *)file_inode(f)->i_private; 443 int ret; 444 445 ret = amdgpu_ras_eeprom_reset_table( 446 &(amdgpu_ras_get_context(adev)->eeprom_control)); 447 448 if (ret == 1) { 449 amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS; 450 return size; 451 } else { 452 return -EIO; 453 } 454 } 455 456 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { 457 .owner = THIS_MODULE, 458 .read = NULL, 459 .write = amdgpu_ras_debugfs_ctrl_write, 460 .llseek = default_llseek 461 }; 462 463 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = { 464 .owner = THIS_MODULE, 465 .read = NULL, 466 .write = amdgpu_ras_debugfs_eeprom_write, 467 .llseek = default_llseek 468 }; 469 470 /** 471 * DOC: AMDGPU RAS sysfs Error Count Interface 472 * 473 * It allows the user to read the error count for each IP block on the gpu through 474 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count 475 * 476 * It outputs the multiple lines which report the uncorrected (ue) and corrected 477 * (ce) error counts. 478 * 479 * The format of one line is below, 480 * 481 * [ce|ue]: count 482 * 483 * Example: 484 * 485 * .. code-block:: bash 486 * 487 * ue: 0 488 * ce: 1 489 * 490 */ 491 static ssize_t amdgpu_ras_sysfs_read(struct device *dev, 492 struct device_attribute *attr, char *buf) 493 { 494 struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr); 495 struct ras_query_if info = { 496 .head = obj->head, 497 }; 498 499 if (!amdgpu_ras_get_error_query_ready(obj->adev)) 500 return sysfs_emit(buf, "Query currently inaccessible\n"); 501 502 if (amdgpu_ras_query_error_status(obj->adev, &info)) 503 return -EINVAL; 504 505 506 if (obj->adev->asic_type == CHIP_ALDEBARAN) { 507 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) 508 DRM_WARN("Failed to reset error counter and error status"); 509 } 510 511 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, 512 "ce", info.ce_count); 513 } 514 515 /* obj begin */ 516 517 #define get_obj(obj) do { (obj)->use++; } while (0) 518 #define alive_obj(obj) ((obj)->use) 519 520 static inline void put_obj(struct ras_manager *obj) 521 { 522 if (obj && (--obj->use == 0)) 523 list_del(&obj->node); 524 if (obj && (obj->use < 0)) 525 DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name); 526 } 527 528 /* make one obj and return it. */ 529 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, 530 struct ras_common_if *head) 531 { 532 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 533 struct ras_manager *obj; 534 535 if (!adev->ras_features || !con) 536 return NULL; 537 538 if (head->block >= AMDGPU_RAS_BLOCK_COUNT) 539 return NULL; 540 541 obj = &con->objs[head->block]; 542 /* already exist. return obj? */ 543 if (alive_obj(obj)) 544 return NULL; 545 546 obj->head = *head; 547 obj->adev = adev; 548 list_add(&obj->node, &con->head); 549 get_obj(obj); 550 551 return obj; 552 } 553 554 /* return an obj equal to head, or the first when head is NULL */ 555 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, 556 struct ras_common_if *head) 557 { 558 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 559 struct ras_manager *obj; 560 int i; 561 562 if (!adev->ras_features || !con) 563 return NULL; 564 565 if (head) { 566 if (head->block >= AMDGPU_RAS_BLOCK_COUNT) 567 return NULL; 568 569 obj = &con->objs[head->block]; 570 571 if (alive_obj(obj)) { 572 WARN_ON(head->block != obj->head.block); 573 return obj; 574 } 575 } else { 576 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { 577 obj = &con->objs[i]; 578 if (alive_obj(obj)) { 579 WARN_ON(i != obj->head.block); 580 return obj; 581 } 582 } 583 } 584 585 return NULL; 586 } 587 /* obj end */ 588 589 static void amdgpu_ras_parse_status_code(struct amdgpu_device *adev, 590 const char* invoke_type, 591 const char* block_name, 592 enum ta_ras_status ret) 593 { 594 switch (ret) { 595 case TA_RAS_STATUS__SUCCESS: 596 return; 597 case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE: 598 dev_warn(adev->dev, 599 "RAS WARN: %s %s currently unavailable\n", 600 invoke_type, 601 block_name); 602 break; 603 default: 604 dev_err(adev->dev, 605 "RAS ERROR: %s %s error failed ret 0x%X\n", 606 invoke_type, 607 block_name, 608 ret); 609 } 610 } 611 612 /* feature ctl begin */ 613 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev, 614 struct ras_common_if *head) 615 { 616 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 617 618 return con->hw_supported & BIT(head->block); 619 } 620 621 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev, 622 struct ras_common_if *head) 623 { 624 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 625 626 return con->features & BIT(head->block); 627 } 628 629 /* 630 * if obj is not created, then create one. 631 * set feature enable flag. 632 */ 633 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, 634 struct ras_common_if *head, int enable) 635 { 636 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 637 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 638 639 /* If hardware does not support ras, then do not create obj. 640 * But if hardware support ras, we can create the obj. 641 * Ras framework checks con->hw_supported to see if it need do 642 * corresponding initialization. 643 * IP checks con->support to see if it need disable ras. 644 */ 645 if (!amdgpu_ras_is_feature_allowed(adev, head)) 646 return 0; 647 if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) 648 return 0; 649 650 if (enable) { 651 if (!obj) { 652 obj = amdgpu_ras_create_obj(adev, head); 653 if (!obj) 654 return -EINVAL; 655 } else { 656 /* In case we create obj somewhere else */ 657 get_obj(obj); 658 } 659 con->features |= BIT(head->block); 660 } else { 661 if (obj && amdgpu_ras_is_feature_enabled(adev, head)) { 662 con->features &= ~BIT(head->block); 663 put_obj(obj); 664 } 665 } 666 667 return 0; 668 } 669 670 /* wrapper of psp_ras_enable_features */ 671 int amdgpu_ras_feature_enable(struct amdgpu_device *adev, 672 struct ras_common_if *head, bool enable) 673 { 674 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 675 union ta_ras_cmd_input *info; 676 int ret; 677 678 if (!con) 679 return -EINVAL; 680 681 info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL); 682 if (!info) 683 return -ENOMEM; 684 685 if (!enable) { 686 info->disable_features = (struct ta_ras_disable_features_input) { 687 .block_id = amdgpu_ras_block_to_ta(head->block), 688 .error_type = amdgpu_ras_error_to_ta(head->type), 689 }; 690 } else { 691 info->enable_features = (struct ta_ras_enable_features_input) { 692 .block_id = amdgpu_ras_block_to_ta(head->block), 693 .error_type = amdgpu_ras_error_to_ta(head->type), 694 }; 695 } 696 697 /* Do not enable if it is not allowed. */ 698 WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head)); 699 /* Are we alerady in that state we are going to set? */ 700 if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) { 701 ret = 0; 702 goto out; 703 } 704 705 if (!amdgpu_ras_intr_triggered()) { 706 ret = psp_ras_enable_features(&adev->psp, info, enable); 707 if (ret) { 708 amdgpu_ras_parse_status_code(adev, 709 enable ? "enable":"disable", 710 ras_block_str(head->block), 711 (enum ta_ras_status)ret); 712 if (ret == TA_RAS_STATUS__RESET_NEEDED) 713 ret = -EAGAIN; 714 else 715 ret = -EINVAL; 716 717 goto out; 718 } 719 } 720 721 /* setup the obj */ 722 __amdgpu_ras_feature_enable(adev, head, enable); 723 ret = 0; 724 out: 725 kfree(info); 726 return ret; 727 } 728 729 /* Only used in device probe stage and called only once. */ 730 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, 731 struct ras_common_if *head, bool enable) 732 { 733 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 734 int ret; 735 736 if (!con) 737 return -EINVAL; 738 739 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { 740 if (enable) { 741 /* There is no harm to issue a ras TA cmd regardless of 742 * the currecnt ras state. 743 * If current state == target state, it will do nothing 744 * But sometimes it requests driver to reset and repost 745 * with error code -EAGAIN. 746 */ 747 ret = amdgpu_ras_feature_enable(adev, head, 1); 748 /* With old ras TA, we might fail to enable ras. 749 * Log it and just setup the object. 750 * TODO need remove this WA in the future. 751 */ 752 if (ret == -EINVAL) { 753 ret = __amdgpu_ras_feature_enable(adev, head, 1); 754 if (!ret) 755 dev_info(adev->dev, 756 "RAS INFO: %s setup object\n", 757 ras_block_str(head->block)); 758 } 759 } else { 760 /* setup the object then issue a ras TA disable cmd.*/ 761 ret = __amdgpu_ras_feature_enable(adev, head, 1); 762 if (ret) 763 return ret; 764 765 /* gfx block ras dsiable cmd must send to ras-ta */ 766 if (head->block == AMDGPU_RAS_BLOCK__GFX) 767 con->features |= BIT(head->block); 768 769 ret = amdgpu_ras_feature_enable(adev, head, 0); 770 771 /* clean gfx block ras features flag */ 772 if (adev->ras_features && head->block == AMDGPU_RAS_BLOCK__GFX) 773 con->features &= ~BIT(head->block); 774 } 775 } else 776 ret = amdgpu_ras_feature_enable(adev, head, enable); 777 778 return ret; 779 } 780 781 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev, 782 bool bypass) 783 { 784 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 785 struct ras_manager *obj, *tmp; 786 787 list_for_each_entry_safe(obj, tmp, &con->head, node) { 788 /* bypass psp. 789 * aka just release the obj and corresponding flags 790 */ 791 if (bypass) { 792 if (__amdgpu_ras_feature_enable(adev, &obj->head, 0)) 793 break; 794 } else { 795 if (amdgpu_ras_feature_enable(adev, &obj->head, 0)) 796 break; 797 } 798 } 799 800 return con->features; 801 } 802 803 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, 804 bool bypass) 805 { 806 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 807 int ras_block_count = AMDGPU_RAS_BLOCK_COUNT; 808 int i; 809 const enum amdgpu_ras_error_type default_ras_type = 810 AMDGPU_RAS_ERROR__NONE; 811 812 for (i = 0; i < ras_block_count; i++) { 813 struct ras_common_if head = { 814 .block = i, 815 .type = default_ras_type, 816 .sub_block_index = 0, 817 }; 818 strcpy(head.name, ras_block_str(i)); 819 if (bypass) { 820 /* 821 * bypass psp. vbios enable ras for us. 822 * so just create the obj 823 */ 824 if (__amdgpu_ras_feature_enable(adev, &head, 1)) 825 break; 826 } else { 827 if (amdgpu_ras_feature_enable(adev, &head, 1)) 828 break; 829 } 830 } 831 832 return con->features; 833 } 834 /* feature ctl end */ 835 836 /* query/inject/cure begin */ 837 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, 838 struct ras_query_if *info) 839 { 840 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 841 struct ras_err_data err_data = {0, 0, 0, NULL}; 842 int i; 843 844 if (!obj) 845 return -EINVAL; 846 847 switch (info->head.block) { 848 case AMDGPU_RAS_BLOCK__UMC: 849 if (adev->umc.ras_funcs && 850 adev->umc.ras_funcs->query_ras_error_count) 851 adev->umc.ras_funcs->query_ras_error_count(adev, &err_data); 852 /* umc query_ras_error_address is also responsible for clearing 853 * error status 854 */ 855 if (adev->umc.ras_funcs && 856 adev->umc.ras_funcs->query_ras_error_address) 857 adev->umc.ras_funcs->query_ras_error_address(adev, &err_data); 858 break; 859 case AMDGPU_RAS_BLOCK__SDMA: 860 if (adev->sdma.funcs->query_ras_error_count) { 861 for (i = 0; i < adev->sdma.num_instances; i++) 862 adev->sdma.funcs->query_ras_error_count(adev, i, 863 &err_data); 864 } 865 break; 866 case AMDGPU_RAS_BLOCK__GFX: 867 if (adev->gfx.ras_funcs && 868 adev->gfx.ras_funcs->query_ras_error_count) 869 adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data); 870 871 if (adev->gfx.ras_funcs && 872 adev->gfx.ras_funcs->query_ras_error_status) 873 adev->gfx.ras_funcs->query_ras_error_status(adev); 874 break; 875 case AMDGPU_RAS_BLOCK__MMHUB: 876 if (adev->mmhub.ras_funcs && 877 adev->mmhub.ras_funcs->query_ras_error_count) 878 adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data); 879 880 if (adev->mmhub.ras_funcs && 881 adev->mmhub.ras_funcs->query_ras_error_status) 882 adev->mmhub.ras_funcs->query_ras_error_status(adev); 883 break; 884 case AMDGPU_RAS_BLOCK__PCIE_BIF: 885 if (adev->nbio.ras_funcs && 886 adev->nbio.ras_funcs->query_ras_error_count) 887 adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data); 888 break; 889 case AMDGPU_RAS_BLOCK__XGMI_WAFL: 890 if (adev->gmc.xgmi.ras_funcs && 891 adev->gmc.xgmi.ras_funcs->query_ras_error_count) 892 adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data); 893 break; 894 case AMDGPU_RAS_BLOCK__HDP: 895 if (adev->hdp.ras_funcs && 896 adev->hdp.ras_funcs->query_ras_error_count) 897 adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data); 898 break; 899 default: 900 break; 901 } 902 903 obj->err_data.ue_count += err_data.ue_count; 904 obj->err_data.ce_count += err_data.ce_count; 905 906 info->ue_count = obj->err_data.ue_count; 907 info->ce_count = obj->err_data.ce_count; 908 909 if (err_data.ce_count) { 910 if (adev->smuio.funcs && 911 adev->smuio.funcs->get_socket_id && 912 adev->smuio.funcs->get_die_id) { 913 dev_info(adev->dev, "socket: %d, die: %d " 914 "%ld correctable hardware errors " 915 "detected in %s block, no user " 916 "action is needed.\n", 917 adev->smuio.funcs->get_socket_id(adev), 918 adev->smuio.funcs->get_die_id(adev), 919 obj->err_data.ce_count, 920 ras_block_str(info->head.block)); 921 } else { 922 dev_info(adev->dev, "%ld correctable hardware errors " 923 "detected in %s block, no user " 924 "action is needed.\n", 925 obj->err_data.ce_count, 926 ras_block_str(info->head.block)); 927 } 928 } 929 if (err_data.ue_count) { 930 if (adev->smuio.funcs && 931 adev->smuio.funcs->get_socket_id && 932 adev->smuio.funcs->get_die_id) { 933 dev_info(adev->dev, "socket: %d, die: %d " 934 "%ld uncorrectable hardware errors " 935 "detected in %s block\n", 936 adev->smuio.funcs->get_socket_id(adev), 937 adev->smuio.funcs->get_die_id(adev), 938 obj->err_data.ue_count, 939 ras_block_str(info->head.block)); 940 } else { 941 dev_info(adev->dev, "%ld uncorrectable hardware errors " 942 "detected in %s block\n", 943 obj->err_data.ue_count, 944 ras_block_str(info->head.block)); 945 } 946 } 947 948 return 0; 949 } 950 951 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, 952 enum amdgpu_ras_block block) 953 { 954 if (!amdgpu_ras_is_supported(adev, block)) 955 return -EINVAL; 956 957 switch (block) { 958 case AMDGPU_RAS_BLOCK__GFX: 959 if (adev->gfx.ras_funcs && 960 adev->gfx.ras_funcs->reset_ras_error_count) 961 adev->gfx.ras_funcs->reset_ras_error_count(adev); 962 963 if (adev->gfx.ras_funcs && 964 adev->gfx.ras_funcs->reset_ras_error_status) 965 adev->gfx.ras_funcs->reset_ras_error_status(adev); 966 break; 967 case AMDGPU_RAS_BLOCK__MMHUB: 968 if (adev->mmhub.ras_funcs && 969 adev->mmhub.ras_funcs->reset_ras_error_count) 970 adev->mmhub.ras_funcs->reset_ras_error_count(adev); 971 break; 972 case AMDGPU_RAS_BLOCK__SDMA: 973 if (adev->sdma.funcs->reset_ras_error_count) 974 adev->sdma.funcs->reset_ras_error_count(adev); 975 break; 976 case AMDGPU_RAS_BLOCK__HDP: 977 if (adev->hdp.ras_funcs && 978 adev->hdp.ras_funcs->reset_ras_error_count) 979 adev->hdp.ras_funcs->reset_ras_error_count(adev); 980 break; 981 default: 982 break; 983 } 984 985 return 0; 986 } 987 988 /* Trigger XGMI/WAFL error */ 989 static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, 990 struct ta_ras_trigger_error_input *block_info) 991 { 992 int ret; 993 994 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) 995 dev_warn(adev->dev, "Failed to disallow df cstate"); 996 997 if (amdgpu_dpm_allow_xgmi_power_down(adev, false)) 998 dev_warn(adev->dev, "Failed to disallow XGMI power down"); 999 1000 ret = psp_ras_trigger_error(&adev->psp, block_info); 1001 1002 if (amdgpu_ras_intr_triggered()) 1003 return ret; 1004 1005 if (amdgpu_dpm_allow_xgmi_power_down(adev, true)) 1006 dev_warn(adev->dev, "Failed to allow XGMI power down"); 1007 1008 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) 1009 dev_warn(adev->dev, "Failed to allow df cstate"); 1010 1011 return ret; 1012 } 1013 1014 /* wrapper of psp_ras_trigger_error */ 1015 int amdgpu_ras_error_inject(struct amdgpu_device *adev, 1016 struct ras_inject_if *info) 1017 { 1018 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 1019 struct ta_ras_trigger_error_input block_info = { 1020 .block_id = amdgpu_ras_block_to_ta(info->head.block), 1021 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type), 1022 .sub_block_index = info->head.sub_block_index, 1023 .address = info->address, 1024 .value = info->value, 1025 }; 1026 int ret = 0; 1027 1028 if (!obj) 1029 return -EINVAL; 1030 1031 /* Calculate XGMI relative offset */ 1032 if (adev->gmc.xgmi.num_physical_nodes > 1) { 1033 block_info.address = 1034 amdgpu_xgmi_get_relative_phy_addr(adev, 1035 block_info.address); 1036 } 1037 1038 switch (info->head.block) { 1039 case AMDGPU_RAS_BLOCK__GFX: 1040 if (adev->gfx.ras_funcs && 1041 adev->gfx.ras_funcs->ras_error_inject) 1042 ret = adev->gfx.ras_funcs->ras_error_inject(adev, info); 1043 else 1044 ret = -EINVAL; 1045 break; 1046 case AMDGPU_RAS_BLOCK__UMC: 1047 case AMDGPU_RAS_BLOCK__SDMA: 1048 case AMDGPU_RAS_BLOCK__MMHUB: 1049 case AMDGPU_RAS_BLOCK__PCIE_BIF: 1050 ret = psp_ras_trigger_error(&adev->psp, &block_info); 1051 break; 1052 case AMDGPU_RAS_BLOCK__XGMI_WAFL: 1053 ret = amdgpu_ras_error_inject_xgmi(adev, &block_info); 1054 break; 1055 default: 1056 dev_info(adev->dev, "%s error injection is not supported yet\n", 1057 ras_block_str(info->head.block)); 1058 ret = -EINVAL; 1059 } 1060 1061 amdgpu_ras_parse_status_code(adev, 1062 "inject", 1063 ras_block_str(info->head.block), 1064 (enum ta_ras_status)ret); 1065 1066 return ret; 1067 } 1068 1069 /* get the total error counts on all IPs */ 1070 unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, 1071 bool is_ce) 1072 { 1073 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1074 struct ras_manager *obj; 1075 struct ras_err_data data = {0, 0}; 1076 1077 if (!adev->ras_features || !con) 1078 return 0; 1079 1080 list_for_each_entry(obj, &con->head, node) { 1081 struct ras_query_if info = { 1082 .head = obj->head, 1083 }; 1084 1085 if (amdgpu_ras_query_error_status(adev, &info)) 1086 return 0; 1087 1088 data.ce_count += info.ce_count; 1089 data.ue_count += info.ue_count; 1090 } 1091 1092 return is_ce ? data.ce_count : data.ue_count; 1093 } 1094 /* query/inject/cure end */ 1095 1096 1097 /* sysfs begin */ 1098 1099 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, 1100 struct ras_badpage **bps, unsigned int *count); 1101 1102 static char *amdgpu_ras_badpage_flags_str(unsigned int flags) 1103 { 1104 switch (flags) { 1105 case AMDGPU_RAS_RETIRE_PAGE_RESERVED: 1106 return "R"; 1107 case AMDGPU_RAS_RETIRE_PAGE_PENDING: 1108 return "P"; 1109 case AMDGPU_RAS_RETIRE_PAGE_FAULT: 1110 default: 1111 return "F"; 1112 } 1113 } 1114 1115 /** 1116 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface 1117 * 1118 * It allows user to read the bad pages of vram on the gpu through 1119 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages 1120 * 1121 * It outputs multiple lines, and each line stands for one gpu page. 1122 * 1123 * The format of one line is below, 1124 * gpu pfn : gpu page size : flags 1125 * 1126 * gpu pfn and gpu page size are printed in hex format. 1127 * flags can be one of below character, 1128 * 1129 * R: reserved, this gpu page is reserved and not able to use. 1130 * 1131 * P: pending for reserve, this gpu page is marked as bad, will be reserved 1132 * in next window of page_reserve. 1133 * 1134 * F: unable to reserve. this gpu page can't be reserved due to some reasons. 1135 * 1136 * Examples: 1137 * 1138 * .. code-block:: bash 1139 * 1140 * 0x00000001 : 0x00001000 : R 1141 * 0x00000002 : 0x00001000 : P 1142 * 1143 */ 1144 1145 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, 1146 struct kobject *kobj, struct bin_attribute *attr, 1147 char *buf, loff_t ppos, size_t count) 1148 { 1149 struct amdgpu_ras *con = 1150 container_of(attr, struct amdgpu_ras, badpages_attr); 1151 struct amdgpu_device *adev = con->adev; 1152 const unsigned int element_size = 1153 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1; 1154 unsigned int start = div64_ul(ppos + element_size - 1, element_size); 1155 unsigned int end = div64_ul(ppos + count - 1, element_size); 1156 ssize_t s = 0; 1157 struct ras_badpage *bps = NULL; 1158 unsigned int bps_count = 0; 1159 1160 memset(buf, 0, count); 1161 1162 if (amdgpu_ras_badpages_read(adev, &bps, &bps_count)) 1163 return 0; 1164 1165 for (; start < end && start < bps_count; start++) 1166 s += scnprintf(&buf[s], element_size + 1, 1167 "0x%08x : 0x%08x : %1s\n", 1168 bps[start].bp, 1169 bps[start].size, 1170 amdgpu_ras_badpage_flags_str(bps[start].flags)); 1171 1172 kfree(bps); 1173 1174 return s; 1175 } 1176 1177 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, 1178 struct device_attribute *attr, char *buf) 1179 { 1180 struct amdgpu_ras *con = 1181 container_of(attr, struct amdgpu_ras, features_attr); 1182 1183 return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features); 1184 } 1185 1186 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) 1187 { 1188 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1189 1190 sysfs_remove_file_from_group(&adev->dev->kobj, 1191 &con->badpages_attr.attr, 1192 RAS_FS_NAME); 1193 } 1194 1195 static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) 1196 { 1197 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1198 struct attribute *attrs[] = { 1199 &con->features_attr.attr, 1200 NULL 1201 }; 1202 struct attribute_group group = { 1203 .name = RAS_FS_NAME, 1204 .attrs = attrs, 1205 }; 1206 1207 sysfs_remove_group(&adev->dev->kobj, &group); 1208 1209 return 0; 1210 } 1211 1212 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, 1213 struct ras_fs_if *head) 1214 { 1215 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); 1216 1217 if (!obj || obj->attr_inuse) 1218 return -EINVAL; 1219 1220 get_obj(obj); 1221 1222 memcpy(obj->fs_data.sysfs_name, 1223 head->sysfs_name, 1224 sizeof(obj->fs_data.sysfs_name)); 1225 1226 obj->sysfs_attr = (struct device_attribute){ 1227 .attr = { 1228 .name = obj->fs_data.sysfs_name, 1229 .mode = S_IRUGO, 1230 }, 1231 .show = amdgpu_ras_sysfs_read, 1232 }; 1233 sysfs_attr_init(&obj->sysfs_attr.attr); 1234 1235 if (sysfs_add_file_to_group(&adev->dev->kobj, 1236 &obj->sysfs_attr.attr, 1237 RAS_FS_NAME)) { 1238 put_obj(obj); 1239 return -EINVAL; 1240 } 1241 1242 obj->attr_inuse = 1; 1243 1244 return 0; 1245 } 1246 1247 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, 1248 struct ras_common_if *head) 1249 { 1250 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1251 1252 if (!obj || !obj->attr_inuse) 1253 return -EINVAL; 1254 1255 sysfs_remove_file_from_group(&adev->dev->kobj, 1256 &obj->sysfs_attr.attr, 1257 RAS_FS_NAME); 1258 obj->attr_inuse = 0; 1259 put_obj(obj); 1260 1261 return 0; 1262 } 1263 1264 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) 1265 { 1266 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1267 struct ras_manager *obj, *tmp; 1268 1269 list_for_each_entry_safe(obj, tmp, &con->head, node) { 1270 amdgpu_ras_sysfs_remove(adev, &obj->head); 1271 } 1272 1273 if (amdgpu_bad_page_threshold != 0) 1274 amdgpu_ras_sysfs_remove_bad_page_node(adev); 1275 1276 amdgpu_ras_sysfs_remove_feature_node(adev); 1277 1278 return 0; 1279 } 1280 /* sysfs end */ 1281 1282 /** 1283 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors 1284 * 1285 * Normally when there is an uncorrectable error, the driver will reset 1286 * the GPU to recover. However, in the event of an unrecoverable error, 1287 * the driver provides an interface to reboot the system automatically 1288 * in that event. 1289 * 1290 * The following file in debugfs provides that interface: 1291 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot 1292 * 1293 * Usage: 1294 * 1295 * .. code-block:: bash 1296 * 1297 * echo true > .../ras/auto_reboot 1298 * 1299 */ 1300 /* debugfs begin */ 1301 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) 1302 { 1303 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1304 struct dentry *dir; 1305 struct drm_minor *minor = adev_to_drm(adev)->primary; 1306 1307 dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root); 1308 debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev, 1309 &amdgpu_ras_debugfs_ctrl_ops); 1310 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev, 1311 &amdgpu_ras_debugfs_eeprom_ops); 1312 debugfs_create_u32("bad_page_cnt_threshold", 0444, dir, 1313 &con->bad_page_cnt_threshold); 1314 1315 /* 1316 * After one uncorrectable error happens, usually GPU recovery will 1317 * be scheduled. But due to the known problem in GPU recovery failing 1318 * to bring GPU back, below interface provides one direct way to 1319 * user to reboot system automatically in such case within 1320 * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine 1321 * will never be called. 1322 */ 1323 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); 1324 1325 /* 1326 * User could set this not to clean up hardware's error count register 1327 * of RAS IPs during ras recovery. 1328 */ 1329 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir, 1330 &con->disable_ras_err_cnt_harvest); 1331 return dir; 1332 } 1333 1334 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, 1335 struct ras_fs_if *head, 1336 struct dentry *dir) 1337 { 1338 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); 1339 1340 if (!obj || !dir) 1341 return; 1342 1343 get_obj(obj); 1344 1345 memcpy(obj->fs_data.debugfs_name, 1346 head->debugfs_name, 1347 sizeof(obj->fs_data.debugfs_name)); 1348 1349 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir, 1350 obj, &amdgpu_ras_debugfs_ops); 1351 } 1352 1353 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) 1354 { 1355 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1356 struct dentry *dir; 1357 struct ras_manager *obj; 1358 struct ras_fs_if fs_info; 1359 1360 /* 1361 * it won't be called in resume path, no need to check 1362 * suspend and gpu reset status 1363 */ 1364 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) 1365 return; 1366 1367 dir = amdgpu_ras_debugfs_create_ctrl_node(adev); 1368 1369 list_for_each_entry(obj, &con->head, node) { 1370 if (amdgpu_ras_is_supported(adev, obj->head.block) && 1371 (obj->attr_inuse == 1)) { 1372 sprintf(fs_info.debugfs_name, "%s_err_inject", 1373 ras_block_str(obj->head.block)); 1374 fs_info.head = obj->head; 1375 amdgpu_ras_debugfs_create(adev, &fs_info, dir); 1376 } 1377 } 1378 } 1379 1380 /* debugfs end */ 1381 1382 /* ras fs */ 1383 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO, 1384 amdgpu_ras_sysfs_badpages_read, NULL, 0); 1385 static DEVICE_ATTR(features, S_IRUGO, 1386 amdgpu_ras_sysfs_features_read, NULL); 1387 static int amdgpu_ras_fs_init(struct amdgpu_device *adev) 1388 { 1389 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1390 struct attribute_group group = { 1391 .name = RAS_FS_NAME, 1392 }; 1393 struct attribute *attrs[] = { 1394 &con->features_attr.attr, 1395 NULL 1396 }; 1397 struct bin_attribute *bin_attrs[] = { 1398 NULL, 1399 NULL, 1400 }; 1401 int r; 1402 1403 /* add features entry */ 1404 con->features_attr = dev_attr_features; 1405 group.attrs = attrs; 1406 sysfs_attr_init(attrs[0]); 1407 1408 if (amdgpu_bad_page_threshold != 0) { 1409 /* add bad_page_features entry */ 1410 bin_attr_gpu_vram_bad_pages.private = NULL; 1411 con->badpages_attr = bin_attr_gpu_vram_bad_pages; 1412 bin_attrs[0] = &con->badpages_attr; 1413 group.bin_attrs = bin_attrs; 1414 sysfs_bin_attr_init(bin_attrs[0]); 1415 } 1416 1417 r = sysfs_create_group(&adev->dev->kobj, &group); 1418 if (r) 1419 dev_err(adev->dev, "Failed to create RAS sysfs group!"); 1420 1421 return 0; 1422 } 1423 1424 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) 1425 { 1426 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1427 struct ras_manager *con_obj, *ip_obj, *tmp; 1428 1429 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 1430 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { 1431 ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head); 1432 if (ip_obj) 1433 put_obj(ip_obj); 1434 } 1435 } 1436 1437 amdgpu_ras_sysfs_remove_all(adev); 1438 return 0; 1439 } 1440 /* ras fs end */ 1441 1442 /* ih begin */ 1443 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) 1444 { 1445 struct ras_ih_data *data = &obj->ih_data; 1446 struct amdgpu_iv_entry entry; 1447 int ret; 1448 struct ras_err_data err_data = {0, 0, 0, NULL}; 1449 1450 while (data->rptr != data->wptr) { 1451 rmb(); 1452 memcpy(&entry, &data->ring[data->rptr], 1453 data->element_size); 1454 1455 wmb(); 1456 data->rptr = (data->aligned_element_size + 1457 data->rptr) % data->ring_size; 1458 1459 /* Let IP handle its data, maybe we need get the output 1460 * from the callback to udpate the error type/count, etc 1461 */ 1462 if (data->cb) { 1463 ret = data->cb(obj->adev, &err_data, &entry); 1464 /* ue will trigger an interrupt, and in that case 1465 * we need do a reset to recovery the whole system. 1466 * But leave IP do that recovery, here we just dispatch 1467 * the error. 1468 */ 1469 if (ret == AMDGPU_RAS_SUCCESS) { 1470 /* these counts could be left as 0 if 1471 * some blocks do not count error number 1472 */ 1473 obj->err_data.ue_count += err_data.ue_count; 1474 obj->err_data.ce_count += err_data.ce_count; 1475 } 1476 } 1477 } 1478 } 1479 1480 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) 1481 { 1482 struct ras_ih_data *data = 1483 container_of(work, struct ras_ih_data, ih_work); 1484 struct ras_manager *obj = 1485 container_of(data, struct ras_manager, ih_data); 1486 1487 amdgpu_ras_interrupt_handler(obj); 1488 } 1489 1490 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, 1491 struct ras_dispatch_if *info) 1492 { 1493 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 1494 struct ras_ih_data *data = &obj->ih_data; 1495 1496 if (!obj) 1497 return -EINVAL; 1498 1499 if (data->inuse == 0) 1500 return 0; 1501 1502 /* Might be overflow... */ 1503 memcpy(&data->ring[data->wptr], info->entry, 1504 data->element_size); 1505 1506 wmb(); 1507 data->wptr = (data->aligned_element_size + 1508 data->wptr) % data->ring_size; 1509 1510 schedule_work(&data->ih_work); 1511 1512 return 0; 1513 } 1514 1515 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, 1516 struct ras_ih_if *info) 1517 { 1518 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 1519 struct ras_ih_data *data; 1520 1521 if (!obj) 1522 return -EINVAL; 1523 1524 data = &obj->ih_data; 1525 if (data->inuse == 0) 1526 return 0; 1527 1528 cancel_work_sync(&data->ih_work); 1529 1530 kfree(data->ring); 1531 memset(data, 0, sizeof(*data)); 1532 put_obj(obj); 1533 1534 return 0; 1535 } 1536 1537 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev, 1538 struct ras_ih_if *info) 1539 { 1540 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 1541 struct ras_ih_data *data; 1542 1543 if (!obj) { 1544 /* in case we registe the IH before enable ras feature */ 1545 obj = amdgpu_ras_create_obj(adev, &info->head); 1546 if (!obj) 1547 return -EINVAL; 1548 } else 1549 get_obj(obj); 1550 1551 data = &obj->ih_data; 1552 /* add the callback.etc */ 1553 *data = (struct ras_ih_data) { 1554 .inuse = 0, 1555 .cb = info->cb, 1556 .element_size = sizeof(struct amdgpu_iv_entry), 1557 .rptr = 0, 1558 .wptr = 0, 1559 }; 1560 1561 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler); 1562 1563 data->aligned_element_size = ALIGN(data->element_size, 8); 1564 /* the ring can store 64 iv entries. */ 1565 data->ring_size = 64 * data->aligned_element_size; 1566 data->ring = kmalloc(data->ring_size, GFP_KERNEL); 1567 if (!data->ring) { 1568 put_obj(obj); 1569 return -ENOMEM; 1570 } 1571 1572 /* IH is ready */ 1573 data->inuse = 1; 1574 1575 return 0; 1576 } 1577 1578 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev) 1579 { 1580 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1581 struct ras_manager *obj, *tmp; 1582 1583 list_for_each_entry_safe(obj, tmp, &con->head, node) { 1584 struct ras_ih_if info = { 1585 .head = obj->head, 1586 }; 1587 amdgpu_ras_interrupt_remove_handler(adev, &info); 1588 } 1589 1590 return 0; 1591 } 1592 /* ih end */ 1593 1594 /* traversal all IPs except NBIO to query error counter */ 1595 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) 1596 { 1597 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1598 struct ras_manager *obj; 1599 1600 if (!adev->ras_features || !con) 1601 return; 1602 1603 list_for_each_entry(obj, &con->head, node) { 1604 struct ras_query_if info = { 1605 .head = obj->head, 1606 }; 1607 1608 /* 1609 * PCIE_BIF IP has one different isr by ras controller 1610 * interrupt, the specific ras counter query will be 1611 * done in that isr. So skip such block from common 1612 * sync flood interrupt isr calling. 1613 */ 1614 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF) 1615 continue; 1616 1617 amdgpu_ras_query_error_status(adev, &info); 1618 } 1619 } 1620 1621 /* Parse RdRspStatus and WrRspStatus */ 1622 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, 1623 struct ras_query_if *info) 1624 { 1625 /* 1626 * Only two block need to query read/write 1627 * RspStatus at current state 1628 */ 1629 switch (info->head.block) { 1630 case AMDGPU_RAS_BLOCK__GFX: 1631 if (adev->gfx.ras_funcs && 1632 adev->gfx.ras_funcs->query_ras_error_status) 1633 adev->gfx.ras_funcs->query_ras_error_status(adev); 1634 break; 1635 case AMDGPU_RAS_BLOCK__MMHUB: 1636 if (adev->mmhub.ras_funcs && 1637 adev->mmhub.ras_funcs->query_ras_error_status) 1638 adev->mmhub.ras_funcs->query_ras_error_status(adev); 1639 break; 1640 default: 1641 break; 1642 } 1643 } 1644 1645 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) 1646 { 1647 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1648 struct ras_manager *obj; 1649 1650 if (!adev->ras_features || !con) 1651 return; 1652 1653 list_for_each_entry(obj, &con->head, node) { 1654 struct ras_query_if info = { 1655 .head = obj->head, 1656 }; 1657 1658 amdgpu_ras_error_status_query(adev, &info); 1659 } 1660 } 1661 1662 /* recovery begin */ 1663 1664 /* return 0 on success. 1665 * caller need free bps. 1666 */ 1667 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, 1668 struct ras_badpage **bps, unsigned int *count) 1669 { 1670 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1671 struct ras_err_handler_data *data; 1672 int i = 0; 1673 int ret = 0, status; 1674 1675 if (!con || !con->eh_data || !bps || !count) 1676 return -EINVAL; 1677 1678 mutex_lock(&con->recovery_lock); 1679 data = con->eh_data; 1680 if (!data || data->count == 0) { 1681 *bps = NULL; 1682 ret = -EINVAL; 1683 goto out; 1684 } 1685 1686 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL); 1687 if (!*bps) { 1688 ret = -ENOMEM; 1689 goto out; 1690 } 1691 1692 for (; i < data->count; i++) { 1693 (*bps)[i] = (struct ras_badpage){ 1694 .bp = data->bps[i].retired_page, 1695 .size = AMDGPU_GPU_PAGE_SIZE, 1696 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, 1697 }; 1698 status = amdgpu_vram_mgr_query_page_status( 1699 ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM), 1700 data->bps[i].retired_page); 1701 if (status == -EBUSY) 1702 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; 1703 else if (status == -ENOENT) 1704 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; 1705 } 1706 1707 *count = data->count; 1708 out: 1709 mutex_unlock(&con->recovery_lock); 1710 return ret; 1711 } 1712 1713 static void amdgpu_ras_do_recovery(struct work_struct *work) 1714 { 1715 struct amdgpu_ras *ras = 1716 container_of(work, struct amdgpu_ras, recovery_work); 1717 struct amdgpu_device *remote_adev = NULL; 1718 struct amdgpu_device *adev = ras->adev; 1719 struct list_head device_list, *device_list_handle = NULL; 1720 1721 if (!ras->disable_ras_err_cnt_harvest) { 1722 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 1723 1724 /* Build list of devices to query RAS related errors */ 1725 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { 1726 device_list_handle = &hive->device_list; 1727 } else { 1728 INIT_LIST_HEAD(&device_list); 1729 list_add_tail(&adev->gmc.xgmi.head, &device_list); 1730 device_list_handle = &device_list; 1731 } 1732 1733 list_for_each_entry(remote_adev, 1734 device_list_handle, gmc.xgmi.head) { 1735 amdgpu_ras_query_err_status(remote_adev); 1736 amdgpu_ras_log_on_err_counter(remote_adev); 1737 } 1738 1739 amdgpu_put_xgmi_hive(hive); 1740 } 1741 1742 if (amdgpu_device_should_recover_gpu(ras->adev)) 1743 amdgpu_device_gpu_recover(ras->adev, NULL); 1744 atomic_set(&ras->in_recovery, 0); 1745 } 1746 1747 /* alloc/realloc bps array */ 1748 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, 1749 struct ras_err_handler_data *data, int pages) 1750 { 1751 unsigned int old_space = data->count + data->space_left; 1752 unsigned int new_space = old_space + pages; 1753 unsigned int align_space = ALIGN(new_space, 512); 1754 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); 1755 1756 if (!bps) { 1757 kfree(bps); 1758 return -ENOMEM; 1759 } 1760 1761 if (data->bps) { 1762 memcpy(bps, data->bps, 1763 data->count * sizeof(*data->bps)); 1764 kfree(data->bps); 1765 } 1766 1767 data->bps = bps; 1768 data->space_left += align_space - old_space; 1769 return 0; 1770 } 1771 1772 /* it deal with vram only. */ 1773 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, 1774 struct eeprom_table_record *bps, int pages) 1775 { 1776 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1777 struct ras_err_handler_data *data; 1778 int ret = 0; 1779 uint32_t i; 1780 1781 if (!con || !con->eh_data || !bps || pages <= 0) 1782 return 0; 1783 1784 mutex_lock(&con->recovery_lock); 1785 data = con->eh_data; 1786 if (!data) 1787 goto out; 1788 1789 for (i = 0; i < pages; i++) { 1790 if (amdgpu_ras_check_bad_page_unlock(con, 1791 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) 1792 continue; 1793 1794 if (!data->space_left && 1795 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) { 1796 ret = -ENOMEM; 1797 goto out; 1798 } 1799 1800 amdgpu_vram_mgr_reserve_range( 1801 ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM), 1802 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT, 1803 AMDGPU_GPU_PAGE_SIZE); 1804 1805 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps)); 1806 data->count++; 1807 data->space_left--; 1808 } 1809 out: 1810 mutex_unlock(&con->recovery_lock); 1811 1812 return ret; 1813 } 1814 1815 /* 1816 * write error record array to eeprom, the function should be 1817 * protected by recovery_lock 1818 */ 1819 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) 1820 { 1821 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1822 struct ras_err_handler_data *data; 1823 struct amdgpu_ras_eeprom_control *control; 1824 int save_count; 1825 1826 if (!con || !con->eh_data) 1827 return 0; 1828 1829 control = &con->eeprom_control; 1830 data = con->eh_data; 1831 save_count = data->count - control->num_recs; 1832 /* only new entries are saved */ 1833 if (save_count > 0) { 1834 if (amdgpu_ras_eeprom_process_recods(control, 1835 &data->bps[control->num_recs], 1836 true, 1837 save_count)) { 1838 dev_err(adev->dev, "Failed to save EEPROM table data!"); 1839 return -EIO; 1840 } 1841 1842 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count); 1843 } 1844 1845 return 0; 1846 } 1847 1848 /* 1849 * read error record array in eeprom and reserve enough space for 1850 * storing new bad pages 1851 */ 1852 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) 1853 { 1854 struct amdgpu_ras_eeprom_control *control = 1855 &adev->psp.ras.ras->eeprom_control; 1856 struct eeprom_table_record *bps = NULL; 1857 int ret = 0; 1858 1859 /* no bad page record, skip eeprom access */ 1860 if (!control->num_recs || (amdgpu_bad_page_threshold == 0)) 1861 return ret; 1862 1863 bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL); 1864 if (!bps) 1865 return -ENOMEM; 1866 1867 if (amdgpu_ras_eeprom_process_recods(control, bps, false, 1868 control->num_recs)) { 1869 dev_err(adev->dev, "Failed to load EEPROM table records!"); 1870 ret = -EIO; 1871 goto out; 1872 } 1873 1874 ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs); 1875 1876 out: 1877 kfree(bps); 1878 return ret; 1879 } 1880 1881 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, 1882 uint64_t addr) 1883 { 1884 struct ras_err_handler_data *data = con->eh_data; 1885 int i; 1886 1887 addr >>= AMDGPU_GPU_PAGE_SHIFT; 1888 for (i = 0; i < data->count; i++) 1889 if (addr == data->bps[i].retired_page) 1890 return true; 1891 1892 return false; 1893 } 1894 1895 /* 1896 * check if an address belongs to bad page 1897 * 1898 * Note: this check is only for umc block 1899 */ 1900 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, 1901 uint64_t addr) 1902 { 1903 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1904 bool ret = false; 1905 1906 if (!con || !con->eh_data) 1907 return ret; 1908 1909 mutex_lock(&con->recovery_lock); 1910 ret = amdgpu_ras_check_bad_page_unlock(con, addr); 1911 mutex_unlock(&con->recovery_lock); 1912 return ret; 1913 } 1914 1915 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev, 1916 uint32_t max_length) 1917 { 1918 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1919 int tmp_threshold = amdgpu_bad_page_threshold; 1920 u64 val; 1921 1922 /* 1923 * Justification of value bad_page_cnt_threshold in ras structure 1924 * 1925 * Generally, -1 <= amdgpu_bad_page_threshold <= max record length 1926 * in eeprom, and introduce two scenarios accordingly. 1927 * 1928 * Bad page retirement enablement: 1929 * - If amdgpu_bad_page_threshold = -1, 1930 * bad_page_cnt_threshold = typical value by formula. 1931 * 1932 * - When the value from user is 0 < amdgpu_bad_page_threshold < 1933 * max record length in eeprom, use it directly. 1934 * 1935 * Bad page retirement disablement: 1936 * - If amdgpu_bad_page_threshold = 0, bad page retirement 1937 * functionality is disabled, and bad_page_cnt_threshold will 1938 * take no effect. 1939 */ 1940 1941 if (tmp_threshold < -1) 1942 tmp_threshold = -1; 1943 else if (tmp_threshold > max_length) 1944 tmp_threshold = max_length; 1945 1946 if (tmp_threshold == -1) { 1947 val = adev->gmc.mc_vram_size; 1948 do_div(val, RAS_BAD_PAGE_RATE); 1949 con->bad_page_cnt_threshold = min(lower_32_bits(val), 1950 max_length); 1951 } else { 1952 con->bad_page_cnt_threshold = tmp_threshold; 1953 } 1954 } 1955 1956 int amdgpu_ras_recovery_init(struct amdgpu_device *adev) 1957 { 1958 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1959 struct ras_err_handler_data **data; 1960 uint32_t max_eeprom_records_len = 0; 1961 bool exc_err_limit = false; 1962 int ret; 1963 1964 if (adev->ras_features && con) 1965 data = &con->eh_data; 1966 else 1967 return 0; 1968 1969 *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO); 1970 if (!*data) { 1971 ret = -ENOMEM; 1972 goto out; 1973 } 1974 1975 mutex_init(&con->recovery_lock); 1976 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); 1977 atomic_set(&con->in_recovery, 0); 1978 con->adev = adev; 1979 1980 max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length(); 1981 amdgpu_ras_validate_threshold(adev, max_eeprom_records_len); 1982 1983 /* Todo: During test the SMU might fail to read the eeprom through I2C 1984 * when the GPU is pending on XGMI reset during probe time 1985 * (Mostly after second bus reset), skip it now 1986 */ 1987 if (adev->gmc.xgmi.pending_reset) 1988 return 0; 1989 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit); 1990 /* 1991 * This calling fails when exc_err_limit is true or 1992 * ret != 0. 1993 */ 1994 if (exc_err_limit || ret) 1995 goto free; 1996 1997 if (con->eeprom_control.num_recs) { 1998 ret = amdgpu_ras_load_bad_pages(adev); 1999 if (ret) 2000 goto free; 2001 } 2002 2003 return 0; 2004 2005 free: 2006 kfree((*data)->bps); 2007 kfree(*data); 2008 con->eh_data = NULL; 2009 out: 2010 dev_warn(adev->dev, "Failed to initialize ras recovery!\n"); 2011 2012 /* 2013 * Except error threshold exceeding case, other failure cases in this 2014 * function would not fail amdgpu driver init. 2015 */ 2016 if (!exc_err_limit) 2017 ret = 0; 2018 else 2019 ret = -EINVAL; 2020 2021 return ret; 2022 } 2023 2024 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) 2025 { 2026 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2027 struct ras_err_handler_data *data = con->eh_data; 2028 2029 /* recovery_init failed to init it, fini is useless */ 2030 if (!data) 2031 return 0; 2032 2033 cancel_work_sync(&con->recovery_work); 2034 2035 mutex_lock(&con->recovery_lock); 2036 con->eh_data = NULL; 2037 kfree(data->bps); 2038 kfree(data); 2039 mutex_unlock(&con->recovery_lock); 2040 2041 return 0; 2042 } 2043 /* recovery end */ 2044 2045 /* return 0 if ras will reset gpu and repost.*/ 2046 int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, 2047 unsigned int block) 2048 { 2049 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 2050 2051 if (!ras) 2052 return -EINVAL; 2053 2054 ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET; 2055 return 0; 2056 } 2057 2058 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) 2059 { 2060 return adev->asic_type == CHIP_VEGA10 || 2061 adev->asic_type == CHIP_VEGA20 || 2062 adev->asic_type == CHIP_ARCTURUS || 2063 adev->asic_type == CHIP_ALDEBARAN || 2064 adev->asic_type == CHIP_SIENNA_CICHLID; 2065 } 2066 2067 /* 2068 * this is workaround for vega20 workstation sku, 2069 * force enable gfx ras, ignore vbios gfx ras flag 2070 * due to GC EDC can not write 2071 */ 2072 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev, 2073 uint32_t *hw_supported) 2074 { 2075 struct atom_context *ctx = adev->mode_info.atom_context; 2076 2077 if (!ctx) 2078 return; 2079 2080 if (strnstr(ctx->vbios_version, "D16406", 2081 sizeof(ctx->vbios_version))) 2082 *hw_supported |= (1 << AMDGPU_RAS_BLOCK__GFX); 2083 } 2084 2085 /* 2086 * check hardware's ras ability which will be saved in hw_supported. 2087 * if hardware does not support ras, we can skip some ras initializtion and 2088 * forbid some ras operations from IP. 2089 * if software itself, say boot parameter, limit the ras ability. We still 2090 * need allow IP do some limited operations, like disable. In such case, 2091 * we have to initialize ras as normal. but need check if operation is 2092 * allowed or not in each function. 2093 */ 2094 static void amdgpu_ras_check_supported(struct amdgpu_device *adev, 2095 uint32_t *hw_supported, uint32_t *supported) 2096 { 2097 *hw_supported = 0; 2098 *supported = 0; 2099 2100 if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw || 2101 !amdgpu_ras_asic_supported(adev)) 2102 return; 2103 2104 if (!adev->gmc.xgmi.connected_to_cpu) { 2105 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { 2106 dev_info(adev->dev, "MEM ECC is active.\n"); 2107 *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC | 2108 1 << AMDGPU_RAS_BLOCK__DF); 2109 } else { 2110 dev_info(adev->dev, "MEM ECC is not presented.\n"); 2111 } 2112 2113 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { 2114 dev_info(adev->dev, "SRAM ECC is active.\n"); 2115 *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC | 2116 1 << AMDGPU_RAS_BLOCK__DF); 2117 } else { 2118 dev_info(adev->dev, "SRAM ECC is not presented.\n"); 2119 } 2120 } else { 2121 /* driver only manages a few IP blocks RAS feature 2122 * when GPU is connected cpu through XGMI */ 2123 *hw_supported |= (1 << AMDGPU_RAS_BLOCK__GFX | 2124 1 << AMDGPU_RAS_BLOCK__SDMA | 2125 1 << AMDGPU_RAS_BLOCK__MMHUB); 2126 } 2127 2128 amdgpu_ras_get_quirks(adev, hw_supported); 2129 2130 /* hw_supported needs to be aligned with RAS block mask. */ 2131 *hw_supported &= AMDGPU_RAS_BLOCK_MASK; 2132 2133 *supported = amdgpu_ras_enable == 0 ? 2134 0 : *hw_supported & amdgpu_ras_mask; 2135 adev->ras_features = *supported; 2136 } 2137 2138 int amdgpu_ras_init(struct amdgpu_device *adev) 2139 { 2140 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2141 int r; 2142 2143 if (con) 2144 return 0; 2145 2146 con = kmalloc(sizeof(struct amdgpu_ras) + 2147 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT, 2148 GFP_KERNEL|__GFP_ZERO); 2149 if (!con) 2150 return -ENOMEM; 2151 2152 con->objs = (struct ras_manager *)(con + 1); 2153 2154 amdgpu_ras_set_context(adev, con); 2155 2156 amdgpu_ras_check_supported(adev, &con->hw_supported, 2157 &con->supported); 2158 if (!con->hw_supported || (adev->asic_type == CHIP_VEGA10)) { 2159 /* set gfx block ras context feature for VEGA20 Gaming 2160 * send ras disable cmd to ras ta during ras late init. 2161 */ 2162 if (!adev->ras_features && adev->asic_type == CHIP_VEGA20) { 2163 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); 2164 2165 return 0; 2166 } 2167 2168 r = 0; 2169 goto release_con; 2170 } 2171 2172 con->features = 0; 2173 INIT_LIST_HEAD(&con->head); 2174 /* Might need get this flag from vbios. */ 2175 con->flags = RAS_DEFAULT_FLAGS; 2176 2177 /* initialize nbio ras function ahead of any other 2178 * ras functions so hardware fatal error interrupt 2179 * can be enabled as early as possible */ 2180 switch (adev->asic_type) { 2181 case CHIP_VEGA20: 2182 case CHIP_ARCTURUS: 2183 case CHIP_ALDEBARAN: 2184 if (!adev->gmc.xgmi.connected_to_cpu) 2185 adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs; 2186 break; 2187 default: 2188 /* nbio ras is not available */ 2189 break; 2190 } 2191 2192 if (adev->nbio.ras_funcs && 2193 adev->nbio.ras_funcs->init_ras_controller_interrupt) { 2194 r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev); 2195 if (r) 2196 goto release_con; 2197 } 2198 2199 if (adev->nbio.ras_funcs && 2200 adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) { 2201 r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev); 2202 if (r) 2203 goto release_con; 2204 } 2205 2206 if (amdgpu_ras_fs_init(adev)) { 2207 r = -EINVAL; 2208 goto release_con; 2209 } 2210 2211 dev_info(adev->dev, "RAS INFO: ras initialized successfully, " 2212 "hardware ability[%x] ras_mask[%x]\n", 2213 con->hw_supported, con->supported); 2214 return 0; 2215 release_con: 2216 amdgpu_ras_set_context(adev, NULL); 2217 kfree(con); 2218 2219 return r; 2220 } 2221 2222 static int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev) 2223 { 2224 if (adev->gmc.xgmi.connected_to_cpu) 2225 return 1; 2226 return 0; 2227 } 2228 2229 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev, 2230 struct ras_common_if *ras_block) 2231 { 2232 struct ras_query_if info = { 2233 .head = *ras_block, 2234 }; 2235 2236 if (!amdgpu_persistent_edc_harvesting_supported(adev)) 2237 return 0; 2238 2239 if (amdgpu_ras_query_error_status(adev, &info) != 0) 2240 DRM_WARN("RAS init harvest failure"); 2241 2242 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0) 2243 DRM_WARN("RAS init harvest reset failure"); 2244 2245 return 0; 2246 } 2247 2248 /* helper function to handle common stuff in ip late init phase */ 2249 int amdgpu_ras_late_init(struct amdgpu_device *adev, 2250 struct ras_common_if *ras_block, 2251 struct ras_fs_if *fs_info, 2252 struct ras_ih_if *ih_info) 2253 { 2254 int r; 2255 2256 /* disable RAS feature per IP block if it is not supported */ 2257 if (!amdgpu_ras_is_supported(adev, ras_block->block)) { 2258 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); 2259 return 0; 2260 } 2261 2262 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); 2263 if (r) { 2264 if (r == -EAGAIN) { 2265 /* request gpu reset. will run again */ 2266 amdgpu_ras_request_reset_on_boot(adev, 2267 ras_block->block); 2268 return 0; 2269 } else if (adev->in_suspend || amdgpu_in_reset(adev)) { 2270 /* in resume phase, if fail to enable ras, 2271 * clean up all ras fs nodes, and disable ras */ 2272 goto cleanup; 2273 } else 2274 return r; 2275 } 2276 2277 /* check for errors on warm reset edc persisant supported ASIC */ 2278 amdgpu_persistent_edc_harvesting(adev, ras_block); 2279 2280 /* in resume phase, no need to create ras fs node */ 2281 if (adev->in_suspend || amdgpu_in_reset(adev)) 2282 return 0; 2283 2284 if (ih_info->cb) { 2285 r = amdgpu_ras_interrupt_add_handler(adev, ih_info); 2286 if (r) 2287 goto interrupt; 2288 } 2289 2290 r = amdgpu_ras_sysfs_create(adev, fs_info); 2291 if (r) 2292 goto sysfs; 2293 2294 return 0; 2295 cleanup: 2296 amdgpu_ras_sysfs_remove(adev, ras_block); 2297 sysfs: 2298 if (ih_info->cb) 2299 amdgpu_ras_interrupt_remove_handler(adev, ih_info); 2300 interrupt: 2301 amdgpu_ras_feature_enable(adev, ras_block, 0); 2302 return r; 2303 } 2304 2305 /* helper function to remove ras fs node and interrupt handler */ 2306 void amdgpu_ras_late_fini(struct amdgpu_device *adev, 2307 struct ras_common_if *ras_block, 2308 struct ras_ih_if *ih_info) 2309 { 2310 if (!ras_block || !ih_info) 2311 return; 2312 2313 amdgpu_ras_sysfs_remove(adev, ras_block); 2314 if (ih_info->cb) 2315 amdgpu_ras_interrupt_remove_handler(adev, ih_info); 2316 amdgpu_ras_feature_enable(adev, ras_block, 0); 2317 } 2318 2319 /* do some init work after IP late init as dependence. 2320 * and it runs in resume/gpu reset/booting up cases. 2321 */ 2322 void amdgpu_ras_resume(struct amdgpu_device *adev) 2323 { 2324 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2325 struct ras_manager *obj, *tmp; 2326 2327 if (!adev->ras_features || !con) { 2328 /* clean ras context for VEGA20 Gaming after send ras disable cmd */ 2329 amdgpu_release_ras_context(adev); 2330 2331 return; 2332 } 2333 2334 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { 2335 /* Set up all other IPs which are not implemented. There is a 2336 * tricky thing that IP's actual ras error type should be 2337 * MULTI_UNCORRECTABLE, but as driver does not handle it, so 2338 * ERROR_NONE make sense anyway. 2339 */ 2340 amdgpu_ras_enable_all_features(adev, 1); 2341 2342 /* We enable ras on all hw_supported block, but as boot 2343 * parameter might disable some of them and one or more IP has 2344 * not implemented yet. So we disable them on behalf. 2345 */ 2346 list_for_each_entry_safe(obj, tmp, &con->head, node) { 2347 if (!amdgpu_ras_is_supported(adev, obj->head.block)) { 2348 amdgpu_ras_feature_enable(adev, &obj->head, 0); 2349 /* there should be no any reference. */ 2350 WARN_ON(alive_obj(obj)); 2351 } 2352 } 2353 } 2354 2355 if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) { 2356 con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET; 2357 /* setup ras obj state as disabled. 2358 * for init_by_vbios case. 2359 * if we want to enable ras, just enable it in a normal way. 2360 * If we want do disable it, need setup ras obj as enabled, 2361 * then issue another TA disable cmd. 2362 * See feature_enable_on_boot 2363 */ 2364 amdgpu_ras_disable_all_features(adev, 1); 2365 amdgpu_ras_reset_gpu(adev); 2366 } 2367 } 2368 2369 void amdgpu_ras_suspend(struct amdgpu_device *adev) 2370 { 2371 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2372 2373 if (!adev->ras_features || !con) 2374 return; 2375 2376 amdgpu_ras_disable_all_features(adev, 0); 2377 /* Make sure all ras objects are disabled. */ 2378 if (con->features) 2379 amdgpu_ras_disable_all_features(adev, 1); 2380 } 2381 2382 /* do some fini work before IP fini as dependence */ 2383 int amdgpu_ras_pre_fini(struct amdgpu_device *adev) 2384 { 2385 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2386 2387 if (!adev->ras_features || !con) 2388 return 0; 2389 2390 /* Need disable ras on all IPs here before ip [hw/sw]fini */ 2391 amdgpu_ras_disable_all_features(adev, 0); 2392 amdgpu_ras_recovery_fini(adev); 2393 return 0; 2394 } 2395 2396 int amdgpu_ras_fini(struct amdgpu_device *adev) 2397 { 2398 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2399 2400 if (!adev->ras_features || !con) 2401 return 0; 2402 2403 amdgpu_ras_fs_fini(adev); 2404 amdgpu_ras_interrupt_remove_all(adev); 2405 2406 WARN(con->features, "Feature mask is not cleared"); 2407 2408 if (con->features) 2409 amdgpu_ras_disable_all_features(adev, 1); 2410 2411 amdgpu_ras_set_context(adev, NULL); 2412 kfree(con); 2413 2414 return 0; 2415 } 2416 2417 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) 2418 { 2419 uint32_t hw_supported, supported; 2420 2421 amdgpu_ras_check_supported(adev, &hw_supported, &supported); 2422 if (!hw_supported) 2423 return; 2424 2425 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { 2426 dev_info(adev->dev, "uncorrectable hardware error" 2427 "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); 2428 2429 amdgpu_ras_reset_gpu(adev); 2430 } 2431 } 2432 2433 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev) 2434 { 2435 if (adev->asic_type == CHIP_VEGA20 && 2436 adev->pm.fw_version <= 0x283400) { 2437 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) && 2438 amdgpu_ras_intr_triggered(); 2439 } 2440 2441 return false; 2442 } 2443 2444 void amdgpu_release_ras_context(struct amdgpu_device *adev) 2445 { 2446 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2447 2448 if (!con) 2449 return; 2450 2451 if (!adev->ras_features && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { 2452 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); 2453 amdgpu_ras_set_context(adev, NULL); 2454 kfree(con); 2455 } 2456 } 2457