Lines matching references to `con` (the per-device struct amdgpu_ras context) in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c. Each entry shows the source line number, the matched line, and the enclosing function.
126 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
641 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_create_obj() local
644 if (!adev->ras_enabled || !con) in amdgpu_ras_create_obj()
654 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_create_obj()
656 obj = &con->objs[head->block]; in amdgpu_ras_create_obj()
664 list_add(&obj->node, &con->head); in amdgpu_ras_create_obj()
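
The two index forms in amdgpu_ras_create_obj()/amdgpu_ras_find_obj() above reflect one flat objs[] table: top-level RAS blocks index it directly by block ID, and sub-block entries occupy slots appended after AMDGPU_RAS_BLOCK__LAST. A minimal userspace sketch of that layout (hypothetical block names and sub-block count; not the kernel code):

    #include <stdio.h>

    enum ras_block { BLOCK__GFX, BLOCK__UMC, BLOCK__MCA, BLOCK__LAST };
    #define NUM_MCA_SUB_BLOCKS 4             /* hypothetical count */

    struct ras_manager { int in_use; };

    static struct ras_manager objs[BLOCK__LAST + NUM_MCA_SUB_BLOCKS];

    static struct ras_manager *get_obj(enum ras_block block, int sub_block_index)
    {
        if (block == BLOCK__MCA)             /* sub-block slots live past the top-level ones */
            return &objs[BLOCK__LAST + sub_block_index];
        return &objs[block];
    }

    int main(void)
    {
        printf("gfx slot:  %td\n", get_obj(BLOCK__GFX, 0) - objs);
        printf("mca sub 1: %td\n", get_obj(BLOCK__MCA, 1) - objs);
        return 0;
    }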
674 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_find_obj() local
678 if (!adev->ras_enabled || !con) in amdgpu_ras_find_obj()
689 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_find_obj()
691 obj = &con->objs[head->block]; in amdgpu_ras_find_obj()
697 obj = &con->objs[i]; in amdgpu_ras_find_obj()
717 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_feature_enabled() local
719 return con->features & BIT(head->block); in amdgpu_ras_is_feature_enabled()
729 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in __amdgpu_ras_feature_enable() local
750 con->features |= BIT(head->block); in __amdgpu_ras_feature_enable()
753 con->features &= ~BIT(head->block); in __amdgpu_ras_feature_enable()
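
__amdgpu_ras_feature_enable() and amdgpu_ras_is_feature_enabled() above keep per-block enablement as one bit per RAS block in con->features: set on enable, cleared on disable, tested on query. A standalone sketch of the same bookkeeping (BIT() reimplemented locally, block IDs hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    static uint32_t features;                /* stands in for con->features */

    static void feature_set(int block, bool enable)
    {
        if (enable)
            features |= BIT(block);
        else
            features &= ~BIT(block);
    }

    static bool feature_enabled(int block)
    {
        return features & BIT(block);
    }

    int main(void)
    {
        feature_set(2, true);
        printf("block 2 enabled: %d\n", feature_enabled(2));
        feature_set(2, false);
        printf("block 2 enabled: %d\n", feature_enabled(2));
        return 0;
    }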
765 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable() local
769 if (!con) in amdgpu_ras_feature_enable()
821 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable_on_boot() local
824 if (!con) in amdgpu_ras_feature_enable_on_boot()
827 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_feature_enable_on_boot()
855 con->features |= BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
861 con->features &= ~BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
872 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_disable_all_features() local
875 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_disable_all_features()
888 return con->features; in amdgpu_ras_disable_all_features()
894 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_enable_all_features() local
941 return con->features; in amdgpu_ras_enable_all_features()
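
amdgpu_ras_disable_all_features() walks con->head with list_for_each_entry_safe() because the loop body tears down the node it is standing on, so the successor must be cached before the body runs. A minimal singly-linked analog of that safe walk (not the kernel list API):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { struct obj *next; int block; };

    static void disable_all(struct obj **head)
    {
        struct obj *obj = *head, *tmp;

        while (obj) {
            tmp = obj->next;                 /* cache successor before the node goes away */
            printf("disabling block %d\n", obj->block);
            free(obj);
            obj = tmp;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct obj *b = malloc(sizeof(*b));
        struct obj *a = malloc(sizeof(*a));

        if (!a || !b)
            return 1;
        a->block = 0; a->next = b;
        b->block = 1; b->next = NULL;
        disable_all(&a);
        return 0;
    }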
1243 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_error_count() local
1248 if (!adev->ras_enabled || !con) in amdgpu_ras_query_error_count()
1260 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_error_count()
1338 struct amdgpu_ras *con = in amdgpu_ras_sysfs_badpages_read() local
1340 struct amdgpu_device *adev = con->adev; in amdgpu_ras_sysfs_badpages_read()
1369 struct amdgpu_ras *con = in amdgpu_ras_sysfs_features_read() local
1372 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); in amdgpu_ras_sysfs_features_read()
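
The features_read show() callback formats con->features with sysfs_emit(), which bounds its output to one page. A userspace analog using snprintf() into a page-sized buffer (PAGE_SIZE defined locally for the sketch):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096                   /* local stand-in for the kernel constant */

    static int features_show(char *buf, uint32_t features)
    {
        return snprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", features);
    }

    int main(void)
    {
        char buf[PAGE_SIZE];

        features_show(buf, 0x5);
        fputs(buf, stdout);
        return 0;
    }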
1377 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_bad_page_node() local
1381 &con->badpages_attr.attr, in amdgpu_ras_sysfs_remove_bad_page_node()
1387 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_feature_node() local
1389 &con->features_attr.attr, in amdgpu_ras_sysfs_remove_feature_node()
1457 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_all() local
1460 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_sysfs_remove_all()
1494 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_ctrl_node() local
1495 struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control; in amdgpu_ras_debugfs_create_ctrl_node()
1505 &con->bad_page_cnt_threshold); in amdgpu_ras_debugfs_create_ctrl_node()
1511 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", in amdgpu_ras_debugfs_create_ctrl_node()
1514 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); in amdgpu_ras_debugfs_create_ctrl_node()
1524 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); in amdgpu_ras_debugfs_create_ctrl_node()
1531 &con->disable_ras_err_cnt_harvest); in amdgpu_ras_debugfs_create_ctrl_node()
1556 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_all() local
1565 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) in amdgpu_ras_debugfs_create_all()
1570 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_debugfs_create_all()
1590 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_init() local
1595 &con->features_attr.attr, in amdgpu_ras_fs_init()
1605 con->features_attr = dev_attr_features; in amdgpu_ras_fs_init()
1612 con->badpages_attr = bin_attr_gpu_vram_bad_pages; in amdgpu_ras_fs_init()
1613 bin_attrs[0] = &con->badpages_attr; in amdgpu_ras_fs_init()
1627 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_fini() local
1631 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { in amdgpu_ras_fs_fini()
1876 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_remove_all() local
1879 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_interrupt_remove_all()
1890 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_log_on_err_counter() local
1893 if (!adev->ras_enabled || !con) in amdgpu_ras_log_on_err_counter()
1896 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_log_on_err_counter()
1961 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_err_status() local
1964 if (!adev->ras_enabled || !con) in amdgpu_ras_query_err_status()
1967 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_err_status()
1984 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_badpages_read() local
1989 if (!con || !con->eh_data || !bps || !count) in amdgpu_ras_badpages_read()
1992 mutex_lock(&con->recovery_lock); in amdgpu_ras_badpages_read()
1993 data = con->eh_data; in amdgpu_ras_badpages_read()
2022 mutex_unlock(&con->recovery_lock); in amdgpu_ras_badpages_read()
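
amdgpu_ras_badpages_read() NULL-checks con->eh_data, then snapshots it under con->recovery_lock; every reader and writer of the bad-page table takes that same mutex. A pthread analog of the pattern (types and names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    struct err_handler_data { int count; };

    static pthread_mutex_t recovery_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct err_handler_data table = { 3 };
    static struct err_handler_data *eh_data = &table;

    static int badpages_count(int *count)
    {
        if (!eh_data || !count)              /* mirror the NULL guards in the listing */
            return -1;
        pthread_mutex_lock(&recovery_lock);
        *count = eh_data->count;             /* consistent snapshot under the lock */
        pthread_mutex_unlock(&recovery_lock);
        return 0;
    }

    int main(void)
    {
        int n;

        if (!badpages_count(&n))
            printf("bad pages: %d\n", n);
        return 0;
    }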
2117 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_add_bad_pages() local
2122 if (!con || !con->eh_data || !bps || pages <= 0) in amdgpu_ras_add_bad_pages()
2125 mutex_lock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
2126 data = con->eh_data; in amdgpu_ras_add_bad_pages()
2131 if (amdgpu_ras_check_bad_page_unlock(con, in amdgpu_ras_add_bad_pages()
2150 mutex_unlock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
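
The add path calls amdgpu_ras_check_bad_page_unlock() for each incoming record so duplicates are skipped before being appended, all under recovery_lock. A self-contained sketch of that dedup-then-append logic (fixed-size table, locking elided for brevity):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BAD_PAGES 8

    static uint64_t bad_pages[MAX_BAD_PAGES];
    static int bad_page_count;

    static bool page_known(uint64_t addr)    /* "unlock"-style helper: caller holds the lock */
    {
        for (int i = 0; i < bad_page_count; i++)
            if (bad_pages[i] == addr)
                return true;
        return false;
    }

    static int add_bad_pages(const uint64_t *bps, int pages)
    {
        for (int i = 0; i < pages; i++) {
            if (page_known(bps[i]))          /* dedup before append */
                continue;
            if (bad_page_count == MAX_BAD_PAGES)
                return -1;
            bad_pages[bad_page_count++] = bps[i];
        }
        return 0;
    }

    int main(void)
    {
        uint64_t bps[] = { 0x1000, 0x2000, 0x1000 };

        add_bad_pages(bps, 3);
        printf("recorded %d unique bad pages\n", bad_page_count);
        return 0;
    }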
2163 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_save_bad_pages() local
2168 if (!con || !con->eh_data) { in amdgpu_ras_save_bad_pages()
2175 mutex_lock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
2176 control = &con->eeprom_control; in amdgpu_ras_save_bad_pages()
2177 data = con->eh_data; in amdgpu_ras_save_bad_pages()
2179 mutex_unlock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
2228 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, in amdgpu_ras_check_bad_page_unlock() argument
2231 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_check_bad_page_unlock()
2250 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_check_bad_page() local
2253 if (!con || !con->eh_data) in amdgpu_ras_check_bad_page()
2256 mutex_lock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
2257 ret = amdgpu_ras_check_bad_page_unlock(con, addr); in amdgpu_ras_check_bad_page()
2258 mutex_unlock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
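
The _unlock suffix follows the usual kernel naming convention: amdgpu_ras_check_bad_page_unlock() assumes the caller already holds recovery_lock, and amdgpu_ras_check_bad_page() is the public wrapper that brackets it. A pthread sketch of that split:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    static pthread_mutex_t recovery_lock = PTHREAD_MUTEX_INITIALIZER;

    static bool check_bad_page_unlock(uint64_t addr)
    {
        (void)addr;                          /* table walk would go here, lock held */
        return false;
    }

    static bool check_bad_page(uint64_t addr)
    {
        bool ret;

        pthread_mutex_lock(&recovery_lock);
        ret = check_bad_page_unlock(addr);
        pthread_mutex_unlock(&recovery_lock);
        return ret;
    }

    int main(void)
    {
        return check_bad_page(0x1000) ? 1 : 0;
    }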
2265 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_validate_threshold() local
2291 con->bad_page_cnt_threshold = min(lower_32_bits(val), in amdgpu_ras_validate_threshold()
2294 con->bad_page_cnt_threshold = min_t(int, max_count, in amdgpu_ras_validate_threshold()
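
amdgpu_ras_validate_threshold() clamps the configured bad-page threshold to what the EEPROM table can actually hold, taking only the low 32 bits of the 64-bit value. A sketch of that clamping with local lower_32_bits()/min analogs (the capacity value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

    int main(void)
    {
        uint64_t configured = 0x1ffffffffULL;  /* hypothetical user-set threshold */
        uint32_t max_count = 1000;             /* hypothetical EEPROM record capacity */
        uint32_t threshold = min_u32(lower_32_bits(configured), max_count);

        printf("bad_page_cnt_threshold = %u\n", threshold);
        return 0;
    }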
2301 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_init() local
2307 if (!con || amdgpu_sriov_vf(adev)) in amdgpu_ras_recovery_init()
2315 con->adev = adev; in amdgpu_ras_recovery_init()
2320 data = &con->eh_data; in amdgpu_ras_recovery_init()
2327 mutex_init(&con->recovery_lock); in amdgpu_ras_recovery_init()
2328 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); in amdgpu_ras_recovery_init()
2329 atomic_set(&con->in_recovery, 0); in amdgpu_ras_recovery_init()
2330 con->eeprom_control.bad_channel_bitmap = 0; in amdgpu_ras_recovery_init()
2332 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); in amdgpu_ras_recovery_init()
2341 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit); in amdgpu_ras_recovery_init()
2349 if (con->eeprom_control.ras_num_recs) { in amdgpu_ras_recovery_init()
2354 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); in amdgpu_ras_recovery_init()
2356 if (con->update_channel_flag == true) { in amdgpu_ras_recovery_init()
2357 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap); in amdgpu_ras_recovery_init()
2358 con->update_channel_flag = false; in amdgpu_ras_recovery_init()
2372 con->eh_data = NULL; in amdgpu_ras_recovery_init()
2390 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_fini() local
2391 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_recovery_fini()
2397 cancel_work_sync(&con->recovery_work); in amdgpu_ras_recovery_fini()
2399 mutex_lock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
2400 con->eh_data = NULL; in amdgpu_ras_recovery_fini()
2403 mutex_unlock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
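
amdgpu_ras_recovery_fini() first cancels the recovery worker with cancel_work_sync(), then frees the table and NULLs con->eh_data under recovery_lock so no concurrent reader can dereference freed memory. A pthread analog of that teardown ordering:

    #include <pthread.h>
    #include <stdlib.h>

    struct err_handler_data { int count; };

    static pthread_mutex_t recovery_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct err_handler_data *eh_data;

    static void recovery_fini(void)
    {
        /* a cancel_work_sync() analog would precede this in the kernel */
        pthread_mutex_lock(&recovery_lock);
        free(eh_data);
        eh_data = NULL;                      /* readers NULL-check before dereferencing */
        pthread_mutex_unlock(&recovery_lock);
    }

    int main(void)
    {
        eh_data = calloc(1, sizeof(*eh_data));
        recovery_fini();
        return 0;
    }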
2542 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_counte_dw() local
2544 struct amdgpu_device *adev = con->adev; in amdgpu_ras_counte_dw()
2556 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_counte_dw()
2557 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_counte_dw()
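
amdgpu_ras_counte_dw() runs as deferred work and publishes the correctable/uncorrectable totals through atomic_set(), so readers never observe a torn update. A C11-atomics analog (the worker signature is simplified for the sketch):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int ras_ce_count;
    static atomic_int ras_ue_count;

    static void counte_work(int ce, int ue)  /* stands in for amdgpu_ras_counte_dw() */
    {
        atomic_store(&ras_ce_count, ce);
        atomic_store(&ras_ue_count, ue);
    }

    int main(void)
    {
        counte_work(3, 1);
        printf("ce=%d ue=%d\n",
               atomic_load(&ras_ce_count), atomic_load(&ras_ue_count));
        return 0;
    }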
2567 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_poison_mode() local
2571 if (amdgpu_sriov_vf(adev) || !con) in amdgpu_ras_query_poison_mode()
2577 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
2589 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
2599 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init() local
2602 if (con) in amdgpu_ras_init()
2605 con = kmalloc(sizeof(struct amdgpu_ras) + in amdgpu_ras_init()
2609 if (!con) in amdgpu_ras_init()
2612 con->adev = adev; in amdgpu_ras_init()
2613 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); in amdgpu_ras_init()
2614 atomic_set(&con->ras_ce_count, 0); in amdgpu_ras_init()
2615 atomic_set(&con->ras_ue_count, 0); in amdgpu_ras_init()
2617 con->objs = (struct ras_manager *)(con + 1); in amdgpu_ras_init()
2619 amdgpu_ras_set_context(adev, con); in amdgpu_ras_init()
2628 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_ras_init()
2637 con->update_channel_flag = false; in amdgpu_ras_init()
2638 con->features = 0; in amdgpu_ras_init()
2639 INIT_LIST_HEAD(&con->head); in amdgpu_ras_init()
2641 con->flags = RAS_DEFAULT_FLAGS; in amdgpu_ras_init()
2706 kfree(con); in amdgpu_ras_init()
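
amdgpu_ras_init() sizes a single kmalloc() to hold both struct amdgpu_ras and the trailing ras_manager table, then points con->objs at the memory immediately after the struct ((con + 1)); the kfree(con) on the error path releases both at once. A userspace sketch of that single-allocation layout:

    #include <stdio.h>
    #include <stdlib.h>

    struct ras_manager { int block; };

    struct ras_ctx {                         /* stands in for struct amdgpu_ras */
        unsigned int features;
        struct ras_manager *objs;
    };

    int main(void)
    {
        size_t nobjs = 16;                   /* hypothetical table size */
        struct ras_ctx *con = malloc(sizeof(*con) + nobjs * sizeof(struct ras_manager));

        if (!con)
            return 1;
        con->objs = (struct ras_manager *)(con + 1);  /* table follows the header */
        printf("ctx=%p objs=%p\n", (void *)con, (void *)con->objs);
        free(con);                           /* one free releases both */
        return 0;
    }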
2740 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_poison_mode_supported() local
2742 if (!con) in amdgpu_ras_is_poison_mode_supported()
2745 return con->poison_supported; in amdgpu_ras_is_poison_mode_supported()
2753 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_block_late_init() local
2805 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_block_late_init()
2806 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_block_late_init()
2854 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_resume() local
2857 if (!adev->ras_enabled || !con) { in amdgpu_ras_resume()
2864 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_resume()
2876 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_resume()
2888 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_suspend() local
2890 if (!adev->ras_enabled || !con) in amdgpu_ras_suspend()
2895 if (con->features) in amdgpu_ras_suspend()
2933 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_pre_fini() local
2935 if (!adev->ras_enabled || !con) in amdgpu_ras_pre_fini()
2940 if (con->features) in amdgpu_ras_pre_fini()
2950 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fini() local
2952 if (!adev->ras_enabled || !con) in amdgpu_ras_fini()
2973 WARN(con->features, "Feature mask is not cleared"); in amdgpu_ras_fini()
2975 if (con->features) in amdgpu_ras_fini()
2978 cancel_delayed_work_sync(&con->ras_counte_delay_work); in amdgpu_ras_fini()
2981 kfree(con); in amdgpu_ras_fini()
3012 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_release_ras_context() local
3014 if (!con) in amdgpu_release_ras_context()
3017 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { in amdgpu_release_ras_context()
3018 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_release_ras_context()
3020 kfree(con); in amdgpu_release_ras_context()