/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ras.h"
#include "amdgpu.h"
#include "amdgpu_mca.h"

#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"

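/**
 * amdgpu_mca_query_correctable_error_count - query CEs from one MCA status register
 * @adev: amdgpu device pointer
 * @mc_status_addr: address of the MCA_UMC_UMC0_MCUMC_STATUST0 register to read
 * @error_count: counter incremented when a correctable error is reported
 *
 * Read the 64-bit MCA status register through the PCIe register interface and
 * increment @error_count if both the Val and CECC fields are set.
 */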
void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
					      uint64_t mc_status_addr,
					      unsigned long *error_count)
{
	uint64_t mc_status = RREG64_PCIE(mc_status_addr);

	if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
		*error_count += 1;
}

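/**
 * amdgpu_mca_query_uncorrectable_error_count - query UEs from one MCA status register
 * @adev: amdgpu device pointer
 * @mc_status_addr: address of the MCA_UMC_UMC0_MCUMC_STATUST0 register to read
 * @error_count: counter incremented when an uncorrectable error is reported
 *
 * Read the 64-bit MCA status register through the PCIe register interface and
 * increment @error_count if the Val field is set together with any of the
 * Deferred, UECC, PCC, UC or TCC fields.
 */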
void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
						uint64_t mc_status_addr,
						unsigned long *error_count)
{
	uint64_t mc_status = RREG64_PCIE(mc_status_addr);

	if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
		*error_count += 1;
}

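/**
 * amdgpu_mca_reset_error_count - clear one MCA status register
 * @adev: amdgpu device pointer
 * @mc_status_addr: address of the MCA status register to clear
 *
 * Write zero to the status register so that errors which have already been
 * counted are not reported again on the next query.
 */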
void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
				  uint64_t mc_status_addr)
{
	WREG64_PCIE(mc_status_addr, 0x0ULL);
}

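/**
 * amdgpu_mca_query_ras_error_count - accumulate RAS error counts from one MCA bank
 * @adev: amdgpu device pointer
 * @mc_status_addr: address of the MCA status register to query
 * @ras_error_status: struct ras_err_data that accumulates the CE/UE counts
 *
 * Add the correctable and uncorrectable errors reported by the status register
 * to @ras_error_status, then clear the register.
 */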
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
				      uint64_t mc_status_addr,
				      void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
	amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));

	amdgpu_mca_reset_error_count(adev, mc_status_addr);
}

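/**
 * amdgpu_mca_mp0_ras_sw_init - register the MP0 MCA RAS block
 * @adev: amdgpu device pointer
 *
 * If an MP0 RAS block has been set up (adev->mca.mp0.ras is non-NULL),
 * register it with the RAS framework and fill in its common name, block id
 * and error type.
 *
 * Returns 0 on success or when there is nothing to register, a negative error
 * code otherwise.
 */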
int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mp0.ras)
		return 0;

	ras = adev->mca.mp0.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mp0");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

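/**
 * amdgpu_mca_mp1_ras_sw_init - register the MP1 MCA RAS block
 * @adev: amdgpu device pointer
 *
 * Same as amdgpu_mca_mp0_ras_sw_init(), but for the block installed in
 * adev->mca.mp1.ras.
 */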
int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mp1.ras)
		return 0;

	ras = adev->mca.mp1.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mp1");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

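/**
 * amdgpu_mca_mpio_ras_sw_init - register the MPIO MCA RAS block
 * @adev: amdgpu device pointer
 *
 * Same as amdgpu_mca_mp0_ras_sw_init(), but for the block installed in
 * adev->mca.mpio.ras.
 */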
int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mpio.ras)
		return 0;

	ras = adev->mca.mpio.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mpio");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;

	return 0;
}