// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2016-2017 Hisilicon Limited. */

#include "hclge_err.h"

static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
	{ .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
	{ .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
	{ .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
	{ .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
	{ .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
	{ .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
	{ .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
	{ .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
	{ .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
	{ .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
	{ .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
	{ .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
	{ .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
	{ .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
	{ .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_igu_int[] = {
	{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
	{ .int_msk = BIT(0), .msg = "rx_buf_overflow" },
	{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
	{ .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow" },
	{ .int_msk = BIT(3), .msg = "tx_buf_overflow" },
	{ .int_msk = BIT(4), .msg = "tx_buf_underrun" },
	{ .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ncsi_err_int[] = {
	{ .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
	{ .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
	{ .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
	{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
	{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
	{ .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
	{ .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
	{ .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
	{ .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
	{ .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
	{ .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
	{ .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
	{ .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_mbit_err" },
	{ .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
	{ .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
	{ .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
	{ .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
	{ .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
	{ .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
	{ .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
	{ .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
	{ .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err" },
	{ .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
	{ .int_msk = BIT(0), .msg = "tx_vlan_tag_err" },
	{ .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
	{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
	{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
	{ .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_tm_sch_rint[] = {
	{ .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" },
	{ .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" },
	{ .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" },
	{ .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" },
	{ .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" },
	{ .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" },
	{ .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" },
	{ .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" },
	{ .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" },
	{ .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" },
	{ .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err" },
	{ .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err" },
	{ .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" },
	{ .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" },
	{ .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" },
	{ .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" },
	{ .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" },
	{ .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" },
	{ .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" },
	{ .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" },
	{ .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" },
	{ .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" },
	{ .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" },
	{ .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" },
	{ .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" },
	{ .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" },
	{ .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" },
	{ .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" },
	{ .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" },
	{ .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
	{ .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" },
	{ .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" },
	{ .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" },
	{ .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" },
	{ .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" },
	{ .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" },
	{ .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" },
	{ .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" },
	{ .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" },
	{ .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" },
	{ .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" },
	{ .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" },
	{ .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" },
	{ .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" },
	{ .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" },
	{ .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" },
	{ .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" },
	{ .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
	{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
	{ .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
	{ .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
	{ .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
	{ .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
	{ .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
	{ .int_msk = BIT(21), .msg = "qcn_gp3_barrel_mem_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
	{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" },
	{ .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" },
	{ .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" },
	{ .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" },
	{ .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" },
	{ .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" },
	{ .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" },
	{ .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" },
	{ .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" },
	{ .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" },
	{ .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" },
	{ .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
	{ .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" },
	{ .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" },
	{ .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" },
	{ .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" },
	{ .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" },
	{ .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" },
	{ .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" },
	{ .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" },
	{ .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" },
	{ .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" },
	{ .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" },
	{ .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" },
	{ .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" },
	{ .int_msk = BIT(26), .msg = "rd_bus_err" },
	{ .int_msk = BIT(27), .msg = "wr_bus_err" },
	{ .int_msk = BIT(28), .msg = "reg_search_miss" },
	{ .int_msk = BIT(29), .msg = "rx_q_search_miss" },
	{ .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" },
	{ .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
	{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" },
	{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
	{ .int_msk = BIT(0), .msg = "over_8bd_no_fe" },
	{ .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" },
	{ .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" },
	{ .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" },
	{ .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" },
	{ .int_msk = BIT(5), .msg = "buf_wait_timeout" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
	{ .int_msk = BIT(0), .msg = "buf_sum_err" },
	{ .int_msk = BIT(1), .msg = "ppp_mb_num_err" },
	{ .int_msk = BIT(2), .msg = "ppp_mbid_err" },
	{ .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" },
	{ .int_msk = BIT(4), .msg = "ppp_rlt_host_err" },
	{ .int_msk = BIT(5), .msg = "cks_edit_position_err" },
	{ .int_msk = BIT(6), .msg = "cks_edit_condition_err" },
	{ .int_msk = BIT(7), .msg = "vlan_edit_condition_err" },
	{ .int_msk = BIT(8), .msg = "vlan_num_ot_err" },
	{ .int_msk = BIT(9), .msg = "vlan_num_in_err" },
	{ /* sentinel */ }
};

#define HCLGE_SSU_MEM_ECC_ERR(x) \
	{ .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" }
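/* e.g. HCLGE_SSU_MEM_ECC_ERR(3) expands to
 * { .int_msk = BIT(3), .msg = "ssu_mem3_ecc_mbit_err" }
 */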

static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
	HCLGE_SSU_MEM_ECC_ERR(0),
	HCLGE_SSU_MEM_ECC_ERR(1),
	HCLGE_SSU_MEM_ECC_ERR(2),
	HCLGE_SSU_MEM_ECC_ERR(3),
	HCLGE_SSU_MEM_ECC_ERR(4),
	HCLGE_SSU_MEM_ECC_ERR(5),
	HCLGE_SSU_MEM_ECC_ERR(6),
	HCLGE_SSU_MEM_ECC_ERR(7),
	HCLGE_SSU_MEM_ECC_ERR(8),
	HCLGE_SSU_MEM_ECC_ERR(9),
	HCLGE_SSU_MEM_ECC_ERR(10),
	HCLGE_SSU_MEM_ECC_ERR(11),
	HCLGE_SSU_MEM_ECC_ERR(12),
	HCLGE_SSU_MEM_ECC_ERR(13),
	HCLGE_SSU_MEM_ECC_ERR(14),
	HCLGE_SSU_MEM_ECC_ERR(15),
	HCLGE_SSU_MEM_ECC_ERR(16),
	HCLGE_SSU_MEM_ECC_ERR(17),
	HCLGE_SSU_MEM_ECC_ERR(18),
	HCLGE_SSU_MEM_ECC_ERR(19),
	HCLGE_SSU_MEM_ECC_ERR(20),
	HCLGE_SSU_MEM_ECC_ERR(21),
	HCLGE_SSU_MEM_ECC_ERR(22),
	HCLGE_SSU_MEM_ECC_ERR(23),
	HCLGE_SSU_MEM_ECC_ERR(24),
	HCLGE_SSU_MEM_ECC_ERR(25),
	HCLGE_SSU_MEM_ECC_ERR(26),
	HCLGE_SSU_MEM_ECC_ERR(27),
	HCLGE_SSU_MEM_ECC_ERR(28),
	HCLGE_SSU_MEM_ECC_ERR(29),
	HCLGE_SSU_MEM_ECC_ERR(30),
	HCLGE_SSU_MEM_ECC_ERR(31),
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
	{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },
	{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" },
	{ .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" },
	{ .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" },
	{ .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" },
	{ .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" },
	{ .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" },
	{ .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" },
	{ .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" },
	{ .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" },
	{ .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" },
	{ .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
	{ .int_msk = BIT(0), .msg = "ig_mac_inf_int" },
	{ .int_msk = BIT(1), .msg = "ig_host_inf_int" },
	{ .int_msk = BIT(2), .msg = "ig_roc_buf_int" },
	{ .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" },
	{ .int_msk = BIT(4), .msg = "ig_host_key_fifo_int" },
	{ .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" },
	{ .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" },
	{ .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" },
	{ .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" },
	{ .int_msk = BIT(9), .msg = "qm_eof_fifo_int" },
	{ .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" },
	{ .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" },
	{ .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" },
	{ .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" },
	{ .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" },
	{ .int_msk = BIT(15), .msg = "host_cmd_fifo_int" },
	{ .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" },
	{ .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" },
	{ .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" },
	{ .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" },
	{ .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" },
	{ .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" },
	{ .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" },
	{ .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
	{ .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" },
	{ .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" },
	{ .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" },
	{ .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
	{ .int_msk = BIT(9), .msg = "low_water_line_err_port" },
	{ .int_msk = BIT(10), .msg = "hi_water_line_err_port" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
	{ .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" },
	{ .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" },
	{ .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" },
	{ .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" },
	{ .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" },
	{ .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" },
	{ .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" },
	{ .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" },
	{ .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" },
	{ .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" },
	{ .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" },
	{ .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" },
	{ .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" },
	{ .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" },
	{ .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" },
	{ .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" },
	{ .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" },
	{ .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" },
	{ .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" },
	{ .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" },
	{ /* sentinel */ }
};

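/* hclge_log_error: log each error bit set in a hw status word
 * @dev: pointer to struct device
 * @reg: name of the status register being decoded
 * @err: table of { int_msk, msg } pairs, terminated by a sentinel entry
 * @err_sts: value read from the status register
 *
 * Walks the table and warns once per matching bit, e.g.
 * "IGU_INT_STS igu_rx_buf0_ecc_mbit_err found [error status=0x1]".
 */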
static void hclge_log_error(struct device *dev, char *reg,
			    const struct hclge_hw_error *err,
			    u32 err_sts)
{
	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_warn(dev, "%s %s found [error status=0x%x]\n",
				 reg, err->msg, err_sts);
		err++;
	}
}

/* hclge_cmd_query_error: read the error information
 * @hdev: pointer to struct hclge_dev
 * @desc: descriptor for describing the command
 * @cmd:  command opcode
 * @flag: flag for extended command structure
 * @w_num: offset for setting the read interrupt type
 * @int_type: type of interrupt for which the error info will be read
 * (RAS-CE/RAS-NFE/RAS-FE etc.)
 *
 * This function queries the error info from the hw registers using
 * the command queue.
 */
static int hclge_cmd_query_error(struct hclge_dev *hdev,
				 struct hclge_desc *desc, u32 cmd,
				 u16 flag, u8 w_num,
				 enum hclge_err_int_type int_type)
{
	struct device *dev = &hdev->pdev->dev;
	int num = 1;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	if (flag) {
		desc[0].flag |= cpu_to_le16(flag);
		hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
		num = 2;
	}
	if (w_num)
		desc[0].data[w_num] = cpu_to_le32(int_type);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret)
		dev_err(dev, "query error cmd failed (%d)\n", ret);

	return ret;
}

static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int ret;

	/* configure common error interrupts */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);

	if (en) {
		desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
		desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
					HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
		desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
		desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN |
					      HCLGE_MSIX_SRAM_ECC_ERR_INT_EN);
		desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
	}

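	/* the mask words in desc[1] are written unconditionally; only the
	 * enable bits in desc[0] depend on 'en', which is presumably how
	 * the firmware tells an enable apart from a disable
	 */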
	desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
	desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
				HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
	desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
	desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK |
				      HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK);
	desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure common err interrupts\n", ret);

	return ret;
}

static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	if (hdev->pdev->revision < 0x21)
		return 0;

	/* configure NCSI error interrupts */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
	if (en)
		desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure NCSI error interrupts\n", ret);

	return ret;
}

static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	/* configure IGU,EGU error interrupts */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
	if (en)
		desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);

	desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(dev,
			"fail(%d) to configure IGU common interrupts\n", ret);
		return ret;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
	if (en)
		desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);

	desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(dev,
			"fail(%d) to configure IGU-EGU TNL interrupts\n", ret);
		return ret;
	}

	return hclge_config_ncsi_hw_err_int(hdev, en);
}

static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
					    bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int ret;

	/* configure PPP error interrupts */
	hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, false);

	if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
		if (en) {
			desc[0].data[0] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
			desc[0].data[1] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
			desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN);
		}

		desc[1].data[0] =
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
		desc[1].data[1] =
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
		if (hdev->pdev->revision >= 0x21)
			desc[1].data[2] =
				cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK);
	} else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
		if (en) {
			desc[0].data[0] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
			desc[0].data[1] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
		}

		desc[1].data[0] =
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
		desc[1].data[1] =
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
	if (ret)
		dev_err(dev, "fail(%d) to configure PPP error intr\n", ret);

	return ret;
}

static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en)
{
	int ret;

	ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
					       en);
	if (ret)
		return ret;

	return hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
						en);
}

static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	/* configure TM SCH hw errors */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
	if (en)
		desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret);
		return ret;
	}

	/* configure TM QCN hw errors */
	ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
				    0, 0, 0);
	if (ret) {
		dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
		return ret;
	}

	hclge_cmd_reuse_desc(&desc, false);
	if (en)
		desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure TM QCN mem errors\n", ret);

	return ret;
}

static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	/* configure MAC common error interrupts */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false);
	if (en)
		desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN);

	desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure MAC COMMON error intr\n", ret);

	return ret;
}

static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
					     bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int num = 1;
	int ret;

	/* configure PPU error interrupts */
	if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
		if (en) {
			desc[0].data[0] =
				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN);
			desc[0].data[1] =
				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN);
			desc[1].data[3] =
				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN);
			desc[1].data[4] =
				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN);
		}

		desc[1].data[0] =
			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK);
		desc[1].data[1] =
			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK);
		desc[1].data[2] =
			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK);
		desc[1].data[3] |=
			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK);
		num = 2;
	} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
		if (en)
			desc[0].data[0] =
				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2);

		desc[0].data[2] =
			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK);
	} else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) {
		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
		if (en)
			desc[0].data[0] =
				cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN);

		desc[0].data[2] =
			cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK);
	} else {
		dev_err(dev, "Invalid cmd to configure PPU error interrupts\n");
		return -EINVAL;
	}

	return hclge_cmd_send(&hdev->hw, &desc[0], num);
}

static int hclge_config_ppu_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	int ret;

	ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD,
						en);
	if (ret) {
		dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n",
			ret);
		return ret;
	}

	ret = hclge_config_ppu_error_interrupts(hdev,
						HCLGE_PPU_MPF_OTHER_INT_CMD,
						en);
	if (ret) {
		dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret);
		return ret;
	}

	ret = hclge_config_ppu_error_interrupts(hdev,
						HCLGE_PPU_PF_OTHER_INT_CMD, en);
	if (ret)
		dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n",
			ret);
	return ret;
}

static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int ret;

	/* configure SSU ecc error interrupts */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
	if (en) {
		desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
		desc[0].data[1] =
			cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN);
		desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN);
	}

	desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK);
	desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK);
	desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
	if (ret) {
		dev_err(dev,
			"fail(%d) to configure SSU ECC error interrupt\n", ret);
		return ret;
	}

	/* configure SSU common error interrupts */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);

	if (en) {
		if (hdev->pdev->revision >= 0x21)
			desc[0].data[0] =
				cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
		else
			desc[0].data[0] =
				cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5));
		desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN);
		desc[0].data[2] =
			cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN);
	}

	desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK |
				HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK);
	desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure SSU COMMON error intr\n", ret);

	return ret;
}

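/* Request a default reset through the ae_dev ops, if the op is
 * implemented. Wrapped in do { } while (0) so the macro behaves as a
 * single statement.
 */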
#define HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type) \
	do { \
		if (ae_dev->ops->set_default_reset_request) \
			ae_dev->ops->set_default_reset_request(ae_dev, \
							       reset_type); \
	} while (0)

/* hclge_handle_mpf_ras_error: handle all main PF RAS errors
 * @hdev: pointer to struct hclge_dev
 * @desc: descriptor for describing the command
 * @num:  number of extended command structures
 *
 * This function handles all the main PF RAS errors in the
 * hw registers using the command queue.
 */
static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
				      struct hclge_desc *desc,
				      int num)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct device *dev = &hdev->pdev->dev;
	__le32 *desc_data;
	u32 status;
	int ret;

	/* query all main PF RAS errors */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret) {
		dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret);
		return ret;
	}

	/* log HNS common errors */
	status = le32_to_cpu(desc[0].data[0]);
	if (status) {
		hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
				&hclge_imp_tcm_ecc_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	status = le32_to_cpu(desc[0].data[1]);
	if (status) {
		hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
				&hclge_cmdq_nic_mem_ecc_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	if (le32_to_cpu(desc[0].data[2]) & BIT(0)) {
		dev_warn(dev, "imp_rd_data_poison_err found\n");
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	status = le32_to_cpu(desc[0].data[3]);
	if (status) {
		hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
				&hclge_tqp_int_ecc_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(desc[0].data[4]);
	if (status) {
		hclge_log_error(dev, "MSIX_ECC_INT_STS",
				&hclge_msix_sram_ecc_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	/* log SSU(Storage Switch Unit) errors */
	desc_data = (__le32 *)&desc[2];
	status = le32_to_cpu(*(desc_data + 2));
	if (status) {
		hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
				&hclge_ssu_mem_ecc_err_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
	if (status) {
		dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
			 status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
	if (status) {
		hclge_log_error(dev, "SSU_COMMON_ERR_INT",
				&hclge_ssu_com_err_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	/* log IGU(Ingress Unit) errors */
	desc_data = (__le32 *)&desc[3];
	status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
	if (status)
		hclge_log_error(dev, "IGU_INT_STS",
				&hclge_igu_int[0], status);

	/* log PPP(Programmable Packet Process) errors */
	desc_data = (__le32 *)&desc[4];
	status = le32_to_cpu(*(desc_data + 1));
	if (status)
		hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
				&hclge_ppp_mpf_abnormal_int_st1[0], status);

	status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
	if (status)
		hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
				&hclge_ppp_mpf_abnormal_int_st3[0], status);

	/* log PPU(RCB) errors */
	desc_data = (__le32 *)&desc[5];
	status = le32_to_cpu(*(desc_data + 1));
	if (status) {
		dev_warn(dev,
			 "PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n");
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(*(desc_data + 2));
	if (status) {
		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
				&hclge_ppu_mpf_abnormal_int_st2[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
	if (status) {
		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
				&hclge_ppu_mpf_abnormal_int_st3[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	/* log TM(Traffic Manager) errors */
	desc_data = (__le32 *)&desc[6];
	status = le32_to_cpu(*desc_data);
	if (status) {
		hclge_log_error(dev, "TM_SCH_RINT",
				&hclge_tm_sch_rint[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	/* log QCN(Quantized Congestion Control) errors */
	desc_data = (__le32 *)&desc[7];
	status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
	if (status) {
		hclge_log_error(dev, "QCN_FIFO_RINT",
				&hclge_qcn_fifo_rint[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
	if (status) {
		hclge_log_error(dev, "QCN_ECC_RINT",
				&hclge_qcn_ecc_rint[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	/* log NCSI errors */
	desc_data = (__le32 *)&desc[9];
	status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
	if (status) {
		hclge_log_error(dev, "NCSI_ECC_INT_RPT",
				&hclge_ncsi_err_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	/* clear all main PF RAS errors */
	hclge_cmd_reuse_desc(&desc[0], false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret)
		dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);

	return ret;
}

/* hclge_handle_pf_ras_error: handle all PF RAS errors
 * @hdev: pointer to struct hclge_dev
 * @desc: descriptor for describing the command
 * @num:  number of extended command structures
 *
 * This function handles all the PF RAS errors in the
 * hw registers using the command queue.
 */
static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
				     struct hclge_desc *desc,
				     int num)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct device *dev = &hdev->pdev->dev;
	__le32 *desc_data;
	u32 status;
	int ret;

	/* query all PF RAS errors */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret) {
		dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret);
		return ret;
	}

	/* log SSU(Storage Switch Unit) errors */
	status = le32_to_cpu(desc[0].data[0]);
	if (status) {
		hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
				&hclge_ssu_port_based_err_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	status = le32_to_cpu(desc[0].data[1]);
	if (status) {
		hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
				&hclge_ssu_fifo_overflow_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	status = le32_to_cpu(desc[0].data[2]);
	if (status) {
		hclge_log_error(dev, "SSU_ETS_TCG_INT",
				&hclge_ssu_ets_tcg_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	/* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
	desc_data = (__le32 *)&desc[1];
	status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
	if (status)
		hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
				&hclge_igu_egu_tnl_int[0], status);

	/* log PPU(RCB) errors */
	desc_data = (__le32 *)&desc[3];
	status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
	if (status)
		hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
				&hclge_ppu_pf_abnormal_int[0], status);

	/* clear all PF RAS errors */
	hclge_cmd_reuse_desc(&desc[0], false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret)
		dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);

	return ret;
}

static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;
	u32 mpf_bd_num, pf_bd_num, bd_num;
	struct hclge_desc desc_bd;
	struct hclge_desc *desc;
	int ret;

	/* query the number of registers in the RAS int status */
	hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_RAS_INT_STS_BD_NUM,
				   true);
	ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
	if (ret) {
		dev_err(dev, "fail(%d) to query ras int status bd num\n", ret);
		return ret;
	}
	mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
	pf_bd_num = le32_to_cpu(desc_bd.data[1]);
	bd_num = max_t(u32, mpf_bd_num, pf_bd_num);

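	/* one buffer is reused for both the MPF and the PF query, so size
	 * it for the larger of the two
	 */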
	desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/* handle all main PF RAS errors */
	ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num);
	if (ret) {
		kfree(desc);
		return ret;
	}
	memset(desc, 0, bd_num * sizeof(struct hclge_desc));

	/* handle all PF RAS errors */
	ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num);
	kfree(desc);

	return ret;
}

static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int ret;

	/* read overflow error status */
	ret = hclge_cmd_query_error(hdev, &desc[0],
				    HCLGE_ROCEE_PF_RAS_INT_CMD,
				    0, 0, 0);
	if (ret) {
		dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
		return ret;
	}

	/* log overflow error */
	if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
		const struct hclge_hw_error *err;
		u32 err_sts;

		err = &hclge_rocee_qmm_ovf_err_int[0];
		err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK &
			  le32_to_cpu(desc[0].data[0]);
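		/* unlike hclge_log_error(), the QMM overflow table holds
		 * error-type values rather than bit masks, so match on
		 * equality
		 */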
		while (err->msg) {
			if (err->int_msk == err_sts) {
				dev_warn(dev, "%s [error status=0x%x] found\n",
					 err->msg,
					 le32_to_cpu(desc[0].data[0]));
				break;
			}
			err++;
		}
	}

	if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
		dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
			 le32_to_cpu(desc[0].data[1]));
	}

	if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
		dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
			 le32_to_cpu(desc[0].data[2]));
	}

	return 0;
}

static enum hnae3_reset_type
hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
{
	enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	unsigned int status;
	int ret;

	/* read RAS error interrupt status */
	ret = hclge_cmd_query_error(hdev, &desc[0],
				    HCLGE_QUERY_CLEAR_ROCEE_RAS_INT,
				    0, 0, 0);
	if (ret) {
		dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
		/* reset everything for now */
		return HNAE3_GLOBAL_RESET;
	}

	status = le32_to_cpu(desc[0].data[0]);

	if (status & HCLGE_ROCEE_RERR_INT_MASK) {
		dev_warn(dev, "ROCEE RAS AXI rresp error\n");
		reset_type = HNAE3_FUNC_RESET;
	}

	if (status & HCLGE_ROCEE_BERR_INT_MASK) {
		dev_warn(dev, "ROCEE RAS AXI bresp error\n");
		reset_type = HNAE3_FUNC_RESET;
	}

	if (status & HCLGE_ROCEE_ECC_INT_MASK) {
		dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
		reset_type = HNAE3_GLOBAL_RESET;
	}

	if (status & HCLGE_ROCEE_OVF_INT_MASK) {
		ret = hclge_log_rocee_ovf_error(hdev);
		if (ret) {
			dev_err(dev, "failed(%d) to process ovf error\n", ret);
			/* reset everything for now */
			return HNAE3_GLOBAL_RESET;
		}
		reset_type = HNAE3_FUNC_RESET;
	}

	/* clear error status */
	hclge_cmd_reuse_desc(&desc[0], false);
	ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
	if (ret) {
		dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
		/* reset everything for now */
		return HNAE3_GLOBAL_RESET;
	}

	return reset_type;
}

static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
	if (en) {
		/* enable ROCEE hw error interrupts */
		desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN);
		desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN);

		hclge_log_and_clear_rocee_ras_error(hdev);
	}
	desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK);
	desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret);

	return ret;
}

static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{
	enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
	struct hclge_dev *hdev = ae_dev->priv;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    hdev->pdev->revision < 0x21)
		return;

	reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
	if (reset_type != HNAE3_NONE_RESET)
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
}

static const struct hclge_hw_blk hw_blk[] = {
	{
	  .msk = BIT(0), .name = "IGU_EGU",
	  .config_err_int = hclge_config_igu_egu_hw_err_int,
	},
	{
	  .msk = BIT(1), .name = "PPP",
	  .config_err_int = hclge_config_ppp_hw_err_int,
	},
	{
	  .msk = BIT(2), .name = "SSU",
	  .config_err_int = hclge_config_ssu_hw_err_int,
	},
	{
	  .msk = BIT(3), .name = "PPU",
	  .config_err_int = hclge_config_ppu_hw_err_int,
	},
	{
	  .msk = BIT(4), .name = "TM",
	  .config_err_int = hclge_config_tm_hw_err_int,
	},
	{
	  .msk = BIT(5), .name = "COMMON",
	  .config_err_int = hclge_config_common_hw_err_int,
	},
	{
	  .msk = BIT(8), .name = "MAC",
	  .config_err_int = hclge_config_mac_err_int,
	},
	{ /* sentinel */ }
};

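/* hclge_hw_error_set_state: enable or disable the hw error interrupts
 * @hdev: pointer to struct hclge_dev
 * @state: true to enable the error interrupts of every block above,
 * false to disable them
 *
 * Returns 0 on success, else the error code of the failing configure
 * command.
 */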
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
{
	const struct hclge_hw_blk *module = hw_blk;
	struct device *dev = &hdev->pdev->dev;
	int ret = 0;

	while (module->name) {
		if (module->config_err_int) {
			ret = module->config_err_int(hdev, state);
			if (ret)
				return ret;
		}
		module++;
	}

	ret = hclge_config_rocee_ras_interrupt(hdev, state);
	if (ret)
		dev_err(dev, "fail(%d) to configure ROCEE err int\n", ret);

	return ret;
}

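/* hclge_handle_hw_ras_error: check and handle the pending RAS errors
 * @ae_dev: pointer to struct hnae3_ae_dev
 *
 * Reads the RAS interrupt status, logs and clears any non-fatal HNS or
 * ROCEE error found, and tells the PCI error handler whether a reset
 * is still needed.
 */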
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct device *dev = &hdev->pdev->dev;
	u32 status;

	status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);

	/* handle non-fatal HNS RAS errors */
	if (status & HCLGE_RAS_REG_NFE_MASK) {
		dev_warn(dev,
			 "HNS Non-Fatal RAS error(status=0x%x) identified\n",
			 status);
		hclge_handle_all_ras_errors(hdev);
	} else {
		if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
		    hdev->pdev->revision < 0x21) {
			ae_dev->override_pci_need_reset = 1;
			return PCI_ERS_RESULT_RECOVERED;
		}
	}

	if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
		dev_warn(dev, "ROCEE uncorrected RAS error identified\n");
		hclge_handle_rocee_ras_error(ae_dev);
	}

	if (status & HCLGE_RAS_REG_NFE_MASK ||
	    status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
		ae_dev->override_pci_need_reset = 0;
		return PCI_ERS_RESULT_NEED_RESET;
	}
	ae_dev->override_pci_need_reset = 1;

	return PCI_ERS_RESULT_RECOVERED;
}

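/* hclge_handle_hw_msix_error: query, log and clear the MSIx errors
 * @hdev: pointer to struct hclge_dev
 * @reset_requests: bitmap in which the required reset types are set
 *
 * Returns 0 on success, else the error code of the failing command.
 */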
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
			       unsigned long *reset_requests)
{
	struct device *dev = &hdev->pdev->dev;
	u32 mpf_bd_num, pf_bd_num, bd_num;
	struct hclge_desc desc_bd;
	struct hclge_desc *desc;
	__le32 *desc_data;
	u32 status;
	int ret;

	/* set default handling */
	set_bit(HNAE3_FUNC_RESET, reset_requests);

	/* query the number of bds for the MSIx int status */
	hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
				   true);
	ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
	if (ret) {
		dev_err(dev, "fail(%d) to query msix int status bd num\n",
			ret);
		/* reset everything for now */
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
		return ret;
	}

	mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
	pf_bd_num = le32_to_cpu(desc_bd.data[1]);
	bd_num = max_t(u32, mpf_bd_num, pf_bd_num);

	desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/* query all main PF MSIx errors */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
	if (ret) {
		dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
			ret);
		/* reset everything for now */
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
		goto msi_error;
	}

	/* log MAC errors */
	desc_data = (__le32 *)&desc[1];
	status = le32_to_cpu(*desc_data);
	if (status) {
		hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
				&hclge_mac_afifo_tnl_int[0], status);
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
	}

	/* log PPU(RCB) MPF errors */
	desc_data = (__le32 *)&desc[5];
	status = le32_to_cpu(*(desc_data + 2)) &
			HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
	if (status) {
		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
				&hclge_ppu_mpf_abnormal_int_st2[0], status);
		set_bit(HNAE3_CORE_RESET, reset_requests);
	}

	/* clear all main PF MSIx errors */
	hclge_cmd_reuse_desc(&desc[0], false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
	if (ret) {
		dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
			ret);
		/* reset everything for now */
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
		goto msi_error;
	}

	/* query all PF MSIx errors */
	memset(desc, 0, bd_num * sizeof(struct hclge_desc));
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
	if (ret) {
		dev_err(dev, "query all pf msix int cmd failed (%d)\n",
			ret);
		/* reset everything for now */
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
		goto msi_error;
	}

	/* log SSU PF errors */
	status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
	if (status) {
		hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
				&hclge_ssu_port_based_pf_int[0], status);
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
	}

	/* read and log PPP PF errors */
	desc_data = (__le32 *)&desc[2];
	status = le32_to_cpu(*desc_data);
	if (status)
		hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
				&hclge_ppp_pf_abnormal_int[0], status);

	/* log PPU(RCB) PF errors */
	desc_data = (__le32 *)&desc[3];
	status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
	if (status)
		hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
				&hclge_ppu_pf_abnormal_int[0], status);

	/* clear all PF MSIx errors */
	hclge_cmd_reuse_desc(&desc[0], false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
	if (ret) {
		dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
			ret);
		/* reset everything for now */
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
	}

msi_error:
	kfree(desc);
	return ret;
}