1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2016-2017 Hisilicon Limited. */
3 
4 #include "hclge_err.h"
5 
/* imp_* ITCM/DTCM ECC multi-bit error decode table: maps interrupt status
 * bits to log messages and reset levels; the empty entry (msg == NULL) is
 * the table terminator.
 */
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ /* sentinel */ }
};
27 
/* CMDQ (NIC and RoCEE) memory ECC multi-bit error decode table; NULL-msg
 * sentinel terminated.
 */
static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ /* sentinel */ }
};
63 
/* TQP interrupt / queue-scan ECC multi-bit error decode table; NULL-msg
 * sentinel terminated.
 */
static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
	{ .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ /* sentinel */ }
};
79 
/* MSI-X SRAM ECC multi-bit error decode table; NULL-msg sentinel terminated. */
static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ /* sentinel */ }
};
87 
/* IGU RX buffer ECC multi-bit error decode table; NULL-msg sentinel
 * terminated.
 */
static const struct hclge_hw_error hclge_igu_int[] = {
	{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
	  .reset_level = HNAE3_CORE_RESET },
	{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
	  .reset_level = HNAE3_CORE_RESET },
	{ /* sentinel */ }
};
95 
96 static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
97 	{ .int_msk = BIT(0), .msg = "rx_buf_overflow",
98 	  .reset_level = HNAE3_CORE_RESET },
99 	{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
100 	  .reset_level = HNAE3_CORE_RESET },
101 	{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
102 	  .reset_level = HNAE3_CORE_RESET },
103 	{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
104 	  .reset_level = HNAE3_CORE_RESET },
105 	{ .int_msk = BIT(4), .msg = "tx_buf_underrun",
106 	  .reset_level = HNAE3_CORE_RESET },
107 	{ .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
108 	  .reset_level = HNAE3_CORE_RESET },
109 	{ /* sentinel */ }
110 };
111 
/* NCSI ECC multi-bit error decode table; NULL-msg sentinel terminated. */
static const struct hclge_hw_error hclge_ncsi_err_int[] = {
	{ .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ /* sentinel */ }
};
117 
118 static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
119 	{ .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err",
120 	  .reset_level = HNAE3_GLOBAL_RESET },
121 	{ .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err",
122 	  .reset_level = HNAE3_GLOBAL_RESET },
123 	{ .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err",
124 	  .reset_level = HNAE3_GLOBAL_RESET },
125 	{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err",
126 	  .reset_level = HNAE3_GLOBAL_RESET },
127 	{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err",
128 	  .reset_level = HNAE3_GLOBAL_RESET },
129 	{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err",
130 	  .reset_level = HNAE3_GLOBAL_RESET },
131 	{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err",
132 	  .reset_level = HNAE3_GLOBAL_RESET },
133 	{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err",
134 	  .reset_level = HNAE3_GLOBAL_RESET },
135 	{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err",
136 	  .reset_level = HNAE3_GLOBAL_RESET },
137 	{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err",
138 	  .reset_level = HNAE3_GLOBAL_RESET },
139 	{ .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err",
140 	  .reset_level = HNAE3_GLOBAL_RESET },
141 	{ .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err",
142 	  .reset_level = HNAE3_GLOBAL_RESET },
143 	{ .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err",
144 	  .reset_level = HNAE3_GLOBAL_RESET },
145 	{ .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err",
146 	  .reset_level = HNAE3_GLOBAL_RESET },
147 	{ .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err",
148 	  .reset_level = HNAE3_GLOBAL_RESET },
149 	{ .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err",
150 	  .reset_level = HNAE3_GLOBAL_RESET },
151 	{ .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err",
152 	  .reset_level = HNAE3_GLOBAL_RESET },
153 	{ .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err",
154 	  .reset_level = HNAE3_GLOBAL_RESET },
155 	{ .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err",
156 	  .reset_level = HNAE3_GLOBAL_RESET },
157 	{ .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err",
158 	  .reset_level = HNAE3_GLOBAL_RESET },
159 	{ .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err",
160 	  .reset_level = HNAE3_GLOBAL_RESET },
161 	{ .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err",
162 	  .reset_level = HNAE3_GLOBAL_RESET },
163 	{ .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err",
164 	  .reset_level = HNAE3_GLOBAL_RESET },
165 	{ .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err",
166 	  .reset_level = HNAE3_GLOBAL_RESET },
167 	{ .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err",
168 	  .reset_level = HNAE3_GLOBAL_RESET },
169 	{ .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err",
170 	  .reset_level = HNAE3_GLOBAL_RESET },
171 	{ .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err",
172 	  .reset_level = HNAE3_GLOBAL_RESET },
173 	{ .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err",
174 	  .reset_level = HNAE3_GLOBAL_RESET },
175 	{ .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err",
176 	  .reset_level = HNAE3_GLOBAL_RESET },
177 	{ .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err",
178 	  .reset_level = HNAE3_GLOBAL_RESET },
179 	{ .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
180 	  .reset_level = HNAE3_GLOBAL_RESET },
181 	{ /* sentinel */ }
182 };
183 
/* PPP per-PF abnormal interrupt decode table; NULL-msg sentinel terminated. */
static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
	{ .int_msk = BIT(0), .msg = "tx_vlan_tag_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ /* sentinel */ }
};
191 
/* PPP MPF abnormal interrupt status 3 decode table (FIFO/FD/GRO memory ECC
 * multi-bit errors); NULL-msg sentinel terminated.
 */
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
	{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ /* sentinel */ }
};
207 
/* TM scheduler error decode table (ECC plus shaper/offset FIFO read/write
 * errors); NULL-msg sentinel terminated.
 */
static const struct hclge_hw_error hclge_tm_sch_rint[] = {
	{ .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ /* sentinel */ }
};
273 
/* QCN FIFO read/write error decode table; NULL-msg sentinel terminated. */
static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
	{ .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ /* sentinel */ }
};
313 
314 static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
315 	{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err",
316 	  .reset_level = HNAE3_GLOBAL_RESET },
317 	{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err",
318 	  .reset_level = HNAE3_GLOBAL_RESET },
319 	{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err",
320 	  .reset_level = HNAE3_GLOBAL_RESET },
321 	{ .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err",
322 	  .reset_level = HNAE3_GLOBAL_RESET },
323 	{ .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err",
324 	  .reset_level = HNAE3_GLOBAL_RESET },
325 	{ .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err",
326 	  .reset_level = HNAE3_GLOBAL_RESET },
327 	{ .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
328 	  .reset_level = HNAE3_GLOBAL_RESET },
329 	{ .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
330 	  .reset_level = HNAE3_GLOBAL_RESET },
331 	{ .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
332 	  .reset_level = HNAE3_GLOBAL_RESET },
333 	{ .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
334 	  .reset_level = HNAE3_GLOBAL_RESET },
335 	{ .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
336 	  .reset_level = HNAE3_GLOBAL_RESET },
337 	{ /* sentinel */ }
338 };
339 
/* MAC async-FIFO tunnel error decode table. 1-bit ECC errors are log-only
 * (HNAE3_NONE_RESET); multi-bit ECC and overflow/underrun errors request a
 * global reset. NULL-msg sentinel terminated.
 */
static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
	{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ /* sentinel */ }
};
371 
/* PPU MPF abnormal interrupt status 2 decode table (RPU/RCB/TPU ECC and bus
 * errors); bits below 13 are not decoded here. NULL-msg sentinel terminated.
 */
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
	{ .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(26), .msg = "rd_bus_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(27), .msg = "wr_bus_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(28), .msg = "reg_search_miss",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(29), .msg = "rx_q_search_miss",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(30), .msg = "ooo_ecc_err_detect",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ /* sentinel */ }
};
413 
/* PPU MPF abnormal interrupt status 3 decode table (GRO/stash/AXI ECC
 * multi-bit errors); NULL-msg sentinel terminated.
 */
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
	{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
	  .reset_level = HNAE3_CORE_RESET },
	{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
	  .reset_level = HNAE3_CORE_RESET },
	{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
	  .reset_level = HNAE3_CORE_RESET },
	{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
	  .reset_level = HNAE3_CORE_RESET },
	{ /* sentinel */ }
};
425 
/* PPU per-PF abnormal interrupt decode table; descriptor poison errors
 * request a function reset, the rest are log-only. NULL-msg sentinel
 * terminated.
 */
static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
	{ .int_msk = BIT(0), .msg = "over_8bd_no_fe",
	  .reset_level = HNAE3_FUNC_RESET },
	{ .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
	  .reset_level = HNAE3_FUNC_RESET },
	{ .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
	  .reset_level = HNAE3_FUNC_RESET },
	{ .int_msk = BIT(5), .msg = "buf_wait_timeout",
	  .reset_level = HNAE3_NONE_RESET },
	{ /* sentinel */ }
};
441 
/* SSU common error decode table; NULL-msg sentinel terminated. */
static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
	{ .int_msk = BIT(0), .msg = "buf_sum_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(1), .msg = "ppp_mb_num_err",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(2), .msg = "ppp_mbid_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "ppp_rlt_mac_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "ppp_rlt_host_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "cks_edit_position_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "cks_edit_condition_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "vlan_edit_condition_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "vlan_num_ot_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "vlan_num_in_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ /* sentinel */ }
};
465 
/* Expands to one decode-table entry for SSU memory bank @x: status bit x
 * maps to "ssu_mem<x>_ecc_mbit_err" (the bank number is pasted into the
 * message via the # stringizing operator) and requests a global reset.
 */
#define HCLGE_SSU_MEM_ECC_ERR(x) \
	{ .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \
	  .reset_level = HNAE3_GLOBAL_RESET }

/* SSU memory ECC multi-bit error decode table covering banks 0-31, one
 * status bit per bank; NULL-msg sentinel terminated.
 */
static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
	HCLGE_SSU_MEM_ECC_ERR(0),
	HCLGE_SSU_MEM_ECC_ERR(1),
	HCLGE_SSU_MEM_ECC_ERR(2),
	HCLGE_SSU_MEM_ECC_ERR(3),
	HCLGE_SSU_MEM_ECC_ERR(4),
	HCLGE_SSU_MEM_ECC_ERR(5),
	HCLGE_SSU_MEM_ECC_ERR(6),
	HCLGE_SSU_MEM_ECC_ERR(7),
	HCLGE_SSU_MEM_ECC_ERR(8),
	HCLGE_SSU_MEM_ECC_ERR(9),
	HCLGE_SSU_MEM_ECC_ERR(10),
	HCLGE_SSU_MEM_ECC_ERR(11),
	HCLGE_SSU_MEM_ECC_ERR(12),
	HCLGE_SSU_MEM_ECC_ERR(13),
	HCLGE_SSU_MEM_ECC_ERR(14),
	HCLGE_SSU_MEM_ECC_ERR(15),
	HCLGE_SSU_MEM_ECC_ERR(16),
	HCLGE_SSU_MEM_ECC_ERR(17),
	HCLGE_SSU_MEM_ECC_ERR(18),
	HCLGE_SSU_MEM_ECC_ERR(19),
	HCLGE_SSU_MEM_ECC_ERR(20),
	HCLGE_SSU_MEM_ECC_ERR(21),
	HCLGE_SSU_MEM_ECC_ERR(22),
	HCLGE_SSU_MEM_ECC_ERR(23),
	HCLGE_SSU_MEM_ECC_ERR(24),
	HCLGE_SSU_MEM_ECC_ERR(25),
	HCLGE_SSU_MEM_ECC_ERR(26),
	HCLGE_SSU_MEM_ECC_ERR(27),
	HCLGE_SSU_MEM_ECC_ERR(28),
	HCLGE_SSU_MEM_ECC_ERR(29),
	HCLGE_SSU_MEM_ECC_ERR(30),
	HCLGE_SSU_MEM_ECC_ERR(31),
	{ /* sentinel */ }
};
505 
/* SSU port-based error decode table; bits 9-10 are not decoded here.
 * NULL-msg sentinel terminated.
 */
static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "roc_eof_mis_match_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "igu_eof_mis_match_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "roc_sof_mis_match_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "igu_sof_mis_match_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "ets_rd_int_rx_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "ets_wr_int_rx_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "ets_rd_int_tx_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "ets_wr_int_tx_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ /* sentinel */ }
};
535 
/* SSU FIFO overflow / bitmap-empty error decode table; NULL-msg sentinel
 * terminated.
 */
static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
	{ .int_msk = BIT(0), .msg = "ig_mac_inf_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "ig_host_inf_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "ig_roc_buf_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "ig_host_data_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "ig_host_key_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "tx_qcn_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "rx_qcn_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "qm_eof_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "mb_rlt_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "host_cmd_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "mac_cmd_fifo_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "dup_bitmap_empty_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ /* sentinel */ }
};
587 
/* SSU ETS TCG error decode table; NULL-msg sentinel terminated. */
static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
	{ .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ /* sentinel */ }
};
599 
/* SSU port-based per-PF error decode table; NULL-msg sentinel terminated. */
static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "low_water_line_err_port",
	  .reset_level = HNAE3_NONE_RESET },
	{ .int_msk = BIT(10), .msg = "hi_water_line_err_port",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ /* sentinel */ }
};
609 
/* RoCEE QMM overflow error table. NOTE(review): unlike the other tables,
 * .int_msk entries here are numeric codes (0x0..0x1F), not BIT() masks, and
 * .reset_level is left unset — presumably matched by value by a dedicated
 * RoCEE handler rather than hclge_log_error(); confirm against the caller.
 * NULL-msg sentinel terminated.
 */
static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
	{ .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" },
	{ .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" },
	{ .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" },
	{ .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" },
	{ .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" },
	{ .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" },
	{ .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" },
	{ .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" },
	{ .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" },
	{ .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" },
	{ .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" },
	{ .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" },
	{ .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" },
	{ .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" },
	{ .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" },
	{ .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" },
	{ .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" },
	{ .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" },
	{ .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" },
	{ .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" },
	{ /* sentinel */ }
};
633 
634 static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg,
635 					     const struct hclge_hw_error *err,
636 					     u32 err_sts)
637 {
638 	enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET;
639 	bool need_reset = false;
640 
641 	while (err->msg) {
642 		if (err->int_msk & err_sts) {
643 			dev_warn(dev, "%s %s found [error status=0x%x]\n",
644 				 reg, err->msg, err_sts);
645 			if (err->reset_level != HNAE3_NONE_RESET &&
646 			    err->reset_level >= reset_level) {
647 				reset_level = err->reset_level;
648 				need_reset = true;
649 			}
650 		}
651 		err++;
652 	}
653 	if (need_reset)
654 		return reset_level;
655 	else
656 		return HNAE3_NONE_RESET;
657 }
658 
659 /* hclge_cmd_query_error: read the error information
660  * @hdev: pointer to struct hclge_dev
661  * @desc: descriptor for describing the command
662  * @cmd:  command opcode
663  * @flag: flag for extended command structure
664  * @w_num: offset for setting the read interrupt type.
665  * @int_type: select which type of the interrupt for which the error
666  * info will be read(RAS-CE/RAS-NFE/RAS-FE etc).
667  *
668  * This function query the error info from hw register/s using command
669  */
670 static int hclge_cmd_query_error(struct hclge_dev *hdev,
671 				 struct hclge_desc *desc, u32 cmd,
672 				 u16 flag, u8 w_num,
673 				 enum hclge_err_int_type int_type)
674 {
675 	struct device *dev = &hdev->pdev->dev;
676 	int num = 1;
677 	int ret;
678 
679 	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
680 	if (flag) {
681 		desc[0].flag |= cpu_to_le16(flag);
682 		hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
683 		num = 2;
684 	}
685 	if (w_num)
686 		desc[0].data[w_num] = cpu_to_le32(int_type);
687 
688 	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
689 	if (ret)
690 		dev_err(dev, "query error cmd failed (%d)\n", ret);
691 
692 	return ret;
693 }
694 
695 static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev)
696 {
697 	struct hclge_desc desc;
698 
699 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false);
700 	desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR);
701 
702 	return hclge_cmd_send(&hdev->hw, &desc, 1);
703 }
704 
/* Enable/disable the "common" hw error interrupts (IMP TCM/ITCM4 ECC,
 * cmdq NIC/ROCEE ECC, IMP read poison, TQP and MSI-X SRAM ECC).
 *
 * desc[0] carries the enable bits and is left all-zero when disabling;
 * desc[1] always carries the *_MASK words — presumably write-enable
 * masks selecting which enable bits firmware may update (TODO confirm
 * against the HW spec).
 */
static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int ret;

	/* configure common error interrupts: two chained descriptors */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);

	if (en) {
		desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
		desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
					HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
		desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
		desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN |
					      HCLGE_MSIX_SRAM_ECC_ERR_INT_EN);
		desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
	}

	/* masks are written unconditionally so disabling takes effect too */
	desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
	desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
				HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
	desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
	desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK |
				      HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK);
	desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure common err interrupts\n", ret);

	return ret;
}
741 
742 static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en)
743 {
744 	struct device *dev = &hdev->pdev->dev;
745 	struct hclge_desc desc;
746 	int ret;
747 
748 	if (hdev->pdev->revision < 0x21)
749 		return 0;
750 
751 	/* configure NCSI error interrupts */
752 	hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
753 	if (en)
754 		desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);
755 
756 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
757 	if (ret)
758 		dev_err(dev,
759 			"fail(%d) to configure  NCSI error interrupts\n", ret);
760 
761 	return ret;
762 }
763 
764 static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
765 {
766 	struct device *dev = &hdev->pdev->dev;
767 	struct hclge_desc desc;
768 	int ret;
769 
770 	/* configure IGU,EGU error interrupts */
771 	hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
772 	if (en)
773 		desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
774 
775 	desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
776 
777 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
778 	if (ret) {
779 		dev_err(dev,
780 			"fail(%d) to configure IGU common interrupts\n", ret);
781 		return ret;
782 	}
783 
784 	hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
785 	if (en)
786 		desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);
787 
788 	desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);
789 
790 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
791 	if (ret) {
792 		dev_err(dev,
793 			"fail(%d) to configure IGU-EGU TNL interrupts\n", ret);
794 		return ret;
795 	}
796 
797 	ret = hclge_config_ncsi_hw_err_int(hdev, en);
798 
799 	return ret;
800 }
801 
/* Enable/disable one group of PPP (Programmable Packet Process) error
 * interrupts, selected by @cmd (CMD0 or CMD1 group). Two chained
 * descriptors are sent: desc[0] holds the enable bits (zero when
 * disabling), desc[1] the corresponding masks.
 *
 * NOTE(review): unlike hclge_config_ppu_error_interrupts(), an
 * unrecognized @cmd is not rejected here — the command is sent with no
 * data words set. Callers only pass CMD0/CMD1, so this is currently
 * harmless; confirm before adding new callers.
 */
static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
					    bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int ret;

	/* configure PPP error interrupts */
	hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, false);

	if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
		if (en) {
			desc[0].data[0] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
			desc[0].data[1] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
			desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN);
		}

		desc[1].data[0] =
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
		desc[1].data[1] =
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
		/* the PF error mask only exists on revision >= 0x21 */
		if (hdev->pdev->revision >= 0x21)
			desc[1].data[2] =
				cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK);
	} else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
		if (en) {
			desc[0].data[0] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
			desc[0].data[1] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
		}

		desc[1].data[0] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
		desc[1].data[1] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
	if (ret)
		dev_err(dev, "fail(%d) to configure PPP error intr\n", ret);

	return ret;
}
850 
851 static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en)
852 {
853 	int ret;
854 
855 	ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
856 					       en);
857 	if (ret)
858 		return ret;
859 
860 	ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
861 					       en);
862 
863 	return ret;
864 }
865 
/* Enable/disable TM (Traffic Manager) hw error interrupts: the TM SCH
 * ECC interrupt via a plain write, then the TM QCN memory interrupt via
 * a read-modify-write of its config register.
 */
static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	/* configure TM SCH hw errors */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
	if (en)
		desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret);
		return ret;
	}

	/* configure TM QCN hw errors: read the current config first ... */
	ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
				    0, 0, 0);
	if (ret) {
		dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
		return ret;
	}

	/* ... then turn the queried descriptor into a write, preserving
	 * the other fields and setting only the enable word when asked.
	 */
	hclge_cmd_reuse_desc(&desc, false);
	if (en)
		desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure TM QCN mem errors\n", ret);

	return ret;
}
902 
/* Enable/disable MAC common error interrupts; data[1] carries the
 * write mask and is set regardless of @en so disabling takes effect.
 */
static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	/* configure MAC common error interrupts */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false);
	if (en)
		desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN);

	desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure MAC COMMON error intr\n", ret);

	return ret;
}
923 
924 int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en)
925 {
926 	struct hclge_desc desc;
927 
928 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false);
929 	if (en)
930 		desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN);
931 	else
932 		desc.data[0] = 0;
933 
934 	desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK);
935 
936 	return hclge_cmd_send(&hdev->hw, &desc, 1);
937 }
938 
939 static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
940 					     bool en)
941 {
942 	struct device *dev = &hdev->pdev->dev;
943 	struct hclge_desc desc[2];
944 	int num = 1;
945 	int ret;
946 
947 	/* configure PPU error interrupts */
948 	if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
949 		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
950 		desc[0].flag |= HCLGE_CMD_FLAG_NEXT;
951 		hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
952 		if (en) {
953 			desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN;
954 			desc[0].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN;
955 			desc[1].data[3] = HCLGE_PPU_MPF_ABNORMAL_INT3_EN;
956 			desc[1].data[4] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN;
957 		}
958 
959 		desc[1].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK;
960 		desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
961 		desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
962 		desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
963 		num = 2;
964 	} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
965 		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
966 		if (en)
967 			desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2;
968 
969 		desc[0].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK;
970 	} else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) {
971 		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
972 		if (en)
973 			desc[0].data[0] = HCLGE_PPU_PF_ABNORMAL_INT_EN;
974 
975 		desc[0].data[2] = HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK;
976 	} else {
977 		dev_err(dev, "Invalid cmd to configure PPU error interrupts\n");
978 		return -EINVAL;
979 	}
980 
981 	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
982 
983 	return ret;
984 }
985 
986 static int hclge_config_ppu_hw_err_int(struct hclge_dev *hdev, bool en)
987 {
988 	struct device *dev = &hdev->pdev->dev;
989 	int ret;
990 
991 	ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD,
992 						en);
993 	if (ret) {
994 		dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n",
995 			ret);
996 		return ret;
997 	}
998 
999 	ret = hclge_config_ppu_error_interrupts(hdev,
1000 						HCLGE_PPU_MPF_OTHER_INT_CMD,
1001 						en);
1002 	if (ret) {
1003 		dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret);
1004 		return ret;
1005 	}
1006 
1007 	ret = hclge_config_ppu_error_interrupts(hdev,
1008 						HCLGE_PPU_PF_OTHER_INT_CMD, en);
1009 	if (ret)
1010 		dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n",
1011 			ret);
1012 	return ret;
1013 }
1014 
/* Enable/disable SSU (Storage Switch Unit) error interrupts: first the
 * ECC group, then the common group, each via a two-descriptor chain
 * where desc[0] carries enable bits and desc[1] carries masks.
 */
static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int ret;

	/* configure SSU ecc error interrupts */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
	if (en) {
		desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
		desc[0].data[1] =
			cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN);
		desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN);
	}

	desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK);
	desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK);
	desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
	if (ret) {
		dev_err(dev,
			"fail(%d) to configure SSU ECC error interrupt\n", ret);
		return ret;
	}

	/* configure SSU common error interrupts */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);

	if (en) {
		/* bit 5 of the common enable word is only valid on
		 * revision >= 0x21; clear it on older hardware.
		 */
		if (hdev->pdev->revision >= 0x21)
			desc[0].data[0] =
				cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
		else
			desc[0].data[0] =
				cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5));
		desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN);
		desc[0].data[2] =
			cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN);
	}

	desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK |
				HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK);
	desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure SSU COMMON error intr\n", ret);

	return ret;
}
1071 
/* Record a default reset request of the given level, if the ae_dev ops
 * provide the callback. NOTE: relies on a variable named 'ae_dev' being
 * in scope at the expansion site. do-while(0) makes it a single
 * statement so it composes safely with if/else.
 */
#define HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type) \
	do { \
		if (ae_dev->ops->set_default_reset_request) \
			ae_dev->ops->set_default_reset_request(ae_dev, \
							       reset_type); \
	} while (0)
1078 
1079 /* hclge_handle_mpf_ras_error: handle all main PF RAS errors
1080  * @hdev: pointer to struct hclge_dev
1081  * @desc: descriptor for describing the command
1082  * @num:  number of extended command structures
1083  *
1084  * This function handles all the main PF RAS errors in the
1085  * hw register/s using command.
1086  */
static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
				      struct hclge_desc *desc,
				      int num)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	enum hnae3_reset_type reset_level;
	struct device *dev = &hdev->pdev->dev;
	__le32 *desc_data;
	u32 status;
	int ret;

	/* query all main PF RAS errors */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret) {
		dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret);
		return ret;
	}

	/* The per-module status words below are decoded at fixed
	 * descriptor/word offsets (desc[0], desc[2]..desc[9]) —
	 * presumably dictated by the firmware response layout; confirm
	 * any offset change against the HW spec before touching it.
	 */

	/* log HNS common errors */
	status = le32_to_cpu(desc[0].data[0]);
	if (status) {
		reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
					      &hclge_imp_tcm_ecc_int[0],
					      status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	status = le32_to_cpu(desc[0].data[1]);
	if (status) {
		reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
					      &hclge_cmdq_nic_mem_ecc_int[0],
					      status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
		dev_warn(dev, "imp_rd_data_poison_err found\n");
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET);
	}

	status = le32_to_cpu(desc[0].data[3]);
	if (status) {
		reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
					      &hclge_tqp_int_ecc_int[0],
					      status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	status = le32_to_cpu(desc[0].data[4]);
	if (status) {
		reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS",
					      &hclge_msix_sram_ecc_int[0],
					      status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	/* log SSU(Storage Switch Unit) errors */
	desc_data = (__le32 *)&desc[2];
	status = le32_to_cpu(*(desc_data + 2));
	if (status) {
		reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
					      &hclge_ssu_mem_ecc_err_int[0],
					      status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
	if (status) {
		dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
			 status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
	if (status) {
		reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT",
					      &hclge_ssu_com_err_int[0],
					      status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	/* log IGU(Ingress Unit) errors */
	desc_data = (__le32 *)&desc[3];
	status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
	if (status) {
		reset_level = hclge_log_error(dev, "IGU_INT_STS",
					      &hclge_igu_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	/* log PPP(Programmable Packet Process) errors */
	desc_data = (__le32 *)&desc[4];
	status = le32_to_cpu(*(desc_data + 1));
	if (status) {
		reset_level =
			hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
					&hclge_ppp_mpf_abnormal_int_st1[0],
					status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
	if (status) {
		reset_level =
			hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
					&hclge_ppp_mpf_abnormal_int_st3[0],
					status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	/* log PPU(RCB) errors */
	desc_data = (__le32 *)&desc[5];
	status = le32_to_cpu(*(desc_data + 1));
	if (status) {
		/* ST1 has a single known cause, so it is logged directly
		 * rather than through an error table.
		 */
		dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
			 "rpu_rx_pkt_ecc_mbit_err");
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	status = le32_to_cpu(*(desc_data + 2));
	if (status) {
		reset_level =
			hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
					&hclge_ppu_mpf_abnormal_int_st2[0],
					status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
	if (status) {
		reset_level =
			hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
					&hclge_ppu_mpf_abnormal_int_st3[0],
					status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	/* log TM(Traffic Manager) errors */
	desc_data = (__le32 *)&desc[6];
	status = le32_to_cpu(*desc_data);
	if (status) {
		reset_level = hclge_log_error(dev, "TM_SCH_RINT",
					      &hclge_tm_sch_rint[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	/* log QCN(Quantized Congestion Control) errors */
	desc_data = (__le32 *)&desc[7];
	status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
	if (status) {
		reset_level = hclge_log_error(dev, "QCN_FIFO_RINT",
					      &hclge_qcn_fifo_rint[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
	if (status) {
		reset_level = hclge_log_error(dev, "QCN_ECC_RINT",
					      &hclge_qcn_ecc_rint[0],
					      status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	/* log NCSI errors */
	desc_data = (__le32 *)&desc[9];
	status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
	if (status) {
		reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT",
					      &hclge_ncsi_err_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	/* clear all main PF RAS errors: resend the same chained command
	 * as a write (hclge_cmd_reuse_desc with read=false).
	 */
	hclge_cmd_reuse_desc(&desc[0], false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret)
		dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);

	return ret;
}
1273 
1274 /* hclge_handle_pf_ras_error: handle all PF RAS errors
1275  * @hdev: pointer to struct hclge_dev
1276  * @desc: descriptor for describing the command
1277  * @num:  number of extended command structures
1278  *
1279  * This function handles all the PF RAS errors in the
1280  * hw register/s using command.
1281  */
static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
				     struct hclge_desc *desc,
				     int num)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct device *dev = &hdev->pdev->dev;
	enum hnae3_reset_type reset_level;
	__le32 *desc_data;
	u32 status;
	int ret;

	/* query all PF RAS errors */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret) {
		dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret);
		return ret;
	}

	/* Status words are decoded at fixed descriptor offsets — presumably
	 * matching the firmware response layout; confirm against the spec.
	 */

	/* log SSU(Storage Switch Unit) errors */
	status = le32_to_cpu(desc[0].data[0]);
	if (status) {
		reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
					      &hclge_ssu_port_based_err_int[0],
					      status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	status = le32_to_cpu(desc[0].data[1]);
	if (status) {
		reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
					      &hclge_ssu_fifo_overflow_int[0],
					      status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	status = le32_to_cpu(desc[0].data[2]);
	if (status) {
		reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT",
					      &hclge_ssu_ets_tcg_int[0],
					      status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	/* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
	desc_data = (__le32 *)&desc[1];
	status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
	if (status) {
		reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
					      &hclge_igu_egu_tnl_int[0],
					      status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	/* log PPU(RCB) errors */
	desc_data = (__le32 *)&desc[3];
	status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
	if (status) {
		reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
					      &hclge_ppu_pf_abnormal_int[0],
					      status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
	}

	/* clear all PF RAS errors by resending the chain as a write */
	hclge_cmd_reuse_desc(&desc[0], false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret)
		dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);

	return ret;
}
1359 
1360 static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
1361 {
1362 	struct device *dev = &hdev->pdev->dev;
1363 	u32 mpf_bd_num, pf_bd_num, bd_num;
1364 	struct hclge_desc desc_bd;
1365 	struct hclge_desc *desc;
1366 	int ret;
1367 
1368 	/* query the number of registers in the RAS int status */
1369 	hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_RAS_INT_STS_BD_NUM,
1370 				   true);
1371 	ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
1372 	if (ret) {
1373 		dev_err(dev, "fail(%d) to query ras int status bd num\n", ret);
1374 		return ret;
1375 	}
1376 	mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
1377 	pf_bd_num = le32_to_cpu(desc_bd.data[1]);
1378 	bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
1379 
1380 	desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1381 	if (!desc)
1382 		return -ENOMEM;
1383 
1384 	/* handle all main PF RAS errors */
1385 	ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num);
1386 	if (ret) {
1387 		kfree(desc);
1388 		return ret;
1389 	}
1390 	memset(desc, 0, bd_num * sizeof(struct hclge_desc));
1391 
1392 	/* handle all PF RAS errors */
1393 	ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num);
1394 	kfree(desc);
1395 
1396 	return ret;
1397 }
1398 
/* Read and log the ROCEE overflow error status. Returns 0 on success
 * (including when no overflow is reported) or a negative errno if the
 * query command fails.
 */
static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int ret;

	/* read overflow error status */
	ret = hclge_cmd_query_error(hdev, &desc[0],
				    HCLGE_ROCEE_PF_RAS_INT_CMD,
				    0, 0, 0);
	if (ret) {
		dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
		return ret;
	}

	/* log overflow error */
	if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
		const struct hclge_hw_error *err;
		u32 err_sts;

		/* unlike hclge_log_error(), this table is matched by
		 * exact value (==), not bitwise AND: int_msk here holds
		 * an error *type* code, not a bit mask.
		 */
		err = &hclge_rocee_qmm_ovf_err_int[0];
		err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK &
			  le32_to_cpu(desc[0].data[0]);
		while (err->msg) {
			if (err->int_msk == err_sts) {
				dev_warn(dev, "%s [error status=0x%x] found\n",
					 err->msg,
					 le32_to_cpu(desc[0].data[0]));
				break;
			}
			err++;
		}
	}

	if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
		dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
			 le32_to_cpu(desc[0].data[1]));
	}

	if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
		dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
			 le32_to_cpu(desc[0].data[2]));
	}

	return 0;
}
1445 
/* Query, log and clear the ROCEE RAS interrupt sources.
 *
 * Returns the reset level required to recover: NONE when no error was
 * found, FUNC for AXI response/overflow errors, GLOBAL for 2-bit ECC
 * errors or whenever a command to query/clear the status itself fails
 * (fallback "reset everything" policy).
 */
static enum hnae3_reset_type
hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
{
	enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	unsigned int status;
	int ret;

	/* read RAS error interrupt status */
	ret = hclge_cmd_query_error(hdev, &desc[0],
				    HCLGE_QUERY_CLEAR_ROCEE_RAS_INT,
				    0, 0, 0);
	if (ret) {
		dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
		/* reset everything for now */
		return HNAE3_GLOBAL_RESET;
	}

	status = le32_to_cpu(desc[0].data[0]);

	/* later checks may escalate reset_type but never downgrade it:
	 * ECC (GLOBAL) is checked after the FUNC-level AXI errors.
	 */
	if (status & HCLGE_ROCEE_RERR_INT_MASK) {
		dev_warn(dev, "ROCEE RAS AXI rresp error\n");
		reset_type = HNAE3_FUNC_RESET;
	}

	if (status & HCLGE_ROCEE_BERR_INT_MASK) {
		dev_warn(dev, "ROCEE RAS AXI bresp error\n");
		reset_type = HNAE3_FUNC_RESET;
	}

	if (status & HCLGE_ROCEE_ECC_INT_MASK) {
		dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
		reset_type = HNAE3_GLOBAL_RESET;
	}

	if (status & HCLGE_ROCEE_OVF_INT_MASK) {
		ret = hclge_log_rocee_ovf_error(hdev);
		if (ret) {
			dev_err(dev, "failed(%d) to process ovf error\n", ret);
			/* reset everything for now */
			return HNAE3_GLOBAL_RESET;
		}
		/* NOTE: this overwrites a GLOBAL_RESET chosen by the ECC
		 * check above — presumably intentional; confirm.
		 */
		reset_type = HNAE3_FUNC_RESET;
	}

	/* clear error status by resending the query descriptor as write */
	hclge_cmd_reuse_desc(&desc[0], false);
	ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
	if (ret) {
		dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
		/* reset everything for now */
		return HNAE3_GLOBAL_RESET;
	}

	return reset_type;
}
1503 
/* Enable/disable ROCEE RAS interrupts. No-op (success) when the device
 * predates revision 0x21 or does not support RoCE. When enabling, any
 * stale error status is logged and cleared first so old events do not
 * fire immediately.
 */
static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
	if (en) {
		/* enable ROCEE hw error interrupts */
		desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN);
		desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN);

		hclge_log_and_clear_rocee_ras_error(hdev);
	}
	desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK);
	desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret);

	return ret;
}
1530 
1531 static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
1532 {
1533 	enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
1534 	struct hclge_dev *hdev = ae_dev->priv;
1535 
1536 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
1537 	    hdev->pdev->revision < 0x21)
1538 		return;
1539 
1540 	reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
1541 	if (reset_type != HNAE3_NONE_RESET)
1542 		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
1543 }
1544 
/* Per-module error-interrupt configuration table, terminated by an
 * all-zero sentinel entry. hclge_hw_error_set_state() walks it and
 * invokes each .config_err_int callback.
 */
static const struct hclge_hw_blk hw_blk[] = {
	{
	  .msk = BIT(0), .name = "IGU_EGU",
	  .config_err_int = hclge_config_igu_egu_hw_err_int,
	},
	{
	  .msk = BIT(1), .name = "PPP",
	  .config_err_int = hclge_config_ppp_hw_err_int,
	},
	{
	  .msk = BIT(2), .name = "SSU",
	  .config_err_int = hclge_config_ssu_hw_err_int,
	},
	{
	  .msk = BIT(3), .name = "PPU",
	  .config_err_int = hclge_config_ppu_hw_err_int,
	},
	{
	  .msk = BIT(4), .name = "TM",
	  .config_err_int = hclge_config_tm_hw_err_int,
	},
	{
	  .msk = BIT(5), .name = "COMMON",
	  .config_err_int = hclge_config_common_hw_err_int,
	},
	{
	  /* note the gap: bits 6-7 are unused here */
	  .msk = BIT(8), .name = "MAC",
	  .config_err_int = hclge_config_mac_err_int,
	},
	{ /* sentinel */ }
};
1576 
1577 int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
1578 {
1579 	const struct hclge_hw_blk *module = hw_blk;
1580 	struct device *dev = &hdev->pdev->dev;
1581 	int ret = 0;
1582 
1583 	while (module->name) {
1584 		if (module->config_err_int) {
1585 			ret = module->config_err_int(hdev, state);
1586 			if (ret)
1587 				return ret;
1588 		}
1589 		module++;
1590 	}
1591 
1592 	ret = hclge_config_rocee_ras_interrupt(hdev, state);
1593 	if (ret)
1594 		dev_err(dev, "fail(%d) to configure ROCEE err int\n", ret);
1595 
1596 	return ret;
1597 }
1598 
/* PCIe AER entry point for HNS RAS errors: read the aggregate status
 * register, process non-fatal NIC errors and ROCEE errors, and tell the
 * AER core whether a PCI-level reset is needed.
 */
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct device *dev = &hdev->pdev->dev;
	u32 status;

	status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);

	/* Handling Non-fatal HNS RAS errors */
	if (status & HCLGE_RAS_REG_NFE_MASK) {
		dev_warn(dev,
			 "HNS Non-Fatal RAS error(status=0x%x) identified\n",
			 status);
		hclge_handle_all_ras_errors(hdev);
	} else {
		/* no NFE: bail out early as "recovered" while a reset is
		 * already in progress or on pre-0x21 hardware
		 */
		if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
		    hdev->pdev->revision < 0x21) {
			ae_dev->override_pci_need_reset = 1;
			return PCI_ERS_RESULT_RECOVERED;
		}
	}

	if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
		dev_warn(dev, "ROCEE uncorrected RAS error identified\n");
		hclge_handle_rocee_ras_error(ae_dev);
	}

	/* override_pci_need_reset=0 presumably lets the AER core proceed
	 * with its reset; =1 suppresses it — TODO confirm semantics at
	 * the hnae3 layer.
	 */
	if (status & HCLGE_RAS_REG_NFE_MASK ||
	    status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
		ae_dev->override_pci_need_reset = 0;
		return PCI_ERS_RESULT_NEED_RESET;
	}
	ae_dev->override_pci_need_reset = 1;

	return PCI_ERS_RESULT_RECOVERED;
}
1635 
1636 int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
1637 			       unsigned long *reset_requests)
1638 {
1639 	struct hclge_mac_tnl_stats mac_tnl_stats;
1640 	struct device *dev = &hdev->pdev->dev;
1641 	u32 mpf_bd_num, pf_bd_num, bd_num;
1642 	enum hnae3_reset_type reset_level;
1643 	struct hclge_desc desc_bd;
1644 	struct hclge_desc *desc;
1645 	__le32 *desc_data;
1646 	u32 status;
1647 	int ret;
1648 
1649 	/* query the number of bds for the MSIx int status */
1650 	hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
1651 				   true);
1652 	ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
1653 	if (ret) {
1654 		dev_err(dev, "fail(%d) to query msix int status bd num\n",
1655 			ret);
1656 		return ret;
1657 	}
1658 
1659 	mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
1660 	pf_bd_num = le32_to_cpu(desc_bd.data[1]);
1661 	bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
1662 
1663 	desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1664 	if (!desc)
1665 		goto out;
1666 
1667 	/* query all main PF MSIx errors */
1668 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
1669 				   true);
1670 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1671 
1672 	ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
1673 	if (ret) {
1674 		dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
1675 			ret);
1676 		goto msi_error;
1677 	}
1678 
1679 	/* log MAC errors */
1680 	desc_data = (__le32 *)&desc[1];
1681 	status = le32_to_cpu(*desc_data);
1682 	if (status) {
1683 		reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
1684 					      &hclge_mac_afifo_tnl_int[0],
1685 					      status);
1686 		set_bit(reset_level, reset_requests);
1687 	}
1688 
1689 	/* log PPU(RCB) MPF errors */
1690 	desc_data = (__le32 *)&desc[5];
1691 	status = le32_to_cpu(*(desc_data + 2)) &
1692 			HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
1693 	if (status) {
1694 		reset_level =
1695 			hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
1696 					&hclge_ppu_mpf_abnormal_int_st2[0],
1697 					status);
1698 		set_bit(reset_level, reset_requests);
1699 	}
1700 
1701 	/* clear all main PF MSIx errors */
1702 	hclge_cmd_reuse_desc(&desc[0], false);
1703 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1704 
1705 	ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
1706 	if (ret) {
1707 		dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
1708 			ret);
1709 		goto msi_error;
1710 	}
1711 
1712 	/* query all PF MSIx errors */
1713 	memset(desc, 0, bd_num * sizeof(struct hclge_desc));
1714 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
1715 				   true);
1716 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1717 
1718 	ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
1719 	if (ret) {
1720 		dev_err(dev, "query all pf msix int cmd failed (%d)\n",
1721 			ret);
1722 		goto msi_error;
1723 	}
1724 
1725 	/* log SSU PF errors */
1726 	status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
1727 	if (status) {
1728 		reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
1729 					      &hclge_ssu_port_based_pf_int[0],
1730 					      status);
1731 		set_bit(reset_level, reset_requests);
1732 	}
1733 
1734 	/* read and log PPP PF errors */
1735 	desc_data = (__le32 *)&desc[2];
1736 	status = le32_to_cpu(*desc_data);
1737 	if (status) {
1738 		reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
1739 					      &hclge_ppp_pf_abnormal_int[0],
1740 					      status);
1741 		set_bit(reset_level, reset_requests);
1742 	}
1743 
1744 	/* log PPU(RCB) PF errors */
1745 	desc_data = (__le32 *)&desc[3];
1746 	status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
1747 	if (status) {
1748 		reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
1749 					      &hclge_ppu_pf_abnormal_int[0],
1750 					      status);
1751 		set_bit(reset_level, reset_requests);
1752 	}
1753 
1754 	/* clear all PF MSIx errors */
1755 	hclge_cmd_reuse_desc(&desc[0], false);
1756 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1757 
1758 	ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
1759 	if (ret) {
1760 		dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
1761 			ret);
1762 	}
1763 
1764 	/* query and clear mac tnl interruptions */
1765 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
1766 				   true);
1767 	ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
1768 	if (ret) {
1769 		dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret);
1770 		goto msi_error;
1771 	}
1772 
1773 	status = le32_to_cpu(desc->data[0]);
1774 	if (status) {
1775 		/* When mac tnl interrupt occurs, we record current time and
1776 		 * register status here in a fifo, then clear the status. So
1777 		 * that if link status changes suddenly at some time, we can
1778 		 * query them by debugfs.
1779 		 */
1780 		mac_tnl_stats.time = local_clock();
1781 		mac_tnl_stats.status = status;
1782 		kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats);
1783 		ret = hclge_clear_mac_tnl_int(hdev);
1784 		if (ret)
1785 			dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
1786 		set_bit(HNAE3_NONE_RESET, reset_requests);
1787 	}
1788 
1789 msi_error:
1790 	kfree(desc);
1791 out:
1792 	return ret;
1793 }
1794