// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2016-2017 Hisilicon Limited. */

#include "hclge_err.h"

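/* Each table below maps a hardware error interrupt status bit (or, for
 * the ROCEE overflow table, an error value) to a human readable message.
 * Tables are terminated by an all-zero sentinel entry so that walkers
 * such as hclge_log_error() can stop on a NULL .msg instead of carrying
 * an explicit length.
 */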
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
	{ .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
	{ .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
	{ .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
	{ .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
	{ .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
	{ .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
	{ .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
	{ .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
	{ .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
	{ .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
	{ .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
	{ .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
	{ .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
	{ .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
	{ .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_igu_int[] = {
	{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
	{ .int_msk = BIT(0), .msg = "rx_buf_overflow" },
	{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
	{ .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow" },
	{ .int_msk = BIT(3), .msg = "tx_buf_overflow" },
	{ .int_msk = BIT(4), .msg = "tx_buf_underrun" },
	{ .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ncsi_err_int[] = {
	{ .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
	{ .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
	{ .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
	{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
	{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
	{ .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
	{ .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
	{ .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
	{ .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
	{ .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
	{ .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
	{ .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
	{ .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_mbit_err" },
	{ .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
	{ .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
	{ .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
	{ .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
	{ .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
	{ .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
	{ .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
	{ .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
	{ .int_msk = BIT(27),
		.msg = "flow_director_ad_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(28),
		.msg = "flow_director_ad_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(29),
		.msg = "rx_vlan_tag_memory_ecc_mbit_err" },
	{ .int_msk = BIT(30),
		.msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
	{ .int_msk = BIT(0), .msg = "tx_vlan_tag_err" },
	{ .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
	{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
	{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
	{ .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_tm_sch_rint[] = {
	{ .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" },
	{ .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" },
	{ .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" },
	{ .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" },
	{ .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" },
	{ .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" },
	{ .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" },
	{ .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" },
	{ .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" },
	{ .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" },
	{ .int_msk = BIT(12),
	  .msg = "tm_sch_port_shap_offset_fifo_wr_err" },
	{ .int_msk = BIT(13),
	  .msg = "tm_sch_port_shap_offset_fifo_rd_err" },
	{ .int_msk = BIT(14),
	  .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" },
	{ .int_msk = BIT(15),
	  .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" },
	{ .int_msk = BIT(16),
	  .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" },
	{ .int_msk = BIT(17),
	  .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" },
	{ .int_msk = BIT(18),
	  .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" },
	{ .int_msk = BIT(19),
	  .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" },
	{ .int_msk = BIT(20),
	  .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" },
	{ .int_msk = BIT(21),
	  .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" },
	{ .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" },
	{ .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" },
	{ .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" },
	{ .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" },
	{ .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" },
	{ .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" },
	{ .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" },
	{ .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" },
	{ .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" },
	{ .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
	{ .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" },
	{ .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" },
	{ .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" },
	{ .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" },
	{ .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" },
	{ .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" },
	{ .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" },
	{ .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" },
	{ .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" },
	{ .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" },
	{ .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" },
	{ .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" },
	{ .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" },
	{ .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" },
	{ .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" },
	{ .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" },
	{ .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" },
	{ .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
	{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
	{ .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
	{ .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
	{ .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
	{ .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
	{ .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
	{ .int_msk = BIT(21), .msg = "qcn_gp3_barrel_mem_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
	{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" },
	{ .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" },
	{ .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" },
	{ .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" },
	{ .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" },
	{ .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
	{ .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" },
	{ .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" },
	{ .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" },
	{ .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" },
	{ .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" },
	{ .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" },
	{ .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" },
	{ .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" },
	{ .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" },
	{ .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" },
	{ .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" },
	{ .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" },
	{ .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" },
	{ .int_msk = BIT(26), .msg = "rd_bus_err" },
	{ .int_msk = BIT(27), .msg = "wr_bus_err" },
	{ .int_msk = BIT(28), .msg = "reg_search_miss" },
	{ .int_msk = BIT(29), .msg = "rx_q_search_miss" },
	{ .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" },
	{ .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
	{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" },
	{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
	{ .int_msk = BIT(0), .msg = "over_8bd_no_fe" },
	{ .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" },
	{ .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" },
	{ .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" },
	{ .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" },
	{ .int_msk = BIT(5), .msg = "buf_wait_timeout" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
	{ .int_msk = BIT(0), .msg = "buf_sum_err" },
	{ .int_msk = BIT(1), .msg = "ppp_mb_num_err" },
	{ .int_msk = BIT(2), .msg = "ppp_mbid_err" },
	{ .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" },
	{ .int_msk = BIT(4), .msg = "ppp_rlt_host_err" },
	{ .int_msk = BIT(5), .msg = "cks_edit_position_err" },
	{ .int_msk = BIT(6), .msg = "cks_edit_condition_err" },
	{ .int_msk = BIT(7), .msg = "vlan_edit_condition_err" },
	{ .int_msk = BIT(8), .msg = "vlan_num_ot_err" },
	{ .int_msk = BIT(9), .msg = "vlan_num_in_err" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
	{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },
	{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" },
	{ .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" },
	{ .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" },
	{ .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" },
	{ .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" },
	{ .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" },
	{ .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" },
	{ .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" },
	{ .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" },
	{ .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" },
	{ .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
	{ .int_msk = BIT(0), .msg = "ig_mac_inf_int" },
	{ .int_msk = BIT(1), .msg = "ig_host_inf_int" },
	{ .int_msk = BIT(2), .msg = "ig_roc_buf_int" },
	{ .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" },
	{ .int_msk = BIT(4), .msg = "ig_host_key_fifo_int" },
	{ .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" },
	{ .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" },
	{ .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" },
	{ .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" },
	{ .int_msk = BIT(9), .msg = "qm_eof_fifo_int" },
	{ .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" },
	{ .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" },
	{ .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" },
	{ .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" },
	{ .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" },
	{ .int_msk = BIT(15), .msg = "host_cmd_fifo_int" },
	{ .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" },
	{ .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" },
	{ .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" },
	{ .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" },
	{ .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" },
	{ .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" },
	{ .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" },
	{ .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
	{ .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" },
	{ .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" },
	{ .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" },
	{ .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
	{ .int_msk = BIT(9), .msg = "low_water_line_err_port" },
	{ .int_msk = BIT(10), .msg = "hi_water_line_err_port" },
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
	{ .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" },
	{ .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" },
	{ .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" },
	{ .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" },
	{ .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" },
	{ .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" },
	{ .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" },
	{ .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" },
	{ .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" },
	{ .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" },
	{ .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" },
	{ .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" },
	{ .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" },
	{ .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" },
	{ .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" },
	{ .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" },
	{ .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" },
	{ .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" },
	{ .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" },
	{ .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" },
	{ /* sentinel */ }
};

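/* hclge_log_error - walk an error table and log each status bit that is set
 * @dev: device used for dev_warn() output
 * @reg: name of the hardware status register being decoded
 * @err: sentinel terminated table mapping status bits to messages
 * @err_sts: raw error status read from the register
 */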
static void hclge_log_error(struct device *dev, char *reg,
			    const struct hclge_hw_error *err,
			    u32 err_sts)
{
	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_warn(dev, "%s %s found [error status=0x%x]\n",
				 reg, err->msg, err_sts);
		err++;
	}
}

/* hclge_cmd_query_error: read the error information
 * @hdev: pointer to struct hclge_dev
 * @desc: descriptor for describing the command
 * @cmd:  command opcode
 * @flag: flag for extended command structure
 * @w_num: offset for setting the read interrupt type.
 * @int_type: selects the interrupt type whose error info will be
 * read (RAS-CE/RAS-NFE/RAS-FE etc.).
 *
 * This function queries the error info from the hw register(s) using
 * the command queue.
 */
static int hclge_cmd_query_error(struct hclge_dev *hdev,
				 struct hclge_desc *desc, u32 cmd,
				 u16 flag, u8 w_num,
				 enum hclge_err_int_type int_type)
{
	struct device *dev = &hdev->pdev->dev;
	int num = 1;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	if (flag) {
		desc[0].flag |= cpu_to_le16(flag);
		hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
		num = 2;
	}
	if (w_num)
		desc[0].data[w_num] = cpu_to_le32(int_type);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret)
		dev_err(dev, "query error cmd failed (%d)\n", ret);

	return ret;
}

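/* The enable/disable commands below share a pattern: the enable bits are
 * written only when @en is true (hclge_cmd_setup_basic_desc() zeroes the
 * descriptor, so leaving them unset disables the interrupts), while the
 * corresponding *_EN_MASK bits are always written and, by the naming
 * convention, select which enable bits the command actually updates.
 */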
static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int ret;

	/* configure common error interrupts */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);

	if (en) {
		desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
		desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
					HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
		desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
		desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN |
					      HCLGE_MSIX_SRAM_ECC_ERR_INT_EN);
		desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
	}

	desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
	desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
				HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
	desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
	desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK |
				      HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK);
	desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure common err interrupts\n", ret);

	return ret;
}

static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	if (hdev->pdev->revision < 0x21)
		return 0;

	/* configure NCSI error interrupts */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
	if (en)
		desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure NCSI error interrupts\n", ret);

	return ret;
}

static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	/* configure IGU,EGU error interrupts */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
	if (en)
		desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);

	desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(dev,
			"fail(%d) to configure IGU common interrupts\n", ret);
		return ret;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
	if (en)
		desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);

	desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(dev,
			"fail(%d) to configure IGU-EGU TNL interrupts\n", ret);
		return ret;
	}

	ret = hclge_config_ncsi_hw_err_int(hdev, en);

	return ret;
}

static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
					    bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int ret;

	/* configure PPP error interrupts */
	hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, false);

	if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
		if (en) {
			desc[0].data[0] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
			desc[0].data[1] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
			desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN);
		}

		desc[1].data[0] =
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
		desc[1].data[1] =
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
		if (hdev->pdev->revision >= 0x21)
			desc[1].data[2] =
				cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK);
	} else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
		if (en) {
			desc[0].data[0] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
			desc[0].data[1] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
		}

		desc[1].data[0] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
		desc[1].data[1] =
				cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
	if (ret)
		dev_err(dev, "fail(%d) to configure PPP error intr\n", ret);

	return ret;
}

static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en)
{
	int ret;

	ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
					       en);
	if (ret)
		return ret;

	ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
					       en);

	return ret;
}

static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	/* configure TM SCH hw errors */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
	if (en)
		desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret);
		return ret;
	}

	/* configure TM QCN hw errors */
	ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
				    0, 0, 0);
	if (ret) {
		dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
		return ret;
	}

	hclge_cmd_reuse_desc(&desc, false);
	if (en)
		desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure TM QCN mem errors\n", ret);

	return ret;
}

static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	/* configure MAC common error interrupts */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false);
	if (en)
		desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN);

	desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure MAC COMMON error intr\n", ret);

	return ret;
}

static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
					     bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int num = 1;
	int ret;

	/* configure PPU error interrupts */
	if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
		if (en) {
			desc[0].data[0] =
				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN);
			desc[0].data[1] =
				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN);
			desc[1].data[3] =
				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN);
			desc[1].data[4] =
				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN);
		}

		desc[1].data[0] =
			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK);
		desc[1].data[1] =
			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK);
		desc[1].data[2] =
			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK);
		desc[1].data[3] |=
			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK);
		num = 2;
	} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
		if (en)
			desc[0].data[0] =
				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2);

		desc[0].data[2] =
			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK);
	} else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) {
		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
		if (en)
			desc[0].data[0] =
				cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN);

		desc[0].data[2] =
			cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK);
	} else {
		dev_err(dev, "Invalid cmd to configure PPU error interrupts\n");
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);

	return ret;
}

static int hclge_config_ppu_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	int ret;

	ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD,
						en);
	if (ret) {
		dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n",
			ret);
		return ret;
	}

	ret = hclge_config_ppu_error_interrupts(hdev,
						HCLGE_PPU_MPF_OTHER_INT_CMD,
						en);
	if (ret) {
		dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret);
		return ret;
	}

	ret = hclge_config_ppu_error_interrupts(hdev,
						HCLGE_PPU_PF_OTHER_INT_CMD, en);
	if (ret)
		dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n",
			ret);
	return ret;
}

static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int ret;

	/* configure SSU ecc error interrupts */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
	if (en) {
		desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
		desc[0].data[1] =
			cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN);
		desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN);
	}

	desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK);
	desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK);
	desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
	if (ret) {
		dev_err(dev,
			"fail(%d) to configure SSU ECC error interrupt\n", ret);
		return ret;
	}

	/* configure SSU common error interrupts */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);

	if (en) {
		if (hdev->pdev->revision >= 0x21)
			desc[0].data[0] =
				cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
		else
			desc[0].data[0] =
				cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5));
		desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN);
		desc[0].data[2] =
			cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN);
	}

	desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK |
				HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK);
	desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
	if (ret)
		dev_err(dev,
			"fail(%d) to configure SSU COMMON error intr\n", ret);

	return ret;
}

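/* Record the default reset level requested for a detected error via the
 * ae_dev callback, when the client has registered one; expands to a no-op
 * otherwise. Only the request is recorded here, no reset is performed.
 */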
#define HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type) \
	do { \
		if (ae_dev->ops->set_default_reset_request) \
			ae_dev->ops->set_default_reset_request(ae_dev, \
							       reset_type); \
	} while (0)

/* hclge_handle_mpf_ras_error: handle all main PF RAS errors
 * @hdev: pointer to struct hclge_dev
 * @desc: descriptor for describing the command
 * @num:  number of extended command structures
 *
 * This function handles all the main PF RAS errors reported in the
 * hw register(s) using the command queue.
 */
static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
				      struct hclge_desc *desc,
				      int num)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct device *dev = &hdev->pdev->dev;
	__le32 *desc_data;
	u32 status;
	int ret;

	/* query all main PF RAS errors */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret) {
		dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret);
		return ret;
	}

	/* log HNS common errors */
	status = le32_to_cpu(desc[0].data[0]);
	if (status) {
		hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
				&hclge_imp_tcm_ecc_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	status = le32_to_cpu(desc[0].data[1]);
	if (status) {
		hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
				&hclge_cmdq_nic_mem_ecc_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
		dev_warn(dev, "imp_rd_data_poison_err found\n");
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	status = le32_to_cpu(desc[0].data[3]);
	if (status) {
		hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
				&hclge_tqp_int_ecc_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(desc[0].data[4]);
	if (status) {
		hclge_log_error(dev, "MSIX_ECC_INT_STS",
				&hclge_msix_sram_ecc_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	/* log SSU(Storage Switch Unit) errors */
	desc_data = (__le32 *)&desc[2];
	status = le32_to_cpu(*(desc_data + 2));
	if (status) {
		dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_0 ssu_ecc_mbit_int[31:0]\n");
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
	if (status) {
		dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_ecc_mbit_int[32]\n");
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
	if (status) {
		hclge_log_error(dev, "SSU_COMMON_ERR_INT",
				&hclge_ssu_com_err_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	/* log IGU(Ingress Unit) errors */
	desc_data = (__le32 *)&desc[3];
	status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
	if (status)
		hclge_log_error(dev, "IGU_INT_STS",
				&hclge_igu_int[0], status);

	/* log PPP(Programmable Packet Process) errors */
	desc_data = (__le32 *)&desc[4];
	status = le32_to_cpu(*(desc_data + 1));
	if (status)
		hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
				&hclge_ppp_mpf_abnormal_int_st1[0], status);

	status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
	if (status)
		hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
				&hclge_ppp_mpf_abnormal_int_st3[0], status);

	/* log PPU(RCB) errors */
	desc_data = (__le32 *)&desc[5];
	status = le32_to_cpu(*(desc_data + 1));
	if (status) {
		dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
			 "rpu_rx_pkt_ecc_mbit_err");
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(*(desc_data + 2));
	if (status) {
		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
				&hclge_ppu_mpf_abnormal_int_st2[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
	if (status) {
		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
				&hclge_ppu_mpf_abnormal_int_st3[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	/* log TM(Traffic Manager) errors */
	desc_data = (__le32 *)&desc[6];
	status = le32_to_cpu(*desc_data);
	if (status) {
		hclge_log_error(dev, "TM_SCH_RINT",
				&hclge_tm_sch_rint[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	/* log QCN(Quantized Congestion Control) errors */
	desc_data = (__le32 *)&desc[7];
	status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
	if (status) {
		hclge_log_error(dev, "QCN_FIFO_RINT",
				&hclge_qcn_fifo_rint[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
	if (status) {
		hclge_log_error(dev, "QCN_ECC_RINT",
				&hclge_qcn_ecc_rint[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	/* log NCSI errors */
	desc_data = (__le32 *)&desc[9];
	status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
	if (status) {
		hclge_log_error(dev, "NCSI_ECC_INT_RPT",
				&hclge_ncsi_err_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	/* clear all main PF RAS errors */
	hclge_cmd_reuse_desc(&desc[0], false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret)
		dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);

	return ret;
}

/* hclge_handle_pf_ras_error: handle all PF RAS errors
 * @hdev: pointer to struct hclge_dev
 * @desc: descriptor for describing the command
 * @num:  number of extended command structures
 *
 * This function handles all the PF RAS errors reported in the hw
 * register(s) using the command queue.
 */
static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
				     struct hclge_desc *desc,
				     int num)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct device *dev = &hdev->pdev->dev;
	__le32 *desc_data;
	u32 status;
	int ret;

	/* query all PF RAS errors */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret) {
		dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret);
		return ret;
	}

	/* log SSU(Storage Switch Unit) errors */
	status = le32_to_cpu(desc[0].data[0]);
	if (status) {
		hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
				&hclge_ssu_port_based_err_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	status = le32_to_cpu(desc[0].data[1]);
	if (status) {
		hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
				&hclge_ssu_fifo_overflow_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	status = le32_to_cpu(desc[0].data[2]);
	if (status) {
		hclge_log_error(dev, "SSU_ETS_TCG_INT",
				&hclge_ssu_ets_tcg_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
	}

	/* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
	desc_data = (__le32 *)&desc[1];
	status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
	if (status)
		hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
				&hclge_igu_egu_tnl_int[0], status);

	/* clear all PF RAS errors */
	hclge_cmd_reuse_desc(&desc[0], false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret)
		dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);

	return ret;
}

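/* The number of command descriptors (BDs) needed to read the RAS status
 * is itself queried from firmware; one buffer sized for the larger of
 * the MPF and PF BD counts is reused for both queries below.
 */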
static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;
	u32 mpf_bd_num, pf_bd_num, bd_num;
	struct hclge_desc desc_bd;
	struct hclge_desc *desc;
	int ret;

	/* query the number of registers in the RAS int status */
	hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_RAS_INT_STS_BD_NUM,
				   true);
	ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
	if (ret) {
		dev_err(dev, "fail(%d) to query ras int status bd num\n", ret);
		return ret;
	}
	mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
	pf_bd_num = le32_to_cpu(desc_bd.data[1]);
	bd_num = max_t(u32, mpf_bd_num, pf_bd_num);

	desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/* handle all main PF RAS errors */
	ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num);
	if (ret) {
		kfree(desc);
		return ret;
	}
	memset(desc, 0, bd_num * sizeof(struct hclge_desc));

	/* handle all PF RAS errors */
	ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num);
	kfree(desc);

	return ret;
}

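/* Unlike the bitmask driven tables above, hclge_rocee_qmm_ovf_err_int[]
 * is matched by value: the overflow error type field is compared for
 * equality against .int_msk rather than ANDed with it.
 */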
static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	int ret;

	/* read overflow error status */
	ret = hclge_cmd_query_error(hdev, &desc[0],
				    HCLGE_ROCEE_PF_RAS_INT_CMD,
				    0, 0, 0);
	if (ret) {
		dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
		return ret;
	}

	/* log overflow error */
	if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
		const struct hclge_hw_error *err;
		u32 err_sts;

		err = &hclge_rocee_qmm_ovf_err_int[0];
		err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK &
			  le32_to_cpu(desc[0].data[0]);
		while (err->msg) {
			if (err->int_msk == err_sts) {
				dev_warn(dev, "%s [error status=0x%x] found\n",
					 err->msg,
					 le32_to_cpu(desc[0].data[0]));
				break;
			}
			err++;
		}
	}

	if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
		dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
			 le32_to_cpu(desc[0].data[1]));
	}

	if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
		dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
			 le32_to_cpu(desc[0].data[2]));
	}

	return 0;
}

static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
{
	enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET;
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	unsigned int status;
	int ret;

	/* read RAS error interrupt status */
	ret = hclge_cmd_query_error(hdev, &desc[0],
				    HCLGE_QUERY_CLEAR_ROCEE_RAS_INT,
				    0, 0, 0);
	if (ret) {
		dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
		/* reset everything for now */
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
		return ret;
	}

	status = le32_to_cpu(desc[0].data[0]);

	if (status & HCLGE_ROCEE_RERR_INT_MASK)
		dev_warn(dev, "ROCEE RAS AXI rresp error\n");

	if (status & HCLGE_ROCEE_BERR_INT_MASK)
		dev_warn(dev, "ROCEE RAS AXI bresp error\n");

	if (status & HCLGE_ROCEE_ECC_INT_MASK) {
		dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
		reset_type = HNAE3_GLOBAL_RESET;
	}

	if (status & HCLGE_ROCEE_OVF_INT_MASK) {
		ret = hclge_log_rocee_ovf_error(hdev);
		if (ret) {
			dev_err(dev, "failed(%d) to process ovf error\n", ret);
			/* reset everything for now */
			HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
			return ret;
		}
	}

	/* clear error status */
	hclge_cmd_reuse_desc(&desc[0], false);
	ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
	if (ret) {
		dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
		/* reset everything for now */
		reset_type = HNAE3_GLOBAL_RESET;
	}

	HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);

	return ret;
}

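/* ROCEE RAS interrupts exist only on revision 0x21 and later devices
 * with RoCE support; on anything else this configuration step is
 * skipped and reported as success.
 */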
static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	int ret;

	if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
	if (en) {
		/* enable ROCEE hw error interrupts */
		desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN);
		desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN);

		hclge_log_and_clear_rocee_ras_error(hdev);
	}
	desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK);
	desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret);

	return ret;
}

static int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    hdev->pdev->revision < 0x21)
		return HNAE3_NONE_RESET;

	return hclge_log_and_clear_rocee_ras_error(hdev);
}

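/* Hardware blocks whose error interrupts are configured in one pass by
 * hclge_hw_error_set_state(), terminated by an all-zero sentinel.
 */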
static const struct hclge_hw_blk hw_blk[] = {
	{
	  .msk = BIT(0), .name = "IGU_EGU",
	  .config_err_int = hclge_config_igu_egu_hw_err_int,
	},
	{
	  .msk = BIT(1), .name = "PPP",
	  .config_err_int = hclge_config_ppp_hw_err_int,
	},
	{
	  .msk = BIT(2), .name = "SSU",
	  .config_err_int = hclge_config_ssu_hw_err_int,
	},
	{
	  .msk = BIT(3), .name = "PPU",
	  .config_err_int = hclge_config_ppu_hw_err_int,
	},
	{
	  .msk = BIT(4), .name = "TM",
	  .config_err_int = hclge_config_tm_hw_err_int,
	},
	{
	  .msk = BIT(5), .name = "COMMON",
	  .config_err_int = hclge_config_common_hw_err_int,
	},
	{
	  .msk = BIT(8), .name = "MAC",
	  .config_err_int = hclge_config_mac_err_int,
	},
	{ /* sentinel */ }
};

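/* hclge_hw_error_set_state - enable or disable all hw error interrupts
 * @hdev: pointer to struct hclge_dev
 * @state: true to enable the error interrupts, false to disable them
 *
 * Walks hw_blk[] and then configures the ROCEE RAS interrupts. Returns
 * 0 on success or the first error encountered.
 */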
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
{
	const struct hclge_hw_blk *module = hw_blk;
	struct device *dev = &hdev->pdev->dev;
	int ret = 0;

	while (module->name) {
		if (module->config_err_int) {
			ret = module->config_err_int(hdev, state);
			if (ret)
				return ret;
		}
		module++;
	}

	ret = hclge_config_rocee_ras_interrupt(hdev, state);
	if (ret)
		dev_err(dev, "fail(%d) to configure ROCEE err int\n", ret);

	return ret;
}

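/* hclge_handle_hw_ras_error - recovery entry point for reported RAS errors
 * @ae_dev: pointer to struct hnae3_ae_dev
 *
 * Logs and clears pending non-fatal HNS and ROCEE RAS errors and tells
 * the PCI error recovery core whether a reset is needed.
 */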
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct device *dev = &hdev->pdev->dev;
	u32 status;

	status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);

	/* Handling Non-fatal HNS RAS errors */
	if (status & HCLGE_RAS_REG_NFE_MASK) {
		dev_warn(dev,
			 "HNS Non-Fatal RAS error(status=0x%x) identified\n",
			 status);
		hclge_handle_all_ras_errors(hdev);
	} else {
		if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
		    hdev->pdev->revision < 0x21)
			return PCI_ERS_RESULT_RECOVERED;
	}

	if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
		dev_warn(dev, "ROCEE uncorrected RAS error identified\n");
		hclge_handle_rocee_ras_error(ae_dev);
	}

	if (status & HCLGE_RAS_REG_NFE_MASK ||
	    status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
		return PCI_ERS_RESULT_NEED_RESET;

	return PCI_ERS_RESULT_RECOVERED;
}

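/* hclge_handle_hw_msix_error - query, log and clear MSI-X reported hw errors
 * @hdev: pointer to struct hclge_dev
 * @reset_requests: bitmap to which the required reset level(s) are added
 *
 * A function level reset is requested by default; individual errors may
 * escalate this to a core or global reset.
 */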
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
			       unsigned long *reset_requests)
{
	struct device *dev = &hdev->pdev->dev;
	u32 mpf_bd_num, pf_bd_num, bd_num;
	struct hclge_desc desc_bd;
	struct hclge_desc *desc;
	__le32 *desc_data;
	int ret = 0;
	u32 status;

	/* set default handling */
	set_bit(HNAE3_FUNC_RESET, reset_requests);

	/* query the number of bds for the MSIx int status */
	hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
				   true);
	ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
	if (ret) {
		dev_err(dev, "fail(%d) to query msix int status bd num\n",
			ret);
		/* reset everything for now */
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
		return ret;
	}

	mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
	pf_bd_num = le32_to_cpu(desc_bd.data[1]);
	bd_num = max_t(u32, mpf_bd_num, pf_bd_num);

	desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out;
	}

	/* query all main PF MSIx errors */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
	if (ret) {
		dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
			ret);
		/* reset everything for now */
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
		goto msi_error;
	}

	/* log MAC errors */
	desc_data = (__le32 *)&desc[1];
	status = le32_to_cpu(*desc_data);
	if (status) {
		hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
				&hclge_mac_afifo_tnl_int[0], status);
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
	}

	/* log PPU(RCB) errors */
	desc_data = (__le32 *)&desc[5];
	status = le32_to_cpu(*(desc_data + 2)) &
			HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
	if (status) {
		dev_warn(dev,
			 "PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n",
			 status);
		set_bit(HNAE3_CORE_RESET, reset_requests);
	}

	/* clear all main PF MSIx errors */
	hclge_cmd_reuse_desc(&desc[0], false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
	if (ret) {
		dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
			ret);
		/* reset everything for now */
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
		goto msi_error;
	}

	/* query all PF MSIx errors */
	memset(desc, 0, bd_num * sizeof(struct hclge_desc));
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
	if (ret) {
		dev_err(dev, "query all pf msix int cmd failed (%d)\n",
			ret);
		/* reset everything for now */
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
		goto msi_error;
	}

	/* log SSU PF errors */
	status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
	if (status) {
		hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
				&hclge_ssu_port_based_pf_int[0], status);
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
	}

	/* read and log PPP PF errors */
	desc_data = (__le32 *)&desc[2];
	status = le32_to_cpu(*desc_data);
	if (status)
		hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
				&hclge_ppp_pf_abnormal_int[0], status);

	/* PPU(RCB) PF errors */
	desc_data = (__le32 *)&desc[3];
	status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
	if (status)
		hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
				&hclge_ppu_pf_abnormal_int[0], status);

	/* clear all PF MSIx errors */
	hclge_cmd_reuse_desc(&desc[0], false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
	if (ret) {
		dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
			ret);
		/* reset everything for now */
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
	}

msi_error:
	kfree(desc);
out:
	return ret;
}