// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2023 Hisilicon Limited.
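//
// Register dump support for the HCLGE PF driver: register values are read
// either directly from the PF's PCIe register space or via firmware query
// commands, and are packed into the dump buffer as a small header followed
// by TLV-tagged register blocks (see enum hclge_reg_tag below).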

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_regs.h"
#include "hnae3.h"

static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CSQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CSQ_TAIL_REG,
					 HCLGE_COMM_NIC_CSQ_HEAD_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CRQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CRQ_TAIL_REG,
					 HCLGE_COMM_NIC_CRQ_HEAD_REG,
					 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_COMM_CMDQ_INTR_STS_REG,
					 HCLGE_COMM_CMDQ_INTR_EN_REG,
					 HCLGE_COMM_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_PF_OTHER_INT_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

/* offsets of the per-type DFX BD numbers in the HCLGE_OPC_DFX_BD_NUM result */
#define HCLGE_DFX_BIOS_BD_OFFSET        1
#define HCLGE_DFX_SSU_0_BD_OFFSET       2
#define HCLGE_DFX_SSU_1_BD_OFFSET       3
#define HCLGE_DFX_IGU_BD_OFFSET         4
#define HCLGE_DFX_RPU_0_BD_OFFSET       5
#define HCLGE_DFX_RPU_1_BD_OFFSET       6
#define HCLGE_DFX_NCSI_BD_OFFSET        7
#define HCLGE_DFX_RTC_BD_OFFSET         8
#define HCLGE_DFX_PPP_BD_OFFSET         9
#define HCLGE_DFX_RCB_BD_OFFSET         10
#define HCLGE_DFX_TQP_BD_OFFSET         11
#define HCLGE_DFX_SSU_2_BD_OFFSET       12

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

enum hclge_reg_tag {
	HCLGE_REG_TAG_CMDQ = 0,
	HCLGE_REG_TAG_COMMON,
	HCLGE_REG_TAG_RING,
	HCLGE_REG_TAG_TQP_INTR,
	HCLGE_REG_TAG_QUERY_32_BIT,
	HCLGE_REG_TAG_QUERY_64_BIT,
	HCLGE_REG_TAG_DFX_BIOS_COMMON,
	HCLGE_REG_TAG_DFX_SSU_0,
	HCLGE_REG_TAG_DFX_SSU_1,
	HCLGE_REG_TAG_DFX_IGU_EGU,
	HCLGE_REG_TAG_DFX_RPU_0,
	HCLGE_REG_TAG_DFX_RPU_1,
	HCLGE_REG_TAG_DFX_NCSI,
	HCLGE_REG_TAG_DFX_RTC,
	HCLGE_REG_TAG_DFX_PPP,
	HCLGE_REG_TAG_DFX_RCB,
	HCLGE_REG_TAG_DFX_TQP,
	HCLGE_REG_TAG_DFX_SSU_2,
	HCLGE_REG_TAG_RPU_TNL,
};

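/* A register dump starts with a struct hclge_reg_header and is followed by
 * a sequence of blocks, each of which is a struct hclge_reg_tlv (tag from
 * enum hclge_reg_tag, len in bytes including the TLV itself) followed by
 * the register values of that block. The structures are packed to 4 bytes
 * so that they map onto whole u32 words in the dump buffer.
 */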
#pragma pack(4)
struct hclge_reg_tlv {
	u16 tag;
	u16 len;
};

struct hclge_reg_header {
	u64 magic_number;
	u8 is_vf;
	u8 rsv[7];
};

#pragma pack()

#define HCLGE_REG_TLV_SIZE	sizeof(struct hclge_reg_tlv)
#define HCLGE_REG_HEADER_SIZE	sizeof(struct hclge_reg_header)
#define HCLGE_REG_TLV_SPACE	(sizeof(struct hclge_reg_tlv) / sizeof(u32))
#define HCLGE_REG_HEADER_SPACE	(sizeof(struct hclge_reg_header) / sizeof(u32))
#define HCLGE_REG_MAGIC_NUMBER	0x686e733372656773 /* ASCII of "hns3regs" */

#define HCLGE_REG_RPU_TNL_ID_0	1

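/* Fill in the dump header: the magic number plus a PF/VF flag (0 for PF).
 * Returns the header size in u32 words.
 */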
static u32 hclge_reg_get_header(void *data)
{
	struct hclge_reg_header *header = data;

	header->magic_number = HCLGE_REG_MAGIC_NUMBER;
	header->is_vf = 0x0;

	return HCLGE_REG_HEADER_SPACE;
}

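/* Write a TLV in front of a register block. regs_num is the number of u32
 * words that follow; the stored length covers the TLV itself plus that data.
 * Returns the TLV size in u32 words.
 */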
static u32 hclge_reg_get_tlv(u32 tag, u32 regs_num, void *data)
{
	struct hclge_reg_tlv *tlv = data;

	tlv->tag = tag;
	tlv->len = regs_num * sizeof(u32) + HCLGE_REG_TLV_SIZE;

	return HCLGE_REG_TLV_SPACE;
}

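/* Query regs_num 32-bit register values from the firmware with a chained
 * HCLGE_OPC_QUERY_32_BIT_REG command and copy them into the data buffer.
 * The first descriptor carries two words of command information; the
 * remaining descriptors are used for data in full.
 */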
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

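/* Same as above for 64-bit registers: a chained HCLGE_OPC_QUERY_64_BIT_REG
 * command, with one non-data word in the first descriptor.
 */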
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

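/* Query the BD number of every DFX register type in one chained
 * HCLGE_OPC_DFX_BD_NUM command of HCLGE_GET_DFX_REG_TYPE_CNT descriptors.
 */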
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	int i;

	/* initialize command BD except the last one */
	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}

	/* initialize the last command BD */
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
}

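/* Pick the BD number of each DFX register type out of the query result,
 * using hclge_dfx_bd_offset_list to locate it in the descriptor data.
 */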
static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}

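/* Send a chained query of bd_num descriptors for the given DFX opcode. */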
static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}

/* tnl_id = 0 means get the sum of all tnl regs' values */
static int hclge_dfx_reg_rpu_tnl_cmd_send(struct hclge_dev *hdev, u32 tnl_id,
					  struct hclge_desc *desc, int bd_num)
{
	int i, ret;

	for (i = 0; i < bd_num; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_RPU_REG_0,
					   true);
		if (i != bd_num - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}

	desc[0].data[0] = cpu_to_le32(tnl_id);
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to query dfx rpu tnl reg, ret = %d\n",
			ret);
	return ret;
}

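/* Copy every data word of bd_num descriptors into the output buffer and
 * return the number of u32 words written.
 */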
static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}

	return reg_num;
}

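/* Compute the buffer length in bytes needed for all DFX register blocks,
 * including one RPU tunnel block per tunnel, each preceded by a TLV.
 */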
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int data_len_per_desc;
	int *bd_num_list;
	int ret;
	u32 i;

	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
	if (!bd_num_list)
		return -ENOMEM;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		goto out;
	}

	data_len_per_desc = sizeof_field(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++)
		*len += bd_num_list[i] * data_len_per_desc + HCLGE_REG_TLV_SIZE;

	/* The BD number of dfx_rpu_0 is reused by each dfx_rpu_tnl.
	 * The HCLGE_DFX_*_BD_OFFSET values start at 1, but the array
	 * subscript starts at 0, so the offset needs '- 1'.
	 */
	*len += (bd_num_list[HCLGE_DFX_RPU_0_BD_OFFSET - 1] * data_len_per_desc +
		 HCLGE_REG_TLV_SIZE) * ae_dev->dev_specs.tnl_num;

out:
	kfree(bd_num_list);
	return ret;
}

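/* Dump the RPU registers of every tunnel (tnl id 1..tnl_num), each block
 * preceded by an HCLGE_REG_TAG_RPU_TNL TLV.
 */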
static int hclge_get_dfx_rpu_tnl_reg(struct hclge_dev *hdev, u32 *reg,
				     struct hclge_desc *desc_src,
				     int bd_num)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret = 0;
	u8 i;

	for (i = HCLGE_REG_RPU_TNL_ID_0; i <= ae_dev->dev_specs.tnl_num; i++) {
		ret = hclge_dfx_reg_rpu_tnl_cmd_send(hdev, i, desc_src, bd_num);
		if (ret)
			break;

		reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RPU_TNL,
					 ARRAY_SIZE(desc_src->data) * bd_num,
					 reg);
		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	return ret;
}

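/* Dump all DFX register blocks (one TLV plus data per type), followed by
 * the per-tunnel RPU register blocks.
 */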
static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len;
	struct hclge_desc *desc_src;
	int *bd_num_list;
	u32 *reg = data;
	int ret;
	u32 i;

	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
	if (!bd_num_list)
		return -ENOMEM;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		goto out;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			goto free;
		}

		reg += hclge_reg_get_tlv(HCLGE_REG_TAG_DFX_BIOS_COMMON + i,
					 ARRAY_SIZE(desc_src->data) * bd_num,
					 reg);
		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	/* The HCLGE_DFX_*_BD_OFFSET values start at 1, but the array
	 * subscript starts at 0, so the offset needs '- 1'.
	 */
	bd_num = bd_num_list[HCLGE_DFX_RPU_0_BD_OFFSET - 1];
	ret = hclge_get_dfx_rpu_tnl_reg(hdev, reg, desc_src, bd_num);

free:
	kfree(desc_src);
out:
	kfree(bd_num_list);
	return ret;
}

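/* Read the CMDQ, common (misc), per-ring and per-vector TQP interrupt
 * registers directly from the PF's register space, each group preceded by
 * a TLV. Returns the number of u32 words written, including the TLVs.
 */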
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num;
	int data_num_sum;
	u32 *reg = data;

	/* fetch per-PF register values from the PF's PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	reg += hclge_reg_get_tlv(HCLGE_REG_TAG_CMDQ, reg_num, reg);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	data_num_sum = reg_num + HCLGE_REG_TLV_SPACE;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	reg += hclge_reg_get_tlv(HCLGE_REG_TAG_COMMON, reg_num, reg);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	data_num_sum += reg_num + HCLGE_REG_TLV_SPACE;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	for (j = 0; j < kinfo->num_tqps; j++) {
		reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
	}
	data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		reg += hclge_reg_get_tlv(HCLGE_REG_TAG_TQP_INTR, reg_num, reg);
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
	}
	data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) *
			(hdev->num_msi_used - 1);

	return data_num_sum;
}

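/* Query how many 32-bit and 64-bit registers the firmware will report. */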
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

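/* Return the total dump buffer size in bytes: header, directly read
 * register blocks, firmware-queried 32/64-bit register blocks and the
 * DFX blocks.
 */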
int hclge_get_regs_len(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
	int cmdq_len, common_len, ring_len, tqp_intr_len;
	int regs_len_32_bit, regs_len_64_bit;
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_len = HCLGE_REG_TLV_SIZE + sizeof(cmdq_reg_addr_list);
	common_len = HCLGE_REG_TLV_SIZE + sizeof(common_reg_addr_list);
	ring_len = HCLGE_REG_TLV_SIZE + sizeof(ring_reg_addr_list);
	tqp_intr_len = HCLGE_REG_TLV_SIZE + sizeof(tqp_intr_reg_addr_list);
	regs_len_32_bit = HCLGE_REG_TLV_SIZE + regs_num_32_bit * sizeof(u32);
	regs_len_64_bit = HCLGE_REG_TLV_SIZE + regs_num_64_bit * sizeof(u64);

	/* return the total length of all register values */
	return HCLGE_REG_HEADER_SIZE + cmdq_len + common_len + ring_len *
		kinfo->num_tqps + tqp_intr_len * (hdev->num_msi_used - 1) +
		regs_len_32_bit + regs_len_64_bit + dfx_regs_len;
}

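/* Fill the register dump buffer sized by hclge_get_regs_len() and report
 * the firmware version through 'version'. Each 64-bit register takes two
 * u32 words in the dump, hence HCLGE_REG_64_BIT_SPACE_MULTIPLE.
 */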
void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
		    void *data)
{
#define HCLGE_REG_64_BIT_SPACE_MULTIPLE		2

	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_reg_get_header(reg);
	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	reg += hclge_reg_get_tlv(HCLGE_REG_TAG_QUERY_32_BIT,
				 regs_num_32_bit, reg);
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg += regs_num_32_bit;

	reg += hclge_reg_get_tlv(HCLGE_REG_TAG_QUERY_64_BIT,
				 regs_num_64_bit *
				 HCLGE_REG_64_BIT_SPACE_MULTIPLE, reg);
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg += regs_num_64_bit * HCLGE_REG_64_BIT_SPACE_MULTIPLE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}