/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10

/* Because the default MTU is 1500, an RCB buffer size of 2048 is enough */
#define RCB_DEFAULT_BUFFER_SIZE 2048

/**
 *hns_rcb_wait_fbd_clean - wait until hardware has cleaned all fetched
 * buffer descriptors (FBDs) of the given queues
 *@qs: array of queue struct pointers
 *@q_num: number of queues in @qs
 *@flag: tx or rx flag
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
	int i, wait_cnt;
	u32 fbd_num;

	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
		usleep_range(200, 300);
		fbd_num = 0;
		if (flag & RCB_INT_FLAG_TX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_TX_RING_FBDNUM_REG);
		if (flag & RCB_INT_FLAG_RX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_RX_RING_FBDNUM_REG);
		if (!fbd_num)
			i++;
		if (wait_cnt >= 10000)
			break;
	}

	if (i < q_num)
		dev_err(qs[i]->handle->owner_dev,
			"queue(%d) wait fbd(%d) clean failed!\n", i, fbd_num);
}
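
/*
 * Illustrative usage (a sketch, not a call site from this file): before
 * tearing a port down, a caller would typically drain both directions, e.g.
 *
 *	hns_rcb_wait_fbd_clean(qs, q_num, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
 *
 * Either flag may also be passed alone to wait on one direction only.
 */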

/**
 *hns_rcb_reset_ring_hw - reset a ring in hardware
 *@q: ring struct pointer
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;
	u32 tx_fbd_num;

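	/*
	 * Reset handshake: once the TX ring has drained, prefetch is
	 * disabled, T0_BE_RST is asserted and COULD_BE_RST is polled
	 * (re-pulsing the request up to RCB_RESET_WAIT_TIMES times) until
	 * the hardware agrees the ring may be reset; the whole sequence is
	 * retried at most RCB_RESET_TRY_TIMES times.
	 */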
	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		msleep(20);
		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			msleep(20);
			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

			wait_cnt++;
		}

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring failed\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}

/**
 *hns_rcb_int_ctrl_hw - rcb irq enable control
 *@q: hnae queue struct pointer
 *@flag: ring flag, tx or rx
 *@mask: non-zero sets the interrupt mask bits, zero clears them
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
			       int_mask_en);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
			       int_mask_en);
	}
}
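
/*
 * Note (our reading of the code above): the same value lands in both the
 * waterline and overtime mask registers, so @mask toggles all interrupt
 * sources of a direction at once; by the usual convention for INTMSK
 * registers, writing 1 masks (disables) the interrupt and 0 unmasks it.
 */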

void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
	}
}

void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}

void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}

/**
 *hns_rcb_ring_enable_hw - enable ring prefetch
 *@q: hnae queue struct pointer
 *@val: non-zero enables the ring, zero disables it
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}

/**
 *hns_rcb_common_init_commit_hw - commit rcb common init as completed
 *@rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb();	/* Sync point before the commit write */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb();	/* Sync point after the commit write */
}

/* hns_rcb_set_tx_ring_bs - init rcb ring buf size register
 *@q: hnae_queue
 *@buf_size: buffer size set to hw
 */
void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
{
	u32 bd_size_type = hns_rcb_buf_size2type(buf_size);

	dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
		       bd_size_type);
}

/* hns_rcb_set_rx_ring_bs - init rcb ring buf size register
 *@q: hnae_queue
 *@buf_size: buffer size set to hw
 */
void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
{
	u32 bd_size_type = hns_rcb_buf_size2type(buf_size);

	dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
		       bd_size_type);
}

/**
 *hns_rcb_ring_init - init rcb ring
 *@ring_pair: ring pair control block
 *@ring_type: ring type, RX_RING or TX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

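	/*
	 * The high half of the base address is written as (dma >> 31) >> 1
	 * rather than (dma >> 32): when dma_addr_t is only 32 bits wide a
	 * plain 32-bit shift would be undefined behaviour, while the split
	 * shift is well defined and yields 0 as expected.
	 */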
	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns_rcb_set_rx_ring_bs(q, ring->buf_size);

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns_rcb_set_tx_ring_bs(q, ring->buf_size);

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			ring_pair->port_id_in_comm + HNS_RCB_TX_PKTLINE_OFFSET);
	}
}

/**
 *hns_rcb_init_hw - init rcb hardware
 *@ring: rcb ring
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}

/**
 *hns_rcb_set_port_desc_cnt - set rcb port descriptor num
 *@rcb_common: rcb_common device
 *@port_idx: port index
 *@desc_cnt: BD num
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
				      u32 port_idx, u32 desc_cnt)
{
	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
		       desc_cnt);
}

static void hns_rcb_set_port_timeout(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
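	/*
	 * As programmed here, v1 hardware keeps a single global timeout,
	 * apparently expressed in clock cycles (hence the conversion via
	 * HNS_RCB_CLK_FREQ_MHZ), while v2 programs the timeout per port in
	 * microseconds and, on non-debug devices, additionally clamps the
	 * interrupt gap time to HNS_RCB_DEF_GAP_TIME_USECS.
	 */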
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
			       timeout * HNS_RCB_CLK_FREQ_MHZ);
	} else if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
		if (timeout > HNS_RCB_DEF_GAP_TIME_USECS)
			dsaf_write_dev(rcb_common,
				       RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
				       HNS_RCB_DEF_GAP_TIME_USECS);
		else
			dsaf_write_dev(rcb_common,
				       RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
				       timeout);

		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
	} else {
		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
	}
}

static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return HNS_RCB_SERVICE_NW_ENGINE_NUM;
	else
		return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}

/* clear rcb comm exception irq */
static void hns_rcb_comm_exc_irq_en(
			struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_value = 0xfffffffful;
	u32 msk_value = en ? 0 : 0xfffffffful;

	/* clear interrupts */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_value);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_value);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_value);

	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_value);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_value);

	/* set the masks */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_value);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_value);

	/* TX BDs need no cacheline, so mask sf_txring_fbd_intmask (bit 1) */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_value | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_value);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_value);
}

/**
 *hns_rcb_common_init_hw - init rcb common hardware
 *@rcb_common: rcb_common device
 *return 0 - success, negative - fail
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
	u32 reg_val;
	int i;
	int port_num = hns_rcb_common_get_port_num(rcb_common);

	hns_rcb_comm_exc_irq_en(rcb_common, 0);

	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
	if (!(reg_val & 0x1)) {
		dev_err(rcb_common->dsaf_dev->dev,
			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
		return -EBUSY;
	}

	for (i = 0; i < port_num; i++) {
		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
		hns_rcb_set_rx_coalesced_frames(
			rcb_common, i, HNS_RCB_DEF_RX_COALESCED_FRAMES);
		if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver) &&
		    !HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
			hns_rcb_set_tx_coalesced_frames(
				rcb_common, i, HNS_RCB_DEF_TX_COALESCED_FRAMES);
		hns_rcb_set_port_timeout(
			rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
	}

	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
		       HNS_RCB_COMMON_ENDIAN);

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
	} else {
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FNA_B, false);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FA_B, true);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
				 RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
	}

	return 0;
}

int hns_rcb_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = -EINVAL;
	}

	return bd_size_type;
}
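
/*
 * For example, hns_rcb_buf_size2type(2048) returns HNS_BD_SIZE_2048_TYPE,
 * matching RCB_DEFAULT_BUFFER_SIZE above; any unsupported size yields
 * -EINVAL, so callers should check for a negative return before writing
 * the value to the BD_LEN registers.
 */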

static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u16 desc_num, mdnum_ppkt;
	bool irq_idx, is_ver1;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	} else {
		ring = &q->tx_ring;
		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
				 HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
	}

	rcb_common = ring_pair_cb->rcb_common;
	desc_num = rcb_common->dsaf_dev->desc_num;

	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = RCB_DEFAULT_BUFFER_SIZE;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = mdnum_ppkt;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}

static int hns_rcb_get_port_in_comm(
	struct rcb_common_cb *rcb_common, int ring_idx)
{
	return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
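
/*
 * Worked example: in DSAF_MODE_DISABLE_6PORT_0VM (max_vfn = 1,
 * max_q_per_vf = 16, per hns_rcb_get_queue_mode() below) each port owns 16
 * consecutive rings, so rings 0-15 map to port 0, rings 16-31 to port 1,
 * and so on.
 */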

#define SERVICE_RING_IRQ_IDX(v1) \
	((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return SERVICE_RING_IRQ_IDX(is_ver1);
	else
		return HNS_DEBUG_RING_IRQ_IDX;
}

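/*
 * Ring register windows begin 0x10000 into the RCB common space and are
 * spaced HNS_RCB_REG_OFFSET bytes apart, one window per ring pair.
 */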
#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
/**
 *hns_rcb_get_cfg - get rcb config
 *@rcb_common: rcb common device
 */
int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct platform_device *pdev =
		to_platform_device(rcb_common->dsaf_dev->dev);
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_comm =
			hns_rcb_get_port_in_comm(rcb_common, i);
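		/*
		 * IRQ layout, as encoded below: v1 hardware uses two
		 * vectors per ring (TX first, then RX), while v2 uses
		 * three vectors per ring with RX at offset 0 and TX at
		 * offset 1.
		 */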
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
		is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
			  platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
		is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
			  platform_get_irq(pdev, base_irq_idx + i * 3);
		if ((ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] == -EPROBE_DEFER) ||
		    (ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] == -EPROBE_DEFER))
			return -EPROBE_DEFER;

		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}

	return 0;
}

/**
 *hns_rcb_get_rx_coalesced_frames - get rcb port rx coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *
 *Returns: coalesced_frames
 */
u32 hns_rcb_get_rx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}

/**
 *hns_rcb_get_tx_coalesced_frames - get rcb port tx coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *
 *Returns: coalesced_frames
 */
u32 hns_rcb_get_tx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	u64 reg;

	reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
	return dsaf_read_dev(rcb_common, reg);
}

/**
 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *
 *Returns: time_out
 */
u32 hns_rcb_get_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
		       HNS_RCB_CLK_FREQ_MHZ;
	else
		return dsaf_read_dev(rcb_common,
				     RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}

/**
 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *@timeout: tx/rx time for coalesced time_out
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);

	if (timeout == old_timeout)
		return 0;

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
			dev_err(rcb_common->dsaf_dev->dev,
				"error: coalesce_usecs setting not supported!\n");
			return -EINVAL;
		}
	}
	if (timeout > HNS_RCB_MAX_COALESCED_USECS || timeout == 0) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: coalesce_usecs setting supports 1~1023us\n");
		return -EINVAL;
	}
	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
	return 0;
}
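
/*
 * Illustrative path (assumed, based on the hnae ops layering): a request
 * such as "ethtool -C ethX rx-usecs 50" would eventually land here with
 * timeout = 50, after the checks above have rejected unsupported
 * configurations.
 */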

/**
 *hns_rcb_set_tx_coalesced_frames - set rcb coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *@coalesced_frames: tx/rx BD num for coalesced frames
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_tx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline =
		hns_rcb_get_tx_coalesced_frames(rcb_common, port_idx);
	u64 reg;

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames != 1) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: tx coalesce_frames setting not supported!\n");
		return -EINVAL;
	}

	reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
	dsaf_write_dev(rcb_common, reg, coalesced_frames);
	return 0;
}

/**
 *hns_rcb_set_rx_coalesced_frames - set rcb rx coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *@coalesced_frames: tx/rx BD num for coalesced frames
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_rx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline =
		hns_rcb_get_rx_coalesced_frames(rcb_common, port_idx);

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames >= rcb_common->desc_num ||
	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
	    coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: coalesce_frames setting not supported!\n");
		return -EINVAL;
	}

	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
		       coalesced_frames);
	return 0;
}

/**
 *hns_rcb_get_queue_mode - get max VM number and max ring number per VM
 *			   according to dsaf mode
 *@dsaf_mode: dsaf mode
 *@max_vfn: max vfn number
 *@max_q_per_vf: max ring number per vm
 */
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
			    u16 *max_q_per_vf)
{
	switch (dsaf_mode) {
	case DSAF_MODE_DISABLE_6PORT_0VM:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	case DSAF_MODE_DISABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		*max_vfn = 1;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_2PORT_64VM:
		*max_vfn = 64;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_6PORT_16VM:
		*max_vfn = 16;
		*max_q_per_vf = 1;
		break;
	default:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	}
}
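
/*
 * For example, DSAF_MODE_DISABLE_2PORT_64VM exposes 64 VFs with a single
 * queue each, while the 6-port/0-VM mode keeps all 16 queues on one
 * function; unknown modes fall back to the latter layout.
 */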

int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
	switch (dsaf_dev->dsaf_mode) {
	case DSAF_MODE_ENABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		return 1;

	case DSAF_MODE_DISABLE_FIX:
		return 6;

	case DSAF_MODE_ENABLE_0VM:
		return 32;

	case DSAF_MODE_DISABLE_6PORT_0VM:
	case DSAF_MODE_ENABLE_16VM:
	case DSAF_MODE_DISABLE_6PORT_2VM:
	case DSAF_MODE_DISABLE_6PORT_16VM:
	case DSAF_MODE_DISABLE_6PORT_4VM:
	case DSAF_MODE_ENABLE_8VM:
		return 96;

	case DSAF_MODE_DISABLE_2PORT_16VM:
	case DSAF_MODE_DISABLE_2PORT_8VM:
	case DSAF_MODE_ENABLE_32VM:
	case DSAF_MODE_DISABLE_2PORT_64VM:
	case DSAF_MODE_ENABLE_128VM:
		return 128;

	default:
		dev_warn(dsaf_dev->dev,
			 "get ring num failed, using default! dsaf_mode=%d\n",
			 dsaf_dev->dsaf_mode);
		return 128;
	}
}

void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}

static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}

int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
			   int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev);

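	/*
	 * The control block and its trailing ring_pair_cb[] array are
	 * allocated as one block; on newer kernels this is the pattern
	 * struct_size() from <linux/overflow.h> expresses directly.
	 */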
	rcb_common =
		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
			ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
	if (!rcb_common) {
		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
		return -ENOMEM;
	}
	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;

	hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
	rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);

	dsaf_dev->rcb_common[comm_index] = rcb_common;
	return 0;
}

void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}

void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

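	/*
	 * The hardware packet counters are read and then reset (the 0x1
	 * writes below appear to clear the record registers), so totals
	 * accumulate in the software hw_stats block between calls.
	 */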
	hw_stats->rx_pkts += dsaf_read_dev(queue,
			 RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			 PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			 PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			 RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			 PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			 PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}

/**
 *hns_rcb_get_stats - get rcb statistics
 *@queue: rcb queue
 *@data: statistics value buffer
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
	u64 *regs_buff = data;
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	regs_buff[0] = hw_stats->tx_pkts;
	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
	regs_buff[3] =
		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
	regs_buff[10] = queue->tx_ring.stats.restart_queue;
	regs_buff[11] = queue->tx_ring.stats.tx_busy;

	regs_buff[12] = hw_stats->rx_pkts;
	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
	regs_buff[15] =
		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
	regs_buff[26] = queue->rx_ring.stats.l2_err;
	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}

/**
 *hns_rcb_get_ring_sset_count - rcb string set count
 *@stringset: ethtool string set id
 *return rcb ring string set count
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
		return HNS_RING_STATIC_REG_NUM;

	return 0;
}

/**
 *hns_rcb_get_common_regs_count - rcb common regs count
 *return regs count
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}

/**
 *hns_rcb_get_ring_regs_count - rcb ring regs count
 *return regs count
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}

/**
 *hns_rcb_get_strings - get rcb string set
 *@stringset: string set index
 *@data: strings name value
 *@index: queue index
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
	char *buff = (char *)data;

	if (stringset != ETH_SS_STATS)
		return;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
}

void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
	bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
	u32 reg_tmp;
	u32 reg_num_tmp;
	u32 i = 0;

	/* rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry registers */
	for (i = 0; i < 16; i++) { /* 16 per-port entries in each array */
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
	reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
	for (i = 0; i < reg_num_tmp; i++)
		regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);

	regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 78; i < 80; i++)
		regs[i] = 0xcccccccc;
}

void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
	u32 *regs = data;
	struct ring_pair_cb *ring_pair
		= container_of(queue, struct ring_pair_cb, q);
	u32 i = 0;

	/* rcb ring registers */
	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
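	/* note: regs[14] is skipped here, as in the original register layout */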
	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

	/* mark end of ring regs */
	for (i = 35; i < 40; i++)
		regs[i] = 0xcccccc00 + ring_pair->index;
}