/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10

/**
 *hns_rcb_wait_fbd_clean - wait for the ring FBD counters to drain
 *@qs: ring struct pointer array
 *@q_num: number of queues in the array
 *@flag: tx or rx flag
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
	int i, wait_cnt;
	u32 fbd_num;

	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
		usleep_range(200, 300);
		fbd_num = 0;
		if (flag & RCB_INT_FLAG_TX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_TX_RING_FBDNUM_REG);
		if (flag & RCB_INT_FLAG_RX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_RX_RING_FBDNUM_REG);
		if (!fbd_num)
			i++;
		if (wait_cnt >= 10000)
			break;
	}

	if (i < q_num)
		dev_err(qs[i]->handle->owner_dev,
			"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}

/**
 *hns_rcb_reset_ring_hw - ring reset
 *@q: ring struct pointer
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;

	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		msleep(20);
		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			msleep(20);
			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

			wait_cnt++;
		}

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}

/**
 *hns_rcb_int_ctrl_hw - rcb irq enable control
 *@q: hnae queue struct pointer
 *@flag: ring flag tx or rx
 *@mask: mask
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
			       int_mask_en);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
			       int_mask_en);
	}
}

void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
	}
}
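/*
 * Usage sketch (illustrative only, not a call site from this file): a
 * poll path would typically mask the ring interrupts, clear any latched
 * status, and unmask again once the ring has been drained. This assumes
 * a non-zero mask value masks (disables) the interrupt, as the INTMSK
 * register writes above suggest:
 *
 *	hns_rcb_int_ctrl_hw(q, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX, 1);
 *	hns_rcb_int_clr_hw(q, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
 *	(process completed descriptors)
 *	hns_rcb_int_ctrl_hw(q, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX, 0);
 */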
void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}

void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}

/**
 *hns_rcb_ring_enable_hw - enable ring
 *@q: hnae queue struct pointer
 *@val: zero to disable ring prefetch, non-zero to enable it
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}

/**
 *hns_rcb_common_init_commit_hw - make rcb common init completed
 *@rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb(); /* make sure all prior cfg writes are visible first */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb(); /* make sure the finish flag is posted before returning */
}
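/*
 * Init-ordering sketch (an assumption about the caller, which lives in
 * the dsaf core rather than in this file): the commit above only makes
 * sense after all common configuration has been written, e.g.:
 *
 *	ret = hns_rcb_common_init_hw(rcb_common);
 *	if (ret)
 *		return ret;
 *	hns_rcb_common_init_commit_hw(rcb_common);
 */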
/**
 *hns_rcb_ring_init - init rcb ring
 *@ring_pair: ring pair control block
 *@ring_type: ring type, RX_RING or TX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
	u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		/* (dma >> 31) >> 1 equals dma >> 32 but stays well defined
		 * when dma_addr_t is only 32 bits wide
		 */
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_dsa);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_dsa);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_dsa);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_dsa);
	}
}

/**
 *hns_rcb_init_hw - init rcb hardware
 *@ring: rcb ring
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}

/**
 *hns_rcb_set_port_desc_cnt - set rcb port descriptor num
 *@rcb_common: rcb_common device
 *@port_idx: port index
 *@desc_cnt: BD num
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
				      u32 port_idx, u32 desc_cnt)
{
	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
		       desc_cnt);
}

/**
 *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx: port index
 *@coalesced_frames: BD num for coalesced frames
 */
static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
					     u32 port_idx,
					     u32 coalesced_frames)
{
	if (coalesced_frames >= rcb_common->desc_num ||
	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
		return -EINVAL;

	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
		       coalesced_frames);
	return 0;
}

/**
 *hns_rcb_get_port_coalesced_frames - get rcb port coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx: port index
 *return coalesced frames value
 */
static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
					     u32 port_idx)
{
	if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
		port_idx = 0;

	return dsaf_read_dev(rcb_common,
			     RCB_CFG_PKTLINE_REG + port_idx * 4);
}

/**
 *hns_rcb_set_timeout - set rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@timeout: time for coalesced time_out
 */
static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
				u32 timeout)
{
	dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
}

static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
	if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
		return HNS_RCB_SERVICE_NW_ENGINE_NUM;
	else
		return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}
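/*
 * Bounds sketch for hns_rcb_set_port_coalesced_frames() (illustrative;
 * the concrete numbers are assumptions, not values from this file):
 * with desc_num = 1024, a request of 1024 or more fails with -EINVAL
 * because the ring could never hold that many pending BDs, while a
 * small value such as 16 is written to the port's PKTLINE register:
 *
 *	if (!hns_rcb_set_port_coalesced_frames(rcb_common, port_idx, 16))
 *		dev_dbg(rcb_common->dsaf_dev->dev, "coalesce set\n");
 */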
/* clear rcb comm exception irq */
static void hns_rcb_comm_exc_irq_en(
			struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_value = 0xfffffffful;
	u32 msk_value = en ? 0 : 0xfffffffful;

	/* clear int */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_value);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_value);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_value);

	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_value);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_value);

	/* enable msk */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_value);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_value);

	/* tx BDs need no cacheline notification, so keep
	 * sf_txring_fbd_intmask (bit 1) masked in all cases
	 */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_value | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_value);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_value);
}

/**
 *hns_rcb_common_init_hw - init rcb common hardware
 *@rcb_common: rcb_common device
 *return 0 - success, negative - fail
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
	u32 reg_val;
	int i;
	int port_num = hns_rcb_common_get_port_num(rcb_common);

	hns_rcb_comm_exc_irq_en(rcb_common, 0);

	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
	if (!(reg_val & 0x1)) {
		dev_err(rcb_common->dsaf_dev->dev,
			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
		return -EBUSY;
	}

	for (i = 0; i < port_num; i++) {
		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
		(void)hns_rcb_set_port_coalesced_frames(
			rcb_common, i, rcb_common->coalesced_frames);
	}
	hns_rcb_set_timeout(rcb_common, rcb_common->timeout);

	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
		       HNS_RCB_COMMON_ENDIAN);

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
	} else {
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FNA_B, false);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FA_B, true);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
				 RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
	}

	return 0;
}

int hns_rcb_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = -EINVAL;
	}

	return bd_size_type;
}
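/*
 * Usage sketch (illustrative): hns_rcb_buf_size2type() returns -EINVAL
 * for unsupported sizes, so callers should validate before caching the
 * type that hns_rcb_ring_init() later programs into the BD_LEN regs:
 *
 *	int type = hns_rcb_buf_size2type(buf_size);
 *
 *	if (type < 0)
 *		return type;
 *	dsaf_dev->buf_size_type = type;
 *
 * e.g. 2048 maps to HNS_BD_SIZE_2048_TYPE, while 3000 is rejected.
 */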
static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u32 buf_size;
	u16 desc_num, mdnum_ppkt;
	bool irq_idx, is_ver1;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	} else {
		ring = &q->tx_ring;
		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
				       HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
	}

	rcb_common = ring_pair_cb->rcb_common;
	buf_size = rcb_common->dsaf_dev->buf_size;
	desc_num = rcb_common->dsaf_dev->desc_num;

	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = buf_size;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = mdnum_ppkt;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}

static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
{
	int comm_index = rcb_common->comm_index;
	int port;
	int q_num;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
		port = ring_idx / q_num;
	} else {
		port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1;
	}

	return port;
}

#define SERVICE_RING_IRQ_IDX(v1) \
	((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
#define DEBUG_RING_IRQ_IDX(v1) \
	((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX)
#define DEBUG_RING_IRQ_OFFSET(v1) \
	((v1) ? HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
	int comm_index = rcb_common->comm_index;
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
		return SERVICE_RING_IRQ_IDX(is_ver1);
	else
		return DEBUG_RING_IRQ_IDX(is_ver1) +
			(comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1);
}
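/*
 * Worked example for hns_rcb_get_port() (the concrete numbers are an
 * assumption for illustration): on the service engine with max_vfn = 1
 * and max_q_per_vf = 16, q_num is 16, so rings 0..15 map to port 0,
 * rings 16..31 to port 1, and so on. A debug engine ignores ring_idx
 * entirely and derives its single port from comm_index.
 */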
#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
/**
 *hns_rcb_get_cfg - get rcb config
 *@rcb_common: rcb common device
 */
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
	struct platform_device *pdev =
		to_platform_device(rcb_common->dsaf_dev->dev);
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
		is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
			  platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
		is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) :
			  platform_get_irq(pdev, base_irq_idx + i * 3);
		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}
}

/**
 *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
 *@dsaf_dev: dsa fabric device struct pointer
 *@port: port index
 *return coalesced_frames
 */
u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port)
{
	int comm_index = hns_dsaf_get_comm_idx_by_port(port);
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];

	return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
}

/**
 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
 *@dsaf_dev: dsa fabric device struct pointer
 *@comm_index: comm index
 *return time_out
 */
u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index)
{
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];

	return rcb_comm->timeout;
}

/**
 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
 *@dsaf_dev: dsa fabric device struct pointer
 *@port: port index
 *@timeout: time for coalesced time_out
 */
void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
				int port, u32 timeout)
{
	int comm_index = hns_dsaf_get_comm_idx_by_port(port);
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];

	if (rcb_comm->timeout == timeout)
		return;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		dev_err(dsaf_dev->dev,
			"error: setting coalesce_usecs is not supported!\n");
		return;
	}
	rcb_comm->timeout = timeout;
	hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout);
}

/**
 *hns_rcb_set_coalesced_frames - set rcb coalesced frames
 *@dsaf_dev: dsa fabric device struct pointer
 *@port: port index
 *@coalesced_frames: BD num for coalesced frames
 *Return 0 on success, negative on failure
 */
int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
				 int port, u32 coalesced_frames)
{
	int comm_index = hns_dsaf_get_comm_idx_by_port(port);
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
	u32 coalesced_reg_val;
	int ret;

	coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port);

	if (coalesced_reg_val == coalesced_frames)
		return 0;

	if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) {
		ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port,
							coalesced_frames);
		return ret;
	} else {
		return -EINVAL;
	}
}
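/*
 * IRQ-numbering sketch for hns_rcb_get_cfg() above (a reading of the
 * code, not an extra hardware spec): ver1 lays ring IRQs out in TX/RX
 * pairs in the device tree, ver2 in RX/TX order with a stride of three
 * platform resources per ring. With base_irq_idx = B and ring i:
 *
 *	ver1:	TX -> of-irq B + 2*i,	  RX -> of-irq B + 2*i + 1
 *	ver2:	RX -> platform B + 3*i,	  TX -> platform B + 3*i + 1
 */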
/**
 *hns_rcb_get_queue_mode - get max VM number and max ring number per VM
 *			   according to dsaf mode
 *@dsaf_mode: dsaf mode
 *@comm_index: comm index
 *@max_vfn: max vfn number
 *@max_q_per_vf: max ring number per vm
 */
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
			    u16 *max_vfn, u16 *max_q_per_vf)
{
	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		switch (dsaf_mode) {
		case DSAF_MODE_DISABLE_6PORT_0VM:
			*max_vfn = 1;
			*max_q_per_vf = 16;
			break;
		case DSAF_MODE_DISABLE_FIX:
			*max_vfn = 1;
			*max_q_per_vf = 1;
			break;
		case DSAF_MODE_DISABLE_2PORT_64VM:
			*max_vfn = 64;
			*max_q_per_vf = 1;
			break;
		case DSAF_MODE_DISABLE_6PORT_16VM:
			*max_vfn = 16;
			*max_q_per_vf = 1;
			break;
		default:
			*max_vfn = 1;
			*max_q_per_vf = 16;
			break;
		}
	} else {
		*max_vfn = 1;
		*max_q_per_vf = 1;
	}
}

int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index)
{
	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		switch (dsaf_dev->dsaf_mode) {
		case DSAF_MODE_ENABLE_FIX:
			return 1;

		case DSAF_MODE_DISABLE_FIX:
			return 6;

		case DSAF_MODE_ENABLE_0VM:
			return 32;

		case DSAF_MODE_DISABLE_6PORT_0VM:
		case DSAF_MODE_ENABLE_16VM:
		case DSAF_MODE_DISABLE_6PORT_2VM:
		case DSAF_MODE_DISABLE_6PORT_16VM:
		case DSAF_MODE_DISABLE_6PORT_4VM:
		case DSAF_MODE_ENABLE_8VM:
			return 96;

		case DSAF_MODE_DISABLE_2PORT_16VM:
		case DSAF_MODE_DISABLE_2PORT_8VM:
		case DSAF_MODE_ENABLE_32VM:
		case DSAF_MODE_DISABLE_2PORT_64VM:
		case DSAF_MODE_ENABLE_128VM:
			return 128;

		default:
			dev_warn(dsaf_dev->dev,
				 "get ring num failed, using default! dsaf_mode=%d\n",
				 dsaf_dev->dsaf_mode);
			return 128;
		}
	} else {
		return 1;
	}
}

void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev,
				       int comm_index)
{
	void __iomem *base_addr;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
		base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
	else
		base_addr = dsaf_dev->sds_base
			+ (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
			+ RCB_COMMON_REG_OFFSET;

	return base_addr;
}

static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev,
					    int comm_index)
{
	struct device_node *np = dsaf_dev->dev->of_node;
	phys_addr_t phy_addr;
	const __be32 *tmp_addr;
	u64 addr_offset = 0;
	u64 size = 0;
	int index = 0;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		index = 2;
		addr_offset = RCB_COMMON_REG_OFFSET;
	} else {
		index = 1;
		addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET +
				RCB_COMMON_REG_OFFSET;
	}
	tmp_addr = of_get_address(np, index, &size, NULL);
	phy_addr = of_translate_address(np, tmp_addr);
	return phy_addr + addr_offset;
}

int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
			   int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index);

	rcb_common =
		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
			ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
	if (!rcb_common) {
		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
		return -ENOMEM;
	}
	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;
	rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
	rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;

	hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index);
	rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index);

	dsaf_dev->rcb_common[comm_index] = rcb_common;
	return 0;
}

void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}
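/*
 * Lifecycle sketch (an assumed caller sequence; the real one lives in
 * the dsaf core, not in this file): the common block is sized for
 * ring_num ring pairs in one devm allocation, then each pair is wired
 * up, so teardown only has to drop the pointer:
 *
 *	ret = hns_rcb_common_get_cfg(dsaf_dev, comm_index);
 *	if (ret)
 *		return ret;
 *	hns_rcb_get_cfg(dsaf_dev->rcb_common[comm_index]);
 *	(use the rings...)
 *	hns_rcb_common_free_cfg(dsaf_dev, comm_index);
 */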
void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	hw_stats->rx_pkts += dsaf_read_dev(queue,
			RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}

/**
 *hns_rcb_get_stats - get rcb statistics
 *@queue: rcb queue
 *@data: statistics value
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
	u64 *regs_buff = data;
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	regs_buff[0] = hw_stats->tx_pkts;
	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
	regs_buff[3] =
		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
	regs_buff[10] = queue->tx_ring.stats.restart_queue;
	regs_buff[11] = queue->tx_ring.stats.tx_busy;

	regs_buff[12] = hw_stats->rx_pkts;
	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
	regs_buff[15] =
		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
	regs_buff[26] = queue->rx_ring.stats.l2_err;
	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}

/**
 *hns_rcb_get_ring_sset_count - rcb string set count
 *@stringset: ethtool stringset id
 *return rcb ring string set count
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS)
		return HNS_RING_STATIC_REG_NUM;

	return 0;
}

/**
 *hns_rcb_get_common_regs_count - rcb common regs count
 *return regs count
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}
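/*
 * Sizing sketch (illustrative ethtool plumbing, not code from this
 * file): the *_count() helpers exist so the ethtool layer can size its
 * buffers before asking for the data:
 *
 *	int cnt = hns_rcb_get_ring_sset_count(ETH_SS_STATS);
 *	u64 *buf = kcalloc(cnt, sizeof(u64), GFP_KERNEL);
 *
 *	if (buf)
 *		hns_rcb_get_stats(queue, buf);
 */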
/**
 *hns_rcb_get_ring_regs_count - rcb ring regs count
 *return regs count
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}

/**
 *hns_rcb_get_strings - get rcb string set
 *@stringset: string set index
 *@data: strings name value
 *@index: queue index
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
	char *buff = (char *)data;

	if (stringset != ETH_SS_STATS)
		return;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
}
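/*
 * Consistency sketch (a constraint implied by the ethtool API rather
 * than stated in this file): the strings emitted above must line up
 * one-to-one, and in the same order, with the values written by
 * hns_rcb_get_stats(), e.g. slot 0 pairs like this:
 *
 *	"tx_ring%d_rcb_pkt_num"  <->  regs_buff[0] = hw_stats->tx_pkts;
 *
 * Each name occupies a fixed ETH_GSTRING_LEN slot, hence the pointer
 * bump after every snprintf().
 */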
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	u32 i = 0;

	/* rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry registers */
	for (i = 0; i < 16; i++) { /* total 16 model registers */
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG);
	regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 73; i < 80; i++)
		regs[i] = 0xcccccccc;
}
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
	u32 *regs = data;
	struct ring_pair_cb *ring_pair
		= container_of(queue, struct ring_pair_cb, q);
	u32 i = 0;

	/* rcb ring registers */
	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

	/* note: the tx dump below skips from regs[13] to regs[15],
	 * leaving regs[14] unwritten
	 */
	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

	/* mark end of ring regs */
	for (i = 35; i < 40; i++)
		regs[i] = 0xcccccc00 + ring_pair->index;
}