/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10

/**
 * hns_rcb_wait_fbd_clean - wait until the fetched BDs of the rings are clean
 * @qs: array of queue pointers
 * @q_num: number of queues in the array
 * @flag: tx or rx flag
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
	int i, wait_cnt;
	u32 fbd_num;

	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
		usleep_range(200, 300);
		fbd_num = 0;
		if (flag & RCB_INT_FLAG_TX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_TX_RING_FBDNUM_REG);
		if (flag & RCB_INT_FLAG_RX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_RX_RING_FBDNUM_REG);
		if (!fbd_num)
			i++;
		if (wait_cnt >= 10000)
			break;
	}

	if (i < q_num)
		dev_err(qs[i]->handle->owner_dev,
			"queue(%d) wait fbd(%d) clean fail!\n", i, fbd_num);
}

/**
 * hns_rcb_reset_ring_hw - reset a ring in hardware
 * @q: ring struct pointer
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;

	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		msleep(20);
		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			msleep(20);
			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

			wait_cnt++;
		}

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}

/**
 * hns_rcb_int_ctrl_hw - rcb irq enable control
 * @q: hnae queue struct pointer
 * @flag: ring flag, tx or rx
 * @mask: value written to the interrupt mask registers (non-zero sets the
 *        mask, zero clears it)
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
			       int_mask_en);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
			       int_mask_en);
	}
}

void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
	}
}

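/*
 * Illustrative only (this exact sequence is not in this file): a typical
 * quiesce of one queue combines the helpers above -- mask the ring
 * interrupts, wait for the fetched BDs to drain, then clear any latched
 * interrupt status:
 *
 *	hns_rcb_int_ctrl_hw(q, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX, 1);
 *	hns_rcb_wait_fbd_clean(&q, 1, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
 *	hns_rcb_int_clr_hw(q, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
 */
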
void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}

void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}

/**
 * hns_rcb_ring_enable_hw - enable or disable a ring
 * @q: hnae queue struct pointer
 * @val: non-zero enables BD prefetch, zero disables it
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}

/**
 * hns_rcb_common_init_commit_hw - mark rcb common init as completed
 * @rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb();	/* Sync point before breakpoint */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb();	/* Sync point after breakpoint */
}

/**
 * hns_rcb_ring_init - init rcb ring
 * @ring_pair: ring pair control block
 * @ring_type: ring type, RX_RING or TX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
	u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	}
}

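/*
 * Note on the base-address split above: the 64-bit descriptor DMA address
 * is programmed as two 32-bit halves.  The high half is computed as
 * (dma >> 31) >> 1 rather than dma >> 32 so the expression stays well
 * defined when dma_addr_t is only 32 bits wide (shifting by the full type
 * width is undefined behaviour in C).  For example, with
 * dma = 0x123456780:
 *
 *	(u32)dma                == 0x23456780  (BASEADDR_L)
 *	(u32)((dma >> 31) >> 1) == 0x00000001  (BASEADDR_H)
 */
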
/**
 * hns_rcb_init_hw - init rcb hardware
 * @ring: rcb ring
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}

/**
 * hns_rcb_set_port_desc_cnt - set rcb port descriptor num
 * @rcb_common: rcb_common device
 * @port_idx: port index
 * @desc_cnt: BD num
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
				      u32 port_idx, u32 desc_cnt)
{
	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
		       desc_cnt);
}

static void hns_rcb_set_port_timeout(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
			       timeout * HNS_RCB_CLK_FREQ_MHZ);
	else
		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
}

static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return HNS_RCB_SERVICE_NW_ENGINE_NUM;
	else
		return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}

/* clear the rcb common exception interrupts, then unmask or mask them */
static void hns_rcb_comm_exc_irq_en(
	struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_vlue = 0xfffffffful;
	u32 msk_vlue = en ? 0 : 0xfffffffful;

	/* clear the interrupt status first */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);

	/* then program the masks */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);

	/* tx BDs do not need the cacheline, so keep sf_txring_fbd_intmask
	 * (bit 1) masked
	 */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
}

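/*
 * Mask values written by hns_rcb_comm_exc_irq_en(), for reference:
 *
 *	en == 1: msk_vlue == 0x00000000 -> all sources unmasked, except
 *	         RCB_COM_SF_CFG_INTMASK_BD which gets 0x2 (bit 1 stays
 *	         masked, see the comment in the function)
 *	en == 0: msk_vlue == 0xffffffff -> everything masked
 */
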
/**
 * hns_rcb_common_init_hw - init rcb common hardware
 * @rcb_common: rcb_common device
 *
 * Return: 0 on success, negative on failure
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
	u32 reg_val;
	int i;
	int port_num = hns_rcb_common_get_port_num(rcb_common);

	hns_rcb_comm_exc_irq_en(rcb_common, 0);

	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
	if (!(reg_val & 0x1)) {
		dev_err(rcb_common->dsaf_dev->dev,
			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
		return -EBUSY;
	}

	for (i = 0; i < port_num; i++) {
		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
		(void)hns_rcb_set_coalesced_frames(
			rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
		hns_rcb_set_port_timeout(
			rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
	}

	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
		       HNS_RCB_COMMON_ENDIAN);

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
	} else {
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FNA_B, false);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FA_B, true);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
				 RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
	}

	return 0;
}

int hns_rcb_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = -EINVAL;
	}

	return bd_size_type;
}

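/*
 * Example (illustrative): callers are expected to check the return value of
 * hns_rcb_buf_size2type(), since any buffer size other than
 * 512/1024/2048/4096 yields -EINVAL:
 *
 *	int bd_size_type = hns_rcb_buf_size2type(2048);
 *
 *	if (bd_size_type < 0)
 *		return bd_size_type;	(unsupported buffer size)
 */
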
static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u32 buf_size;
	u16 desc_num, mdnum_ppkt;
	bool irq_idx, is_ver1;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	} else {
		ring = &q->tx_ring;
		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
				HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
	}

	rcb_common = ring_pair_cb->rcb_common;
	buf_size = rcb_common->dsaf_dev->buf_size;
	desc_num = rcb_common->dsaf_dev->desc_num;

	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = buf_size;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = mdnum_ppkt;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}

static int hns_rcb_get_port_in_comm(
	struct rcb_common_cb *rcb_common, int ring_idx)
{
	return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}

#define SERVICE_RING_IRQ_IDX(v1) \
	((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return SERVICE_RING_IRQ_IDX(is_ver1);
	else
		return HNS_DEBUG_RING_IRQ_IDX;
}

#define RCB_COMM_BASE_TO_RING_BASE(base, ringid) \
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
/**
 * hns_rcb_get_cfg - get rcb config
 * @rcb_common: rcb common device
 */
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct platform_device *pdev =
		to_platform_device(rcb_common->dsaf_dev->dev);
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_comm =
			hns_rcb_get_port_in_comm(rcb_common, i);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
			is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
				platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
			is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
				platform_get_irq(pdev, base_irq_idx + i * 3);
		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}
}

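/*
 * Per-ring IRQ layout resolved above, for reference (i is the ring index,
 * base is hns_rcb_get_base_irq_idx()):
 *
 *	v1: tx irq = base + 2 * i      rx irq = base + 2 * i + 1
 *	v2: tx irq = base + 3 * i + 1  rx irq = base + 3 * i
 *
 * i.e. v2 hardware reserves three vectors per ring pair and swaps the
 * rx/tx order relative to v1.
 */
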
/**
 * hns_rcb_get_coalesced_frames - get rcb port coalesced frames
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 *
 * Return: coalesced_frames
 */
u32 hns_rcb_get_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}

/**
 * hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 *
 * Return: time_out
 */
u32 hns_rcb_get_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
		       HNS_RCB_CLK_FREQ_MHZ;
	else
		return dsaf_read_dev(rcb_common,
				     RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}

/**
 * hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 * @timeout: tx/rx time for coalesced time_out
 *
 * Return: 0 for success, or an error code in case of failure
 */
int hns_rcb_set_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);

	if (timeout == old_timeout)
		return 0;

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
			dev_err(rcb_common->dsaf_dev->dev,
				"error: setting coalesce_usecs is not supported!\n");
			return -EINVAL;
		}
	}
	if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: coalesce_usecs setting supports 0~1023us\n");
		return -EINVAL;
	}

	if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		if (timeout == 0)
			/* timeout == 0: disable the gap time */
			dsaf_set_reg_field(rcb_common->io_base,
					   RCB_INT_GAP_TIME_REG + port_idx * 4,
					   PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
					   0);
		else
			/* non-zero timeout: restore the gap time to 1 */
			dsaf_set_reg_field(rcb_common->io_base,
					   RCB_INT_GAP_TIME_REG + port_idx * 4,
					   PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
					   1);
	}

	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
	return 0;
}

/**
 * hns_rcb_set_coalesced_frames - set rcb coalesced frames
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 * @coalesced_frames: tx/rx BD num for coalesced frames
 *
 * Return: 0 for success, or an error code in case of failure
 */
int hns_rcb_set_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames >= rcb_common->desc_num ||
	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
	    coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: coalesce_frames setting is not supported!\n");
		return -EINVAL;
	}

	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
		       coalesced_frames);
	return 0;
}

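/*
 * Example (illustrative) of the timeout conversion performed by
 * hns_rcb_set_port_timeout(): on v1 hardware the shared
 * RCB_CFG_OVERTIME_REG holds clock ticks, so a request of 50 us is written
 * as 50 * HNS_RCB_CLK_FREQ_MHZ ticks, while v2 hardware takes the
 * microsecond value directly in a per-port register.  Reading it back with
 * hns_rcb_get_coalesce_usecs() applies the inverse division on v1.
 */
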
/**
 * hns_rcb_get_queue_mode - get max VM number and max ring number per VM
 *                          according to dsaf mode
 * @dsaf_mode: dsaf mode
 * @max_vfn: max vfn number
 * @max_q_per_vf: max ring number per vm
 */
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
			    u16 *max_q_per_vf)
{
	switch (dsaf_mode) {
	case DSAF_MODE_DISABLE_6PORT_0VM:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	case DSAF_MODE_DISABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		*max_vfn = 1;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_2PORT_64VM:
		*max_vfn = 64;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_6PORT_16VM:
		*max_vfn = 16;
		*max_q_per_vf = 1;
		break;
	default:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	}
}

int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
	switch (dsaf_dev->dsaf_mode) {
	case DSAF_MODE_ENABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		return 1;

	case DSAF_MODE_DISABLE_FIX:
		return 6;

	case DSAF_MODE_ENABLE_0VM:
		return 32;

	case DSAF_MODE_DISABLE_6PORT_0VM:
	case DSAF_MODE_ENABLE_16VM:
	case DSAF_MODE_DISABLE_6PORT_2VM:
	case DSAF_MODE_DISABLE_6PORT_16VM:
	case DSAF_MODE_DISABLE_6PORT_4VM:
	case DSAF_MODE_ENABLE_8VM:
		return 96;

	case DSAF_MODE_DISABLE_2PORT_16VM:
	case DSAF_MODE_DISABLE_2PORT_8VM:
	case DSAF_MODE_ENABLE_32VM:
	case DSAF_MODE_DISABLE_2PORT_64VM:
	case DSAF_MODE_ENABLE_128VM:
		return 128;

	default:
		dev_warn(dsaf_dev->dev,
			 "get ring num fail, use default! dsaf_mode=%d\n",
			 dsaf_dev->dsaf_mode);
		return 128;
	}
}

void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}

static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}

int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
			   int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev);

	rcb_common =
		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
			ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
	if (!rcb_common) {
		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
		return -ENOMEM;
	}
	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;

	hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
	rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);

	dsaf_dev->rcb_common[comm_index] = rcb_common;
	return 0;
}

void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}

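/*
 * Layout note (illustrative): hns_rcb_common_get_cfg() allocates the
 * control block and its ring_pair_cb array in one devm_kzalloc, which the
 * single-allocation sizing implies is a trailing array at the end of
 * struct rcb_common_cb:
 *
 *	+----------------------------+
 *	| struct rcb_common_cb       |
 *	+----------------------------+
 *	| ring_pair_cb[0]            |
 *	| ...                        |
 *	| ring_pair_cb[ring_num - 1] |
 *	+----------------------------+
 *
 * The memory is device-managed, which is why hns_rcb_common_free_cfg()
 * only clears the dsaf_dev back-pointer instead of freeing anything.
 */
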
/* accumulate the hardware packet counters into hw_stats; the pktnum record
 * registers are rewritten with 0x1 after each read, so the values read
 * appear to accumulate as deltas
 */
void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	hw_stats->rx_pkts += dsaf_read_dev(queue,
			RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}

/**
 * hns_rcb_get_stats - get rcb statistics
 * @queue: rcb ring queue
 * @data: statistics output buffer
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
	u64 *regs_buff = data;
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	regs_buff[0] = hw_stats->tx_pkts;
	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
	regs_buff[3] =
		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
	regs_buff[10] = queue->tx_ring.stats.restart_queue;
	regs_buff[11] = queue->tx_ring.stats.tx_busy;

	regs_buff[12] = hw_stats->rx_pkts;
	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
	regs_buff[15] =
		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
	regs_buff[26] = queue->rx_ring.stats.l2_err;
	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}

/**
 * hns_rcb_get_ring_sset_count - rcb string set count
 * @stringset: ethtool string set id
 *
 * Return: rcb ring string set count
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS)
		return HNS_RING_STATIC_REG_NUM;

	return 0;
}

/**
 * hns_rcb_get_common_regs_count - rcb common regs count
 *
 * Return: regs count
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}

/**
 * hns_rcb_get_ring_regs_count - rcb ring regs count
 *
 * Return: regs count
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}

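/*
 * The strings emitted by hns_rcb_get_strings() below line up one-to-one
 * with the regs_buff[] slots filled by hns_rcb_get_stats(), e.g.
 *
 *	regs_buff[0]  <-> "tx_ring%d_rcb_pkt_num"
 *	regs_buff[15] <-> "rx_ring%d_fbd_num"
 *	regs_buff[27] <-> "rx_ring%d_l3l4csum_err"
 *
 * so any new counter must be added to both functions at the same position.
 */
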
/**
 * hns_rcb_get_strings - get rcb string set
 * @stringset: string set index
 * @data: strings name buffer
 * @index: queue index
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
	char *buff = (char *)data;

	if (stringset != ETH_SS_STATS)
		return;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
}

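/*
 * Usage note (illustrative): the caller owns the sizing contract here --
 * data must provide ETH_GSTRING_LEN bytes for each of the
 * hns_rcb_get_ring_sset_count(ETH_SS_STATS) strings, as ethtool does for
 * ETH_SS_STATS string queries.
 */
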
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
	bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
	u32 reg_tmp;
	u32 reg_num_tmp;
	u32 i = 0;

	/* rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry registers */
	for (i = 0; i < 16; i++) { /* total 16 model registers */
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
	reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
	for (i = 0; i < reg_num_tmp; i++)
		regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);

	regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 78; i < 80; i++)
		regs[i] = 0xcccccccc;
}

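/*
 * Example (illustrative): consuming a common-regs dump.  Indices 0-77
 * carry register values and indices 78-79 hold the 0xcccccccc end marker,
 * so a parser can stop at the first marker word:
 *
 *	u32 buf[HNS_RCB_COMMON_DUMP_REG_NUM];
 *	u32 i;
 *
 *	hns_rcb_get_common_regs(rcb_com, buf);
 *	for (i = 0; i < ARRAY_SIZE(buf) && buf[i] != 0xcccccccc; i++)
 *		pr_debug("common reg[%u] = 0x%08x\n", i, buf[i]);
 */
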
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
	u32 *regs = data;
	struct ring_pair_cb *ring_pair
		= container_of(queue, struct ring_pair_cb, q);
	u32 i = 0;

	/* rcb ring registers */
	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

	/* mark end of ring regs */
	for (i = 35; i < 40; i++)
		regs[i] = 0xcccccc00 + ring_pair->index;
}
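
/*
 * Usage note (illustrative): as with the common-regs dump, a ring dump is
 * terminated by marker words -- here indices 35-39 carry
 * 0xcccccc00 + ring index, so the trailer also identifies which ring pair
 * produced the dump when several are captured back to back.
 */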