/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10

/**
 * hns_rcb_wait_fbd_clean - wait until the queues' FBDs are drained
 * @qs: array of queue struct pointers
 * @q_num: number of queues in the array
 * @flag: tx or rx flag
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
        int i, wait_cnt;
        u32 fbd_num;

        for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
                usleep_range(200, 300);
                fbd_num = 0;
                if (flag & RCB_INT_FLAG_TX)
                        fbd_num += dsaf_read_dev(qs[i],
                                                 RCB_RING_TX_RING_FBDNUM_REG);
                if (flag & RCB_INT_FLAG_RX)
                        fbd_num += dsaf_read_dev(qs[i],
                                                 RCB_RING_RX_RING_FBDNUM_REG);
                if (!fbd_num)
                        i++;
                if (wait_cnt >= 10000)
                        break;
        }

        if (i < q_num)
                dev_err(qs[i]->handle->owner_dev,
                        "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}
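/*
 * Ring reset handshake used below: once the TX FBD counter has
 * drained, prefetch is disabled, RCB_RING_T0_BE_RST is asserted and
 * RCB_RING_COULD_BE_RST is polled. If the hardware does not report
 * "could be reset" within RCB_RESET_WAIT_TIMES polls, the reset pulse
 * is re-issued, and the whole sequence is retried up to
 * RCB_RESET_TRY_TIMES times.
 */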
/**
 * hns_rcb_reset_ring_hw - ring reset
 * @q: ring struct pointer
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
        u32 wait_cnt;
        u32 try_cnt = 0;
        u32 could_ret;
        u32 tx_fbd_num;

        while (try_cnt++ < RCB_RESET_TRY_TIMES) {
                usleep_range(100, 200);
                tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
                if (tx_fbd_num)
                        continue;

                dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

                dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

                msleep(20);
                could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

                wait_cnt = 0;
                while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
                        dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

                        dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

                        msleep(20);
                        could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

                        wait_cnt++;
                }

                dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

                if (could_ret)
                        break;
        }

        if (try_cnt >= RCB_RESET_TRY_TIMES)
                dev_err(q->dev->dev, "port%d reset ring fail\n",
                        hns_ae_get_vf_cb(q->handle)->port_index);
}

/**
 * hns_rcb_int_ctrl_hw - rcb irq enable control
 * @q: hnae queue struct pointer
 * @flag: ring flag, tx or rx
 * @mask: mask
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
        u32 int_mask_en = !!mask;

        if (flag & RCB_INT_FLAG_TX) {
                dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
                dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
                               int_mask_en);
        }

        if (flag & RCB_INT_FLAG_RX) {
                dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
                dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
                               int_mask_en);
        }
}

void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
        if (flag & RCB_INT_FLAG_TX) {
                dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
                dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
        }

        if (flag & RCB_INT_FLAG_RX) {
                dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
                dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
        }
}

void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
        u32 int_mask_en = !!mask;

        if (flag & RCB_INT_FLAG_TX)
                dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

        if (flag & RCB_INT_FLAG_RX)
                dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}

void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
        if (flag & RCB_INT_FLAG_TX)
                dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

        if (flag & RCB_INT_FLAG_RX)
                dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}

/**
 * hns_rcb_ring_enable_hw - enable ring
 * @q: hnae queue struct pointer
 * @val: prefetch enable value to write
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
        dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

void hns_rcb_start(struct hnae_queue *q, u32 val)
{
        hns_rcb_ring_enable_hw(q, val);
}

/**
 * hns_rcb_common_init_commit_hw - mark rcb common initialization done
 * @rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
        wmb(); /* Sync point before breakpoint */
        dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
        wmb(); /* Sync point after breakpoint */
}
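/*
 * Note on the ring base-address writes below: ((dma >> 31) >> 1) is
 * the upper 32 bits of the DMA address. The shift is split in two
 * because shifting a value by its full width (a 32-bit dma_addr_t by
 * 32) is undefined behaviour in C; the kernel's upper_32_bits()
 * helper uses the same two-step trick.
 */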
/**
 * hns_rcb_ring_init - init rcb ring
 * @ring_pair: ring pair control block
 * @ring_type: ring type, RX_RING or TX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
        struct hnae_queue *q = &ring_pair->q;
        struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
        u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
        struct hnae_ring *ring =
                (ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
        dma_addr_t dma = ring->desc_dma_addr;

        if (ring_type == RX_RING) {
                dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
                               (u32)dma);
                dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
                               (u32)((dma >> 31) >> 1));

                dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
                               bd_size_type);
                /* the per-ring BD_NUM and PKTLINE registers take the
                 * port index, which appears to select the per-port
                 * configuration programmed in hns_rcb_common_init_hw()
                 */
                dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
                               ring_pair->port_id_in_comm);
                dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
                               ring_pair->port_id_in_comm);
        } else {
                dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
                               (u32)dma);
                dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
                               (u32)((dma >> 31) >> 1));

                dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
                               bd_size_type);
                dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
                               ring_pair->port_id_in_comm);
                dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
                               ring_pair->port_id_in_comm);
        }
}

/**
 * hns_rcb_init_hw - init rcb hardware
 * @ring: rcb ring
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
        hns_rcb_ring_init(ring, RX_RING);
        hns_rcb_ring_init(ring, TX_RING);
}

/**
 * hns_rcb_set_port_desc_cnt - set rcb port descriptor number
 * @rcb_common: rcb_common device
 * @port_idx: port index
 * @desc_cnt: BD number
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
                                      u32 port_idx, u32 desc_cnt)
{
        dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
                       desc_cnt);
}
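/*
 * Coalescing timeout programming differs between hardware versions:
 * v1 has a single chip-wide overtime register programmed in clock
 * cycles (hence the multiplication by HNS_RCB_CLK_FREQ_MHZ for a
 * microsecond input), while v2 has one register per port programmed
 * directly in microseconds.
 */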
static void hns_rcb_set_port_timeout(
        struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
        if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
                dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
                               timeout * HNS_RCB_CLK_FREQ_MHZ);
        else
                dsaf_write_dev(rcb_common,
                               RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
                               timeout);
}

static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
        if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
                return HNS_RCB_SERVICE_NW_ENGINE_NUM;
        else
                return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}

/* clear and then enable or disable the rcb common exception interrupts */
static void hns_rcb_comm_exc_irq_en(
        struct rcb_common_cb *rcb_common, int en)
{
        u32 clr_vlue = 0xfffffffful;
        u32 msk_vlue = en ? 0 : 0xfffffffful;

        /* clear the interrupt status registers */
        dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);

        dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);

        dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);

        dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
        dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);

        /* set the interrupt masks: 0 enables, all-ones masks */
        dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);

        dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);

        /* tx BDs do not need the cacheline interrupt, so keep
         * sf_txring_fbd_intmask (bit 1) masked
         */
        dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);

        dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
        dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
}

/**
 * hns_rcb_common_init_hw - init rcb common hardware
 * @rcb_common: rcb_common device
 *
 * Return: 0 on success, negative errno on failure
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
        u32 reg_val;
        int i;
        int port_num = hns_rcb_common_get_port_num(rcb_common);

        hns_rcb_comm_exc_irq_en(rcb_common, 0);

        reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
        if (!(reg_val & 0x1)) {
                dev_err(rcb_common->dsaf_dev->dev,
                        "RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
                return -EBUSY;
        }

        for (i = 0; i < port_num; i++) {
                hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
                (void)hns_rcb_set_coalesced_frames(
                        rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
                hns_rcb_set_port_timeout(
                        rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
        }

        dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
                       HNS_RCB_COMMON_ENDIAN);

        if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
                dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
                dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
        } else {
                dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
                                 RCB_COM_CFG_FNA_B, false);
                dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
                                 RCB_COM_CFG_FA_B, true);
                dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
                                 RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
        }

        return 0;
}
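/*
 * Maps an rx buffer size in bytes to the hardware BD size encoding,
 * e.g. hns_rcb_buf_size2type(2048) returns HNS_BD_SIZE_2048_TYPE;
 * any size outside {512, 1024, 2048, 4096} yields -EINVAL.
 */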
int hns_rcb_buf_size2type(u32 buf_size)
{
        int bd_size_type;

        switch (buf_size) {
        case 512:
                bd_size_type = HNS_BD_SIZE_512_TYPE;
                break;
        case 1024:
                bd_size_type = HNS_BD_SIZE_1024_TYPE;
                break;
        case 2048:
                bd_size_type = HNS_BD_SIZE_2048_TYPE;
                break;
        case 4096:
                bd_size_type = HNS_BD_SIZE_4096_TYPE;
                break;
        default:
                bd_size_type = -EINVAL;
        }

        return bd_size_type;
}

static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
        struct hnae_ring *ring;
        struct rcb_common_cb *rcb_common;
        struct ring_pair_cb *ring_pair_cb;
        u32 buf_size;
        u16 desc_num, mdnum_ppkt;
        bool irq_idx, is_ver1;

        ring_pair_cb = container_of(q, struct ring_pair_cb, q);
        is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
        if (ring_type == RX_RING) {
                ring = &q->rx_ring;
                ring->io_base = ring_pair_cb->q.io_base;
                irq_idx = HNS_RCB_IRQ_IDX_RX;
                mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
        } else {
                ring = &q->tx_ring;
                ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
                        HNS_RCB_TX_REG_OFFSET;
                irq_idx = HNS_RCB_IRQ_IDX_TX;
                mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
                                       HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
        }

        rcb_common = ring_pair_cb->rcb_common;
        buf_size = rcb_common->dsaf_dev->buf_size;
        desc_num = rcb_common->dsaf_dev->desc_num;

        ring->desc = NULL;
        ring->desc_cb = NULL;

        ring->irq = ring_pair_cb->virq[irq_idx];
        ring->desc_dma_addr = 0;

        ring->buf_size = buf_size;
        ring->desc_num = desc_num;
        ring->max_desc_num_per_pkt = mdnum_ppkt;
        ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
        ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
        ring->next_to_use = 0;
        ring->next_to_clean = 0;
}

static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
        ring_pair_cb->q.handle = NULL;

        hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
        hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}
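/*
 * Each port in the common block owns (max_vfn * max_q_per_vf)
 * consecutive rings, so the owning port of a ring is simply
 * ring_idx / (max_q_per_vf * max_vfn). For example, with max_vfn = 16
 * and max_q_per_vf = 1, rings 0..15 belong to port 0, rings 16..31 to
 * port 1, and so on.
 */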
static int hns_rcb_get_port_in_comm(
        struct rcb_common_cb *rcb_common, int ring_idx)
{
        return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}

#define SERVICE_RING_IRQ_IDX(v1) \
        ((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
        bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

        if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
                return SERVICE_RING_IRQ_IDX(is_ver1);
        else
                return HNS_DEBUG_RING_IRQ_IDX;
}

#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
        ((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
/**
 * hns_rcb_get_cfg - get rcb config
 * @rcb_common: rcb common device
 */
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
        struct ring_pair_cb *ring_pair_cb;
        u32 i;
        u32 ring_num = rcb_common->ring_num;
        int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
        struct platform_device *pdev =
                to_platform_device(rcb_common->dsaf_dev->dev);
        bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

        for (i = 0; i < ring_num; i++) {
                ring_pair_cb = &rcb_common->ring_pair_cb[i];
                ring_pair_cb->rcb_common = rcb_common;
                ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
                ring_pair_cb->index = i;
                ring_pair_cb->q.io_base =
                        RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
                ring_pair_cb->port_id_in_comm =
                        hns_rcb_get_port_in_comm(rcb_common, i);
                /* v1 uses two IRQ vectors per ring (tx first, then rx);
                 * v2 appears to group three vectors per ring, with rx
                 * at offset 0 and tx at offset 1
                 */
                ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
                        is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
                                  platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
                ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
                        is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
                                  platform_get_irq(pdev, base_irq_idx + i * 3);
                ring_pair_cb->q.phy_base =
                        RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
                hns_rcb_ring_pair_get_cfg(ring_pair_cb);
        }
}
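/*
 * Interrupt coalescing is controlled by two per-port knobs: the
 * packet-line register (RCB_CFG_PKTLINE_REG, a frame-count threshold)
 * and the overtime register (a timeout). The getters and setters
 * below read and write the hardware registers directly; nothing is
 * cached in software.
 */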
/**
 * hns_rcb_get_coalesced_frames - get rcb port coalesced frames
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 *
 * Returns: coalesced_frames
 */
u32 hns_rcb_get_coalesced_frames(
        struct rcb_common_cb *rcb_common, u32 port_idx)
{
        return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}

/**
 * hns_rcb_get_coalesce_usecs - get rcb port coalescing timeout
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 *
 * Returns: timeout in microseconds
 */
u32 hns_rcb_get_coalesce_usecs(
        struct rcb_common_cb *rcb_common, u32 port_idx)
{
        if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
                return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
                       HNS_RCB_CLK_FREQ_MHZ;
        else
                return dsaf_read_dev(rcb_common,
                                     RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}

/**
 * hns_rcb_set_coalesce_usecs - set rcb port coalescing timeout
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 * @timeout: tx/rx coalescing timeout in microseconds
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesce_usecs(
        struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
        u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);

        if (timeout == old_timeout)
                return 0;

        if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
                if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
                        dev_err(rcb_common->dsaf_dev->dev,
                                "error: coalesce_usecs setting not supported\n");
                        return -EINVAL;
                }
        }
        if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
                dev_err(rcb_common->dsaf_dev->dev,
                        "error: coalesce_usecs setting supports 0~1023us\n");
                return -EINVAL;
        }
        hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
        return 0;
}

/**
 * hns_rcb_set_coalesced_frames - set rcb coalesced frames
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 * @coalesced_frames: tx/rx BD num for coalesced frames
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesced_frames(
        struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
        u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);

        if (coalesced_frames == old_waterline)
                return 0;

        if (coalesced_frames >= rcb_common->desc_num ||
            coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
            coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
                dev_err(rcb_common->dsaf_dev->dev,
                        "error: coalesce_frames setting out of range\n");
                return -EINVAL;
        }

        dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
                       coalesced_frames);
        return 0;
}

/**
 * hns_rcb_get_queue_mode - get max VM number and max ring number per VM
 *                          according to dsaf mode
 * @dsaf_mode: dsaf mode
 * @max_vfn: max vfn number
 * @max_q_per_vf: max ring number per vm
 */
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
                            u16 *max_q_per_vf)
{
        switch (dsaf_mode) {
        case DSAF_MODE_DISABLE_6PORT_0VM:
                *max_vfn = 1;
                *max_q_per_vf = 16;
                break;
        case DSAF_MODE_DISABLE_FIX:
        case DSAF_MODE_DISABLE_SP:
                *max_vfn = 1;
                *max_q_per_vf = 1;
                break;
        case DSAF_MODE_DISABLE_2PORT_64VM:
                *max_vfn = 64;
                *max_q_per_vf = 1;
                break;
        case DSAF_MODE_DISABLE_6PORT_16VM:
                *max_vfn = 16;
                *max_q_per_vf = 1;
                break;
        default:
                *max_vfn = 1;
                *max_q_per_vf = 16;
                break;
        }
}
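/*
 * Total ring count across the RCB common block for a given DSAF mode.
 * The counts follow from hns_rcb_get_queue_mode(): for example, the
 * 6-port modes expose 96 rings (6 ports * 16 rings) and the 2-port
 * 64-VM mode 128 rings (2 ports * 64 rings).
 */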
int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
        switch (dsaf_dev->dsaf_mode) {
        case DSAF_MODE_ENABLE_FIX:
        case DSAF_MODE_DISABLE_SP:
                return 1;

        case DSAF_MODE_DISABLE_FIX:
                return 6;

        case DSAF_MODE_ENABLE_0VM:
                return 32;

        case DSAF_MODE_DISABLE_6PORT_0VM:
        case DSAF_MODE_ENABLE_16VM:
        case DSAF_MODE_DISABLE_6PORT_2VM:
        case DSAF_MODE_DISABLE_6PORT_16VM:
        case DSAF_MODE_DISABLE_6PORT_4VM:
        case DSAF_MODE_ENABLE_8VM:
                return 96;

        case DSAF_MODE_DISABLE_2PORT_16VM:
        case DSAF_MODE_DISABLE_2PORT_8VM:
        case DSAF_MODE_ENABLE_32VM:
        case DSAF_MODE_DISABLE_2PORT_64VM:
        case DSAF_MODE_ENABLE_128VM:
                return 128;

        default:
                dev_warn(dsaf_dev->dev,
                         "get ring num fail, use default! dsaf_mode=%d\n",
                         dsaf_dev->dsaf_mode);
                return 128;
        }
}

void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
        struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

        return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}

static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
        struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

        return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}

int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
                           int comm_index)
{
        struct rcb_common_cb *rcb_common;
        enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
        u16 max_vfn;
        u16 max_q_per_vf;
        int ring_num = hns_rcb_get_ring_num(dsaf_dev);

        /* the ring_pair_cb array is allocated as a flexible tail
         * behind the rcb_common control block
         */
        rcb_common =
                devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
                        ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
        if (!rcb_common) {
                dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
                return -ENOMEM;
        }
        rcb_common->comm_index = comm_index;
        rcb_common->ring_num = ring_num;
        rcb_common->dsaf_dev = dsaf_dev;

        rcb_common->desc_num = dsaf_dev->desc_num;

        hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
        rcb_common->max_vfn = max_vfn;
        rcb_common->max_q_per_vf = max_q_per_vf;

        rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
        rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);

        dsaf_dev->rcb_common[comm_index] = rcb_common;
        return 0;
}

void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
                             u32 comm_index)
{
        dsaf_dev->rcb_common[comm_index] = NULL;
}

void hns_rcb_update_stats(struct hnae_queue *queue)
{
        struct ring_pair_cb *ring =
                container_of(queue, struct ring_pair_cb, q);
        struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
        struct ppe_common_cb *ppe_common
                = dsaf_dev->ppe_common[ring->rcb_common->comm_index];
        struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

        hw_stats->rx_pkts += dsaf_read_dev(queue,
                        RCB_RING_RX_RING_PKTNUM_RECORD_REG);
        /* reset the hardware record counter after accumulating it */
        dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

        hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
                        PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
        hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
                        PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

        hw_stats->tx_pkts += dsaf_read_dev(queue,
                        RCB_RING_TX_RING_PKTNUM_RECORD_REG);
        dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

        hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
                        PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
        hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
                        PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}
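/*
 * The statistics buffer filled in below must stay in step with the
 * string table emitted by hns_rcb_get_strings(): indices 0-11 are tx
 * (hardware counters first, then software ring stats) and 12-27 are
 * rx in the same order.
 */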
/**
 * hns_rcb_get_stats - get rcb statistics
 * @queue: rcb queue
 * @data: statistics output buffer
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
        u64 *regs_buff = data;
        struct ring_pair_cb *ring =
                container_of(queue, struct ring_pair_cb, q);
        struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

        regs_buff[0] = hw_stats->tx_pkts;
        regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
        regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
        regs_buff[3] =
                dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

        regs_buff[4] = queue->tx_ring.stats.tx_pkts;
        regs_buff[5] = queue->tx_ring.stats.tx_bytes;
        regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
        regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
        regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
        regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
        regs_buff[10] = queue->tx_ring.stats.restart_queue;
        regs_buff[11] = queue->tx_ring.stats.tx_busy;

        regs_buff[12] = hw_stats->rx_pkts;
        regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
        regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
        regs_buff[15] =
                dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

        regs_buff[16] = queue->rx_ring.stats.rx_pkts;
        regs_buff[17] = queue->rx_ring.stats.rx_bytes;
        regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
        regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
        regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
        regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
        regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
        regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
        regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
        regs_buff[25] = queue->rx_ring.stats.err_bd_num;
        regs_buff[26] = queue->rx_ring.stats.l2_err;
        regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}

/**
 * hns_rcb_get_ring_sset_count - rcb string set count
 * @stringset: ethtool cmd
 *
 * Return: rcb ring string set count
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
        if (stringset == ETH_SS_STATS)
                return HNS_RING_STATIC_REG_NUM;

        return 0;
}

/**
 * hns_rcb_get_common_regs_count - rcb common regs count
 *
 * Return: regs count
 */
int hns_rcb_get_common_regs_count(void)
{
        return HNS_RCB_COMMON_DUMP_REG_NUM;
}

/**
 * hns_rcb_get_ring_regs_count - rcb ring regs count
 *
 * Return: regs count
 */
int hns_rcb_get_ring_regs_count(void)
{
        return HNS_RCB_RING_DUMP_REG_NUM;
}

/**
 * hns_rcb_get_strings - get rcb string set
 * @stringset: string set index
 * @data: buffer for the string names
 * @index: queue index
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
        char *buff = (char *)data;

        if (stringset != ETH_SS_STATS)
                return;

        snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
        buff = buff + ETH_GSTRING_LEN;

        snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
        buff = buff + ETH_GSTRING_LEN;

        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
        buff = buff + ETH_GSTRING_LEN;

        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
        buff = buff + ETH_GSTRING_LEN;
        snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
}
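/*
 * ethtool register dump for the rcb common block: a fixed set of
 * common registers first, then 16 per-port BD-number and packet-line
 * entries, the overtime register(s), and 0xcccccccc markers to flag
 * the end of the dump.
 */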
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
        u32 *regs = data;
        bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
        bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
        u32 reg_tmp;
        u32 reg_num_tmp;
        u32 i = 0;

        /* rcb common registers */
        regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
        regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
        regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

        regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
        regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
        regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
        regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
        regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
        regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

        regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
        regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
        regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
        regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
        regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
        regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
        regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
        regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
        regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
        regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
        regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
        regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
        regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
        regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
        regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
        regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
        regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
        regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
        regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

        regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
        regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
        regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
        regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
        regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
        regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
        regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
        regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
        regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
        regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

        /* rcb common entry registers */
        for (i = 0; i < 16; i++) { /* 16 per-port registers */
                regs[38 + i]
                        = dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
                regs[54 + i]
                        = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
        }

        reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
        reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
        /* v2 exposes one overtime register per port at a 4-byte stride */
        for (i = 0; i < reg_num_tmp; i++)
                regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp + 4 * i);

        regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
        regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

        /* mark end of rcb common regs */
        for (i = 78; i < 80; i++)
                regs[i] = 0xcccccccc;
}
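/*
 * Per-ring ethtool register dump: rx ring state, tx ring state, then
 * ring control and interrupt registers, terminated with
 * (0xcccccc00 + ring index) markers so dumps from different rings can
 * be told apart.
 */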
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
        u32 *regs = data;
        struct ring_pair_cb *ring_pair
                = container_of(queue, struct ring_pair_cb, q);
        u32 i = 0;

        /* rcb ring registers */
        regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
        regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
        regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
        regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
        regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
        regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
        regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
        regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
        regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

        regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
        regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
        regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
        regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
        regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
        /* regs[14] is skipped in this dump layout */
        regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
        regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
        regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
        regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
        regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

        regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
        regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
        regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
        regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
        regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
        regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
        regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

        regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
        regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
        regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
        regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
        regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
        regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
        regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
        regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

        /* mark end of ring regs */
        for (i = 35; i < 40; i++)
                regs[i] = 0xcccccc00 + ring_pair->index;
}