/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

/* Offset of the RCB common register block inside the PPE/serdes space */
#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

/* Inner retry count while waiting for RCB_RING_COULD_BE_RST to assert */
#define RCB_RESET_WAIT_TIMES 30
/* Outer retry count for the whole ring-reset sequence */
#define RCB_RESET_TRY_TIMES 10

/**
 * hns_rcb_wait_fbd_clean - wait for queues' in-flight buffer descriptors to drain
 * @qs: array of queue pointers to poll
 * @q_num: number of entries in @qs
 * @flag: RCB_INT_FLAG_TX and/or RCB_INT_FLAG_RX, selecting which FBD
 *	counter(s) must reach zero
 *
 * Polls each queue's FBD count, sleeping 200-300us between reads, and only
 * advances to the next queue once the count hits zero.  Gives up after
 * 10000 polls in total and logs which queue failed to drain.
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
	int i, wait_cnt;
	u32 fbd_num;

	/* wait_cnt counts polls across ALL queues; i only advances on drain */
	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
		usleep_range(200, 300);
		fbd_num = 0;
		if (flag & RCB_INT_FLAG_TX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_TX_RING_FBDNUM_REG);
		if (flag & RCB_INT_FLAG_RX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_RX_RING_FBDNUM_REG);
		if (!fbd_num)
			i++;
		if (wait_cnt >= 10000)
			break;
	}

	/* i < q_num means we broke out on the poll budget, not completion */
	if (i < q_num)
		dev_err(qs[i]->handle->owner_dev,
			"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}

/**
 * hns_rcb_reset_ring_hw - reset one ring pair in hardware
 * @q: queue whose ring registers are reset
 *
 * Waits for the TX FBD count to reach zero, disables prefetch, then pulses
 * RCB_RING_T0_BE_RST and polls RCB_RING_COULD_BE_RST until the hardware
 * acknowledges the reset.  Retries the whole sequence up to
 * RCB_RESET_TRY_TIMES before logging failure.
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;

	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		/* don't reset while TX descriptors are still in flight */
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		msleep(20);
		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		wait_cnt = 0;
		/* re-pulse the reset request until hardware says it can reset */
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			msleep(20);
			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

			wait_cnt++;
		}

		/* deassert the reset request in all cases */
		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}

/**
 * hns_rcb_int_ctrl_hw - mask or unmask ring interrupts
 * @q: queue whose interrupt mask registers are written
 * @flag: RCB_INT_FLAG_TX and/or RCB_INT_FLAG_RX
 * @mask: nonzero to mask (disable) the interrupts, zero to unmask
 *
 * Writes both the watermark and the overtime (coalesce timeout) mask
 * registers for the selected direction(s).
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;	/* normalize to 0/1 for the register */

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
			       int_mask_en);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
			       int_mask_en);
	}
}

/**
 * hns_rcb_int_clr_hw - clear pending ring interrupt status
 * @q: queue whose interrupt status registers are cleared
 * @flag: RCB_INT_FLAG_TX and/or RCB_INT_FLAG_RX
 *
 * Writes 1 to the ring and overtime status registers for the selected
 * direction(s); these registers are write-1-to-clear (inferred from usage
 * here — confirm against the register manual).
 */
void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	u32 clr = 1;

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, clr);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, clr);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, clr);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, clr);
	}
}

/**
 * hns_rcb_ring_enable_hw - enable or disable ring descriptor prefetch
 * @q: queue to control
 * @val: nonzero to enable, zero to disable
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

/**
 * hns_rcb_start - start (or stop) a queue by toggling ring prefetch
 * @q: queue to start
 * @val: nonzero to enable, zero to disable
 */
void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}

/**
 * hns_rcb_common_init_commit_hw - commit the RCB common initialization
 * @rcb_common: rcb common device
 *
 * Signals the hardware that software initialization is finished by writing
 * RCB_COM_CFG_SYS_FSH_REG, fenced on both sides so all prior register
 * writes are visible first.
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb();	/* Sync point before breakpoint */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb();	/* Sync point after breakpoint */
}

/**
 * hns_rcb_ring_init - program one direction of a ring pair into hardware
 * @ring_pair: ring pair control block
 * @ring_type: RX_RING or TX_RING
 *
 * Writes the descriptor DMA base address (split into low/high 32 bits),
 * the buffer-size type, BD number and packet-line registers for the
 * selected direction.
 *
 * NOTE(review): the *_BD_NUM_REG registers are written with
 * ring_pair->port_id_in_dsa rather than a descriptor count, which looks
 * inconsistent with the register name — confirm against the RCB register
 * specification.
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
	u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)(dma >> 32));

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_dsa);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_dsa);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)(dma >> 32));

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_dsa);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_dsa);
	}
}

/**
 * hns_rcb_init_hw - program both directions of a ring pair into hardware
 * @ring: ring pair control block
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}

/**
 * hns_rcb_set_port_desc_cnt - set a port's descriptor (BD) count
 * @rcb_common: rcb_common device
 * @port_idx: port index; out-of-range values fall back to port 0
 * @desc_cnt: BD num
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
				      u32 port_idx, u32 desc_cnt)
{
	/* clamp invalid indices to port 0 rather than writing out of range */
	if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
		port_idx = 0;

	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
		       desc_cnt);
}

/**
 * hns_rcb_set_port_coalesced_frames - set a port's coalesced-frames threshold
 * @rcb_common: rcb_common device
 * @port_idx: port index; out-of-range values fall back to port 0
 * @coalesced_frames: BD num for coalesced frames
 *
 * Return: 0 on success, -EINVAL when the value is not below the ring's
 * descriptor count or exceeds HNS_RCB_MAX_COALESCED_FRAMES.
 */
static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
					     u32 port_idx,
					     u32 coalesced_frames)
{
	if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
		port_idx = 0;
	if (coalesced_frames >= rcb_common->desc_num ||
	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
		return -EINVAL;

	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
		       coalesced_frames);
	return 0;
}

/**
 * hns_rcb_get_port_coalesced_frames - read a port's coalesced-frames threshold
 * @rcb_common: rcb_common device
 * @port_idx: port index; out-of-range values fall back to port 0
 *
 * Return: the coalesced-frames value currently programmed in hardware.
 */
static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
					     u32 port_idx)
{
	if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
		port_idx = 0;

	return dsaf_read_dev(rcb_common,
			     RCB_CFG_PKTLINE_REG + port_idx * 4);
}

/**
 * hns_rcb_set_timeout - set the coalesce timeout for the whole RCB common
 * @rcb_common: rcb_common device
 * @timeout: coalesce timeout value written to RCB_CFG_OVERTIME_REG
 */
static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
				u32 timeout)
{
	dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
}

/* Return the engine/port count this RCB common serves (service vs debug). */
static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
	if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
		return HNS_RCB_SERVICE_NW_ENGINE_NUM;
	else
		return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}

/* Clear pending RCB common exception interrupts, then mask or unmask them. */
static void hns_rcb_comm_exc_irq_en(
			struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_vlue = 0xfffffffful;
	u32 msk_vlue = en ? 0 : 0xfffffffful;	/* 0 = unmasked (enabled) */

	/* clear all pending status first */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);

	/* then program the masks */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);

	/* TX BDs need no cacheline handling, so keep sf_txring_fbd_intmask
	 * (bit 1) masked even when enabling
	 */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
}

/**
 * hns_rcb_common_init_hw - init rcb common hardware
 * @rcb_common: rcb_common device
 *
 * Masks exception interrupts, verifies the hardware init-done flag, then
 * programs per-port descriptor counts, the coalesced-frames thresholds,
 * the coalesce timeout and the endianness setting.
 *
 * Return: 0 on success, -EBUSY when RCB_COM_CFG_INIT_FLAG_REG does not
 * report the hardware as initialized.
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
	u32 reg_val;
	int i;
	int port_num = hns_rcb_common_get_port_num(rcb_common);

	hns_rcb_comm_exc_irq_en(rcb_common, 0);

	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
	if (0x1 != (reg_val & 0x1)) {
		dev_err(rcb_common->dsaf_dev->dev,
			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
		return -EBUSY;
	}

	for (i = 0; i < port_num; i++) {
		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
		/* value was validated when stored; failure is ignored here */
		(void)hns_rcb_set_port_coalesced_frames(
			rcb_common, i, rcb_common->coalesced_frames);
	}
	hns_rcb_set_timeout(rcb_common, rcb_common->timeout);

	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
		       HNS_RCB_COMMON_ENDIAN);

	return 0;
}

/**
 * hns_rcb_buf_size2type - map an rx buffer size to its hardware BD size type
 * @buf_size: buffer size in bytes (512/1024/2048/4096)
 *
 * Return: the HNS_BD_SIZE_*_TYPE code, or -EINVAL for unsupported sizes.
 */
int hns_rcb_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = -EINVAL;
	}

	return bd_size_type;
}

/**
 * hns_rcb_ring_get_cfg - fill in the software state of one ring direction
 * @q: queue embedded in a ring_pair_cb
 * @ring_type: RX_RING or TX_RING
 *
 * Sets the ring's io base (TX rings sit HNS_RCB_TX_REG_OFFSET above the
 * queue base), its IRQ, and default sizing/counters.  No hardware access.
 */
static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u32 buf_size;
	u16 desc_num;
	int irq_idx;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
	} else {
		ring = &q->tx_ring;
		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
	}

	rcb_common = ring_pair_cb->rcb_common;
	buf_size = rcb_common->dsaf_dev->buf_size;
	desc_num = rcb_common->dsaf_dev->desc_num;

	/* descriptor memory is allocated later; start with a clean slate */
	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = buf_size;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

/* Initialize both directions of a ring pair's software state. */
static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}

/**
 * hns_rcb_get_port - map a ring index to its owning port
 * @rcb_common: rcb common device
 * @ring_idx: ring index within this rcb common
 *
 * For the service engine, rings are distributed evenly across ports
 * (max_q_per_vf * max_vfn rings per port); debug engines own a single
 * fixed port derived from the comm index.
 *
 * Return: the port number.
 */
static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
{
	int comm_index = rcb_common->comm_index;
	int port;
	int q_num;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
		port = ring_idx / q_num;
	} else {
		port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1;
	}

	return port;
}

/* Base index into the DT interrupt list for this rcb common's rings. */
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
	int comm_index = rcb_common->comm_index;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
		return HNS_SERVICE_RING_IRQ_IDX;
	else
		/* each debug engine consumes two IRQs (tx + rx) */
		return HNS_DEBUG_RING_IRQ_IDX + (comm_index - 1) * 2;
}

/* Per-ring register window: common base + 0x10000, one stride per ring */
#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))

/**
 * hns_rcb_get_cfg - populate every ring pair of an rcb common
 * @rcb_common: rcb common device
 *
 * For each ring: records its io/phys register window, owning port, and
 * maps its TX/RX interrupts from the device tree (two consecutive IRQ
 * entries per ring), then fills in the software ring state.
 */
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct device_node *np = rcb_common->dsaf_dev->dev->of_node;

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX]
			= irq_of_parse_and_map(np, base_irq_idx + i * 2);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX]
			= irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1);
		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}
}

/**
 * hns_rcb_get_coalesced_frames - get a port's coalesced-frames threshold
 * @dsaf_dev: dsaf device
 * @port: port index
 *
 * Return: the coalesced-frames value read back from hardware.
 */
u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port)
{
	int comm_index = hns_dsaf_get_comm_idx_by_port(port);
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];

	return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
}

/**
 * hns_rcb_get_coalesce_usecs - get the cached coalesce timeout
 * @dsaf_dev: dsaf device
 * @comm_index: rcb common index
 *
 * Return: the timeout cached in software (not read from hardware).
 */
u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index)
{
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];

	return rcb_comm->timeout;
}

/**
 * hns_rcb_set_coalesce_usecs - set the coalesce timeout for a port's rcb common
 * @dsaf_dev: dsaf device
 * @port: port index
 * @timeout: new coalesce timeout
 *
 * No-op when the value is unchanged.  Rejected (with an error log) for the
 * service engine, since its timeout is shared across ports.
 */
void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
				int port, u32 timeout)
{
	int comm_index = hns_dsaf_get_comm_idx_by_port(port);
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];

	if (rcb_comm->timeout == timeout)
		return;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		dev_err(dsaf_dev->dev,
			"error: not support coalesce_usecs setting!\n");
		return;
	}
	rcb_comm->timeout = timeout;
	hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout);
}

/**
 * hns_rcb_set_coalesced_frames - set a port's coalesced-frames threshold
 * @dsaf_dev: dsaf device
 * @port: port index
 * @coalesced_frames: new BD threshold for interrupt coalescing
 *
 * No-op (returns 0) when the hardware already holds the requested value.
 *
 * Return: 0 on success, -EINVAL when the value is below
 * HNS_RCB_MIN_COALESCED_FRAMES or rejected by the range check in
 * hns_rcb_set_port_coalesced_frames().
 */
int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
				 int port, u32 coalesced_frames)
{
	int comm_index = hns_dsaf_get_comm_idx_by_port(port);
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
	u32 coalesced_reg_val;
	int ret;

	coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port);

	if (coalesced_reg_val == coalesced_frames)
		return 0;

	if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) {
		ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port,
							coalesced_frames);
		return ret;
	} else {
		return -EINVAL;
	}
}

/**
 * hns_rcb_get_queue_mode - get max VM number and max ring number per VM
 *	according to dsaf mode
 * @dsaf_mode: dsaf mode
 * @comm_index: rcb common index; non-service engines always get 1x1
 * @max_vfn: out, max vfn number
 * @max_q_per_vf: out, max ring number per vf
 */
static void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
				   u16 *max_vfn, u16 *max_q_per_vf)
{
	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		switch (dsaf_mode) {
		case DSAF_MODE_DISABLE_6PORT_0VM:
			*max_vfn = 1;
			*max_q_per_vf = 16;
			break;
		case DSAF_MODE_DISABLE_FIX:
			*max_vfn = 1;
			*max_q_per_vf = 1;
			break;
		case DSAF_MODE_DISABLE_2PORT_64VM:
			*max_vfn = 64;
			*max_q_per_vf = 1;
			break;
		case DSAF_MODE_DISABLE_6PORT_16VM:
			*max_vfn = 16;
			*max_q_per_vf = 1;
			break;
		default:
			*max_vfn = 1;
			*max_q_per_vf = 16;
			break;
		}
	} else {
		*max_vfn = 1;
		*max_q_per_vf = 1;
	}
}

/**
 * hns_rcb_get_ring_num - total ring count for an rcb common
 * @dsaf_dev: dsaf device
 * @comm_index: rcb common index
 *
 * Return: ring count derived from the dsaf mode for the service engine;
 * debug engines always have a single ring.  Unknown modes log a warning
 * and fall back to 128.
 */
int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index)
{
	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		switch (dsaf_dev->dsaf_mode) {
		case DSAF_MODE_ENABLE_FIX:
			return 1;

		case DSAF_MODE_DISABLE_FIX:
			return 6;

		case DSAF_MODE_ENABLE_0VM:
			return 32;

		case DSAF_MODE_DISABLE_6PORT_0VM:
		case DSAF_MODE_ENABLE_16VM:
		case DSAF_MODE_DISABLE_6PORT_2VM:
		case DSAF_MODE_DISABLE_6PORT_16VM:
		case DSAF_MODE_DISABLE_6PORT_4VM:
		case DSAF_MODE_ENABLE_8VM:
			return 96;

		case DSAF_MODE_DISABLE_2PORT_16VM:
		case DSAF_MODE_DISABLE_2PORT_8VM:
		case DSAF_MODE_ENABLE_32VM:
		case DSAF_MODE_DISABLE_2PORT_64VM:
		case DSAF_MODE_ENABLE_128VM:
			return 128;

		default:
			dev_warn(dsaf_dev->dev,
				 "get ring num fail,use default!dsaf_mode=%d\n",
				 dsaf_dev->dsaf_mode);
			return 128;
		}
	} else {
		return 1;
	}
}

/**
 * hns_rcb_common_get_vaddr - virtual base of an rcb common's register window
 * @dsaf_dev: dsaf device
 * @comm_index: rcb common index
 *
 * The service engine's window lives in the PPE space; debug engines are
 * strided inside the serdes space.
 *
 * Return: ioremapped base address.
 */
void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev,
				       int comm_index)
{
	void __iomem *base_addr;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
		base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
	else
		base_addr = dsaf_dev->sds_base
			+ (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
			+ RCB_COMMON_REG_OFFSET;

	return base_addr;
}

/**
 * hns_rcb_common_get_paddr - physical base of an rcb common's register window
 * @dsaf_dev: dsaf device
 * @comm_index: rcb common index
 *
 * Translates the relevant "reg" entry of the device node (index 2 for the
 * service engine's PPE space, index 1 for the serdes space) and applies
 * the same offsets as hns_rcb_common_get_vaddr().
 *
 * Return: physical base address.
 */
static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev,
					    int comm_index)
{
	struct device_node *np = dsaf_dev->dev->of_node;
	phys_addr_t phy_addr;
	const __be32 *tmp_addr;
	u64 addr_offset = 0;
	u64 size = 0;
	int index = 0;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		index = 2;
		addr_offset = RCB_COMMON_REG_OFFSET;
	} else {
		index = 1;
		addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET +
				RCB_COMMON_REG_OFFSET;
	}
	/* NOTE(review): of_get_address() may return NULL; the result is
	 * passed to of_translate_address() unchecked — confirm the DT
	 * binding guarantees these reg entries exist
	 */
	tmp_addr = of_get_address(np, index, &size, NULL);
	phy_addr = of_translate_address(np, tmp_addr);
	return phy_addr + addr_offset;
}

/**
 * hns_rcb_common_get_cfg - allocate and populate an rcb common control block
 * @dsaf_dev: dsaf device
 * @comm_index: rcb common index
 *
 * Allocates the control block plus its trailing ring_pair_cb array
 * (devm-managed, so no explicit free), fills in queue-mode and register
 * bases, and publishes it in dsaf_dev->rcb_common[].
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
			   int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index);

	/* one allocation covers the cb plus its flexible ring array */
	rcb_common =
		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
			ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
	if (!rcb_common) {
		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
		return -ENOMEM;
	}
	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;
	rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
	rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;

	hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index);
	rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index);

	dsaf_dev->rcb_common[comm_index] = rcb_common;
	return 0;
}

/**
 * hns_rcb_common_free_cfg - drop the published rcb common pointer
 * @dsaf_dev: dsaf device
 * @comm_index: rcb common index
 *
 * The memory itself is devm-managed and freed with the device.
 */
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}

/**
 * hns_rcb_update_stats - accumulate hardware packet counters into sw stats
 * @queue: queue to update
 *
 * Reads the RCB per-ring packet counters (then writes 1 to reset them —
 * presumably write-to-clear; confirm against the register manual) and the
 * PPE per-queue ok/drop counters, adding each into hw_stats.
 */
void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	hw_stats->rx_pkts += dsaf_read_dev(queue,
			RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}

/**
 * hns_rcb_get_stats - dump ring statistics for ethtool
 * @queue: queue to report
 * @data: output array; indices 0-13 are TX (hw + sw), 12-27 RX — the
 *	layout must match the strings emitted by hns_rcb_get_strings()
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
	u64 *regs_buff = data;
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	regs_buff[0] = hw_stats->tx_pkts;
	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
	regs_buff[3] =
		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
	regs_buff[10] = queue->tx_ring.stats.restart_queue;
	regs_buff[11] = queue->tx_ring.stats.tx_busy;

	regs_buff[12] = hw_stats->rx_pkts;
	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
	regs_buff[15] =
		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
	regs_buff[26] = queue->rx_ring.stats.l2_err;
	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}

/**
 * hns_rcb_get_ring_sset_count - number of per-ring statistics strings
 * @stringset: ethtool string set id
 *
 * Return: HNS_RING_STATIC_REG_NUM for ETH_SS_STATS, otherwise 0.
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS)
		return HNS_RING_STATIC_REG_NUM;

	return 0;
}

/**
 * hns_rcb_get_common_regs_count - size of the common register dump
 *
 * Return: register count for hns_rcb_get_common_regs().
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}

/**
 * hns_rcb_get_ring_regs_count - size of the per-ring register dump
 *
 * Return: register count for hns_rcb_get_ring_regs().
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}

/**
 * hns_rcb_get_strings - emit per-ring statistics names for ethtool
 * @stringset: string set id; only ETH_SS_STATS is handled
 * @data: output buffer, ETH_GSTRING_LEN bytes per string
 * @index: queue index embedded in each name
 *
 * The order here must match the value layout in hns_rcb_get_stats().
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
	char *buff = (char *)data;

	if (stringset != ETH_SS_STATS)
		return;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
}

/**
 * hns_rcb_get_common_regs - dump the rcb common register block
 * @rcb_com: rcb common device
 * @data: output buffer of at least hns_rcb_get_common_regs_count() u32s
 *
 * Slots 73-79 are filled with the 0xcccccccc end marker.
 */
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	u32 i = 0;

	/*rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry registers */
	for (i = 0; i < 16; i++) { /* total 16 model registers */
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG);
	regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 73; i < 80; i++)
		regs[i] = 0xcccccccc;
}

/**
 * hns_rcb_get_ring_regs - dump one ring pair's register window
 * @queue: queue to dump
 * @data: output buffer of at least hns_rcb_get_ring_regs_count() u32s
 *
 * NOTE(review): regs[14] is never written (the TX section jumps from
 * index 13 to 15), so that slot keeps whatever the caller's buffer held —
 * confirm whether this gap is intentional.  Slots 35-39 are filled with a
 * 0xcccccc00 + ring-index end marker.
 */
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
	u32 *regs = data;
	struct ring_pair_cb *ring_pair
		= container_of(queue, struct ring_pair_cb, q);
	u32 i = 0;

	/*rcb ring registers */
	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

	/* mark end of ring regs */
	for (i = 35; i < 40; i++)
		regs[i] = 0xcccccc00 + ring_pair->index;
}