/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	if (!buf_pool)
		return;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
					   SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
					   SET_VAL(STASH, 3));
	}
}

static u16 xgene_enet_get_data_len(u64 bufdatalen)
{
	u16 hw_len, mask;

	hw_len = GET_VAL(BUFDATALEN, bufdatalen);

	if (unlikely(hw_len == 0x7800)) {
		return 0;
	} else if (!(hw_len & BIT(14))) {
		mask = GENMASK(13, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
	} else if (!(hw_len & GENMASK(13, 12))) {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
	} else {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
	}
}
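
/* BUFDATALEN encoding, as inferred from the decode logic above: with
 * bit 14 clear, the length lives in bits 13:0 of a 16KB-class buffer;
 * with bit 14 set, bits 13:12 select the 4KB (zero) or 2KB (non-zero)
 * class and the length lives in bits 11:0.  A zero length field means a
 * full-sized buffer, and the special value 0x7800 decodes as zero bytes.
 */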
static u16 xgene_enet_set_data_len(u32 size)
{
	u16 hw_len;

	hw_len = (size == SIZE_4K) ? BIT(14) : 0;

	return hw_len;
}

static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
				      u32 nbuf)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	dma_addr_t dma_addr;
	struct device *dev;
	struct page *page;
	u32 slots, tail;
	u16 hw_len;
	int i;

	if (unlikely(!buf_pool))
		return 0;

	ndev = buf_pool->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	slots = buf_pool->slots - 1;
	tail = buf_pool->tail;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		page = dev_alloc_page();
		if (unlikely(!page))
			return -ENOMEM;

		dma_addr = dma_map_page(dev, page, 0,
					PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, dma_addr))) {
			put_page(page);
			return -ENOMEM;
		}

		hw_len = xgene_enet_set_data_len(PAGE_SIZE);
		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, hw_len) |
					   SET_BIT(COHERENT));

		buf_pool->frag_page[tail] = page;
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);

	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_STD_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		buf_pool->rx_skb[tail] = skb;

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	dma_addr_t dma_addr;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		if (buf_pool->rx_skb[i]) {
			dev_kfree_skb_any(buf_pool->rx_skb[i]);

			raw_desc = &buf_pool->raw_desc16[i];
			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
					 DMA_FROM_DEVICE);
		}
	}
}

static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	dma_addr_t dma_addr;
	struct page *page;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		page = buf_pool->frag_page[i];
		if (page) {
			dma_addr = buf_pool->frag_dma_addr[i];
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			put_page(page);
		}
	}
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 status;
	int i, ret = 0;
	u8 mss_index;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb),
			 DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
		spin_lock(&pdata->mss_lock);
		pdata->mss_refcnt[mss_index]--;
		spin_unlock(&pdata->mss_lock);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}
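
/* The MAC exposes only NUM_MSS_REG TSO MSS registers, so MSS values are
 * refcounted per slot: an existing slot with a matching MSS is reused,
 * otherwise a slot whose refcount has dropped to zero is reprogrammed.
 * The matching refcount is dropped in xgene_enet_tx_completion() above.
 */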
static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	bool mss_index_found = false;
	int mss_index;
	int i;

	spin_lock(&pdata->mss_lock);

	/* Reuse the slot if MSS matches */
	for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
		if (pdata->mss[i] == mss) {
			pdata->mss_refcnt[i]++;
			mss_index = i;
			mss_index_found = true;
		}
	}

	/* Overwrite the slot with ref_count = 0 */
	for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
		if (!pdata->mss_refcnt[i]) {
			pdata->mss_refcnt[i]++;
			pdata->mac_ops->set_mss(pdata, mss, i);
			pdata->mss[i] = mss;
			mss_index = i;
			mss_index_found = true;
		}
	}

	/* No slots with ref_count = 0 available, return busy */
	if (!mss_index_found)
		mss_index = -EBUSY;

	spin_unlock(&pdata->mss_lock);

	return mss_index;
}

static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;
	int mss_index;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the headers to reside within
				 * the first 3 buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			mss_index = xgene_enet_setup_mss(ndev, mss);
			if (unlikely(mss_index < 0))
				return -EBUSY;

			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		    SET_VAL(IPHDR, l3hlen) |
		    SET_VAL(ETHHDR, ethhdr) |
		    SET_VAL(EC, csum_enable) |
		    SET_VAL(IS, proto) |
		    SET_BIT(IC) |
		    SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return 0;
}
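
/* Buffer lengths are encoded for the TX descriptors below; a full 16KB
 * buffer (BUFLEN_16K) is written as zero, the same "zero means
 * full-sized" convention that xgene_enet_get_data_len() decodes.
 */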
static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}
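
/* TX buffer layout used by xgene_enet_setup_tx_desc() below: a linear
 * skb needs only the primary descriptor; a nonlinear skb takes a second
 * (expanded) descriptor slot holding up to four more buffer pointers,
 * and any remaining fragments, including >16KB fragments that must be
 * split, spill into an exp_bufs area referenced from that slot (the
 * link-list, "LL", case).  The idx ^ 1 addressing in xgene_set_addr_len()
 * reflects the pairwise-swapped 64-bit word order the hardware appears
 * to expect.
 */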
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo = 0;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;
	int ret;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	ret = xgene_enet_work_msg(skb, &hopinfo);
	if (ret)
		return ret;

	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;

	return count;
}

static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];
	int count;

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count == -EBUSY)
		return NETDEV_TX_BUSY;

	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	tx_ring->tx_packets++;
	tx_ring->tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
				     struct xgene_enet_raw_desc *raw_desc,
				     struct xgene_enet_raw_desc *exp_desc)
{
	__le64 *desc = (void *)exp_desc;
	dma_addr_t dma_addr;
	struct device *dev;
	struct page *page;
	u16 slots, head;
	u32 frag_size;
	int i;

	if (!buf_pool || !raw_desc || !exp_desc ||
	    (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
		return;

	dev = ndev_to_dev(buf_pool->ndev);
	slots = buf_pool->slots - 1;
	head = buf_pool->head;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		if (!frag_size)
			break;

		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = buf_pool->frag_page[head];
		put_page(page);

		buf_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;
	}
	buf_pool->head = head;
}
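
/* RX path: the head of a frame always comes out of the skb buffer pool;
 * when the NV bit is set, the remainder arrives in page-pool buffers
 * described by the expanded descriptor and is attached to the skb as
 * page frags in xgene_enet_rx_frame() below.
 */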
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc,
			       struct xgene_enet_raw_desc *exp_desc)
{
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	u32 datalen, frag_size, skb_index;
	struct net_device *ndev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	struct device *dev;
	struct page *page;
	u16 slots, head;
	int i, ret = 0;
	__le64 *desc;
	u8 status;
	bool nv;

	ndev = rx_ring->ndev;
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;
	page_pool = rx_ring->page_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];
	buf_pool->rx_skb[skb_index] = NULL;

	/* checking for error */
	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));

	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
	if (!nv)
		datalen -= 4;

	skb_put(skb, datalen);
	prefetch(skb->data - NET_IP_ALIGN);

	if (!nv)
		goto skip_jumbo;

	slots = page_pool->slots - 1;
	head = page_pool->head;
	desc = (void *)exp_desc;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		if (!frag_size)
			break;

		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = page_pool->frag_page[head];
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
				frag_size, PAGE_SIZE);

		datalen += frag_size;

		page_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;
	}

	page_pool->head = head;
	rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;

skip_jumbo:
	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);

out:
	if (rx_ring->npagepool <= 0) {
		ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		if (ret)
			return ret;
	}

	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}
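
/* RX frames and TX completions arrive on the same CPU-owned ring; a
 * nonzero free-pool queue number (FPQNUM) in the primary descriptor
 * marks the message as an RX frame.
 */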
static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level[ring->index] += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (__netif_subqueue_stopped(ndev, ring->index))
			netif_start_subqueue(ndev, ring->index);
	}

	return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;
	int i;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq->trans_start = jiffies;
		netif_tx_start_queue(txq);
	}
}

static void xgene_enet_set_irq_name(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (!pdata->cq_cnt) {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
				 ndev->name);
		} else {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
				 ndev->name, i);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
			 ndev->name, i);
	}
}
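
/* The interrupt handler masks the ring's IRQ with disable_irq_nosync()
 * and NAPI poll re-enables it, so IRQ_DISABLE_UNLAZY is set to make the
 * disable take effect at the interrupt chip immediately rather than
 * lazily on the next interrupt.
 */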
static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret = 0, i;

	xgene_enet_set_irq_name(ndev);
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;
	struct device *dev;
	int i;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_enable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_disable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
	if (ret)
		return ret;

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;

	if (ndev->phydev) {
		phy_start(ndev->phydev);
	} else {
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
		netif_carrier_off(ndev);
	}

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);
	netif_tx_start_all_queues(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int i;

	netif_tx_stop_all_queues(ndev);
	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	if (ndev->phydev)
		phy_stop(ndev->phydev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			xgene_enet_delete_ring(ring);
			pdata->port_ops->clear(pdata, ring);
			if (pdata->cq_cnt)
				xgene_enet_delete_ring(ring->cp_ring);
			pdata->tx_ring[i] = NULL;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			page_pool = ring->page_pool;
			if (page_pool) {
				xgene_enet_delete_pagepool(page_pool);
				xgene_enet_delete_ring(page_pool);
				pdata->port_ops->clear(pdata, page_pool);
			}

			buf_pool = ring->buf_pool;
			xgene_enet_delete_bufpool(buf_pool);
			xgene_enet_delete_ring(buf_pool);
			pdata->port_ops->clear(pdata, buf_pool);

			xgene_enet_delete_ring(ring);
			pdata->rx_ring[i] = NULL;
		}
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *page_pool;
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	void *p;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			if (ring->cp_ring && ring->cp_ring->cp_skb)
				devm_kfree(dev, ring->cp_ring->cp_skb);

			if (ring->cp_ring && pdata->cq_cnt)
				xgene_enet_free_desc_ring(ring->cp_ring);

			xgene_enet_free_desc_ring(ring);
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			if (ring->buf_pool) {
				if (ring->buf_pool->rx_skb)
					devm_kfree(dev, ring->buf_pool->rx_skb);

				xgene_enet_free_desc_ring(ring->buf_pool);
			}

			page_pool = ring->page_pool;
			if (page_pool) {
				p = page_pool->frag_page;
				if (p)
					devm_kfree(dev, p);

				p = page_pool->frag_dma_addr;
				if (p)
					devm_kfree(dev, p);
			}

			xgene_enet_free_desc_ring(ring);
		}
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	void *irq_mbox_addr;
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL | __GFP_ZERO);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
						    &ring->irq_mbox_dma,
						    GFP_KERNEL | __GFP_ZERO);
		if (!irq_mbox_addr) {
			dmam_free_coherent(dev, size, ring->desc_addr,
					   ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
		ring->irq_mbox_addr = irq_mbox_addr;
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 cpu_bufnum;
	int ret;

	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
}
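
/* Ring topology created below: each RX queue pairs a descriptor ring
 * with an skb buffer pool and (queue counts permitting) a page pool for
 * jumbo frames; each TX queue pairs a descriptor ring with either a
 * dedicated completion ring (cq_cnt != 0) or the matching RX ring.
 */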
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *page_pool = NULL;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	struct device *dev = ndev_to_dev(ndev);
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u16 ring_id, slots;
	__le64 *exp_bufs;
	int i, ret, size;
	u8 cpu_bufnum;

	cpu_bufnum = xgene_start_cpu_bufnum(pdata);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!rx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
						       RING_CFGSIZE_16KB,
						       ring_id);
		if (!buf_pool) {
			ret = -ENOMEM;
			goto err;
		}

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		rx_ring->irq = pdata->irqs[i];
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!buf_pool->rx_skb) {
			ret = -ENOMEM;
			goto err;
		}

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;

		if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) ||
		    (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) {
			break;
		}

		/* allocate next buffer pool for jumbo packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
							RING_CFGSIZE_16KB,
							ring_id);
		if (!page_pool) {
			ret = -ENOMEM;
			goto err;
		}

		slots = page_pool->slots;
		page_pool->frag_page = devm_kcalloc(dev, slots,
						    sizeof(struct page *),
						    GFP_KERNEL);
		if (!page_pool->frag_page) {
			ret = -ENOMEM;
			goto err;
		}

		page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
							sizeof(dma_addr_t),
							GFP_KERNEL);
		if (!page_pool->frag_dma_addr) {
			ret = -ENOMEM;
			goto err;
		}

		page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
		rx_ring->page_pool = page_pool;
	}

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!tx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
					       GFP_KERNEL | __GFP_ZERO);
		if (!exp_bufs) {
			ret = -ENOMEM;
			goto err;
		}
		tx_ring->exp_bufs = exp_bufs;

		pdata->tx_ring[i] = tx_ring;
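
		/* Without dedicated completion queues (cq_cnt == 0), TX
		 * completions are delivered to the RX ring and handled by
		 * its NAPI poll in xgene_enet_process_ring().
		 */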
		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];
		} else {
			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
							 cpu_bufnum++);
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
							      RING_CFGSIZE_16KB,
							      ring_id);
			if (!cp_ring) {
				ret = -ENOMEM;
				goto err;
			}

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
			cp_ring->index = i;
		}

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
					       GFP_KERNEL);
		if (!cp_ring->cp_skb) {
			ret = -ENOMEM;
			goto err;
		}

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
						      size, GFP_KERNEL);
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);
			ret = -ENOMEM;
			goto err;
		}

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
	}

	if (pdata->ring_ops->coalesce)
		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;
	struct xgene_enet_desc_ring *ring;
	int i;

	memset(stats, 0, sizeof(struct rtnl_link_stats64));
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			stats->tx_packets += ring->tx_packets;
			stats->tx_bytes += ring->tx_bytes;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			stats->rx_packets += ring->rx_packets;
			stats->rx_bytes += ring->rx_bytes;
			stats->rx_errors += ring->rx_length_errors +
				ring->rx_crc_errors +
				ring->rx_frame_errors +
				ring->rx_fifo_errors;
			stats->rx_dropped += ring->rx_dropped;
		}
	}
	memcpy(storage, stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}
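
/* MTU changes on a running interface take a full close/open cycle so the
 * rings are drained before the MAC frame size is reprogrammed; the extra
 * 18 bytes cover the Ethernet header and FCS.
 */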
static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int frame_size;

	if (!netif_running(ndev))
		return 0;

	frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;

	xgene_enet_close(ndev);
	ndev->mtu = new_mtu;
	pdata->mac_ops->set_framesize(pdata, frame_size);
	xgene_enet_open(ndev);

	return 0;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = xgene_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status))
		pdata->port_id = 0;
	else
		pdata->port_id = temp;
}
#endif

static void xgene_get_port_id_dt(struct device *dev,
				 struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}
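
/* One combined RX/TXC interrupt is used for RGMII ports, two for SGMII,
 * and up to XGENE_MAX_ENET_IRQ for XGMII, split evenly between RX rings
 * and TX completion rings; when fewer XGMII interrupts are provided, the
 * queue counts are scaled down to match.
 */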
static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev = pdata->pdev;
	struct device *dev = &pdev->dev;
	int i, ret, max_irqs;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		max_irqs = 1;
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = 2;
	else
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				max_irqs = i;
				pdata->rxq_cnt = max_irqs / 2;
				pdata->txq_cnt = max_irqs / 2;
				pdata->cq_cnt = max_irqs / 2;
				break;
			}
			dev_err(dev, "Unable to get ENET IRQ\n");
			ret = ret ? : -ENXIO;
			return ret;
		}
		pdata->irqs[i] = ret;
	}

	return 0;
}

static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
	int ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
		return 0;

	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
		return 0;

	ret = xgene_enet_phy_connect(pdata->ndev);
	if (!ret)
		pdata->mdio_driver = true;

	return 0;
}

static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	pdata->sfp_gpio_en = false;
	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
	    (!device_property_present(dev, "sfp-gpios") &&
	     !device_property_present(dev, "rxlos-gpios")))
		return;

	pdata->sfp_gpio_en = true;
	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
	if (IS_ERR(pdata->sfp_rdy))
		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		xgene_get_port_id_acpi(dev, pdata);
#endif

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_get_irqs(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_check_phy_handle(pdata);
	if (ret)
		return ret;

	xgene_enet_gpiod_get(pdata);

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
		pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}
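
/* Hardware bring-up order: reset the port, create the descriptor rings,
 * prime the buffer and page pools, then either initialize the
 * pre-classifier (CLE) tree on XGMII ports or program the CLE bypass
 * path, and finally initialize the MAC.
 */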
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct xgene_enet_desc_ring *page_pool;
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num, ring_id;
	int i, ret;
	u32 count;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	for (i = 0; i < pdata->rxq_cnt; i++) {
		buf_pool = pdata->rx_ring[i]->buf_pool;
		xgene_enet_init_bufpool(buf_pool);
		page_pool = pdata->rx_ring[i]->page_pool;
		xgene_enet_init_bufpool(page_pool);

		count = pdata->rx_buff_cnt;
		ret = xgene_enet_refill_bufpool(buf_pool, count);
		if (ret)
			goto err;

		ret = xgene_enet_refill_pagepool(page_pool, count);
		if (ret)
			goto err;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	buf_pool = pdata->rx_ring[0]->buf_pool;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and Enable PreClassifier Tree */
		enet_cle->max_nodes = 512;
		enet_cle->max_dbptrs = 1024;
		enet_cle->parsers = 3;
		enet_cle->active_parser = PARSER_ALL;
		enet_cle->ptree.start_node = 0;
		enet_cle->ptree.start_dbptr = 0;
		enet_cle->jump_bytes = 8;
		ret = pdata->cle_ops->cle_init(pdata);
		if (ret) {
			netdev_err(ndev, "Preclass Tree init error\n");
			goto err;
		}
	} else {
		dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
		buf_pool = pdata->rx_ring[0]->buf_pool;
		page_pool = pdata->rx_ring[0]->page_pool;
		ring_id = (page_pool) ? page_pool->id : 0;
		pdata->port_ops->cle_bypass(pdata, dst_ring_num,
					    buf_pool->id, ring_id);
	}

	ndev->max_mtu = XGENE_ENET_MAX_MTU;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->mac_ops->init(pdata);

	return ret;

err:
	xgene_enet_delete_desc_rings(pdata);
	return ret;
}

static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 0;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->cle_ops = &xgene_cle3in_ops;
		pdata->rm = RM0;
		if (!pdata->rxq_cnt) {
			pdata->rxq_cnt = XGENE_NUM_RX_RING;
			pdata->txq_cnt = XGENE_NUM_TX_RING;
			pdata->cq_cnt = XGENE_NUM_TXC_RING;
		}
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
				pdata->eth_bufnum = START_ETH_BUFNUM_0;
				pdata->bp_bufnum = START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			}
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->rm = RM0;
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	void (*link_state)(struct work_struct *);
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		ret = -ENODEV;
		goto err;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO;
		spin_lock_init(&pdata->mss_lock);
	}
	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	link_state = pdata->mac_ops->link_state;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		INIT_DELAYED_WORK(&pdata->link_work, link_state);
	} else if (!pdata->mdio_driver) {
		if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
			ret = xgene_enet_mdio_config(pdata);
		else
			INIT_DELAYED_WORK(&pdata->link_work, link_state);

		if (ret)
			goto err1;
	}

	xgene_enet_napi_add(pdata);
	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err2;
	}

	return 0;

err2:
	/*
	 * If necessary, free_netdev() will call netif_napi_del() and undo
	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
	 */

	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
err1:
	xgene_enet_delete_desc_rings(pdata);
err:
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);

	unregister_netdev(ndev);
	pdata->port_ops->shutdown(pdata);
	xgene_enet_delete_desc_rings(pdata);
	free_netdev(ndev);

	return 0;
}

static void xgene_enet_shutdown(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xgene_enet_remove(pdev);
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
	.driver = {
		.name = "xgene-enet",
		.of_match_table = of_match_ptr(xgene_enet_of_match),
		.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
	.shutdown = xgene_enet_shutdown,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");