/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *          Ravi Patel <rapatel@apm.com>
 *          Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	iowrite32(nbuf, buf_pool->cmd);
	buf_pool->tail = tail;

	return 0;
}

static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	return ((u16)pdata->rm << 10) | ring->num;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);

	return num_msgs >> NUMMSGSINQ_POS;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = xgene_enet_ring_len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	iowrite32(-len, buf_pool->cmd);
	buf_pool->tail = tail;
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	u16 skb_index;
	u8 status;
	int ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
			 DMA_TO_DEVICE);

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8 l3hlen, l4hlen = 0;
	u8 csum_enable = 0;
	u8 proto = 0;
	u8 ethhdr;
	u64 hopinfo;

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	ethhdr = xgene_enet_hdr_len(skb->data);
	hopinfo = SET_VAL(TCPHDR, l4hlen) |
		  SET_VAL(IPHDR, l3hlen) |
		  SET_VAL(ETHHDR, ethhdr) |
		  SET_VAL(EC, csum_enable) |
		  SET_VAL(IS, proto) |
		  SET_BIT(IC) |
		  SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u16 tail = tx_ring->tail;
	u64 hopinfo;

	raw_desc = &tx_ring->raw_desc[tail];
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m0 = cpu_to_le64(tail);
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, skb->len) |
				   SET_BIT(COHERENT));
	hopinfo = xgene_enet_work_msg(skb);
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);
	tx_ring->cp_ring->cp_skb[tail] = skb;

	return 0;
}

static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
	u32 tx_level, cq_level;

	tx_level = xgene_enet_ring_len(tx_ring);
	cq_level = xgene_enet_ring_len(cp_ring);
	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
		     cq_level > pdata->cp_qcnt_hi)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	iowrite32(1, tx_ring->cmd);
	skb_tx_timestamp(skb);
	tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen -= 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, count = 0;

	do {
		raw_desc = &ring->raw_desc[head];
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (is_rx_desc(raw_desc))
			ret = xgene_enet_rx_frame(ring, raw_desc);
		else
			ret = xgene_enet_tx_completion(ring, raw_desc);
		xgene_enet_mark_desc_slot_empty(raw_desc);

		head = (head + 1) & slots;
		count++;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		iowrite32(-count, ring->cmd);
		ring->head = head;

		if (netif_queue_stopped(ring->ndev)) {
			if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
				netif_wake_queue(ring->ndev);
		}
	}

	return count;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	pdata->mac_ops->reset(pdata);
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int ret;

	ret = devm_request_irq(dev, pdata->rx_ring->irq, xgene_enet_rx_irq,
			       IRQF_SHARED, ndev->name, pdata->rx_ring);
	if (ret) {
		netdev_err(ndev, "rx%d interrupt request failed\n",
			   pdata->rx_ring->irq);
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;
	napi_enable(&pdata->rx_ring->napi);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	napi_disable(&pdata->rx_ring->napi);
	xgene_enet_free_irq(ndev);
	xgene_enet_process_ring(pdata->rx_ring, -1);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	xgene_enet_clear_ring(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;

	if (pdata->tx_ring) {
		xgene_enet_delete_ring(pdata->tx_ring);
		pdata->tx_ring = NULL;
	}

	if (pdata->rx_ring) {
		buf_pool = pdata->rx_ring->buf_pool;
		xgene_enet_delete_bufpool(buf_pool);
		xgene_enet_delete_ring(buf_pool);
		xgene_enet_delete_ring(pdata->rx_ring);
		pdata->rx_ring = NULL;
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);

	if (ring->desc_addr) {
		xgene_enet_clear_ring(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	ring = pdata->tx_ring;
	if (ring) {
		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);
		xgene_enet_free_desc_ring(ring);
	}

	ring = pdata->rx_ring;
	if (ring) {
		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);
			xgene_enet_free_desc_ring(ring->buf_pool);
		}
		xgene_enet_free_desc_ring(ring);
	}
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = xgene_enet_setup_ring(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
	u8 bp_bufnum = START_BP_BUFNUM;
	u16 ring_id, ring_num = START_RING_NUM;
	int ret;

	/* allocate rx descriptor ring */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!rx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate buffer pool for receiving packets */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
					       RING_CFGSIZE_2KB, ring_id);
	if (!buf_pool) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->nbufpool = NUM_BUFPOOL;
	rx_ring->buf_pool = buf_pool;
	rx_ring->irq = pdata->rx_irq;
	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
					sizeof(struct sk_buff *), GFP_KERNEL);
	if (!buf_pool->rx_skb) {
		ret = -ENOMEM;
		goto err;
	}

	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
	rx_ring->buf_pool = buf_pool;
	pdata->rx_ring = rx_ring;

	/* allocate tx descriptor ring */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!tx_ring) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring = tx_ring;

	cp_ring = pdata->rx_ring;
	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
				       sizeof(struct sk_buff *), GFP_KERNEL);
	if (!cp_ring->cp_skb) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors += stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

static int xgene_get_mac_address(struct device *dev, unsigned char *addr)
{
	int ret;

	ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
	if (ret)
		ret = device_property_read_u8_array(dev, "mac-address",
						    addr, 6);
	if (ret)
		return -ENODEV;

	return ETH_ALEN;
}

static int xgene_get_phy_mode(struct device *dev)
{
	int i, ret;
	char *modestr;

	ret = device_property_read_string(dev, "phy-connection-type",
					  (const char **)&modestr);
	if (ret)
		ret = device_property_read_string(dev, "phy-mode",
						  (const char **)&modestr);
	if (ret)
		return -ENODEV;

	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
		if (!strcasecmp(modestr, phy_modes(i)))
			return i;
	}
	return -ENODEV;
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	int ret;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to get ENET Rx IRQ\n");
		ret = ret ? : -ENXIO;
		return ret;
	}
	pdata->rx_irq = ret;

	if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = xgene_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		pdata->clk = NULL;
	}

	base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	buf_pool = pdata->rx_ring->buf_pool;
	xgene_enet_init_bufpool(buf_pool);
	ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
	if (ret) {
		xgene_enet_delete_desc_rings(pdata);
		return ret;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
	pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	pdata->mac_ops->init(pdata);

	return ret;
}

static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->rm = RM0;
		break;
	}
}

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	struct napi_struct *napi;
	struct xgene_mac_ops *mac_ops;
	int ret;

	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	napi = &pdata->rx_ring->napi;
	netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		ret = xgene_enet_mdio_config(pdata);
	else
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);

	return ret;
err:
	unregister_netdev(ndev);
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	netif_napi_del(&pdata->rx_ring->napi);
	xgene_enet_mdio_remove(pdata);
	xgene_enet_delete_desc_rings(pdata);
	unregister_netdev(ndev);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", },
	{ "APMC0D30", },
	{ "APMC0D31", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static struct of_device_id xgene_enet_of_match[] = {
	{ .compatible = "apm,xgene-enet", },
	{ .compatible = "apm,xgene1-sgenet", },
	{ .compatible = "apm,xgene1-xgenet", },
	{ },
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
	.driver = {
		.name = "xgene-enet",
		.of_match_table = of_match_ptr(xgene_enet_of_match),
		.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");