/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	return ((u16)pdata->rm << 10) | ring->num;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}
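/* Free the skbs still held by the buffer pool, walking back from the tail
 * across the slots that are still outstanding.
 */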
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = pdata->ring_ops->len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	pdata->ring_ops->wr_cmd(buf_pool, -len);
	buf_pool->tail = tail;
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 status;
	int i, ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb),
			 DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u64 hopinfo = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the header to reside within
				 * the first 3 buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

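			/* Nothing to segment if the payload fits in a single MSS */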
			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			if (mss != pdata->mss) {
				pdata->mss = mss;
				pdata->mac_ops->set_mss(pdata);
			}
			hopinfo |= SET_BIT(ET);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		   SET_VAL(IPHDR, l3hlen) |
		   SET_VAL(ETHHDR, ethhdr) |
		   SET_VAL(EC, csum_enable) |
		   SET_VAL(IS, proto) |
		   SET_BIT(IC) |
		   SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	hopinfo = xgene_enet_work_msg(skb);
	if (!hopinfo)
		return -EINVAL;
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

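		/* A single buffer descriptor covers at most 16KB, so an
		 * oversized fragment is consumed 16KB at a time until the
		 * remainder fits.
		 */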
		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	tx_ring->tail = tail;

	return count;
}

static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
	u32 tx_level, cq_level;
	int count;

	tx_level = pdata->ring_ops->len(tx_ring);
	cq_level = pdata->ring_ops->len(cp_ring);
	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
		     cq_level > pdata->cp_qcnt_hi)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pdata->ring_ops->wr_cmd(tx_ring, count);
	skb_tx_timestamp(skb);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

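/* Receive one frame: unmap the buffer handed out by the pool, check the
 * descriptor status, strip the trailing CRC and pass the skb up through
 * GRO. The pool is refilled after every NUM_BUFPOOL frames.
 */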
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen = (datalen & DATALEN_MASK) - 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, count = 0, processed = 0;

	do {
		raw_desc = &ring->raw_desc[head];
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
		}
		if (is_rx_desc(raw_desc))
			ret = xgene_enet_rx_frame(ring, raw_desc);
		else
			ret = xgene_enet_tx_completion(ring, raw_desc);
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		processed++;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (netif_queue_stopped(ring->ndev)) {
			if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
				netif_wake_queue(ring->ndev);
		}
	}

	return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	pdata->mac_ops->reset(pdata);
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret;

	ring = pdata->rx_ring;
	ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
			       IRQF_SHARED, ring->irq_name, ring);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);

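	/* A dedicated Tx-completion ring, when present, shares the Rx
	 * handler: the handler only schedules NAPI on whichever ring
	 * raised the interrupt.
	 */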
	if (pdata->cq_cnt) {
		ring = pdata->tx_ring->cp_ring;
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       IRQF_SHARED, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);

	if (pdata->cq_cnt) {
		devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
			      pdata->tx_ring->cp_ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_enable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_disable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;
	xgene_enet_napi_enable(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_napi_disable(pdata);
	xgene_enet_free_irq(ndev);
	xgene_enet_process_ring(pdata->rx_ring, -1);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;

	if (pdata->tx_ring) {
		xgene_enet_delete_ring(pdata->tx_ring);
		pdata->tx_ring = NULL;
	}

	if (pdata->rx_ring) {
		buf_pool = pdata->rx_ring->buf_pool;
		xgene_enet_delete_bufpool(buf_pool);
		xgene_enet_delete_ring(buf_pool);
		xgene_enet_delete_ring(pdata->rx_ring);
		pdata->rx_ring = NULL;
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}
static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	ring = pdata->tx_ring;
	if (ring) {
		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);
		if (ring->cp_ring && pdata->cq_cnt)
			xgene_enet_free_desc_ring(ring->cp_ring);
		xgene_enet_free_desc_ring(ring);
	}

	ring = pdata->rx_ring;
	if (ring) {
		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);
			xgene_enet_free_desc_ring(ring->buf_pool);
		}
		xgene_enet_free_desc_ring(ring);
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
				&ring->irq_mbox_dma, GFP_KERNEL);
		if (!ring->irq_mbox_addr) {
			dma_free_coherent(dev, size, ring->desc_addr,
					  ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

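/* On first-generation hardware, an SGMII port always owns the ETH0 rings;
 * otherwise ring ownership follows the port id.
 */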
static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u8 cpu_bufnum = pdata->cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	u16 ring_id;
	int ret, size;

	/* allocate rx descriptor ring */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!rx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate buffer pool for receiving packets */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
					       RING_CFGSIZE_2KB, ring_id);
	if (!buf_pool) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->nbufpool = NUM_BUFPOOL;
	rx_ring->buf_pool = buf_pool;
	rx_ring->irq = pdata->rx_irq;
	if (!pdata->cq_cnt) {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
			 ndev->name);
	} else {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
	}
	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
					sizeof(struct sk_buff *), GFP_KERNEL);
	if (!buf_pool->rx_skb) {
		ret = -ENOMEM;
		goto err;
	}

	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
	rx_ring->buf_pool = buf_pool;
	pdata->rx_ring = rx_ring;

	/* allocate tx descriptor ring */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!tx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
	tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs,
						GFP_KERNEL);
	if (!tx_ring->exp_bufs) {
		ret = -ENOMEM;
		goto err;
	}

	pdata->tx_ring = tx_ring;

	if (!pdata->cq_cnt) {
		cp_ring = pdata->rx_ring;
	} else {
		/* allocate tx completion descriptor ring */
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!cp_ring) {
			ret = -ENOMEM;
			goto err;
		}
		cp_ring->irq = pdata->txc_irq;
		snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
	}

	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
				       sizeof(struct sk_buff *), GFP_KERNEL);
	if (!cp_ring->cp_skb) {
		ret = -ENOMEM;
		goto err;
	}

	size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
	cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
					      size, GFP_KERNEL);
	if (!cp_ring->frag_dma_addr) {
		devm_kfree(dev, cp_ring->cp_skb);
		ret = -ENOMEM;
		goto err;
	}

	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

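	/* Queue-depth thresholds: xmit stops the Tx queue once the Tx or
	 * completion ring is more than half full; NAPI wakes it again once
	 * the serviced ring drains below cp_qcnt_low.
	 */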
	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors += stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static int xgene_get_port_id_acpi(struct device *dev,
				  struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {
		pdata->port_id = 0;
	} else {
		pdata->port_id = temp;
	}

	return 0;
}
#endif

static int xgene_get_port_id_dt(struct device *dev,
				struct xgene_enet_pdata *pdata)
{
	u32 id = 0;
	int ret;

	ret = of_property_read_u32(dev->of_node, "port-id", &id);
	if (ret) {
		pdata->port_id = 0;
		ret = 0;
	} else {
		pdata->port_id = id & BIT(0);
	}

	return ret;
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		ret = xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		ret = xgene_get_port_id_acpi(dev, pdata);
#endif
	if (ret)
		return ret;

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to get ENET Rx IRQ\n");
		ret = ret ? : -ENXIO;
		return ret;
	}
	pdata->rx_irq = ret;

	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
		ret = platform_get_irq(pdev, 1);
		if (ret <= 0) {
			pdata->cq_cnt = 0;
			dev_info(dev, "Unable to get Tx completion IRQ, using Rx IRQ instead\n");
		} else {
			pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
			pdata->txc_irq = ret;
		}
	}

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	buf_pool = pdata->rx_ring->buf_pool;
	xgene_enet_init_bufpool(buf_pool);
	ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
	if (ret) {
		xgene_enet_delete_desc_rings(pdata);
		return ret;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
	pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	pdata->mac_ops->init(pdata);

	return ret;
}

static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->rm = RM0;
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = START_CPU_BUFNUM_0;
			pdata->eth_bufnum = START_ETH_BUFNUM_0;
			pdata->bp_bufnum = START_BP_BUFNUM_0;
			pdata->ring_num = START_RING_NUM_0;
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->rm = RM0;
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_del(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_del(napi);
	}
}

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	struct xgene_mac_ops *mac_ops;
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		free_netdev(ndev);
		return -ENODEV;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO;
		pdata->mss = XGENE_ENET_MSS;
	}
	ndev->hw_features = ndev->features;

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	xgene_enet_napi_add(pdata);
	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		ret = xgene_enet_mdio_config(pdata);
	else
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);

	return ret;
err:
	unregister_netdev(ndev);
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	xgene_enet_napi_del(pdata);
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
	unregister_netdev(ndev);
	xgene_enet_delete_desc_rings(pdata);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
	.driver = {
		.name = "xgene-enet",
		.of_match_table = of_match_ptr(xgene_enet_of_match),
		.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");