/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 * This file is free software: you may copy, redistribute and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <linux/crc32.h>
#include "alx.h"
#include "hw.h"
#include "reg.h"

const char alx_drv_name[] = "alx";

static bool msix = false;
module_param(msix, bool, 0);
MODULE_PARM_DESC(msix, "Enable MSI-X interrupt support");

static void alx_free_txbuf(struct alx_priv *alx, int entry)
{
	struct alx_buffer *txb = &alx->txq.bufs[entry];

	if (dma_unmap_len(txb, size)) {
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(txb, dma),
				 dma_unmap_len(txb, size),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(txb, size, 0);
	}

	if (txb->skb) {
		dev_kfree_skb_any(txb->skb);
		txb->skb = NULL;
	}
}

static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct sk_buff *skb;
	struct alx_buffer *cur_buf;
	dma_addr_t dma;
	u16 cur, next, count = 0;

	next = cur = rxq->write_idx;
	if (++next == alx->rx_ringsz)
		next = 0;
	cur_buf = &rxq->bufs[cur];

	while (!cur_buf->skb && next != rxq->read_idx) {
		struct alx_rfd *rfd = &rxq->rfd[cur];

		/*
		 * When the RX DMA address is set to something like
		 * 0x....fc0, it is very likely to cause a DMA
		 * RFD overflow issue.
		 *
		 * To work around it, allocate the rx skb with 64 bytes
		 * of extra space and offset the address whenever
		 * 0x....fc0 is detected.
		 */
		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
		if (!skb)
			break;

		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
			skb_reserve(skb, 64);

		dma = dma_map_single(&alx->hw.pdev->dev,
				     skb->data, alx->rxbuf_size,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
			dev_kfree_skb(skb);
			break;
		}

		/* Unfortunately, RX descriptor buffers must be 4-byte
		 * aligned, so we can't use IP alignment.
		 */
		if (WARN_ON(dma & 3)) {
			dev_kfree_skb(skb);
			break;
		}

		cur_buf->skb = skb;
		dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
		dma_unmap_addr_set(cur_buf, dma, dma);
		rfd->addr = cpu_to_le64(dma);

		cur = next;
		if (++next == alx->rx_ringsz)
			next = 0;
		cur_buf = &rxq->bufs[cur];
		count++;
	}

	if (count) {
		/* flush all updates before updating hardware */
		wmb();
		rxq->write_idx = cur;
		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
	}

	return count;
}

static inline int alx_tpd_avail(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;

	if (txq->write_idx >= txq->read_idx)
		return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
	return txq->read_idx - txq->write_idx - 1;
}

static bool alx_clean_tx_irq(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	u16 hw_read_idx, sw_read_idx;
	unsigned int total_bytes = 0, total_packets = 0;
	int budget = ALX_DEFAULT_TX_WORK;

	sw_read_idx = txq->read_idx;
	hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);

	if (sw_read_idx != hw_read_idx) {
		while (sw_read_idx != hw_read_idx && budget > 0) {
			struct sk_buff *skb;

			skb = txq->bufs[sw_read_idx].skb;
			if (skb) {
				total_bytes += skb->len;
				total_packets++;
				budget--;
			}

			alx_free_txbuf(alx, sw_read_idx);

			if (++sw_read_idx == alx->tx_ringsz)
				sw_read_idx = 0;
		}
		txq->read_idx = sw_read_idx;

		netdev_completed_queue(alx->dev, total_packets, total_bytes);
	}

	if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
	    alx_tpd_avail(alx) > alx->tx_ringsz/4)
		netif_wake_queue(alx->dev);

	return sw_read_idx == hw_read_idx;
}

static void alx_schedule_link_check(struct alx_priv *alx)
{
	schedule_work(&alx->link_check_wk);
}

static void alx_schedule_reset(struct alx_priv *alx)
{
	schedule_work(&alx->reset_wk);
}

static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_rrd *rrd;
	struct alx_buffer *rxb;
	struct sk_buff *skb;
	u16 length, rfd_cleaned = 0;
	int work = 0;

	while (work < budget) {
		rrd = &rxq->rrd[rxq->rrd_read_idx];
		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
			break;
		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);

		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_SI) != rxq->read_idx ||
		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_NOR) != 1) {
			alx_schedule_reset(alx);
			return work;
		}

		rxb = &rxq->bufs[rxq->read_idx];
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(rxb, dma),
				 dma_unmap_len(rxb, size),
				 DMA_FROM_DEVICE);
		dma_unmap_len_set(rxb, size, 0);
		skb = rxb->skb;
		rxb->skb = NULL;

		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
			rrd->word3 = 0;
			dev_kfree_skb_any(skb);
			goto next_pkt;
		}

		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
				       RRD_PKTLEN) - ETH_FCS_LEN;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, alx->dev);

		skb_checksum_none_assert(skb);
		if (alx->dev->features & NETIF_F_RXCSUM &&
		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
					      RRD_PID)) {
			case RRD_PID_IPV6UDP:
			case RRD_PID_IPV4UDP:
			case RRD_PID_IPV4TCP:
			case RRD_PID_IPV6TCP:
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}

		napi_gro_receive(&alx->napi, skb);
		work++;

next_pkt:
		if (++rxq->read_idx == alx->rx_ringsz)
			rxq->read_idx = 0;
		if (++rxq->rrd_read_idx == alx->rx_ringsz)
			rxq->rrd_read_idx = 0;

		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
	}

	if (rfd_cleaned)
		alx_refill_rx_ring(alx, GFP_ATOMIC);

	return work;
}

static int alx_poll(struct napi_struct *napi, int budget)
{
	struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	bool tx_complete;
	int work;

	tx_complete = alx_clean_tx_irq(alx);
	work = alx_clean_rx_irq(alx, budget);

	if (!tx_complete || work == budget)
		return budget;

	napi_complete(&alx->napi);

	/* enable interrupt */
	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_mask_msix(hw, 1, false);
	} else {
		spin_lock_irqsave(&alx->irq_lock, flags);
		alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		spin_unlock_irqrestore(&alx->irq_lock, flags);
	}

	alx_post_write(hw);

	return work;
}

static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	if (intr & ALX_ISR_FATAL) {
		netif_warn(alx, hw, alx->dev,
			   "fatal interrupt 0x%x, resetting\n", intr);
		alx_schedule_reset(alx);
		return true;
	}

	if (intr & ALX_ISR_ALERT)
		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);

	if (intr & ALX_ISR_PHY) {
		/* suppress the PHY interrupt: its source is internal to
		 * the PHY, and the main interrupt status can only be
		 * cleared once the PHY's internal status has been
		 * cleared (done in the link check worker).
		 */
		alx->int_mask &= ~ALX_ISR_PHY;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		alx_schedule_link_check(alx);
	}

	return false;
}

static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	spin_lock(&alx->irq_lock);

	/* ACK interrupt */
	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
	intr &= alx->int_mask;

	if (alx_intr_handle_misc(alx, intr))
		goto out;

	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
		napi_schedule(&alx->napi);
		/* mask rx/tx interrupt, enable them when napi complete */
		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	}

	alx_write_mem32(hw, ALX_ISR, 0);

out:
	spin_unlock(&alx->irq_lock);
	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msix_ring(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, 1, true);
	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0));

	napi_schedule(&alx->napi);

	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msix_misc(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, 0, true);

	/* read interrupt status */
	intr = alx_read_mem32(hw, ALX_ISR);
	intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);

	if (alx_intr_handle_misc(alx, intr))
		return IRQ_HANDLED;

	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, intr);

	/* enable interrupt again */
	alx_mask_msix(hw, 0, false);

	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msi(int irq, void *data)
{
	struct alx_priv *alx = data;

	return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
}

static irqreturn_t alx_intr_legacy(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	intr = alx_read_mem32(hw, ALX_ISR);

	if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
		return IRQ_NONE;

	return alx_intr_handle(alx, intr);
}

static void alx_init_ring_ptrs(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;

	alx->rxq.read_idx = 0;
	alx->rxq.write_idx = 0;
	alx->rxq.rrd_read_idx = 0;
	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);

	alx->txq.read_idx = 0;
	alx->txq.write_idx = 0;
	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);

	/* load these pointers into the chip */
	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}

static void alx_free_txring_buf(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	int i;

	if (!txq->bufs)
		return;

	for (i = 0; i < alx->tx_ringsz; i++)
		alx_free_txbuf(alx, i);

	memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
	memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
	txq->write_idx = 0;
	txq->read_idx = 0;

	netdev_reset_queue(alx->dev);
}

static void alx_free_rxring_buf(struct alx_priv *alx)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_buffer *cur_buf;
	u16 i;

	if (rxq == NULL)
		return;

	for (i = 0; i < alx->rx_ringsz; i++) {
		cur_buf = rxq->bufs + i;
		if (cur_buf->skb) {
			dma_unmap_single(&alx->hw.pdev->dev,
					 dma_unmap_addr(cur_buf, dma),
					 dma_unmap_len(cur_buf, size),
					 DMA_FROM_DEVICE);
			dev_kfree_skb(cur_buf->skb);
			cur_buf->skb = NULL;
			dma_unmap_len_set(cur_buf, size, 0);
			dma_unmap_addr_set(cur_buf, dma, 0);
		}
	}

	rxq->write_idx = 0;
	rxq->read_idx = 0;
	rxq->rrd_read_idx = 0;
}

static void alx_free_buffers(struct alx_priv *alx)
{
	alx_free_txring_buf(alx);
	alx_free_rxring_buf(alx);
}

static int alx_reinit_rings(struct alx_priv *alx)
{
	alx_free_buffers(alx);

	alx_init_ring_ptrs(alx);

	if (!alx_refill_rx_ring(alx, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}

static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
{
	u32 crc32, bit, reg;

	crc32 = ether_crc(ETH_ALEN, addr);
	reg = (crc32 >> 31) & 0x1;
	bit = (crc32 >> 26) & 0x1F;

	mc_hash[reg] |= BIT(bit);
}

static void __alx_set_rx_mode(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct netdev_hw_addr *ha;
	u32 mc_hash[2] = {};

	if (!(netdev->flags & IFF_ALLMULTI)) {
		netdev_for_each_mc_addr(ha, netdev)
			alx_add_mc_addr(hw, ha->addr, mc_hash);

		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
	}

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
	if (netdev->flags & IFF_PROMISC)
		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
	if (netdev->flags & IFF_ALLMULTI)
		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_set_rx_mode(struct net_device *netdev)
{
	__alx_set_rx_mode(netdev);
}

static int alx_set_mac_address(struct net_device *netdev, void *data)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
		netdev->addr_assign_type ^= NET_ADDR_RANDOM;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
	alx_set_macaddr(hw, hw->mac_addr);

	return 0;
}

static int alx_alloc_descriptors(struct alx_priv *alx)
{
	alx->txq.bufs = kcalloc(alx->tx_ringsz,
				sizeof(struct alx_buffer),
				GFP_KERNEL);
	if (!alx->txq.bufs)
		return -ENOMEM;

	alx->rxq.bufs = kcalloc(alx->rx_ringsz,
				sizeof(struct alx_buffer),
				GFP_KERNEL);
	if (!alx->rxq.bufs)
		goto out_free;

	/* physical tx/rx ring descriptors
	 *
	 * Allocate them as a single chunk because they must not cross a
	 * 4G boundary (hardware has a single register for high 32 bits
	 * of addresses only)
	 */
	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
			    sizeof(struct alx_rrd) * alx->rx_ringsz +
			    sizeof(struct alx_rfd) * alx->rx_ringsz;
	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
						alx->descmem.size,
						&alx->descmem.dma,
						GFP_KERNEL);
	if (!alx->descmem.virt)
		goto out_free;

	alx->txq.tpd = alx->descmem.virt;
	alx->txq.tpd_dma = alx->descmem.dma;

	/* alignment requirement for next block */
	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);

	alx->rxq.rrd =
		(void *)((u8 *)alx->descmem.virt +
			 sizeof(struct alx_txd) * alx->tx_ringsz);
	alx->rxq.rrd_dma = alx->descmem.dma +
			   sizeof(struct alx_txd) * alx->tx_ringsz;

	/* alignment requirement for next block */
	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);

	alx->rxq.rfd =
		(void *)((u8 *)alx->descmem.virt +
			 sizeof(struct alx_txd) * alx->tx_ringsz +
			 sizeof(struct alx_rrd) * alx->rx_ringsz);
	alx->rxq.rfd_dma = alx->descmem.dma +
			   sizeof(struct alx_txd) * alx->tx_ringsz +
			   sizeof(struct alx_rrd) * alx->rx_ringsz;

	return 0;
out_free:
	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);
	return -ENOMEM;
}

static int alx_alloc_rings(struct alx_priv *alx)
{
	int err;

	err = alx_alloc_descriptors(alx);
	if (err)
		return err;

	alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
	alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;

	netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);

	alx_reinit_rings(alx);
	return 0;
}

static void alx_free_rings(struct alx_priv *alx)
{
	netif_napi_del(&alx->napi);
	alx_free_buffers(alx);

	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);

	dma_free_coherent(&alx->hw.pdev->dev,
			  alx->descmem.size,
			  alx->descmem.virt,
			  alx->descmem.dma);
}

static void alx_config_vector_mapping(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 tbl = 0;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		tbl |= 1 << ALX_MSI_MAP_TBL1_TXQ0_SHIFT;
		tbl |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
	}

	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl);
	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}

static bool alx_enable_msix(struct alx_priv *alx)
{
	int i, err, num_vec = 2;

	alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
				    GFP_KERNEL);
	if (!alx->msix_entries) {
		netdev_warn(alx->dev, "Allocation of msix entries failed!\n");
		return false;
	}

	for (i = 0; i < num_vec; i++)
		alx->msix_entries[i].entry = i;

	err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec);
	if (err) {
		kfree(alx->msix_entries);
		netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
		return false;
	}

	alx->num_vec = num_vec;
	return true;
}

static int alx_request_msix(struct alx_priv *alx)
{
	struct net_device *netdev = alx->dev;
	int i, err, vector = 0, free_vector = 0;

	err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc,
			  0, netdev->name, alx);
	if (err)
		goto out_err;

	vector++;
	sprintf(alx->irq_lbl, "%s-TxRx-0", netdev->name);

	err = request_irq(alx->msix_entries[vector].vector,
			  alx_intr_msix_ring, 0, alx->irq_lbl, alx);
	if (err)
		goto out_free;

	return 0;

out_free:
	free_irq(alx->msix_entries[free_vector++].vector, alx);

	vector--;
	for (i = 0; i < vector; i++)
		free_irq(alx->msix_entries[free_vector++].vector, alx);

out_err:
	return err;
}

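/* Select the interrupt mode for the device: try MSI-X when the module
 * parameter requests it, otherwise fall back to MSI; if neither can be
 * enabled, alx_request_irq() ends up using the legacy INTx interrupt.
 */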
static void alx_init_intr(struct alx_priv *alx, bool msix)
{
	if (msix) {
		if (alx_enable_msix(alx))
			alx->flags |= ALX_FLAG_USING_MSIX;
	}

	if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
		alx->num_vec = 1;

		if (!pci_enable_msi(alx->hw.pdev))
			alx->flags |= ALX_FLAG_USING_MSI;
	}
}

static void alx_disable_advanced_intr(struct alx_priv *alx)
{
	if (alx->flags & ALX_FLAG_USING_MSIX) {
		kfree(alx->msix_entries);
		pci_disable_msix(alx->hw.pdev);
		alx->flags &= ~ALX_FLAG_USING_MSIX;
	}

	if (alx->flags & ALX_FLAG_USING_MSI) {
		pci_disable_msi(alx->hw.pdev);
		alx->flags &= ~ALX_FLAG_USING_MSI;
	}
}

static void alx_irq_enable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	/* level-1 interrupt switch */
	alx_write_mem32(hw, ALX_ISR, 0);
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	alx_post_write(hw);

	if (alx->flags & ALX_FLAG_USING_MSIX)
		/* enable all msix irqs */
		for (i = 0; i < alx->num_vec; i++)
			alx_mask_msix(hw, i, false);
}

static void alx_irq_disable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_post_write(hw);

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		for (i = 0; i < alx->num_vec; i++) {
			alx_mask_msix(hw, i, true);
			synchronize_irq(alx->msix_entries[i].vector);
		}
	} else {
		synchronize_irq(alx->hw.pdev->irq);
	}
}

static int alx_request_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;
	u32 msi_ctrl;

	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
		err = alx_request_msix(alx);
		if (!err)
			goto out;

		/* msix request failed, realloc resources */
		alx_disable_advanced_intr(alx);
		alx_init_intr(alx, false);
	}

	if (alx->flags & ALX_FLAG_USING_MSI) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
		err = request_irq(pdev->irq, alx_intr_msi, 0,
				  alx->dev->name, alx);
		if (!err)
			goto out;

		/* fall back to legacy interrupt */
		alx->flags &= ~ALX_FLAG_USING_MSI;
		pci_disable_msi(alx->hw.pdev);
	}

	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
	err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
			  alx->dev->name, alx);
out:
	if (!err)
		alx_config_vector_mapping(alx);
	else
		netdev_err(alx->dev, "IRQ registration failed!\n");
	return err;
}

static void alx_free_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	int i;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		/* we have only 2 vectors without multi queue support */
		for (i = 0; i < 2; i++)
			free_irq(alx->msix_entries[i].vector, alx);
	} else {
		free_irq(pdev->irq, alx);
	}

	alx_disable_advanced_intr(alx);
}

static int alx_identify_hw(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int rev = alx_hw_revision(hw);

	if (rev > ALX_REV_C0)
		return -EINVAL;

	hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;

	return 0;
}

static int alx_init_sw(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;

	err = alx_identify_hw(alx);
	if (err) {
		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
		return err;
	}

	alx->hw.lnk_patch =
		pdev->device == ALX_DEV_ID_AR8161 &&
		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
		pdev->subsystem_device == 0x0091 &&
		pdev->revision == 0;

	hw->smb_timer = 400;
	hw->mtu = alx->dev->mtu;
	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
	alx->tx_ringsz = 256;
	alx->rx_ringsz = 512;
	hw->imt = 200;
	alx->int_mask = ALX_ISR_MISC;
	hw->dma_chnl = hw->max_dma_chnl;
	hw->ith_tpd = alx->tx_ringsz / 3;
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;
	hw->adv_cfg = ADVERTISED_Autoneg |
		      ADVERTISED_10baseT_Half |
		      ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Full |
		      ADVERTISED_100baseT_Half |
		      ADVERTISED_1000baseT_Full;
	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;

	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
		      ALX_MAC_CTRL_BRD_EN |
		      ALX_MAC_CTRL_PCRCE |
		      ALX_MAC_CTRL_CRCE |
		      ALX_MAC_CTRL_RXFC_EN |
		      ALX_MAC_CTRL_TXFC_EN |
		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;

	return err;
}

static netdev_features_t alx_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	return features;
}

static void alx_netif_stop(struct alx_priv *alx)
{
	netif_trans_update(alx->dev);
	if (netif_carrier_ok(alx->dev)) {
		netif_carrier_off(alx->dev);
		netif_tx_disable(alx->dev);
		napi_disable(&alx->napi);
	}
}

static void alx_halt(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_netif_stop(alx);
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;

	alx_reset_mac(hw);

	/* disable l0s/l1 */
	alx_enable_aspm(hw, false, false);
	alx_irq_disable(alx);
	alx_free_buffers(alx);
}

static void alx_configure(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_configure_basic(hw);
	alx_disable_rss(hw);
	__alx_set_rx_mode(alx->dev);

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_activate(struct alx_priv *alx)
{
	/* hardware setting lost, restore it */
	alx_reinit_rings(alx);
	alx_configure(alx);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	alx_schedule_link_check(alx);
}

static void alx_reinit(struct alx_priv *alx)
{
	ASSERT_RTNL();

	alx_halt(alx);
	alx_activate(alx);
}

static int alx_change_mtu(struct net_device *netdev, int mtu)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int max_frame = ALX_MAX_FRAME_LEN(mtu);

	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
	    (max_frame > ALX_MAX_FRAME_SIZE))
		return -EINVAL;

	if (netdev->mtu == mtu)
		return 0;

	netdev->mtu = mtu;
	alx->hw.mtu = mtu;
	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
	netdev_update_features(netdev);
	if (netif_running(netdev))
		alx_reinit(alx);
	return 0;
}

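/* Re-enable the transmit queues, NAPI polling and the carrier once the
 * link comes back up (counterpart of alx_netif_stop()).
 */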
static void alx_netif_start(struct alx_priv *alx)
{
	netif_tx_wake_all_queues(alx->dev);
	napi_enable(&alx->napi);
	netif_carrier_on(alx->dev);
}

static int __alx_open(struct alx_priv *alx, bool resume)
{
	int err;

	alx_init_intr(alx, msix);

	if (!resume)
		netif_carrier_off(alx->dev);

	err = alx_alloc_rings(alx);
	if (err)
		goto out_disable_adv_intr;

	alx_configure(alx);

	err = alx_request_irq(alx);
	if (err)
		goto out_free_rings;

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	if (!resume)
		netif_tx_start_all_queues(alx->dev);

	alx_schedule_link_check(alx);
	return 0;

out_free_rings:
	alx_free_rings(alx);
out_disable_adv_intr:
	alx_disable_advanced_intr(alx);
	return err;
}

static void __alx_stop(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_free_irq(alx);
	alx_free_rings(alx);
}

static const char *alx_speed_desc(struct alx_hw *hw)
{
	switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
	case ADVERTISED_1000baseT_Full:
		return "1 Gbps Full";
	case ADVERTISED_100baseT_Full:
		return "100 Mbps Full";
	case ADVERTISED_100baseT_Half:
		return "100 Mbps Half";
	case ADVERTISED_10baseT_Full:
		return "10 Mbps Full";
	case ADVERTISED_10baseT_Half:
		return "10 Mbps Half";
	default:
		return "Unknown speed";
	}
}

static void alx_check_link(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	int old_speed;
	u8 old_duplex;
	int err;

	/* clear PHY internal interrupt status, otherwise the main
	 * interrupt status will be asserted forever
	 */
	alx_clear_phy_intr(hw);

	old_speed = hw->link_speed;
	old_duplex = hw->duplex;
	err = alx_read_phy_link(hw);
	if (err < 0)
		goto reset;

	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_PHY;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	if (old_speed == hw->link_speed)
		return;

	if (hw->link_speed != SPEED_UNKNOWN) {
		netif_info(alx, link, alx->dev,
			   "NIC Up: %s\n", alx_speed_desc(hw));
		alx_post_phy_link(hw);
		alx_enable_aspm(hw, true, true);
		alx_start_mac(hw);

		if (old_speed == SPEED_UNKNOWN)
			alx_netif_start(alx);
	} else {
		/* link is now down */
		alx_netif_stop(alx);
		netif_info(alx, link, alx->dev, "Link Down\n");
		err = alx_reset_mac(hw);
		if (err)
			goto reset;
		alx_irq_disable(alx);

		/* MAC reset causes all HW settings to be lost, restore all */
		err = alx_reinit_rings(alx);
		if (err)
			goto reset;
		alx_configure(alx);
		alx_enable_aspm(hw, false, true);
		alx_post_phy_link(hw);
		alx_irq_enable(alx);
	}

	return;

reset:
	alx_schedule_reset(alx);
}

static int alx_open(struct net_device *netdev)
{
	return __alx_open(netdev_priv(netdev), false);
}

static int alx_stop(struct net_device *netdev)
{
	__alx_stop(netdev_priv(netdev));
	return 0;
}

static void alx_link_check(struct work_struct *work)
{
	struct alx_priv *alx;

	alx = container_of(work, struct alx_priv, link_check_wk);

	rtnl_lock();
	alx_check_link(alx);
	rtnl_unlock();
}

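/* Reset worker: scheduled from the interrupt and error paths (fatal
 * interrupt, inconsistent RX descriptor, TX timeout or link-check
 * failure) and performs a full halt/activate cycle under RTNL.
 */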
static void alx_reset(struct work_struct *work)
{
	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);

	rtnl_lock();
	alx_reinit(alx);
	rtnl_unlock();
}

static int alx_tpd_req(struct sk_buff *skb)
{
	int num;

	num = skb_shinfo(skb)->nr_frags + 1;
	/* we need one extra descriptor for LSOv2 */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		num++;

	return num;
}

static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
{
	u8 cso, css;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	cso = skb_checksum_start_offset(skb);
	if (cso & 1)
		return -EINVAL;

	css = cso + skb->csum_offset;
	first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
	first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
	first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);

	return 0;
}

static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
{
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		first->word1 |= 1 << TPD_IPV4_SHIFT;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		/* LSOv2: the first TPD only provides the packet length */
		first->adrl.l.pkt_len = skb->len;
		first->word1 |= 1 << TPD_LSO_V2_SHIFT;
	}

	first->word1 |= 1 << TPD_LSO_EN_SHIFT;
	first->word1 |= (skb_transport_offset(skb) &
			 TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT;
	first->word1 |= (skb_shinfo(skb)->gso_size &
			 TPD_MSS_MASK) << TPD_MSS_SHIFT;
	return 1;
}

static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
{
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *tpd, *first_tpd;
	dma_addr_t dma;
	int maplen, f, first_idx = txq->write_idx;

	first_tpd = &txq->tpd[txq->write_idx];
	tpd = first_tpd;

	if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
		if (++txq->write_idx == alx->tx_ringsz)
			txq->write_idx = 0;

		tpd = &txq->tpd[txq->write_idx];
		tpd->len = first_tpd->len;
		tpd->vlan_tag = first_tpd->vlan_tag;
		tpd->word1 = first_tpd->word1;
	}

	maplen = skb_headlen(skb);
	dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&alx->hw.pdev->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == alx->tx_ringsz)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		tpd->word1 = first_tpd->word1;

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma))
			goto err_dma;
		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* last TPD, set EOP flag and store skb */
	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == alx->tx_ringsz)
		txq->write_idx = 0;

	return 0;

err_dma:
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(alx, f);
		if (++f == alx->tx_ringsz)
			f = 0;
	}
	return -ENOMEM;
}

static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *first;
	int tso;

	if (alx_tpd_avail(alx) < alx_tpd_req(skb)) {
		netif_stop_queue(alx->dev);
		goto drop;
	}

	first = &txq->tpd[txq->write_idx];
	memset(first, 0, sizeof(*first));

	tso = alx_tso(skb, first);
	if (tso < 0)
		goto drop;
	else if (!tso && alx_tx_csum(skb, first))
		goto drop;

	if (alx_map_tx_skb(alx, skb) < 0)
		goto drop;

	netdev_sent_queue(alx->dev, skb->len);

	/* flush updates before updating hardware */
	wmb();
	alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);

	if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
		netif_stop_queue(alx->dev);

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void alx_tx_timeout(struct net_device *dev)
{
	struct alx_priv *alx = netdev_priv(dev);

	alx_schedule_reset(alx);
}

static int alx_mdio_read(struct net_device *netdev,
			 int prtad, int devad, u16 addr)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	u16 val;
	int err;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		err = alx_read_phy_reg(hw, addr, &val);
	else
		err = alx_read_phy_ext(hw, devad, addr, &val);

	if (err)
		return err;
	return val;
}

static int alx_mdio_write(struct net_device *netdev,
			  int prtad, int devad, u16 addr, u16 val)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		return alx_write_phy_reg(hw, addr, val);

	return alx_write_phy_ext(hw, devad, addr, val);
}

static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EAGAIN;

	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void alx_poll_controller(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_intr_msix_misc(0, alx);
		alx_intr_msix_ring(0, alx);
	} else if (alx->flags & ALX_FLAG_USING_MSI)
		alx_intr_msi(0, alx);
	else
		alx_intr_legacy(0, alx);
}
#endif

static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *net_stats)
{
	struct alx_priv *alx = netdev_priv(dev);
	struct alx_hw_stats *hw_stats = &alx->hw.stats;

	spin_lock(&alx->stats_lock);

	alx_update_hw_stats(&alx->hw);

	net_stats->tx_bytes = hw_stats->tx_byte_cnt;
	net_stats->rx_bytes = hw_stats->rx_byte_cnt;
	net_stats->multicast = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_single_col +
				hw_stats->tx_multi_col +
				hw_stats->tx_late_col +
				hw_stats->tx_abort_col;

	net_stats->rx_errors = hw_stats->rx_frag +
			       hw_stats->rx_fcs_err +
			       hw_stats->rx_len_err +
			       hw_stats->rx_ov_sz +
			       hw_stats->rx_ov_rrd +
			       hw_stats->rx_align_err +
			       hw_stats->rx_ov_rxf;

	net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors = hw_stats->rx_align_err;
	net_stats->rx_dropped = hw_stats->rx_ov_rrd;

	net_stats->tx_errors = hw_stats->tx_late_col +
			       hw_stats->tx_abort_col +
			       hw_stats->tx_underrun +
			       hw_stats->tx_trunc;

	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_fifo_errors = hw_stats->tx_underrun;
	net_stats->tx_window_errors = hw_stats->tx_late_col;

	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;

	spin_unlock(&alx->stats_lock);

	return net_stats;
}

static const struct net_device_ops alx_netdev_ops = {
	.ndo_open               = alx_open,
	.ndo_stop               = alx_stop,
	.ndo_start_xmit         = alx_start_xmit,
	.ndo_get_stats64        = alx_get_stats64,
	.ndo_set_rx_mode        = alx_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = alx_set_mac_address,
	.ndo_change_mtu         = alx_change_mtu,
	.ndo_do_ioctl           = alx_ioctl,
	.ndo_tx_timeout         = alx_tx_timeout,
	.ndo_fix_features       = alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = alx_poll_controller,
#endif
};

static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct alx_priv *alx;
	struct alx_hw *hw;
	bool phy_configured;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* The alx chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used for descriptors.
	 */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA config, aborting\n");
			goto out_pci_disable;
		}
	}

	err = pci_request_mem_regions(pdev, alx_drv_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_mem_regions failed\n");
		goto out_pci_disable;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	if (!pdev->pm_cap) {
		dev_err(&pdev->dev,
			"Can't find power management capability, aborting\n");
		err = -EIO;
		goto out_pci_release;
	}

	netdev = alloc_etherdev(sizeof(*alx));
	if (!netdev) {
		err = -ENOMEM;
		goto out_pci_release;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	alx = netdev_priv(netdev);
	spin_lock_init(&alx->hw.mdio_lock);
	spin_lock_init(&alx->irq_lock);
	spin_lock_init(&alx->stats_lock);
	alx->dev = netdev;
	alx->hw.pdev = pdev;
	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
	hw = &alx->hw;
	pci_set_drvdata(pdev, alx);

	hw->hw_addr = pci_ioremap_bar(pdev, 0);
	if (!hw->hw_addr) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -EIO;
		goto out_free_netdev;
	}

	netdev->netdev_ops = &alx_netdev_ops;
	netdev->ethtool_ops = &alx_ethtool_ops;
	netdev->irq = pdev->irq;
	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;

	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;

	err = alx_init_sw(alx);
	if (err) {
		dev_err(&pdev->dev, "net device private data init failed\n");
		goto out_unmap;
	}

	alx_reset_pcie(hw);

	phy_configured = alx_phy_configured(hw);

	if (!phy_configured)
		alx_reset_phy(hw);

	err = alx_reset_mac(hw);
	if (err) {
		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
		goto out_unmap;
	}

	/* setup link to put it in a known good starting state */
	if (!phy_configured) {
		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
		if (err) {
			dev_err(&pdev->dev,
				"failed to configure PHY speed/duplex (err=%d)\n",
				err);
			goto out_unmap;
		}
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_HW_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6;

	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
		dev_warn(&pdev->dev,
			 "Invalid permanent address programmed, using random one\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
	}

	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
	memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);

	hw->mdio.prtad = 0;
	hw->mdio.mmds = 0;
	hw->mdio.dev = netdev;
	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
				MDIO_SUPPORTS_C22 |
				MDIO_EMULATE_C22;
	hw->mdio.mdio_read = alx_mdio_read;
	hw->mdio.mdio_write = alx_mdio_write;

	if (!alx_get_phy_info(hw)) {
		dev_err(&pdev->dev, "failed to identify PHY\n");
		err = -EIO;
		goto out_unmap;
	}

	INIT_WORK(&alx->link_check_wk, alx_link_check);
	INIT_WORK(&alx->reset_wk, alx_reset);
	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "register netdevice failed\n");
		goto out_unmap;
	}

	netdev_info(netdev,
		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
		    netdev->dev_addr);

	return 0;

out_unmap:
	iounmap(hw->hw_addr);
out_free_netdev:
	free_netdev(netdev);
out_pci_release:
	pci_release_mem_regions(pdev);
out_pci_disable:
	pci_disable_device(pdev);
	return err;
}

static void alx_remove(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	cancel_work_sync(&alx->link_check_wk);
	cancel_work_sync(&alx->reset_wk);

	/* restore permanent mac address */
	alx_set_macaddr(hw, hw->perm_addr);

	unregister_netdev(alx->dev);
	iounmap(hw->hw_addr);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	free_netdev(alx->dev);
}

#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_detach(alx->dev);
	__alx_stop(alx);
	return 0;
}

static int alx_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	alx_reset_phy(hw);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_attach(alx->dev);
	return __alx_open(alx, true);
}

static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
#define ALX_PM_OPS (&alx_pm_ops)
#else
#define ALX_PM_OPS NULL
#endif

static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "pci error detected\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		alx_halt(alx);
	}

	if (state == pci_channel_io_perm_failure)
		rc = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return rc;
}

static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	dev_info(&pdev->dev, "pci error slot reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
		goto out;
	}

	pci_set_master(pdev);

	alx_reset_pcie(hw);
	if (!alx_reset_mac(hw))
		rc = PCI_ERS_RESULT_RECOVERED;
out:
	pci_cleanup_aer_uncorrect_error_status(pdev);

	rtnl_unlock();

	return rc;
}

static void alx_pci_error_resume(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;

	dev_info(&pdev->dev, "pci error resume\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		alx_activate(alx);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers alx_err_handlers = {
	.error_detected = alx_pci_error_detected,
	.slot_reset     = alx_pci_error_slot_reset,
	.resume         = alx_pci_error_resume,
};

static const struct pci_device_id alx_pci_tbl[] = {
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
	{}
};

static struct pci_driver alx_driver = {
	.name        = alx_drv_name,
	.id_table    = alx_pci_tbl,
	.probe       = alx_probe,
	.remove      = alx_remove,
	.err_handler = &alx_err_handlers,
	.driver.pm   = ALX_PM_OPS,
};

module_pci_driver(alx_driver);
MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION(
	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
MODULE_LICENSE("GPL");