/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 * This file is free software: you may copy, redistribute and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <linux/crc32.h>
#include "alx.h"
#include "hw.h"
#include "reg.h"

const char alx_drv_name[] = "alx";


static void alx_free_txbuf(struct alx_priv *alx, int entry)
{
	struct alx_buffer *txb = &alx->txq.bufs[entry];

	if (dma_unmap_len(txb, size)) {
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(txb, dma),
				 dma_unmap_len(txb, size),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(txb, size, 0);
	}

	if (txb->skb) {
		dev_kfree_skb_any(txb->skb);
		txb->skb = NULL;
	}
}

static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct sk_buff *skb;
	struct alx_buffer *cur_buf;
	dma_addr_t dma;
	u16 cur, next, count = 0;

	next = cur = rxq->write_idx;
	if (++next == alx->rx_ringsz)
		next = 0;
	cur_buf = &rxq->bufs[cur];

	while (!cur_buf->skb && next != rxq->read_idx) {
		struct alx_rfd *rfd = &rxq->rfd[cur];

		/*
		 * A DMA RX address ending in 0x....fc0 is very likely to
		 * cause a DMA RFD overflow issue.
		 *
		 * To work around it, allocate the rx skb with 64 bytes of
		 * extra space and offset the address whenever 0x....fc0 is
		 * detected.
		 */
		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
		if (!skb)
			break;

		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
			skb_reserve(skb, 64);

		dma = dma_map_single(&alx->hw.pdev->dev,
				     skb->data, alx->rxbuf_size,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
			dev_kfree_skb(skb);
			break;
		}

		/* Unfortunately, RX descriptor buffers must be 4-byte
		 * aligned, so we can't use IP alignment.
		 */
		if (WARN_ON(dma & 3)) {
			dev_kfree_skb(skb);
			break;
		}

		cur_buf->skb = skb;
		dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
		dma_unmap_addr_set(cur_buf, dma, dma);
		rfd->addr = cpu_to_le64(dma);

		cur = next;
		if (++next == alx->rx_ringsz)
			next = 0;
		cur_buf = &rxq->bufs[cur];
		count++;
	}

	if (count) {
		/* flush all updates before updating hardware */
		wmb();
		rxq->write_idx = cur;
		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
	}

	return count;
}

static inline int alx_tpd_avail(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;

	if (txq->write_idx >= txq->read_idx)
		return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
	return txq->read_idx - txq->write_idx - 1;
}

static bool alx_clean_tx_irq(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	u16 hw_read_idx, sw_read_idx;
	unsigned int total_bytes = 0, total_packets = 0;
	int budget = ALX_DEFAULT_TX_WORK;

	sw_read_idx = txq->read_idx;
	hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);

	if (sw_read_idx != hw_read_idx) {
		while (sw_read_idx != hw_read_idx && budget > 0) {
			struct sk_buff *skb;

			skb = txq->bufs[sw_read_idx].skb;
			if (skb) {
				total_bytes += skb->len;
				total_packets++;
				budget--;
			}

			alx_free_txbuf(alx, sw_read_idx);

			if (++sw_read_idx == alx->tx_ringsz)
				sw_read_idx = 0;
		}
		txq->read_idx = sw_read_idx;

		netdev_completed_queue(alx->dev, total_packets, total_bytes);
	}

	if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
	    alx_tpd_avail(alx) > alx->tx_ringsz/4)
		netif_wake_queue(alx->dev);

	return sw_read_idx == hw_read_idx;
}

static void alx_schedule_link_check(struct alx_priv *alx)
{
	schedule_work(&alx->link_check_wk);
}

static void alx_schedule_reset(struct alx_priv *alx)
{
	schedule_work(&alx->reset_wk);
}

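/* Process completed receive return descriptors (RRDs) up to the NAPI budget:
 * unmap each buffer, set the packet length and checksum status, and hand the
 * skb to the stack via napi_gro_receive().  RFD slots are refilled
 * periodically while cleaning.
 */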
static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_rrd *rrd;
	struct alx_buffer *rxb;
	struct sk_buff *skb;
	u16 length, rfd_cleaned = 0;
	int work = 0;

	while (work < budget) {
		rrd = &rxq->rrd[rxq->rrd_read_idx];
		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
			break;
		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);

		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_SI) != rxq->read_idx ||
		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_NOR) != 1) {
			alx_schedule_reset(alx);
			return work;
		}

		rxb = &rxq->bufs[rxq->read_idx];
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(rxb, dma),
				 dma_unmap_len(rxb, size),
				 DMA_FROM_DEVICE);
		dma_unmap_len_set(rxb, size, 0);
		skb = rxb->skb;
		rxb->skb = NULL;

		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
			rrd->word3 = 0;
			dev_kfree_skb_any(skb);
			goto next_pkt;
		}

		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
				       RRD_PKTLEN) - ETH_FCS_LEN;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, alx->dev);

		skb_checksum_none_assert(skb);
		if (alx->dev->features & NETIF_F_RXCSUM &&
		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
					      RRD_PID)) {
			case RRD_PID_IPV6UDP:
			case RRD_PID_IPV4UDP:
			case RRD_PID_IPV4TCP:
			case RRD_PID_IPV6TCP:
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}

		napi_gro_receive(&alx->napi, skb);
		work++;

next_pkt:
		if (++rxq->read_idx == alx->rx_ringsz)
			rxq->read_idx = 0;
		if (++rxq->rrd_read_idx == alx->rx_ringsz)
			rxq->rrd_read_idx = 0;

		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
	}

	if (rfd_cleaned)
		alx_refill_rx_ring(alx, GFP_ATOMIC);

	return work;
}

static int alx_poll(struct napi_struct *napi, int budget)
{
	struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	bool tx_complete;
	int work;

	tx_complete = alx_clean_tx_irq(alx);
	work = alx_clean_rx_irq(alx, budget);

	if (!tx_complete || work == budget)
		return budget;

	napi_complete(&alx->napi);

	/* enable interrupt */
	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	alx_post_write(hw);

	return work;
}

static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;
	bool write_int_mask = false;

	spin_lock(&alx->irq_lock);

	/* ACK interrupt */
	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
	intr &= alx->int_mask;

	if (intr & ALX_ISR_FATAL) {
		netif_warn(alx, hw, alx->dev,
			   "fatal interrupt 0x%x, resetting\n", intr);
		alx_schedule_reset(alx);
		goto out;
	}

	if (intr & ALX_ISR_ALERT)
		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);

	if (intr & ALX_ISR_PHY) {
		/* Mask the PHY interrupt: its source is internal to the
		 * PHY, and the interrupt status can only be cleared once
		 * the PHY's internal status has been cleared (done in the
		 * link check work).
		 */
		alx->int_mask &= ~ALX_ISR_PHY;
		write_int_mask = true;
		alx_schedule_link_check(alx);
	}

	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
		napi_schedule(&alx->napi);
		/* mask rx/tx interrupt, enable them when napi complete */
		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
		write_int_mask = true;
	}

	if (write_int_mask)
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);

	alx_write_mem32(hw, ALX_ISR, 0);

out:
	spin_unlock(&alx->irq_lock);
	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msi(int irq, void *data)
{
	struct alx_priv *alx = data;

	return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
}

static irqreturn_t alx_intr_legacy(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	intr = alx_read_mem32(hw, ALX_ISR);

	if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
		return IRQ_NONE;

	return alx_intr_handle(alx, intr);
}

static void alx_init_ring_ptrs(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;

	alx->rxq.read_idx = 0;
	alx->rxq.write_idx = 0;
	alx->rxq.rrd_read_idx = 0;
	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);

	alx->txq.read_idx = 0;
	alx->txq.write_idx = 0;
	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);

	/* load these pointers into the chip */
	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}

static void alx_free_txring_buf(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	int i;

	if (!txq->bufs)
		return;

	for (i = 0; i < alx->tx_ringsz; i++)
		alx_free_txbuf(alx, i);

	memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
	memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
	txq->write_idx = 0;
	txq->read_idx = 0;

	netdev_reset_queue(alx->dev);
}

static void alx_free_rxring_buf(struct alx_priv *alx)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_buffer *cur_buf;
	u16 i;

	if (rxq == NULL)
		return;

	for (i = 0; i < alx->rx_ringsz; i++) {
		cur_buf = rxq->bufs + i;
		if (cur_buf->skb) {
			dma_unmap_single(&alx->hw.pdev->dev,
					 dma_unmap_addr(cur_buf, dma),
					 dma_unmap_len(cur_buf, size),
					 DMA_FROM_DEVICE);
			dev_kfree_skb(cur_buf->skb);
			cur_buf->skb = NULL;
			dma_unmap_len_set(cur_buf, size, 0);
			dma_unmap_addr_set(cur_buf, dma, 0);
		}
	}

	rxq->write_idx = 0;
	rxq->read_idx = 0;
	rxq->rrd_read_idx = 0;
}

static void alx_free_buffers(struct alx_priv *alx)
{
	alx_free_txring_buf(alx);
	alx_free_rxring_buf(alx);
}

static int alx_reinit_rings(struct alx_priv *alx)
{
	alx_free_buffers(alx);

	alx_init_ring_ptrs(alx);

	if (!alx_refill_rx_ring(alx, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}

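/* Hash a multicast address into the two 32-bit multicast hash table
 * registers: the top CRC32 bit selects the register, the next five bits
 * select the bit within it.
 */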
static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
{
	u32 crc32, bit, reg;

	crc32 = ether_crc(ETH_ALEN, addr);
	reg = (crc32 >> 31) & 0x1;
	bit = (crc32 >> 26) & 0x1F;

	mc_hash[reg] |= BIT(bit);
}

static void __alx_set_rx_mode(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct netdev_hw_addr *ha;
	u32 mc_hash[2] = {};

	if (!(netdev->flags & IFF_ALLMULTI)) {
		netdev_for_each_mc_addr(ha, netdev)
			alx_add_mc_addr(hw, ha->addr, mc_hash);

		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
	}

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
	if (netdev->flags & IFF_PROMISC)
		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
	if (netdev->flags & IFF_ALLMULTI)
		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_set_rx_mode(struct net_device *netdev)
{
	__alx_set_rx_mode(netdev);
}

static int alx_set_mac_address(struct net_device *netdev, void *data)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
		netdev->addr_assign_type ^= NET_ADDR_RANDOM;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
	alx_set_macaddr(hw, hw->mac_addr);

	return 0;
}

static int alx_alloc_descriptors(struct alx_priv *alx)
{
	alx->txq.bufs = kcalloc(alx->tx_ringsz,
				sizeof(struct alx_buffer),
				GFP_KERNEL);
	if (!alx->txq.bufs)
		return -ENOMEM;

	alx->rxq.bufs = kcalloc(alx->rx_ringsz,
				sizeof(struct alx_buffer),
				GFP_KERNEL);
	if (!alx->rxq.bufs)
		goto out_free;

	/* physical tx/rx ring descriptors
	 *
	 * Allocate them as a single chunk because they must not cross a
	 * 4G boundary (hardware has a single register for high 32 bits
	 * of addresses only)
	 */
	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
			    sizeof(struct alx_rrd) * alx->rx_ringsz +
			    sizeof(struct alx_rfd) * alx->rx_ringsz;
	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
						alx->descmem.size,
						&alx->descmem.dma,
						GFP_KERNEL);
	if (!alx->descmem.virt)
		goto out_free;

	alx->txq.tpd = alx->descmem.virt;
	alx->txq.tpd_dma = alx->descmem.dma;

	/* alignment requirement for next block */
	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);

	alx->rxq.rrd =
		(void *)((u8 *)alx->descmem.virt +
			 sizeof(struct alx_txd) * alx->tx_ringsz);
	alx->rxq.rrd_dma = alx->descmem.dma +
			   sizeof(struct alx_txd) * alx->tx_ringsz;

	/* alignment requirement for next block */
	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);

	alx->rxq.rfd =
		(void *)((u8 *)alx->descmem.virt +
			 sizeof(struct alx_txd) * alx->tx_ringsz +
			 sizeof(struct alx_rrd) * alx->rx_ringsz);
	alx->rxq.rfd_dma = alx->descmem.dma +
			   sizeof(struct alx_txd) * alx->tx_ringsz +
			   sizeof(struct alx_rrd) * alx->rx_ringsz;

	return 0;
out_free:
	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);
	return -ENOMEM;
}

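/* Allocate the software buffer arrays and the shared descriptor memory,
 * set the TX/RX queue bits in the interrupt mask, register the NAPI
 * instance, and bring the rings into a clean initial state.
 */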
static int alx_alloc_rings(struct alx_priv *alx)
{
	int err;

	err = alx_alloc_descriptors(alx);
	if (err)
		return err;

	alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
	alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;

	netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);

	alx_reinit_rings(alx);
	return 0;
}

static void alx_free_rings(struct alx_priv *alx)
{
	netif_napi_del(&alx->napi);
	alx_free_buffers(alx);

	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);

	dma_free_coherent(&alx->hw.pdev->dev,
			  alx->descmem.size,
			  alx->descmem.virt,
			  alx->descmem.dma);
}

static void alx_config_vector_mapping(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}

static void alx_irq_enable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	/* level-1 interrupt switch */
	alx_write_mem32(hw, ALX_ISR, 0);
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	alx_post_write(hw);
}

static void alx_irq_disable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_post_write(hw);

	synchronize_irq(alx->hw.pdev->irq);
}

static int alx_request_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;
	u32 msi_ctrl;

	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;

	if (!pci_enable_msi(alx->hw.pdev)) {
		alx->msi = true;

		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
		err = request_irq(pdev->irq, alx_intr_msi, 0,
				  alx->dev->name, alx);
		if (!err)
			goto out;
		/* fall back to legacy interrupt */
		pci_disable_msi(alx->hw.pdev);
	}

	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
	err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
			  alx->dev->name, alx);
out:
	if (!err)
		alx_config_vector_mapping(alx);
	return err;
}

static void alx_free_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;

	free_irq(pdev->irq, alx);

	if (alx->msi) {
		pci_disable_msi(alx->hw.pdev);
		alx->msi = false;
	}
}

static int alx_identify_hw(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int rev = alx_hw_revision(hw);

	if (rev > ALX_REV_C0)
		return -EINVAL;

	hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;

	return 0;
}

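/* Fill in default software state: ring sizes, interrupt moderation timer,
 * advertised link modes, flow control settings and the MAC RX control word
 * later used by __alx_set_rx_mode().
 */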
static int alx_init_sw(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;

	err = alx_identify_hw(alx);
	if (err) {
		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
		return err;
	}

	alx->hw.lnk_patch =
		pdev->device == ALX_DEV_ID_AR8161 &&
		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
		pdev->subsystem_device == 0x0091 &&
		pdev->revision == 0;

	hw->smb_timer = 400;
	hw->mtu = alx->dev->mtu;
	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
	alx->tx_ringsz = 256;
	alx->rx_ringsz = 512;
	hw->imt = 200;
	alx->int_mask = ALX_ISR_MISC;
	hw->dma_chnl = hw->max_dma_chnl;
	hw->ith_tpd = alx->tx_ringsz / 3;
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;
	hw->adv_cfg = ADVERTISED_Autoneg |
		      ADVERTISED_10baseT_Half |
		      ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Full |
		      ADVERTISED_100baseT_Half |
		      ADVERTISED_1000baseT_Full;
	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;

	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
		      ALX_MAC_CTRL_BRD_EN |
		      ALX_MAC_CTRL_PCRCE |
		      ALX_MAC_CTRL_CRCE |
		      ALX_MAC_CTRL_RXFC_EN |
		      ALX_MAC_CTRL_TXFC_EN |
		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;

	return err;
}


static netdev_features_t alx_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	return features;
}

static void alx_netif_stop(struct alx_priv *alx)
{
	netif_trans_update(alx->dev);
	if (netif_carrier_ok(alx->dev)) {
		netif_carrier_off(alx->dev);
		netif_tx_disable(alx->dev);
		napi_disable(&alx->napi);
	}
}

static void alx_halt(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_netif_stop(alx);
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;

	alx_reset_mac(hw);

	/* disable l0s/l1 */
	alx_enable_aspm(hw, false, false);
	alx_irq_disable(alx);
	alx_free_buffers(alx);
}

static void alx_configure(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_configure_basic(hw);
	alx_disable_rss(hw);
	__alx_set_rx_mode(alx->dev);

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_activate(struct alx_priv *alx)
{
	/* hardware setting lost, restore it */
	alx_reinit_rings(alx);
	alx_configure(alx);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	alx_schedule_link_check(alx);
}

static void alx_reinit(struct alx_priv *alx)
{
	ASSERT_RTNL();

	alx_halt(alx);
	alx_activate(alx);
}

static int alx_change_mtu(struct net_device *netdev, int mtu)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int max_frame = ALX_MAX_FRAME_LEN(mtu);

	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
	    (max_frame > ALX_MAX_FRAME_SIZE))
		return -EINVAL;

	if (netdev->mtu == mtu)
		return 0;

	netdev->mtu = mtu;
	alx->hw.mtu = mtu;
	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
	netdev_update_features(netdev);
	if (netif_running(netdev))
		alx_reinit(alx);
	return 0;
}

static void alx_netif_start(struct alx_priv *alx)
{
	netif_tx_wake_all_queues(alx->dev);
	napi_enable(&alx->napi);
	netif_carrier_on(alx->dev);
}

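/* Common open path shared by ndo_open and resume: allocate the rings,
 * program the hardware, request and enable the interrupt, and schedule a
 * link check so the interface is started once the PHY reports a link.
 */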
static int __alx_open(struct alx_priv *alx, bool resume)
{
	int err;

	if (!resume)
		netif_carrier_off(alx->dev);

	err = alx_alloc_rings(alx);
	if (err)
		return err;

	alx_configure(alx);

	err = alx_request_irq(alx);
	if (err)
		goto out_free_rings;

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	if (!resume)
		netif_tx_start_all_queues(alx->dev);

	alx_schedule_link_check(alx);
	return 0;

out_free_rings:
	alx_free_rings(alx);
	return err;
}

static void __alx_stop(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_free_irq(alx);
	alx_free_rings(alx);
}

static const char *alx_speed_desc(struct alx_hw *hw)
{
	switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
	case ADVERTISED_1000baseT_Full:
		return "1 Gbps Full";
	case ADVERTISED_100baseT_Full:
		return "100 Mbps Full";
	case ADVERTISED_100baseT_Half:
		return "100 Mbps Half";
	case ADVERTISED_10baseT_Full:
		return "10 Mbps Full";
	case ADVERTISED_10baseT_Half:
		return "10 Mbps Half";
	default:
		return "Unknown speed";
	}
}

static void alx_check_link(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	int old_speed;
	u8 old_duplex;
	int err;

	/* clear PHY internal interrupt status, otherwise the main
	 * interrupt status will be asserted forever
	 */
	alx_clear_phy_intr(hw);

	old_speed = hw->link_speed;
	old_duplex = hw->duplex;
	err = alx_read_phy_link(hw);
	if (err < 0)
		goto reset;

	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_PHY;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	if (old_speed == hw->link_speed)
		return;

	if (hw->link_speed != SPEED_UNKNOWN) {
		netif_info(alx, link, alx->dev,
			   "NIC Up: %s\n", alx_speed_desc(hw));
		alx_post_phy_link(hw);
		alx_enable_aspm(hw, true, true);
		alx_start_mac(hw);

		if (old_speed == SPEED_UNKNOWN)
			alx_netif_start(alx);
	} else {
		/* link is now down */
		alx_netif_stop(alx);
		netif_info(alx, link, alx->dev, "Link Down\n");
		err = alx_reset_mac(hw);
		if (err)
			goto reset;
		alx_irq_disable(alx);

		/* MAC reset causes all HW settings to be lost, restore all */
		err = alx_reinit_rings(alx);
		if (err)
			goto reset;
		alx_configure(alx);
		alx_enable_aspm(hw, false, true);
		alx_post_phy_link(hw);
		alx_irq_enable(alx);
	}

	return;

reset:
	alx_schedule_reset(alx);
}

static int alx_open(struct net_device *netdev)
{
	return __alx_open(netdev_priv(netdev), false);
}

static int alx_stop(struct net_device *netdev)
{
	__alx_stop(netdev_priv(netdev));
	return 0;
}

static void alx_link_check(struct work_struct *work)
{
	struct alx_priv *alx;

	alx = container_of(work, struct alx_priv, link_check_wk);

	rtnl_lock();
	alx_check_link(alx);
	rtnl_unlock();
}

static void alx_reset(struct work_struct *work)
{
	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);

	rtnl_lock();
	alx_reinit(alx);
	rtnl_unlock();
}

static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
{
	u8 cso, css;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	cso = skb_checksum_start_offset(skb);
	if (cso & 1)
		return -EINVAL;

	css = cso + skb->csum_offset;
	first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
	first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
	first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);

	return 0;
}

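/* DMA-map the skb head and all fragments, filling one TPD per segment.
 * The EOP flag is set on the last descriptor and the skb is stored with it;
 * on a mapping error all descriptors written so far are unwound.
 */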
static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
{
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *tpd, *first_tpd;
	dma_addr_t dma;
	int maplen, f, first_idx = txq->write_idx;

	first_tpd = &txq->tpd[txq->write_idx];
	tpd = first_tpd;

	maplen = skb_headlen(skb);
	dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&alx->hw.pdev->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == alx->tx_ringsz)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		tpd->word1 = first_tpd->word1;

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma))
			goto err_dma;
		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* last TPD, set EOP flag and store skb */
	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == alx->tx_ringsz)
		txq->write_idx = 0;

	return 0;

err_dma:
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(alx, f);
		if (++f == alx->tx_ringsz)
			f = 0;
	}
	return -ENOMEM;
}

static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *first;
	int tpdreq = skb_shinfo(skb)->nr_frags + 1;

	if (alx_tpd_avail(alx) < tpdreq) {
		netif_stop_queue(alx->dev);
		goto drop;
	}

	first = &txq->tpd[txq->write_idx];
	memset(first, 0, sizeof(*first));

	if (alx_tx_csum(skb, first))
		goto drop;

	if (alx_map_tx_skb(alx, skb) < 0)
		goto drop;

	netdev_sent_queue(alx->dev, skb->len);

	/* flush updates before updating hardware */
	wmb();
	alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);

	if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
		netif_stop_queue(alx->dev);

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void alx_tx_timeout(struct net_device *dev)
{
	struct alx_priv *alx = netdev_priv(dev);

	alx_schedule_reset(alx);
}

static int alx_mdio_read(struct net_device *netdev,
			 int prtad, int devad, u16 addr)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	u16 val;
	int err;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		err = alx_read_phy_reg(hw, addr, &val);
	else
		err = alx_read_phy_ext(hw, devad, addr, &val);

	if (err)
		return err;
	return val;
}

static int alx_mdio_write(struct net_device *netdev,
			  int prtad, int devad, u16 addr, u16 val)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		return alx_write_phy_reg(hw, addr, val);

	return alx_write_phy_ext(hw, devad, addr, val);
}

static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EAGAIN;

	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void alx_poll_controller(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (alx->msi)
		alx_intr_msi(0, alx);
	else
		alx_intr_legacy(0, alx);
}
#endif

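/* Fold the MAC hardware counters into rtnl_link_stats64 under stats_lock;
 * the aggregate rx/tx error and packet counts are sums of the individual
 * hardware error counters.
 */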
static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *net_stats)
{
	struct alx_priv *alx = netdev_priv(dev);
	struct alx_hw_stats *hw_stats = &alx->hw.stats;

	spin_lock(&alx->stats_lock);

	alx_update_hw_stats(&alx->hw);

	net_stats->tx_bytes = hw_stats->tx_byte_cnt;
	net_stats->rx_bytes = hw_stats->rx_byte_cnt;
	net_stats->multicast = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_single_col +
				hw_stats->tx_multi_col +
				hw_stats->tx_late_col +
				hw_stats->tx_abort_col;

	net_stats->rx_errors = hw_stats->rx_frag +
			       hw_stats->rx_fcs_err +
			       hw_stats->rx_len_err +
			       hw_stats->rx_ov_sz +
			       hw_stats->rx_ov_rrd +
			       hw_stats->rx_align_err +
			       hw_stats->rx_ov_rxf;

	net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors = hw_stats->rx_align_err;
	net_stats->rx_dropped = hw_stats->rx_ov_rrd;

	net_stats->tx_errors = hw_stats->tx_late_col +
			       hw_stats->tx_abort_col +
			       hw_stats->tx_underrun +
			       hw_stats->tx_trunc;

	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_fifo_errors = hw_stats->tx_underrun;
	net_stats->tx_window_errors = hw_stats->tx_late_col;

	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;

	spin_unlock(&alx->stats_lock);

	return net_stats;
}

static const struct net_device_ops alx_netdev_ops = {
	.ndo_open = alx_open,
	.ndo_stop = alx_stop,
	.ndo_start_xmit = alx_start_xmit,
	.ndo_get_stats64 = alx_get_stats64,
	.ndo_set_rx_mode = alx_set_rx_mode,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = alx_set_mac_address,
	.ndo_change_mtu = alx_change_mtu,
	.ndo_do_ioctl = alx_ioctl,
	.ndo_tx_timeout = alx_tx_timeout,
	.ndo_fix_features = alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = alx_poll_controller,
#endif
};

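/* PCI probe: enable the device, configure the DMA mask (64-bit preferred,
 * 32-bit fallback), map BAR 0, initialize software defaults, reset the
 * PCIe link, PHY and MAC, read the permanent MAC address and register the
 * net_device.
 */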
static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct alx_priv *alx;
	struct alx_hw *hw;
	bool phy_configured;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* The alx chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used for descriptors.
	 */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA config, aborting\n");
			goto out_pci_disable;
		}
	}

	err = pci_request_mem_regions(pdev, alx_drv_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_mem_regions failed\n");
		goto out_pci_disable;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	if (!pdev->pm_cap) {
		dev_err(&pdev->dev,
			"Can't find power management capability, aborting\n");
		err = -EIO;
		goto out_pci_release;
	}

	netdev = alloc_etherdev(sizeof(*alx));
	if (!netdev) {
		err = -ENOMEM;
		goto out_pci_release;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	alx = netdev_priv(netdev);
	spin_lock_init(&alx->hw.mdio_lock);
	spin_lock_init(&alx->irq_lock);
	spin_lock_init(&alx->stats_lock);
	alx->dev = netdev;
	alx->hw.pdev = pdev;
	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
	hw = &alx->hw;
	pci_set_drvdata(pdev, alx);

	hw->hw_addr = pci_ioremap_bar(pdev, 0);
	if (!hw->hw_addr) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -EIO;
		goto out_free_netdev;
	}

	netdev->netdev_ops = &alx_netdev_ops;
	netdev->ethtool_ops = &alx_ethtool_ops;
	netdev->irq = pdev->irq;
	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;

	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;

	err = alx_init_sw(alx);
	if (err) {
		dev_err(&pdev->dev, "net device private data init failed\n");
		goto out_unmap;
	}

	alx_reset_pcie(hw);

	phy_configured = alx_phy_configured(hw);

	if (!phy_configured)
		alx_reset_phy(hw);

	err = alx_reset_mac(hw);
	if (err) {
		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
		goto out_unmap;
	}

	/* setup link to put it in a known good starting state */
	if (!phy_configured) {
		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
		if (err) {
			dev_err(&pdev->dev,
				"failed to configure PHY speed/duplex (err=%d)\n",
				err);
			goto out_unmap;
		}
	}

	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;

	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
		dev_warn(&pdev->dev,
			 "Invalid permanent address programmed, using random one\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
	}

	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
	memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);

	hw->mdio.prtad = 0;
	hw->mdio.mmds = 0;
	hw->mdio.dev = netdev;
	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
				MDIO_SUPPORTS_C22 |
				MDIO_EMULATE_C22;
	hw->mdio.mdio_read = alx_mdio_read;
	hw->mdio.mdio_write = alx_mdio_write;

	if (!alx_get_phy_info(hw)) {
		dev_err(&pdev->dev, "failed to identify PHY\n");
		err = -EIO;
		goto out_unmap;
	}

	INIT_WORK(&alx->link_check_wk, alx_link_check);
	INIT_WORK(&alx->reset_wk, alx_reset);
	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "register netdevice failed\n");
		goto out_unmap;
	}

	netdev_info(netdev,
		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
		    netdev->dev_addr);

	return 0;

out_unmap:
	iounmap(hw->hw_addr);
out_free_netdev:
	free_netdev(netdev);
out_pci_release:
	pci_release_mem_regions(pdev);
out_pci_disable:
	pci_disable_device(pdev);
	return err;
}

static void alx_remove(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	cancel_work_sync(&alx->link_check_wk);
	cancel_work_sync(&alx->reset_wk);

	/* restore permanent mac address */
	alx_set_macaddr(hw, hw->perm_addr);

	unregister_netdev(alx->dev);
	iounmap(hw->hw_addr);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	free_netdev(alx->dev);
}

#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_detach(alx->dev);
	__alx_stop(alx);
	return 0;
}

static int alx_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	alx_reset_phy(hw);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_attach(alx->dev);
	return __alx_open(alx, true);
}

static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
#define ALX_PM_OPS      (&alx_pm_ops)
#else
#define ALX_PM_OPS      NULL
#endif


static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "pci error detected\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		alx_halt(alx);
	}

	if (state == pci_channel_io_perm_failure)
		rc = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return rc;
}

static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	dev_info(&pdev->dev, "pci error slot reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
		goto out;
	}

	pci_set_master(pdev);

	alx_reset_pcie(hw);
	if (!alx_reset_mac(hw))
		rc = PCI_ERS_RESULT_RECOVERED;
out:
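	/* clear the AER uncorrectable error status whether or not the
	 * reset succeeded
	 */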
	pci_cleanup_aer_uncorrect_error_status(pdev);

	rtnl_unlock();

	return rc;
}

static void alx_pci_error_resume(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;

	dev_info(&pdev->dev, "pci error resume\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		alx_activate(alx);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers alx_err_handlers = {
	.error_detected = alx_pci_error_detected,
	.slot_reset = alx_pci_error_slot_reset,
	.resume = alx_pci_error_resume,
};

static const struct pci_device_id alx_pci_tbl[] = {
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
	{}
};

static struct pci_driver alx_driver = {
	.name        = alx_drv_name,
	.id_table    = alx_pci_tbl,
	.probe       = alx_probe,
	.remove      = alx_remove,
	.err_handler = &alx_err_handlers,
	.driver.pm   = ALX_PM_OPS,
};

module_pci_driver(alx_driver);
MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION(
	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
MODULE_LICENSE("GPL");