// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Gigabit Ethernet adapters based on the Session Layer
 * Interface (SLIC) technology by Alacritech. The driver does not
 * support the hardware acceleration features provided by these cards.
 *
 * Copyright (C) 2016 Lino Sanfilippo <LinoSanfilippo@gmx.de>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/list.h>
#include <linux/u64_stats_sync.h>

#include "slic.h"

#define DRV_NAME "slicoss"

static const struct pci_device_id slic_id_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
		     PCI_DEVICE_ID_ALACRITECH_MOJAVE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
		     PCI_DEVICE_ID_ALACRITECH_OASIS) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, slic_id_tbl);

static const char slic_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"rx_bytes",
	"rx_multicasts",
	"rx_errors",
	"rx_buff_miss",
	"rx_tp_csum",
	"rx_tp_oflow",
	"rx_tp_hlen",
	"rx_ip_csum",
	"rx_ip_len",
	"rx_ip_hdr_len",
	"rx_early",
	"rx_buff_oflow",
	"rx_lcode",
	"rx_drbl",
	"rx_crc",
	"rx_oflow_802",
	"rx_uflow_802",
	"tx_packets",
	"tx_bytes",
	"tx_carrier",
	"tx_dropped",
	"irq_errs",
};

static inline int slic_next_queue_idx(unsigned int idx, unsigned int qlen)
{
	return (idx + 1) & (qlen - 1);
}

static inline int slic_get_free_queue_descs(unsigned int put_idx,
					    unsigned int done_idx,
					    unsigned int qlen)
{
	if (put_idx >= done_idx)
		return (qlen - (put_idx - done_idx) - 1);
	return (done_idx - put_idx - 1);
}

static unsigned int slic_next_compl_idx(struct slic_device *sdev)
{
	struct slic_stat_queue *stq = &sdev->stq;
	unsigned int active = stq->active_array;
	struct slic_stat_desc *descs;
	struct slic_stat_desc *stat;
	unsigned int idx;

	descs = stq->descs[active];
	stat = &descs[stq->done_idx];

	if (!stat->status)
		return SLIC_INVALID_STAT_DESC_IDX;

	idx = (le32_to_cpu(stat->hnd) & 0xffff) - 1;
	/* reset desc */
	stat->hnd = 0;
	stat->status = 0;

	stq->done_idx = slic_next_queue_idx(stq->done_idx, stq->len);
	/* check for wraparound */
	if (!stq->done_idx) {
		dma_addr_t paddr = stq->paddr[active];

		slic_write(sdev, SLIC_REG_RBAR, lower_32_bits(paddr) |
						stq->len);
		/* make sure new status descriptors are immediately available */
		slic_flush_write(sdev);
		active++;
		active &= (SLIC_NUM_STAT_DESC_ARRAYS - 1);
		stq->active_array = active;
	}
	return idx;
}

static unsigned int slic_get_free_tx_descs(struct slic_tx_queue *txq)
{
	/* ensure tail idx is updated */
	smp_mb();
	return slic_get_free_queue_descs(txq->put_idx, txq->done_idx, txq->len);
}

static unsigned int slic_get_free_rx_descs(struct slic_rx_queue *rxq)
{
	return slic_get_free_queue_descs(rxq->put_idx, rxq->done_idx, rxq->len);
}

static void slic_clear_upr_list(struct slic_upr_list *upr_list)
{
	struct slic_upr *upr;
	struct slic_upr *tmp;

	spin_lock_bh(&upr_list->lock);
	list_for_each_entry_safe(upr, tmp, &upr_list->list, list) {
		list_del(&upr->list);
		kfree(upr);
	}
	upr_list->pending = false;
	spin_unlock_bh(&upr_list->lock);
}

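/*
 * UPRs are handed to the card strictly one at a time: starting a request
 * means writing its DMA address to the register that matches its type
 * (RCONFIG for config/eeprom reads, LSTAT for link status requests). The
 * next queued request is only started once the previous one has completed.
 */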
static void slic_start_upr(struct slic_device *sdev, struct slic_upr *upr)
{
	u32 reg;

	reg = (upr->type == SLIC_UPR_CONFIG) ? SLIC_REG_RCONFIG :
					       SLIC_REG_LSTAT;
	slic_write(sdev, reg, lower_32_bits(upr->paddr));
	slic_flush_write(sdev);
}

static void slic_queue_upr(struct slic_device *sdev, struct slic_upr *upr)
{
	struct slic_upr_list *upr_list = &sdev->upr_list;
	bool pending;

	spin_lock_bh(&upr_list->lock);
	pending = upr_list->pending;
	INIT_LIST_HEAD(&upr->list);
	list_add_tail(&upr->list, &upr_list->list);
	upr_list->pending = true;
	spin_unlock_bh(&upr_list->lock);

	if (!pending)
		slic_start_upr(sdev, upr);
}

static struct slic_upr *slic_dequeue_upr(struct slic_device *sdev)
{
	struct slic_upr_list *upr_list = &sdev->upr_list;
	struct slic_upr *next_upr = NULL;
	struct slic_upr *upr = NULL;

	spin_lock_bh(&upr_list->lock);
	if (!list_empty(&upr_list->list)) {
		upr = list_first_entry(&upr_list->list, struct slic_upr, list);
		list_del(&upr->list);

		if (list_empty(&upr_list->list))
			upr_list->pending = false;
		else
			next_upr = list_first_entry(&upr_list->list,
						    struct slic_upr, list);
	}
	spin_unlock_bh(&upr_list->lock);
	/* trigger processing of the next upr in list */
	if (next_upr)
		slic_start_upr(sdev, next_upr);

	return upr;
}

static int slic_new_upr(struct slic_device *sdev, unsigned int type,
			dma_addr_t paddr)
{
	struct slic_upr *upr;

	upr = kmalloc(sizeof(*upr), GFP_ATOMIC);
	if (!upr)
		return -ENOMEM;
	upr->type = type;
	upr->paddr = paddr;

	slic_queue_upr(sdev, upr);

	return 0;
}

static void slic_set_mcast_bit(u64 *mcmask, unsigned char const *addr)
{
	u64 mask = *mcmask;
	u8 crc;
	/* Get the CRC polynomial for the mac address: we use bits 1-8 (lsb),
	 * bitwise reversed, msb (= lsb bit 0 before bitrev) is automatically
	 * discarded.
	 */
	crc = ether_crc(ETH_ALEN, addr) >> 23;
	/* we only have space on the SLIC for 64 entries */
	crc &= 0x3F;
	mask |= (u64)1 << crc;
	*mcmask = mask;
}

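/*
 * The following helpers program the receiver (WRCFG), transmitter (WXCFG)
 * and MAC (WMCFG) configuration registers. They are rerun whenever link
 * speed, duplex mode or the promiscuous setting changes.
 */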
/* must be called with link_lock held */
static void slic_configure_rcv(struct slic_device *sdev)
{
	u32 val;

	val = SLIC_GRCR_RESET | SLIC_GRCR_ADDRAEN | SLIC_GRCR_RCVEN |
	      SLIC_GRCR_HASHSIZE << SLIC_GRCR_HASHSIZE_SHIFT | SLIC_GRCR_RCVBAD;

	if (sdev->duplex == DUPLEX_FULL)
		val |= SLIC_GRCR_CTLEN;

	if (sdev->promisc)
		val |= SLIC_GRCR_RCVALL;

	slic_write(sdev, SLIC_REG_WRCFG, val);
}

/* must be called with link_lock held */
static void slic_configure_xmt(struct slic_device *sdev)
{
	u32 val;

	val = SLIC_GXCR_RESET | SLIC_GXCR_XMTEN;

	if (sdev->duplex == DUPLEX_FULL)
		val |= SLIC_GXCR_PAUSEEN;

	slic_write(sdev, SLIC_REG_WXCFG, val);
}

/* must be called with link_lock held */
static void slic_configure_mac(struct slic_device *sdev)
{
	u32 val;

	if (sdev->speed == SPEED_1000) {
		val = SLIC_GMCR_GAPBB_1000 << SLIC_GMCR_GAPBB_SHIFT |
		      SLIC_GMCR_GAPR1_1000 << SLIC_GMCR_GAPR1_SHIFT |
		      SLIC_GMCR_GAPR2_1000 << SLIC_GMCR_GAPR2_SHIFT |
		      SLIC_GMCR_GBIT; /* enable GMII */
	} else {
		val = SLIC_GMCR_GAPBB_100 << SLIC_GMCR_GAPBB_SHIFT |
		      SLIC_GMCR_GAPR1_100 << SLIC_GMCR_GAPR1_SHIFT |
		      SLIC_GMCR_GAPR2_100 << SLIC_GMCR_GAPR2_SHIFT;
	}

	if (sdev->duplex == DUPLEX_FULL)
		val |= SLIC_GMCR_FULLD;

	slic_write(sdev, SLIC_REG_WMCFG, val);
}

static void slic_configure_link_locked(struct slic_device *sdev, int speed,
				       unsigned int duplex)
{
	struct net_device *dev = sdev->netdev;

	if (sdev->speed == speed && sdev->duplex == duplex)
		return;

	sdev->speed = speed;
	sdev->duplex = duplex;

	if (sdev->speed == SPEED_UNKNOWN) {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	} else {
		/* (re)configure link settings */
		slic_configure_mac(sdev);
		slic_configure_xmt(sdev);
		slic_configure_rcv(sdev);
		slic_flush_write(sdev);

		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	}
}

static void slic_configure_link(struct slic_device *sdev, int speed,
				unsigned int duplex)
{
	spin_lock_bh(&sdev->link_lock);
	slic_configure_link_locked(sdev, speed, duplex);
	spin_unlock_bh(&sdev->link_lock);
}

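/*
 * ndo_set_rx_mode callback: programs the 64-bit multicast hash registers
 * (bits 23-28 of the address CRC select one of the 64 mask bits, see
 * slic_set_mcast_bit()) and updates the promiscuous setting under
 * link_lock.
 */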
static void slic_set_rx_mode(struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct netdev_hw_addr *hwaddr;
	bool set_promisc;
	u64 mcmask;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Turn on all multicast addresses. We have to do this for
		 * promiscuous mode as well as ALLMCAST mode (it saves the
		 * microcode from having to keep state about the MAC
		 * configuration).
		 */
		mcmask = ~(u64)0;
	} else {
		mcmask = 0;

		netdev_for_each_mc_addr(hwaddr, dev) {
			slic_set_mcast_bit(&mcmask, hwaddr->addr);
		}
	}

	slic_write(sdev, SLIC_REG_MCASTLOW, lower_32_bits(mcmask));
	slic_write(sdev, SLIC_REG_MCASTHIGH, upper_32_bits(mcmask));

	set_promisc = !!(dev->flags & IFF_PROMISC);

	spin_lock_bh(&sdev->link_lock);
	if (sdev->promisc != set_promisc) {
		sdev->promisc = set_promisc;
		slic_configure_rcv(sdev);
	}
	spin_unlock_bh(&sdev->link_lock);
}

static void slic_xmit_complete(struct slic_device *sdev)
{
	struct slic_tx_queue *txq = &sdev->txq;
	struct net_device *dev = sdev->netdev;
	struct slic_tx_buffer *buff;
	unsigned int frames = 0;
	unsigned int bytes = 0;
	unsigned int idx;

	/* Limit processing to SLIC_MAX_TX_COMPLETIONS frames so that new
	 * completions arriving during processing cannot keep the loop
	 * running endlessly.
	 */
	do {
		idx = slic_next_compl_idx(sdev);
		if (idx == SLIC_INVALID_STAT_DESC_IDX)
			break;

		txq->done_idx = idx;
		buff = &txq->txbuffs[idx];

		if (unlikely(!buff->skb)) {
			netdev_warn(dev,
				    "no skb found for desc idx %i\n", idx);
			continue;
		}
		dma_unmap_single(&sdev->pdev->dev,
				 dma_unmap_addr(buff, map_addr),
				 dma_unmap_len(buff, map_len), DMA_TO_DEVICE);

		bytes += buff->skb->len;
		frames++;

		dev_kfree_skb_any(buff->skb);
		buff->skb = NULL;
	} while (frames < SLIC_MAX_TX_COMPLETIONS);
	/* make sure xmit sees the new value for done_idx */
	smp_wmb();

	u64_stats_update_begin(&sdev->stats.syncp);
	sdev->stats.tx_bytes += bytes;
	sdev->stats.tx_packets += frames;
	u64_stats_update_end(&sdev->stats.syncp);

	netif_tx_lock(dev);
	if (netif_queue_stopped(dev) &&
	    (slic_get_free_tx_descs(txq) >= SLIC_MIN_TX_WAKEUP_DESCS))
		netif_wake_queue(dev);
	netif_tx_unlock(dev);
}

static void slic_refill_rx_queue(struct slic_device *sdev, gfp_t gfp)
{
	const unsigned int ALIGN_MASK = SLIC_RX_BUFF_ALIGN - 1;
	unsigned int maplen = SLIC_RX_BUFF_SIZE;
	struct slic_rx_queue *rxq = &sdev->rxq;
	struct net_device *dev = sdev->netdev;
	struct slic_rx_buffer *buff;
	struct slic_rx_desc *desc;
	unsigned int misalign;
	unsigned int offset;
	struct sk_buff *skb;
	dma_addr_t paddr;

	while (slic_get_free_rx_descs(rxq) > SLIC_MAX_REQ_RX_DESCS) {
		skb = alloc_skb(maplen + ALIGN_MASK, gfp);
		if (!skb)
			break;

		paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&sdev->pdev->dev, paddr)) {
			netdev_err(dev, "mapping rx packet failed\n");
			/* drop skb */
			dev_kfree_skb_any(skb);
			break;
		}
		/* ensure head buffer descriptors are 256 byte aligned */
		offset = 0;
		misalign = paddr & ALIGN_MASK;
		if (misalign) {
			offset = SLIC_RX_BUFF_ALIGN - misalign;
			skb_reserve(skb, offset);
		}
		/* the HW expects dma chunks for descriptor + frame data */
		desc = (struct slic_rx_desc *)skb->data;
		/* temporarily sync descriptor for CPU to clear status */
		dma_sync_single_for_cpu(&sdev->pdev->dev, paddr,
					offset + sizeof(*desc),
					DMA_FROM_DEVICE);
		desc->status = 0;
		/* return it to HW again */
		dma_sync_single_for_device(&sdev->pdev->dev, paddr,
					   offset + sizeof(*desc),
					   DMA_FROM_DEVICE);

		buff = &rxq->rxbuffs[rxq->put_idx];
		buff->skb = skb;
		dma_unmap_addr_set(buff, map_addr, paddr);
		dma_unmap_len_set(buff, map_len, maplen);
		buff->addr_offset = offset;
		/* complete write to descriptor before it is handed to HW */
		wmb();
		/* head buffer descriptors are placed immediately before skb */
		slic_write(sdev, SLIC_REG_HBAR, lower_32_bits(paddr) + offset);
		rxq->put_idx = slic_next_queue_idx(rxq->put_idx, rxq->len);
	}
}

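/*
 * Decode the error bits that the hardware wrote in front of a faulty frame
 * into the corresponding stats counters. Oasis and Mojave report frame
 * status in different formats, hence the two decoding paths.
 */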
static void slic_handle_frame_error(struct slic_device *sdev,
				    struct sk_buff *skb)
{
	struct slic_stats *stats = &sdev->stats;

	if (sdev->model == SLIC_MODEL_OASIS) {
		struct slic_rx_info_oasis *info;
		u32 status_b;
		u32 status;

		info = (struct slic_rx_info_oasis *)skb->data;
		status = le32_to_cpu(info->frame_status);
		status_b = le32_to_cpu(info->frame_status_b);
		/* transport layer */
		if (status_b & SLIC_VRHSTATB_TPCSUM)
			SLIC_INC_STATS_COUNTER(stats, rx_tpcsum);
		if (status & SLIC_VRHSTAT_TPOFLO)
			SLIC_INC_STATS_COUNTER(stats, rx_tpoflow);
		if (status_b & SLIC_VRHSTATB_TPHLEN)
			SLIC_INC_STATS_COUNTER(stats, rx_tphlen);
		/* ip layer */
		if (status_b & SLIC_VRHSTATB_IPCSUM)
			SLIC_INC_STATS_COUNTER(stats, rx_ipcsum);
		if (status_b & SLIC_VRHSTATB_IPLERR)
			SLIC_INC_STATS_COUNTER(stats, rx_iplen);
		if (status_b & SLIC_VRHSTATB_IPHERR)
			SLIC_INC_STATS_COUNTER(stats, rx_iphlen);
		/* link layer */
		if (status_b & SLIC_VRHSTATB_RCVE)
			SLIC_INC_STATS_COUNTER(stats, rx_early);
		if (status_b & SLIC_VRHSTATB_BUFF)
			SLIC_INC_STATS_COUNTER(stats, rx_buffoflow);
		if (status_b & SLIC_VRHSTATB_CODE)
			SLIC_INC_STATS_COUNTER(stats, rx_lcode);
		if (status_b & SLIC_VRHSTATB_DRBL)
			SLIC_INC_STATS_COUNTER(stats, rx_drbl);
		if (status_b & SLIC_VRHSTATB_CRC)
			SLIC_INC_STATS_COUNTER(stats, rx_crc);
		if (status & SLIC_VRHSTAT_802OE)
			SLIC_INC_STATS_COUNTER(stats, rx_oflow802);
		if (status_b & SLIC_VRHSTATB_802UE)
			SLIC_INC_STATS_COUNTER(stats, rx_uflow802);
		if (status_b & SLIC_VRHSTATB_CARRE)
			SLIC_INC_STATS_COUNTER(stats, tx_carrier);
	} else { /* mojave */
		struct slic_rx_info_mojave *info;
		u32 status;

		info = (struct slic_rx_info_mojave *)skb->data;
		status = le32_to_cpu(info->frame_status);
		/* transport layer */
		if (status & SLIC_VGBSTAT_XPERR) {
			u32 xerr = status >> SLIC_VGBSTAT_XERRSHFT;

			if (xerr == SLIC_VGBSTAT_XCSERR)
				SLIC_INC_STATS_COUNTER(stats, rx_tpcsum);
			if (xerr == SLIC_VGBSTAT_XUFLOW)
				SLIC_INC_STATS_COUNTER(stats, rx_tpoflow);
			if (xerr == SLIC_VGBSTAT_XHLEN)
				SLIC_INC_STATS_COUNTER(stats, rx_tphlen);
		}
		/* ip layer */
		if (status & SLIC_VGBSTAT_NETERR) {
			u32 nerr = status >> SLIC_VGBSTAT_NERRSHFT &
				   SLIC_VGBSTAT_NERRMSK;

			if (nerr == SLIC_VGBSTAT_NCSERR)
				SLIC_INC_STATS_COUNTER(stats, rx_ipcsum);
			if (nerr == SLIC_VGBSTAT_NUFLOW)
				SLIC_INC_STATS_COUNTER(stats, rx_iplen);
			if (nerr == SLIC_VGBSTAT_NHLEN)
				SLIC_INC_STATS_COUNTER(stats, rx_iphlen);
		}
		/* link layer */
		if (status & SLIC_VGBSTAT_LNKERR) {
			u32 lerr = status & SLIC_VGBSTAT_LERRMSK;

			if (lerr == SLIC_VGBSTAT_LDEARLY)
				SLIC_INC_STATS_COUNTER(stats, rx_early);
			if (lerr == SLIC_VGBSTAT_LBOFLO)
				SLIC_INC_STATS_COUNTER(stats, rx_buffoflow);
			if (lerr == SLIC_VGBSTAT_LCODERR)
				SLIC_INC_STATS_COUNTER(stats, rx_lcode);
			if (lerr == SLIC_VGBSTAT_LDBLNBL)
				SLIC_INC_STATS_COUNTER(stats, rx_drbl);
			if (lerr == SLIC_VGBSTAT_LCRCERR)
				SLIC_INC_STATS_COUNTER(stats, rx_crc);
			if (lerr == SLIC_VGBSTAT_LOFLO)
				SLIC_INC_STATS_COUNTER(stats, rx_oflow802);
			if (lerr == SLIC_VGBSTAT_LUFLO)
				SLIC_INC_STATS_COUNTER(stats, rx_uflow802);
		}
	}
	SLIC_INC_STATS_COUNTER(stats, rx_errors);
}

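/*
 * Process up to 'todo' received frames: each rx buffer starts with a
 * hardware-written slic_rx_desc, followed by the frame data. Valid frames
 * are passed up via napi_gro_receive(), faulty ones are only counted.
 */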
static void slic_handle_receive(struct slic_device *sdev, unsigned int todo,
				unsigned int *done)
{
	struct slic_rx_queue *rxq = &sdev->rxq;
	struct net_device *dev = sdev->netdev;
	struct slic_rx_buffer *buff;
	struct slic_rx_desc *desc;
	unsigned int frames = 0;
	unsigned int bytes = 0;
	struct sk_buff *skb;
	u32 status;
	u32 len;

	while (todo && (rxq->done_idx != rxq->put_idx)) {
		buff = &rxq->rxbuffs[rxq->done_idx];

		skb = buff->skb;
		if (!skb)
			break;

		desc = (struct slic_rx_desc *)skb->data;

		dma_sync_single_for_cpu(&sdev->pdev->dev,
					dma_unmap_addr(buff, map_addr),
					buff->addr_offset + sizeof(*desc),
					DMA_FROM_DEVICE);

		status = le32_to_cpu(desc->status);
		if (!(status & SLIC_IRHDDR_SVALID)) {
			dma_sync_single_for_device(&sdev->pdev->dev,
						   dma_unmap_addr(buff,
								  map_addr),
						   buff->addr_offset +
						   sizeof(*desc),
						   DMA_FROM_DEVICE);
			break;
		}

		buff->skb = NULL;

		dma_unmap_single(&sdev->pdev->dev,
				 dma_unmap_addr(buff, map_addr),
				 dma_unmap_len(buff, map_len),
				 DMA_FROM_DEVICE);

		/* skip rx descriptor that is placed before the frame data */
		skb_reserve(skb, SLIC_RX_BUFF_HDR_SIZE);

		if (unlikely(status & SLIC_IRHDDR_ERR)) {
			slic_handle_frame_error(sdev, skb);
			dev_kfree_skb_any(skb);
		} else {
			struct ethhdr *eh = (struct ethhdr *)skb->data;

			if (is_multicast_ether_addr(eh->h_dest))
				SLIC_INC_STATS_COUNTER(&sdev->stats, rx_mcasts);

			len = le32_to_cpu(desc->length) & SLIC_IRHDDR_FLEN_MSK;
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&sdev->napi, skb);

			bytes += len;
			frames++;
		}
		rxq->done_idx = slic_next_queue_idx(rxq->done_idx, rxq->len);
		todo--;
		(*done)++;
	}

	u64_stats_update_begin(&sdev->stats.syncp);
	sdev->stats.rx_bytes += bytes;
	sdev->stats.rx_packets += frames;
	u64_stats_update_end(&sdev->stats.syncp);

	slic_refill_rx_queue(sdev, GFP_ATOMIC);
}

static void slic_handle_link_irq(struct slic_device *sdev)
{
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;
	unsigned int duplex;
	int speed;
	u32 link;

	link = le32_to_cpu(sm_data->link);

	if (link & SLIC_GIG_LINKUP) {
		if (link & SLIC_GIG_SPEED_1000)
			speed = SPEED_1000;
		else if (link & SLIC_GIG_SPEED_100)
			speed = SPEED_100;
		else
			speed = SPEED_10;

		duplex = (link & SLIC_GIG_FULLDUPLEX) ? DUPLEX_FULL :
							DUPLEX_HALF;
	} else {
		duplex = DUPLEX_UNKNOWN;
		speed = SPEED_UNKNOWN;
	}
	slic_configure_link(sdev, speed, duplex);
}

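/*
 * A UPC irq signals completion of the oldest queued UPR. Failed link
 * status requests are requeued and retried, successful ones update the
 * link state from the shared-memory link word.
 */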
static void slic_handle_upr_irq(struct slic_device *sdev, u32 irqs)
{
	struct slic_upr *upr;

	/* remove upr that caused this irq (always the first entry in list) */
	upr = slic_dequeue_upr(sdev);
	if (!upr) {
		netdev_warn(sdev->netdev, "no upr found on list\n");
		return;
	}

	if (upr->type == SLIC_UPR_LSTAT) {
		if (unlikely(irqs & SLIC_ISR_UPCERR_MASK)) {
			/* try again */
			slic_queue_upr(sdev, upr);
			return;
		}
		slic_handle_link_irq(sdev);
	}
	kfree(upr);
}

static int slic_handle_link_change(struct slic_device *sdev)
{
	return slic_new_upr(sdev, SLIC_UPR_LSTAT, sdev->shmem.link_paddr);
}

static void slic_handle_err_irq(struct slic_device *sdev, u32 isr)
{
	struct slic_stats *stats = &sdev->stats;

	if (isr & SLIC_ISR_RMISS)
		SLIC_INC_STATS_COUNTER(stats, rx_buff_miss);
	if (isr & SLIC_ISR_XDROP)
		SLIC_INC_STATS_COUNTER(stats, tx_dropped);
	if (!(isr & (SLIC_ISR_RMISS | SLIC_ISR_XDROP)))
		SLIC_INC_STATS_COUNTER(stats, irq_errs);
}

static void slic_handle_irq(struct slic_device *sdev, u32 isr,
			    unsigned int todo, unsigned int *done)
{
	if (isr & SLIC_ISR_ERR)
		slic_handle_err_irq(sdev, isr);

	if (isr & SLIC_ISR_LEVENT)
		slic_handle_link_change(sdev);

	if (isr & SLIC_ISR_UPC_MASK)
		slic_handle_upr_irq(sdev, isr);

	if (isr & SLIC_ISR_RCV)
		slic_handle_receive(sdev, todo, done);

	if (isr & SLIC_ISR_CMD)
		slic_xmit_complete(sdev);
}

static int slic_poll(struct napi_struct *napi, int todo)
{
	struct slic_device *sdev = container_of(napi, struct slic_device, napi);
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;
	u32 isr = le32_to_cpu(sm_data->isr);
	int done = 0;

	slic_handle_irq(sdev, isr, todo, &done);

	if (done < todo) {
		napi_complete_done(napi, done);
		/* reenable irqs */
		sm_data->isr = 0;
		/* make sure sm_data->isr is cleared before irqs are reenabled */
		wmb();
		slic_write(sdev, SLIC_REG_ISR, 0);
		slic_flush_write(sdev);
	}

	return done;
}

static irqreturn_t slic_irq(int irq, void *dev_id)
{
	struct slic_device *sdev = dev_id;
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;

	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_MASK);
	slic_flush_write(sdev);
	/* make sure sm_data->isr is read after ICR_INT_MASK is set */
	wmb();

	if (!sm_data->isr) {
		dma_rmb();
		/* spurious interrupt */
		slic_write(sdev, SLIC_REG_ISR, 0);
		slic_flush_write(sdev);
		return IRQ_NONE;
	}

	napi_schedule_irqoff(&sdev->napi);

	return IRQ_HANDLED;
}

static void slic_card_reset(struct slic_device *sdev)
{
	u16 cmd;

	slic_write(sdev, SLIC_REG_RESET, SLIC_RESET_MAGIC);
	/* flush write by means of config space */
	pci_read_config_word(sdev->pdev, PCI_COMMAND, &cmd);
	mdelay(1);
}

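/*
 * The status queue consists of SLIC_NUM_STAT_DESC_ARRAYS descriptor arrays
 * that the card fills in round-robin fashion. Each array base is announced
 * to the hardware via the RBAR register and must be SLIC_STATS_DESC_ALIGN
 * aligned.
 */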
static int slic_init_stat_queue(struct slic_device *sdev)
{
	const unsigned int DESC_ALIGN_MASK = SLIC_STATS_DESC_ALIGN - 1;
	struct slic_stat_queue *stq = &sdev->stq;
	struct slic_stat_desc *descs;
	unsigned int misalign;
	unsigned int offset;
	dma_addr_t paddr;
	size_t size;
	int err;
	int i;

	stq->len = SLIC_NUM_STAT_DESCS;
	stq->active_array = 0;
	stq->done_idx = 0;

	size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;

	for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
		descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr,
					   GFP_KERNEL);
		if (!descs) {
			netdev_err(sdev->netdev,
				   "failed to allocate status descriptors\n");
			err = -ENOMEM;
			goto free_descs;
		}
		/* ensure correct alignment */
		offset = 0;
		misalign = paddr & DESC_ALIGN_MASK;
		if (misalign) {
			offset = SLIC_STATS_DESC_ALIGN - misalign;
			descs += offset;
			paddr += offset;
		}

		slic_write(sdev, SLIC_REG_RBAR, lower_32_bits(paddr) |
						stq->len);
		stq->descs[i] = descs;
		stq->paddr[i] = paddr;
		stq->addr_offset[i] = offset;
	}

	stq->mem_size = size;

	return 0;

free_descs:
	while (i--) {
		dma_free_coherent(&sdev->pdev->dev, stq->mem_size,
				  stq->descs[i] - stq->addr_offset[i],
				  stq->paddr[i] - stq->addr_offset[i]);
	}

	return err;
}

static void slic_free_stat_queue(struct slic_device *sdev)
{
	struct slic_stat_queue *stq = &sdev->stq;
	int i;

	for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
		dma_free_coherent(&sdev->pdev->dev, stq->mem_size,
				  stq->descs[i] - stq->addr_offset[i],
				  stq->paddr[i] - stq->addr_offset[i]);
	}
}

static int slic_init_tx_queue(struct slic_device *sdev)
{
	struct slic_tx_queue *txq = &sdev->txq;
	struct slic_tx_buffer *buff;
	struct slic_tx_desc *desc;
	unsigned int i;
	int err;

	txq->len = SLIC_NUM_TX_DESCS;
	txq->put_idx = 0;
	txq->done_idx = 0;

	txq->txbuffs = kcalloc(txq->len, sizeof(*buff), GFP_KERNEL);
	if (!txq->txbuffs)
		return -ENOMEM;

	txq->dma_pool = dma_pool_create("slic_pool", &sdev->pdev->dev,
					sizeof(*desc), SLIC_TX_DESC_ALIGN,
					4096);
	if (!txq->dma_pool) {
		err = -ENOMEM;
		netdev_err(sdev->netdev, "failed to create dma pool\n");
		goto free_buffs;
	}

	for (i = 0; i < txq->len; i++) {
		buff = &txq->txbuffs[i];
		desc = dma_pool_zalloc(txq->dma_pool, GFP_KERNEL,
				       &buff->desc_paddr);
		if (!desc) {
			netdev_err(sdev->netdev,
				   "failed to alloc pool chunk (%i)\n", i);
			err = -ENOMEM;
			goto free_descs;
		}

		desc->hnd = cpu_to_le32((u32)(i + 1));
		desc->cmd = SLIC_CMD_XMT_REQ;
		desc->flags = 0;
		desc->type = cpu_to_le32(SLIC_CMD_TYPE_DUMB);
		buff->desc = desc;
	}

	return 0;

free_descs:
	while (i--) {
		buff = &txq->txbuffs[i];
		dma_pool_free(txq->dma_pool, buff->desc, buff->desc_paddr);
	}
	dma_pool_destroy(txq->dma_pool);

free_buffs:
	kfree(txq->txbuffs);

	return err;
}

static void slic_free_tx_queue(struct slic_device *sdev)
{
	struct slic_tx_queue *txq = &sdev->txq;
	struct slic_tx_buffer *buff;
	unsigned int i;

	for (i = 0; i < txq->len; i++) {
		buff = &txq->txbuffs[i];
		dma_pool_free(txq->dma_pool, buff->desc, buff->desc_paddr);
		if (!buff->skb)
			continue;

		dma_unmap_single(&sdev->pdev->dev,
				 dma_unmap_addr(buff, map_addr),
				 dma_unmap_len(buff, map_len), DMA_TO_DEVICE);
		consume_skb(buff->skb);
	}
	dma_pool_destroy(txq->dma_pool);

	kfree(txq->txbuffs);
}

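/*
 * Each rx entry is a plain skb whose head holds the hardware-written
 * slic_rx_desc followed by the frame data. The buffers themselves are
 * allocated and mapped on demand by slic_refill_rx_queue().
 */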
static int slic_init_rx_queue(struct slic_device *sdev)
{
	struct slic_rx_queue *rxq = &sdev->rxq;
	struct slic_rx_buffer *buff;

	rxq->len = SLIC_NUM_RX_LES;
	rxq->done_idx = 0;
	rxq->put_idx = 0;

	buff = kcalloc(rxq->len, sizeof(*buff), GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	rxq->rxbuffs = buff;
	slic_refill_rx_queue(sdev, GFP_KERNEL);

	return 0;
}

static void slic_free_rx_queue(struct slic_device *sdev)
{
	struct slic_rx_queue *rxq = &sdev->rxq;
	struct slic_rx_buffer *buff;
	unsigned int i;

	/* free rx buffers */
	for (i = 0; i < rxq->len; i++) {
		buff = &rxq->rxbuffs[i];

		if (!buff->skb)
			continue;

		dma_unmap_single(&sdev->pdev->dev,
				 dma_unmap_addr(buff, map_addr),
				 dma_unmap_len(buff, map_len),
				 DMA_FROM_DEVICE);
		consume_skb(buff->skb);
	}
	kfree(rxq->rxbuffs);
}

static void slic_set_link_autoneg(struct slic_device *sdev)
{
	unsigned int subid = sdev->pdev->subsystem_device;
	u32 val;

	if (sdev->is_fiber) {
		/* We've got a fiber gigabit interface, and register 4 is
		 * different in fiber mode than in copper mode.
		 */
		/* advertise FD only @1000 Mb */
		val = MII_ADVERTISE << 16 | ADVERTISE_1000XFULL |
		      ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		/* enable PAUSE frames */
		slic_write(sdev, SLIC_REG_WPHY, val);
		/* reset phy, enable auto-neg */
		val = MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
		      BMCR_ANRESTART;
		slic_write(sdev, SLIC_REG_WPHY, val);
	} else { /* copper gigabit */
		/* We've got a copper gigabit interface, and register 4 is
		 * different in copper mode than in fiber mode.
		 */
		/* advertise 10/100 Mb modes */
		val = MII_ADVERTISE << 16 | ADVERTISE_100FULL |
		      ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF;
		/* enable PAUSE frames */
		val |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		/* required by the Cicada PHY */
		val |= ADVERTISE_CSMA;
		slic_write(sdev, SLIC_REG_WPHY, val);

		/* advertise FD only @1000 Mb */
		val = MII_CTRL1000 << 16 | ADVERTISE_1000FULL;
		slic_write(sdev, SLIC_REG_WPHY, val);

		if (subid != PCI_SUBDEVICE_ID_ALACRITECH_CICADA) {
			/* if a Marvell PHY enable auto crossover */
			val = SLIC_MIICR_REG_16 | SLIC_MRV_REG16_XOVERON;
			slic_write(sdev, SLIC_REG_WPHY, val);

			/* reset phy, enable auto-neg */
			val = MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
			      BMCR_ANRESTART;
			slic_write(sdev, SLIC_REG_WPHY, val);
		} else {
			/* enable and restart auto-neg (don't reset) */
			val = MII_BMCR << 16 | BMCR_ANENABLE | BMCR_ANRESTART;
			slic_write(sdev, SLIC_REG_WPHY, val);
		}
	}
}

static void slic_set_mac_address(struct slic_device *sdev)
{
	const u8 *addr = sdev->netdev->dev_addr;
	u32 val;

	val = addr[5] | addr[4] << 8 | addr[3] << 16 | addr[2] << 24;

	slic_write(sdev, SLIC_REG_WRADDRAL, val);
	slic_write(sdev, SLIC_REG_WRADDRBL, val);

	val = addr[0] << 8 | addr[1];

	slic_write(sdev, SLIC_REG_WRADDRAH, val);
	slic_write(sdev, SLIC_REG_WRADDRBH, val);
	slic_flush_write(sdev);
}

static u32 slic_read_dword_from_firmware(const struct firmware *fw, int *offset)
{
	int idx = *offset;
	__le32 val;

	memcpy(&val, fw->data + *offset, sizeof(val));
	idx += 4;
	*offset = idx;

	return le32_to_cpu(val);
}

MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_MOJAVE);
MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_OASIS);

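/*
 * Receive sequencer firmware layout: the first dword gives the sequencer
 * code length (the number of instructions written below); each instruction
 * is then stored as one dword (low part) followed by a single byte (high
 * part).
 */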
static int slic_load_rcvseq_firmware(struct slic_device *sdev)
{
	const struct firmware *fw;
	const char *file;
	u32 codelen;
	int idx = 0;
	u32 instr;
	u32 addr;
	int err;

	file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_RCV_FIRMWARE_OASIS :
						   SLIC_RCV_FIRMWARE_MOJAVE;
	err = request_firmware(&fw, file, &sdev->pdev->dev);
	if (err) {
		dev_err(&sdev->pdev->dev,
			"failed to load receive sequencer firmware %s\n", file);
		return err;
	}
	/* Do an initial sanity check concerning firmware size now. A further
	 * check follows below.
	 */
	if (fw->size < SLIC_FIRMWARE_MIN_SIZE) {
		dev_err(&sdev->pdev->dev,
			"invalid firmware size %zu (min %u expected)\n",
			fw->size, SLIC_FIRMWARE_MIN_SIZE);
		err = -EINVAL;
		goto release;
	}

	codelen = slic_read_dword_from_firmware(fw, &idx);

	/* do another sanity check against firmware size */
	if ((codelen + 4) > fw->size) {
		dev_err(&sdev->pdev->dev,
			"invalid rcv-sequencer firmware size %zu\n", fw->size);
		err = -EINVAL;
		goto release;
	}

	/* download sequencer code to card */
	slic_write(sdev, SLIC_REG_RCV_WCS, SLIC_RCVWCS_BEGIN);
	for (addr = 0; addr < codelen; addr++) {
		__le32 val;
		/* write out instruction address */
		slic_write(sdev, SLIC_REG_RCV_WCS, addr);

		instr = slic_read_dword_from_firmware(fw, &idx);
		/* write out the instruction data low addr */
		slic_write(sdev, SLIC_REG_RCV_WCS, instr);

		val = (__le32)fw->data[idx];
		instr = le32_to_cpu(val);
		idx++;
		/* write out the instruction data high addr */
		slic_write(sdev, SLIC_REG_RCV_WCS, instr);
	}
	/* finish download */
	slic_write(sdev, SLIC_REG_RCV_WCS, SLIC_RCVWCS_FINISH);
	slic_flush_write(sdev);
release:
	release_firmware(fw);

	return err;
}

MODULE_FIRMWARE(SLIC_FIRMWARE_MOJAVE);
MODULE_FIRMWARE(SLIC_FIRMWARE_OASIS);

static int slic_load_firmware(struct slic_device *sdev)
{
	u32 sectstart[SLIC_FIRMWARE_MAX_SECTIONS];
	u32 sectsize[SLIC_FIRMWARE_MAX_SECTIONS];
	const struct firmware *fw;
	unsigned int datalen;
	const char *file;
	int code_start;
	unsigned int i;
	u32 numsects;
	int idx = 0;
	u32 sect;
	u32 instr;
	u32 addr;
	u32 base;
	int err;

	file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_FIRMWARE_OASIS :
						   SLIC_FIRMWARE_MOJAVE;
	err = request_firmware(&fw, file, &sdev->pdev->dev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to load firmware %s\n", file);
		return err;
	}
	/* Do an initial sanity check concerning firmware size now. A further
	 * check follows below.
	 */
	if (fw->size < SLIC_FIRMWARE_MIN_SIZE) {
		dev_err(&sdev->pdev->dev,
			"invalid firmware size %zu (min is %u)\n", fw->size,
			SLIC_FIRMWARE_MIN_SIZE);
		err = -EINVAL;
		goto release;
	}

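	/* Firmware image layout: one dword holding the number of sections,
	 * then one dword per section with its size, one dword per section
	 * with its start address, and finally the instruction stream with
	 * two dwords per instruction.
	 */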
	numsects = slic_read_dword_from_firmware(fw, &idx);
	if (numsects == 0 || numsects > SLIC_FIRMWARE_MAX_SECTIONS) {
		dev_err(&sdev->pdev->dev,
			"invalid number of sections in firmware: %u", numsects);
		err = -EINVAL;
		goto release;
	}

	datalen = numsects * 8 + 4;
	for (i = 0; i < numsects; i++) {
		sectsize[i] = slic_read_dword_from_firmware(fw, &idx);
		datalen += sectsize[i];
	}

	/* do another sanity check against firmware size */
	if (datalen > fw->size) {
		dev_err(&sdev->pdev->dev,
			"invalid firmware size %zu (expected >= %u)\n",
			fw->size, datalen);
		err = -EINVAL;
		goto release;
	}
	/* get sections */
	for (i = 0; i < numsects; i++)
		sectstart[i] = slic_read_dword_from_firmware(fw, &idx);

	code_start = idx;
	instr = slic_read_dword_from_firmware(fw, &idx);

	for (sect = 0; sect < numsects; sect++) {
		unsigned int ssize = sectsize[sect] >> 3;

		base = sectstart[sect];

		for (addr = 0; addr < ssize; addr++) {
			/* write out instruction address */
			slic_write(sdev, SLIC_REG_WCS, base + addr);
			/* write out instruction to low addr */
			slic_write(sdev, SLIC_REG_WCS, instr);
			instr = slic_read_dword_from_firmware(fw, &idx);
			/* write out instruction to high addr */
			slic_write(sdev, SLIC_REG_WCS, instr);
			instr = slic_read_dword_from_firmware(fw, &idx);
		}
	}

	idx = code_start;

	for (sect = 0; sect < numsects; sect++) {
		unsigned int ssize = sectsize[sect] >> 3;

		instr = slic_read_dword_from_firmware(fw, &idx);
		base = sectstart[sect];
		if (base < 0x8000)
			continue;

		for (addr = 0; addr < ssize; addr++) {
			/* write out instruction address */
			slic_write(sdev, SLIC_REG_WCS,
				   SLIC_WCS_COMPARE | (base + addr));
			/* write out instruction to low addr */
			slic_write(sdev, SLIC_REG_WCS, instr);
			instr = slic_read_dword_from_firmware(fw, &idx);
			/* write out instruction to high addr */
			slic_write(sdev, SLIC_REG_WCS, instr);
			instr = slic_read_dword_from_firmware(fw, &idx);
		}
	}
	slic_flush_write(sdev);
	mdelay(10);
	/* everything OK, kick off the card */
	slic_write(sdev, SLIC_REG_WCS, SLIC_WCS_START);
	slic_flush_write(sdev);
	/* wait long enough for ucode to init card and reach the mainloop */
	mdelay(20);
release:
	release_firmware(fw);

	return err;
}

static int slic_init_shmem(struct slic_device *sdev)
{
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data;
	dma_addr_t paddr;

	sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
				     &paddr, GFP_KERNEL);
	if (!sm_data) {
		dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
		return -ENOMEM;
	}

	sm->shmem_data = sm_data;
	sm->isr_paddr = paddr;
	sm->link_paddr = paddr + offsetof(struct slic_shmem_data, link);

	return 0;
}

static void slic_free_shmem(struct slic_device *sdev)
{
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;

	dma_free_coherent(&sdev->pdev->dev, sizeof(*sm_data), sm_data,
			  sm->isr_paddr);
}

static int slic_init_iface(struct slic_device *sdev)
{
	struct slic_shmem *sm = &sdev->shmem;
	int err;

	sdev->upr_list.pending = false;

	err = slic_init_shmem(sdev);
	if (err) {
		netdev_err(sdev->netdev, "failed to init shared memory\n");
		return err;
	}

	err = slic_load_firmware(sdev);
	if (err) {
		netdev_err(sdev->netdev, "failed to load firmware\n");
		goto free_sm;
	}

	err = slic_load_rcvseq_firmware(sdev);
	if (err) {
		netdev_err(sdev->netdev,
			   "failed to load firmware for receive sequencer\n");
		goto free_sm;
	}

	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
	slic_flush_write(sdev);
	mdelay(1);

	err = slic_init_rx_queue(sdev);
	if (err) {
		netdev_err(sdev->netdev, "failed to init rx queue: %u\n", err);
		goto free_sm;
	}

	err = slic_init_tx_queue(sdev);
	if (err) {
		netdev_err(sdev->netdev, "failed to init tx queue: %u\n", err);
		goto free_rxq;
	}

	err = slic_init_stat_queue(sdev);
	if (err) {
		netdev_err(sdev->netdev, "failed to init status queue: %u\n",
			   err);
		goto free_txq;
	}

	slic_write(sdev, SLIC_REG_ISP, lower_32_bits(sm->isr_paddr));
	napi_enable(&sdev->napi);
	/* disable irq mitigation */
	slic_write(sdev, SLIC_REG_INTAGG, 0);
	slic_write(sdev, SLIC_REG_ISR, 0);
	slic_flush_write(sdev);

	slic_set_mac_address(sdev);

	spin_lock_bh(&sdev->link_lock);
	sdev->duplex = DUPLEX_UNKNOWN;
	sdev->speed = SPEED_UNKNOWN;
	spin_unlock_bh(&sdev->link_lock);

	slic_set_link_autoneg(sdev);

	err = request_irq(sdev->pdev->irq, slic_irq, IRQF_SHARED, DRV_NAME,
			  sdev);
	if (err) {
		netdev_err(sdev->netdev, "failed to request irq: %u\n", err);
		goto disable_napi;
	}

	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_ON);
	slic_flush_write(sdev);
	/* request initial link status */
	err = slic_handle_link_change(sdev);
	if (err)
		netdev_warn(sdev->netdev,
			    "failed to set initial link state: %u\n", err);
	return 0;

disable_napi:
	napi_disable(&sdev->napi);
	slic_free_stat_queue(sdev);
free_txq:
	slic_free_tx_queue(sdev);
free_rxq:
	slic_free_rx_queue(sdev);
free_sm:
	slic_free_shmem(sdev);
	slic_card_reset(sdev);

	return err;
}

static int slic_open(struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = slic_init_iface(sdev);
	if (err) {
		netdev_err(dev, "failed to initialize interface: %i\n", err);
		return err;
	}

	netif_start_queue(dev);

	return 0;
}

static int slic_close(struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	u32 val;

	netif_stop_queue(dev);

	/* stop irq handling */
	napi_disable(&sdev->napi);
	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
	slic_write(sdev, SLIC_REG_ISR, 0);
	slic_flush_write(sdev);

	free_irq(sdev->pdev->irq, sdev);
	/* turn off RCV and XMT and power down PHY */
	val = SLIC_GXCR_RESET | SLIC_GXCR_PAUSEEN;
	slic_write(sdev, SLIC_REG_WXCFG, val);

	val = SLIC_GRCR_RESET | SLIC_GRCR_CTLEN | SLIC_GRCR_ADDRAEN |
	      SLIC_GRCR_HASHSIZE << SLIC_GRCR_HASHSIZE_SHIFT;
	slic_write(sdev, SLIC_REG_WRCFG, val);

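	/* power down the PHY */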
	val = MII_BMCR << 16 | BMCR_PDOWN;
	slic_write(sdev, SLIC_REG_WPHY, val);
	slic_flush_write(sdev);

	slic_clear_upr_list(&sdev->upr_list);
	slic_write(sdev, SLIC_REG_QUIESCE, 0);

	slic_free_stat_queue(sdev);
	slic_free_tx_queue(sdev);
	slic_free_rx_queue(sdev);
	slic_free_shmem(sdev);

	slic_card_reset(sdev);
	netif_carrier_off(dev);

	return 0;
}

static netdev_tx_t slic_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct slic_tx_queue *txq = &sdev->txq;
	struct slic_tx_buffer *buff;
	struct slic_tx_desc *desc;
	dma_addr_t paddr;
	u32 cbar_val;
	u32 maplen;

	if (unlikely(slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)) {
		netdev_err(dev, "BUG! not enough tx LEs left: %u\n",
			   slic_get_free_tx_descs(txq));
		return NETDEV_TX_BUSY;
	}

	maplen = skb_headlen(skb);
	paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
			       DMA_TO_DEVICE);
	if (dma_mapping_error(&sdev->pdev->dev, paddr)) {
		netdev_err(dev, "failed to map tx buffer\n");
		goto drop_skb;
	}

	buff = &txq->txbuffs[txq->put_idx];
	buff->skb = skb;
	dma_unmap_addr_set(buff, map_addr, paddr);
	dma_unmap_len_set(buff, map_len, maplen);

	desc = buff->desc;
	desc->totlen = cpu_to_le32(maplen);
	desc->paddrl = cpu_to_le32(lower_32_bits(paddr));
	desc->paddrh = cpu_to_le32(upper_32_bits(paddr));
	desc->len = cpu_to_le32(maplen);

	txq->put_idx = slic_next_queue_idx(txq->put_idx, txq->len);

	cbar_val = lower_32_bits(buff->desc_paddr) | 1;
	/* complete writes to RAM and DMA before hardware is informed */
	wmb();

	slic_write(sdev, SLIC_REG_CBAR, cbar_val);

	if (slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
drop_skb:
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

static void slic_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *lst)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct slic_stats *stats = &sdev->stats;

	SLIC_GET_STATS_COUNTER(lst->rx_packets, stats, rx_packets);
	SLIC_GET_STATS_COUNTER(lst->tx_packets, stats, tx_packets);
	SLIC_GET_STATS_COUNTER(lst->rx_bytes, stats, rx_bytes);
	SLIC_GET_STATS_COUNTER(lst->tx_bytes, stats, tx_bytes);
	SLIC_GET_STATS_COUNTER(lst->rx_errors, stats, rx_errors);
	SLIC_GET_STATS_COUNTER(lst->rx_dropped, stats, rx_buff_miss);
	SLIC_GET_STATS_COUNTER(lst->tx_dropped, stats, tx_dropped);
	SLIC_GET_STATS_COUNTER(lst->multicast, stats, rx_mcasts);
	SLIC_GET_STATS_COUNTER(lst->rx_over_errors, stats, rx_buffoflow);
	SLIC_GET_STATS_COUNTER(lst->rx_crc_errors, stats, rx_crc);
	SLIC_GET_STATS_COUNTER(lst->rx_fifo_errors, stats, rx_oflow802);
	SLIC_GET_STATS_COUNTER(lst->tx_carrier_errors, stats, tx_carrier);
}

static int slic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(slic_stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

static void slic_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *eth_stats, u64 *data)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct slic_stats *stats = &sdev->stats;

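	/* the order here must match slic_stats_strings[] */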
	SLIC_GET_STATS_COUNTER(data[0], stats, rx_packets);
	SLIC_GET_STATS_COUNTER(data[1], stats, rx_bytes);
	SLIC_GET_STATS_COUNTER(data[2], stats, rx_mcasts);
	SLIC_GET_STATS_COUNTER(data[3], stats, rx_errors);
	SLIC_GET_STATS_COUNTER(data[4], stats, rx_buff_miss);
	SLIC_GET_STATS_COUNTER(data[5], stats, rx_tpcsum);
	SLIC_GET_STATS_COUNTER(data[6], stats, rx_tpoflow);
	SLIC_GET_STATS_COUNTER(data[7], stats, rx_tphlen);
	SLIC_GET_STATS_COUNTER(data[8], stats, rx_ipcsum);
	SLIC_GET_STATS_COUNTER(data[9], stats, rx_iplen);
	SLIC_GET_STATS_COUNTER(data[10], stats, rx_iphlen);
	SLIC_GET_STATS_COUNTER(data[11], stats, rx_early);
	SLIC_GET_STATS_COUNTER(data[12], stats, rx_buffoflow);
	SLIC_GET_STATS_COUNTER(data[13], stats, rx_lcode);
	SLIC_GET_STATS_COUNTER(data[14], stats, rx_drbl);
	SLIC_GET_STATS_COUNTER(data[15], stats, rx_crc);
	SLIC_GET_STATS_COUNTER(data[16], stats, rx_oflow802);
	SLIC_GET_STATS_COUNTER(data[17], stats, rx_uflow802);
	SLIC_GET_STATS_COUNTER(data[18], stats, tx_packets);
	SLIC_GET_STATS_COUNTER(data[19], stats, tx_bytes);
	SLIC_GET_STATS_COUNTER(data[20], stats, tx_carrier);
	SLIC_GET_STATS_COUNTER(data[21], stats, tx_dropped);
	SLIC_GET_STATS_COUNTER(data[22], stats, irq_errs);
}

static void slic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS) {
		memcpy(data, slic_stats_strings, sizeof(slic_stats_strings));
		data += sizeof(slic_stats_strings);
	}
}

static void slic_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct slic_device *sdev = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
}

static const struct ethtool_ops slic_ethtool_ops = {
	.get_drvinfo		= slic_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= slic_get_strings,
	.get_ethtool_stats	= slic_get_ethtool_stats,
	.get_sset_count		= slic_get_sset_count,
};

static const struct net_device_ops slic_netdev_ops = {
	.ndo_open		= slic_open,
	.ndo_stop		= slic_close,
	.ndo_start_xmit		= slic_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= slic_get_stats,
	.ndo_set_rx_mode	= slic_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
};

static u16 slic_eeprom_csum(unsigned char *eeprom, unsigned int len)
{
	unsigned char *ptr = eeprom;
	u32 csum = 0;
	__le16 data;

	while (len > 1) {
		memcpy(&data, ptr, sizeof(data));
		csum += le16_to_cpu(data);
		ptr += 2;
		len -= 2;
	}
	if (len > 0)
		csum += *(u8 *)ptr;
	while (csum >> 16)
		csum = (csum & 0xFFFF) + ((csum >> 16) & 0xFFFF);
	return ~csum;
}

/* check eeprom size, magic and checksum */
static bool slic_eeprom_valid(unsigned char *eeprom, unsigned int size)
{
	const unsigned int MAX_SIZE = 128;
	const unsigned int MIN_SIZE = 98;
	__le16 magic;
	__le16 csum;

	if (size < MIN_SIZE || size > MAX_SIZE)
		return false;
	memcpy(&magic, eeprom, sizeof(magic));
	if (le16_to_cpu(magic) != SLIC_EEPROM_MAGIC)
		return false;
	/* cut checksum bytes */
	size -= 2;
	memcpy(&csum, eeprom + size, sizeof(csum));

	return (le16_to_cpu(csum) == slic_eeprom_csum(eeprom, size));
}

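/*
 * The EEPROM contents are obtained via a CONFIG UPR: the card DMAs its
 * config image into the supplied buffer and signals completion through the
 * shared-memory ISR, which is polled here since irqs are not set up yet.
 * The MAC address is then picked per PCI function.
 */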
static int slic_read_eeprom(struct slic_device *sdev)
{
	unsigned int devfn = PCI_FUNC(sdev->pdev->devfn);
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;
	const unsigned int MAX_LOOPS = 5000;
	unsigned int codesize;
	unsigned char *eeprom;
	struct slic_upr *upr;
	unsigned int i = 0;
	dma_addr_t paddr;
	int err = 0;
	u8 *mac[2];

	eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
				    &paddr, GFP_KERNEL);
	if (!eeprom)
		return -ENOMEM;

	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
	/* setup ISP temporarily */
	slic_write(sdev, SLIC_REG_ISP, lower_32_bits(sm->isr_paddr));

	err = slic_new_upr(sdev, SLIC_UPR_CONFIG, paddr);
	if (!err) {
		for (i = 0; i < MAX_LOOPS; i++) {
			if (le32_to_cpu(sm_data->isr) & SLIC_ISR_UPC)
				break;
			mdelay(1);
		}
		if (i == MAX_LOOPS) {
			dev_err(&sdev->pdev->dev,
				"timed out while waiting for eeprom data\n");
			err = -ETIMEDOUT;
		}
		upr = slic_dequeue_upr(sdev);
		kfree(upr);
	}

	slic_write(sdev, SLIC_REG_ISP, 0);
	slic_write(sdev, SLIC_REG_ISR, 0);
	slic_flush_write(sdev);

	if (err)
		goto free_eeprom;

	if (sdev->model == SLIC_MODEL_OASIS) {
		struct slic_oasis_eeprom *oee;

		oee = (struct slic_oasis_eeprom *)eeprom;
		mac[0] = oee->mac;
		mac[1] = oee->mac2;
		codesize = le16_to_cpu(oee->eeprom_code_size);
	} else {
		struct slic_mojave_eeprom *mee;

		mee = (struct slic_mojave_eeprom *)eeprom;
		mac[0] = mee->mac;
		mac[1] = mee->mac2;
		codesize = le16_to_cpu(mee->eeprom_code_size);
	}

	if (!slic_eeprom_valid(eeprom, codesize)) {
		dev_err(&sdev->pdev->dev, "invalid checksum in eeprom\n");
		err = -EINVAL;
		goto free_eeprom;
	}
	/* set mac address */
	eth_hw_addr_set(sdev->netdev, mac[devfn]);
free_eeprom:
	dma_free_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, eeprom, paddr);

	return err;
}

static int slic_init(struct slic_device *sdev)
{
	int err;

	spin_lock_init(&sdev->upper_lock);
	spin_lock_init(&sdev->link_lock);
	INIT_LIST_HEAD(&sdev->upr_list.list);
	spin_lock_init(&sdev->upr_list.lock);
	u64_stats_init(&sdev->stats.syncp);

	slic_card_reset(sdev);

	err = slic_load_firmware(sdev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to load firmware\n");
		return err;
	}

	/* we need the shared memory to read EEPROM so set it up temporarily */
	err = slic_init_shmem(sdev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to init shared memory\n");
		return err;
	}

	err = slic_read_eeprom(sdev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to read eeprom\n");
		goto free_sm;
	}

	slic_card_reset(sdev);
	slic_free_shmem(sdev);

	return 0;
free_sm:
	slic_free_shmem(sdev);

	return err;
}

static bool slic_is_fiber(unsigned short subdev)
{
	switch (subdev) {
	/* Mojave */
	case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F:
	case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F: fallthrough;
	/* Oasis */
	case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF:
	case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF:
	case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF:
	case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF:
		return true;
	}
	return false;
}

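/* enable PCI parity (PERR#) and system error (SERR#) reporting if needed */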
static void slic_configure_pci(struct pci_dev *pdev)
{
	u16 old;
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &old);

	cmd = old | PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
	if (old != cmd)
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
}

static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct slic_device *sdev;
	struct net_device *dev;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return err;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	slic_configure_pci(pdev);

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "failed to setup DMA\n");
		goto disable;
	}

	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "failed to obtain PCI regions\n");
		goto disable;
	}

	dev = alloc_etherdev(sizeof(*sdev));
	if (!dev) {
		dev_err(&pdev->dev, "failed to alloc ethernet device\n");
		err = -ENOMEM;
		goto free_regions;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);
	dev->irq = pdev->irq;
	dev->netdev_ops = &slic_netdev_ops;
	dev->hw_features = NETIF_F_RXCSUM;
	dev->features |= dev->hw_features;

	dev->ethtool_ops = &slic_ethtool_ops;

	sdev = netdev_priv(dev);
	sdev->model = (pdev->device == PCI_DEVICE_ID_ALACRITECH_OASIS) ?
		      SLIC_MODEL_OASIS : SLIC_MODEL_MOJAVE;
	sdev->is_fiber = slic_is_fiber(pdev->subsystem_device);
	sdev->pdev = pdev;
	sdev->netdev = dev;
	sdev->regs = ioremap(pci_resource_start(pdev, 0),
			     pci_resource_len(pdev, 0));
	if (!sdev->regs) {
		dev_err(&pdev->dev, "failed to map registers\n");
		err = -ENOMEM;
		goto free_netdev;
	}

	err = slic_init(sdev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize driver\n");
		goto unmap;
	}

	netif_napi_add(dev, &sdev->napi, slic_poll, SLIC_NAPI_WEIGHT);
	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to register net device: %i\n", err);
		goto unmap;
	}

	return 0;

unmap:
	iounmap(sdev->regs);
free_netdev:
	free_netdev(dev);
free_regions:
	pci_release_regions(pdev);
disable:
	pci_disable_device(pdev);

	return err;
}

static void slic_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct slic_device *sdev = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(sdev->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver slic_driver = {
	.name = DRV_NAME,
	.id_table = slic_id_tbl,
	.probe = slic_probe,
	.remove = slic_remove,
};

module_pci_driver(slic_driver);

MODULE_DESCRIPTION("Alacritech non-accelerated SLIC driver");
MODULE_AUTHOR("Lino Sanfilippo <LinoSanfilippo@gmx.de>");
MODULE_LICENSE("GPL");