/*
 * Driver for Gigabit Ethernet adapters based on the Session Layer
 * Interface (SLIC) technology by Alacritech. The driver does not
 * support the hardware acceleration features provided by these cards.
 *
 * Copyright (C) 2016 Lino Sanfilippo <LinoSanfilippo@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/list.h>
#include <linux/u64_stats_sync.h>

#include "slic.h"

#define DRV_NAME	"slicoss"
#define DRV_VERSION	"1.0"

static const struct pci_device_id slic_id_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
		     PCI_DEVICE_ID_ALACRITECH_MOJAVE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
		     PCI_DEVICE_ID_ALACRITECH_OASIS) },
	{ 0 }
};

static const char slic_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"rx_bytes",
	"rx_multicasts",
	"rx_errors",
	"rx_buff_miss",
	"rx_tp_csum",
	"rx_tp_oflow",
	"rx_tp_hlen",
	"rx_ip_csum",
	"rx_ip_len",
	"rx_ip_hdr_len",
	"rx_early",
	"rx_buff_oflow",
	"rx_lcode",
	"rx_drbl",
	"rx_crc",
	"rx_oflow_802",
	"rx_uflow_802",
	"tx_packets",
	"tx_bytes",
	"tx_carrier",
	"tx_dropped",
	"irq_errs",
};

static inline int slic_next_queue_idx(unsigned int idx, unsigned int qlen)
{
	return (idx + 1) & (qlen - 1);
}

static inline int slic_get_free_queue_descs(unsigned int put_idx,
					    unsigned int done_idx,
					    unsigned int qlen)
{
	if (put_idx >= done_idx)
		return (qlen - (put_idx - done_idx) - 1);
	return (done_idx - put_idx - 1);
}

static unsigned int slic_next_compl_idx(struct slic_device *sdev)
{
	struct slic_stat_queue *stq = &sdev->stq;
	unsigned int active = stq->active_array;
	struct slic_stat_desc *descs;
	struct slic_stat_desc *stat;
	unsigned int idx;

	descs = stq->descs[active];
	stat = &descs[stq->done_idx];

	if (!stat->status)
		return SLIC_INVALID_STAT_DESC_IDX;

	idx = (le32_to_cpu(stat->hnd) & 0xffff) - 1;
	/* reset desc */
	stat->hnd = 0;
	stat->status = 0;

	stq->done_idx = slic_next_queue_idx(stq->done_idx, stq->len);
	/* check for wraparound */
	if (!stq->done_idx) {
		dma_addr_t paddr = stq->paddr[active];

		slic_write(sdev, SLIC_REG_RBAR, lower_32_bits(paddr) |
			   stq->len);
		/* make sure new status descriptors are immediately available */
		slic_flush_write(sdev);
		active++;
		active &= (SLIC_NUM_STAT_DESC_ARRAYS - 1);
		stq->active_array = active;
	}
	return idx;
}

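/* The queue index helpers above rely on the queue lengths being powers of
 * two: slic_next_queue_idx() wraps by masking with (qlen - 1), so e.g. with
 * qlen = 512 an index of 511 wraps to (511 + 1) & 511 = 0. Likewise
 * slic_get_free_queue_descs() always keeps one slot unused, so that a full
 * ring (put_idx one behind done_idx) can be told apart from an empty one
 * (put_idx == done_idx). The smp_mb() below pairs with the smp_wmb() in
 * slic_xmit_complete() so that the xmit path sees the updated done_idx.
 */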
static unsigned int slic_get_free_tx_descs(struct slic_tx_queue *txq)
{
	/* ensure tail idx is updated */
	smp_mb();
	return slic_get_free_queue_descs(txq->put_idx, txq->done_idx, txq->len);
}

static unsigned int slic_get_free_rx_descs(struct slic_rx_queue *rxq)
{
	return slic_get_free_queue_descs(rxq->put_idx, rxq->done_idx, rxq->len);
}

static void slic_clear_upr_list(struct slic_upr_list *upr_list)
{
	struct slic_upr *upr;
	struct slic_upr *tmp;

	spin_lock_bh(&upr_list->lock);
	list_for_each_entry_safe(upr, tmp, &upr_list->list, list) {
		list_del(&upr->list);
		kfree(upr);
	}
	upr_list->pending = false;
	spin_unlock_bh(&upr_list->lock);
}

static void slic_start_upr(struct slic_device *sdev, struct slic_upr *upr)
{
	u32 reg;

	reg = (upr->type == SLIC_UPR_CONFIG) ? SLIC_REG_RCONFIG :
					       SLIC_REG_LSTAT;
	slic_write(sdev, reg, lower_32_bits(upr->paddr));
	slic_flush_write(sdev);
}

static void slic_queue_upr(struct slic_device *sdev, struct slic_upr *upr)
{
	struct slic_upr_list *upr_list = &sdev->upr_list;
	bool pending;

	spin_lock_bh(&upr_list->lock);
	pending = upr_list->pending;
	INIT_LIST_HEAD(&upr->list);
	list_add_tail(&upr->list, &upr_list->list);
	upr_list->pending = true;
	spin_unlock_bh(&upr_list->lock);

	if (!pending)
		slic_start_upr(sdev, upr);
}

static struct slic_upr *slic_dequeue_upr(struct slic_device *sdev)
{
	struct slic_upr_list *upr_list = &sdev->upr_list;
	struct slic_upr *next_upr = NULL;
	struct slic_upr *upr = NULL;

	spin_lock_bh(&upr_list->lock);
	if (!list_empty(&upr_list->list)) {
		upr = list_first_entry(&upr_list->list, struct slic_upr, list);
		list_del(&upr->list);

		if (list_empty(&upr_list->list))
			upr_list->pending = false;
		else
			next_upr = list_first_entry(&upr_list->list,
						    struct slic_upr, list);
	}
	spin_unlock_bh(&upr_list->lock);
	/* trigger processing of the next upr in list */
	if (next_upr)
		slic_start_upr(sdev, next_upr);

	return upr;
}

static int slic_new_upr(struct slic_device *sdev, unsigned int type,
			dma_addr_t paddr)
{
	struct slic_upr *upr;

	upr = kmalloc(sizeof(*upr), GFP_ATOMIC);
	if (!upr)
		return -ENOMEM;
	upr->type = type;
	upr->paddr = paddr;

	slic_queue_upr(sdev, upr);

	return 0;
}

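/* Multicast filtering uses a 64 bit hash mask in hardware: shifting the
 * ethernet CRC right by 23 and truncating to eight bits keeps CRC bits
 * 30:23 (the MSB is discarded, as noted below), and the & 0x3F keeps the
 * lower six of those, which select one of the 64 mask bits. Addresses that
 * hash to the same bit share a filter entry, so the filter may pass more
 * multicast frames than were actually requested.
 */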
static void slic_set_mcast_bit(u64 *mcmask, unsigned char const *addr)
{
	u64 mask = *mcmask;
	u8 crc;
	/* Get the CRC polynomial for the mac address: we use bits 1-8 (lsb),
	 * bitwise reversed, msb (= lsb bit 0 before bitrev) is automatically
	 * discarded.
	 */
	crc = ether_crc(ETH_ALEN, addr) >> 23;
	/* we only have space on the SLIC for 64 entries */
	crc &= 0x3F;
	mask |= (u64)1 << crc;
	*mcmask = mask;
}

/* must be called with link_lock held */
static void slic_configure_rcv(struct slic_device *sdev)
{
	u32 val;

	val = SLIC_GRCR_RESET | SLIC_GRCR_ADDRAEN | SLIC_GRCR_RCVEN |
	      SLIC_GRCR_HASHSIZE << SLIC_GRCR_HASHSIZE_SHIFT | SLIC_GRCR_RCVBAD;

	if (sdev->duplex == DUPLEX_FULL)
		val |= SLIC_GRCR_CTLEN;

	if (sdev->promisc)
		val |= SLIC_GRCR_RCVALL;

	slic_write(sdev, SLIC_REG_WRCFG, val);
}

/* must be called with link_lock held */
static void slic_configure_xmt(struct slic_device *sdev)
{
	u32 val;

	val = SLIC_GXCR_RESET | SLIC_GXCR_XMTEN;

	if (sdev->duplex == DUPLEX_FULL)
		val |= SLIC_GXCR_PAUSEEN;

	slic_write(sdev, SLIC_REG_WXCFG, val);
}

/* must be called with link_lock held */
static void slic_configure_mac(struct slic_device *sdev)
{
	u32 val;

	if (sdev->speed == SPEED_1000) {
		val = SLIC_GMCR_GAPBB_1000 << SLIC_GMCR_GAPBB_SHIFT |
		      SLIC_GMCR_GAPR1_1000 << SLIC_GMCR_GAPR1_SHIFT |
		      SLIC_GMCR_GAPR2_1000 << SLIC_GMCR_GAPR2_SHIFT |
		      SLIC_GMCR_GBIT; /* enable GMII */
	} else {
		val = SLIC_GMCR_GAPBB_100 << SLIC_GMCR_GAPBB_SHIFT |
		      SLIC_GMCR_GAPR1_100 << SLIC_GMCR_GAPR1_SHIFT |
		      SLIC_GMCR_GAPR2_100 << SLIC_GMCR_GAPR2_SHIFT;
	}

	if (sdev->duplex == DUPLEX_FULL)
		val |= SLIC_GMCR_FULLD;

	slic_write(sdev, SLIC_REG_WMCFG, val);
}

static void slic_configure_link_locked(struct slic_device *sdev, int speed,
				       unsigned int duplex)
{
	struct net_device *dev = sdev->netdev;

	if (sdev->speed == speed && sdev->duplex == duplex)
		return;

	sdev->speed = speed;
	sdev->duplex = duplex;

	if (sdev->speed == SPEED_UNKNOWN) {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	} else {
		/* (re)configure link settings */
		slic_configure_mac(sdev);
		slic_configure_xmt(sdev);
		slic_configure_rcv(sdev);
		slic_flush_write(sdev);

		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	}
}

static void slic_configure_link(struct slic_device *sdev, int speed,
				unsigned int duplex)
{
	spin_lock_bh(&sdev->link_lock);
	slic_configure_link_locked(sdev, speed, duplex);
	spin_unlock_bh(&sdev->link_lock);
}

static void slic_set_rx_mode(struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct netdev_hw_addr *hwaddr;
	bool set_promisc;
	u64 mcmask;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Turn on all multicast addresses. We have to do this for
		 * promiscuous mode as well as ALLMCAST mode (it saves the
		 * microcode from having to keep state about the MAC
		 * configuration).
		 */
		mcmask = ~(u64)0;
	} else {
		mcmask = 0;

		netdev_for_each_mc_addr(hwaddr, dev) {
			slic_set_mcast_bit(&mcmask, hwaddr->addr);
		}
	}

	slic_write(sdev, SLIC_REG_MCASTLOW, lower_32_bits(mcmask));
	slic_write(sdev, SLIC_REG_MCASTHIGH, upper_32_bits(mcmask));

	set_promisc = !!(dev->flags & IFF_PROMISC);

	spin_lock_bh(&sdev->link_lock);
	if (sdev->promisc != set_promisc) {
		sdev->promisc = set_promisc;
		slic_configure_rcv(sdev);
	}
	spin_unlock_bh(&sdev->link_lock);
}

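/* TX completion: the card reports finished transmits through the status
 * queue. Each status descriptor carries the handle that was assigned to the
 * TX descriptor in slic_init_tx_queue() (ring index + 1), so
 * slic_next_compl_idx() maps it back to the ring slot whose skb can then be
 * unmapped and freed.
 */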
static void slic_xmit_complete(struct slic_device *sdev)
{
	struct slic_tx_queue *txq = &sdev->txq;
	struct net_device *dev = sdev->netdev;
	struct slic_tx_buffer *buff;
	unsigned int frames = 0;
	unsigned int bytes = 0;
	unsigned int idx;

	/* Limit processing to SLIC_MAX_TX_COMPLETIONS frames so that new
	 * completions arriving during processing cannot keep the loop
	 * running endlessly.
	 */
	do {
		idx = slic_next_compl_idx(sdev);
		if (idx == SLIC_INVALID_STAT_DESC_IDX)
			break;

		txq->done_idx = idx;
		buff = &txq->txbuffs[idx];

		if (unlikely(!buff->skb)) {
			netdev_warn(dev,
				    "no skb found for desc idx %i\n", idx);
			continue;
		}
		dma_unmap_single(&sdev->pdev->dev,
				 dma_unmap_addr(buff, map_addr),
				 dma_unmap_len(buff, map_len), DMA_TO_DEVICE);

		bytes += buff->skb->len;
		frames++;

		dev_kfree_skb_any(buff->skb);
		buff->skb = NULL;
	} while (frames < SLIC_MAX_TX_COMPLETIONS);
	/* make sure xmit sees the new value for done_idx */
	smp_wmb();

	u64_stats_update_begin(&sdev->stats.syncp);
	sdev->stats.tx_bytes += bytes;
	sdev->stats.tx_packets += frames;
	u64_stats_update_end(&sdev->stats.syncp);

	netif_tx_lock(dev);
	if (netif_queue_stopped(dev) &&
	    (slic_get_free_tx_descs(txq) >= SLIC_MIN_TX_WAKEUP_DESCS))
		netif_wake_queue(dev);
	netif_tx_unlock(dev);
}

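/* RX buffers are handed to the card with a small header in front of the
 * frame data: the first bytes of each skb hold the receive descriptor that
 * the hardware fills in. The descriptor address written to SLIC_REG_HBAR
 * must be 256 byte aligned, which is why the skb data pointer is padded up
 * to the next SLIC_RX_BUFF_ALIGN boundary below.
 */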
static void slic_refill_rx_queue(struct slic_device *sdev, gfp_t gfp)
{
	const unsigned int ALIGN_MASK = SLIC_RX_BUFF_ALIGN - 1;
	unsigned int maplen = SLIC_RX_BUFF_SIZE;
	struct slic_rx_queue *rxq = &sdev->rxq;
	struct net_device *dev = sdev->netdev;
	struct slic_rx_buffer *buff;
	struct slic_rx_desc *desc;
	unsigned int misalign;
	unsigned int offset;
	struct sk_buff *skb;
	dma_addr_t paddr;

	while (slic_get_free_rx_descs(rxq) > SLIC_MAX_REQ_RX_DESCS) {
		skb = alloc_skb(maplen + ALIGN_MASK, gfp);
		if (!skb)
			break;

		paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&sdev->pdev->dev, paddr)) {
			netdev_err(dev, "mapping rx packet failed\n");
			/* drop skb */
			dev_kfree_skb_any(skb);
			break;
		}
		/* ensure head buffer descriptors are 256 byte aligned */
		offset = 0;
		misalign = paddr & ALIGN_MASK;
		if (misalign) {
			offset = SLIC_RX_BUFF_ALIGN - misalign;
			skb_reserve(skb, offset);
		}
		/* the HW expects dma chunks for descriptor + frame data */
		desc = (struct slic_rx_desc *)skb->data;
		/* temporarily sync descriptor for CPU to clear status */
		dma_sync_single_for_cpu(&sdev->pdev->dev, paddr,
					offset + sizeof(*desc),
					DMA_FROM_DEVICE);
		desc->status = 0;
		/* return it to HW again */
		dma_sync_single_for_device(&sdev->pdev->dev, paddr,
					   offset + sizeof(*desc),
					   DMA_FROM_DEVICE);

		buff = &rxq->rxbuffs[rxq->put_idx];
		buff->skb = skb;
		dma_unmap_addr_set(buff, map_addr, paddr);
		dma_unmap_len_set(buff, map_len, maplen);
		buff->addr_offset = offset;
		/* complete write to descriptor before it is handed to HW */
		wmb();
		/* head buffer descriptors are placed immediately before skb */
		slic_write(sdev, SLIC_REG_HBAR, lower_32_bits(paddr) + offset);
		rxq->put_idx = slic_next_queue_idx(rxq->put_idx, rxq->len);
	}
}

static void slic_handle_frame_error(struct slic_device *sdev,
				    struct sk_buff *skb)
{
	struct slic_stats *stats = &sdev->stats;

	if (sdev->model == SLIC_MODEL_OASIS) {
		struct slic_rx_info_oasis *info;
		u32 status_b;
		u32 status;

		info = (struct slic_rx_info_oasis *)skb->data;
		status = le32_to_cpu(info->frame_status);
		status_b = le32_to_cpu(info->frame_status_b);
		/* transport layer */
		if (status_b & SLIC_VRHSTATB_TPCSUM)
			SLIC_INC_STATS_COUNTER(stats, rx_tpcsum);
		if (status & SLIC_VRHSTAT_TPOFLO)
			SLIC_INC_STATS_COUNTER(stats, rx_tpoflow);
		if (status_b & SLIC_VRHSTATB_TPHLEN)
			SLIC_INC_STATS_COUNTER(stats, rx_tphlen);
		/* ip layer */
		if (status_b & SLIC_VRHSTATB_IPCSUM)
			SLIC_INC_STATS_COUNTER(stats, rx_ipcsum);
		if (status_b & SLIC_VRHSTATB_IPLERR)
			SLIC_INC_STATS_COUNTER(stats, rx_iplen);
		if (status_b & SLIC_VRHSTATB_IPHERR)
			SLIC_INC_STATS_COUNTER(stats, rx_iphlen);
		/* link layer */
		if (status_b & SLIC_VRHSTATB_RCVE)
			SLIC_INC_STATS_COUNTER(stats, rx_early);
		if (status_b & SLIC_VRHSTATB_BUFF)
			SLIC_INC_STATS_COUNTER(stats, rx_buffoflow);
		if (status_b & SLIC_VRHSTATB_CODE)
			SLIC_INC_STATS_COUNTER(stats, rx_lcode);
		if (status_b & SLIC_VRHSTATB_DRBL)
			SLIC_INC_STATS_COUNTER(stats, rx_drbl);
		if (status_b & SLIC_VRHSTATB_CRC)
			SLIC_INC_STATS_COUNTER(stats, rx_crc);
		if (status & SLIC_VRHSTAT_802OE)
			SLIC_INC_STATS_COUNTER(stats, rx_oflow802);
		if (status_b & SLIC_VRHSTATB_802UE)
			SLIC_INC_STATS_COUNTER(stats, rx_uflow802);
		if (status_b & SLIC_VRHSTATB_CARRE)
			SLIC_INC_STATS_COUNTER(stats, tx_carrier);
	} else { /* mojave */
		struct slic_rx_info_mojave *info;
		u32 status;

		info = (struct slic_rx_info_mojave *)skb->data;
		status = le32_to_cpu(info->frame_status);
		/* transport layer */
		if (status & SLIC_VGBSTAT_XPERR) {
			u32 xerr = status >> SLIC_VGBSTAT_XERRSHFT;

			if (xerr == SLIC_VGBSTAT_XCSERR)
				SLIC_INC_STATS_COUNTER(stats, rx_tpcsum);
			if (xerr == SLIC_VGBSTAT_XUFLOW)
				SLIC_INC_STATS_COUNTER(stats, rx_tpoflow);
			if (xerr == SLIC_VGBSTAT_XHLEN)
				SLIC_INC_STATS_COUNTER(stats, rx_tphlen);
		}
		/* ip layer */
		if (status & SLIC_VGBSTAT_NETERR) {
			u32 nerr = status >> SLIC_VGBSTAT_NERRSHFT &
				   SLIC_VGBSTAT_NERRMSK;

			if (nerr == SLIC_VGBSTAT_NCSERR)
				SLIC_INC_STATS_COUNTER(stats, rx_ipcsum);
			if (nerr == SLIC_VGBSTAT_NUFLOW)
				SLIC_INC_STATS_COUNTER(stats, rx_iplen);
			if (nerr == SLIC_VGBSTAT_NHLEN)
				SLIC_INC_STATS_COUNTER(stats, rx_iphlen);
		}
		/* link layer */
		if (status & SLIC_VGBSTAT_LNKERR) {
			u32 lerr = status & SLIC_VGBSTAT_LERRMSK;

			if (lerr == SLIC_VGBSTAT_LDEARLY)
				SLIC_INC_STATS_COUNTER(stats, rx_early);
			if (lerr == SLIC_VGBSTAT_LBOFLO)
				SLIC_INC_STATS_COUNTER(stats, rx_buffoflow);
			if (lerr == SLIC_VGBSTAT_LCODERR)
				SLIC_INC_STATS_COUNTER(stats, rx_lcode);
			if (lerr == SLIC_VGBSTAT_LDBLNBL)
				SLIC_INC_STATS_COUNTER(stats, rx_drbl);
			if (lerr == SLIC_VGBSTAT_LCRCERR)
				SLIC_INC_STATS_COUNTER(stats, rx_crc);
			if (lerr == SLIC_VGBSTAT_LOFLO)
				SLIC_INC_STATS_COUNTER(stats, rx_oflow802);
			if (lerr == SLIC_VGBSTAT_LUFLO)
				SLIC_INC_STATS_COUNTER(stats, rx_uflow802);
		}
	}
	SLIC_INC_STATS_COUNTER(stats, rx_errors);
}

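/* Process received frames: each filled RX buffer starts with the descriptor
 * written by the hardware, followed by the frame itself. A buffer whose
 * status does not have SLIC_IRHDDR_SVALID set has not been filled yet and
 * ends the loop; every processed descriptor counts against the NAPI budget
 * via *done.
 */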
static void slic_handle_receive(struct slic_device *sdev, unsigned int todo,
				unsigned int *done)
{
	struct slic_rx_queue *rxq = &sdev->rxq;
	struct net_device *dev = sdev->netdev;
	struct slic_rx_buffer *buff;
	struct slic_rx_desc *desc;
	unsigned int frames = 0;
	unsigned int bytes = 0;
	struct sk_buff *skb;
	u32 status;
	u32 len;

	while (todo && (rxq->done_idx != rxq->put_idx)) {
		buff = &rxq->rxbuffs[rxq->done_idx];

		skb = buff->skb;
		if (!skb)
			break;

		desc = (struct slic_rx_desc *)skb->data;

		dma_sync_single_for_cpu(&sdev->pdev->dev,
					dma_unmap_addr(buff, map_addr),
					buff->addr_offset + sizeof(*desc),
					DMA_FROM_DEVICE);

		status = le32_to_cpu(desc->status);
		if (!(status & SLIC_IRHDDR_SVALID)) {
			dma_sync_single_for_device(&sdev->pdev->dev,
						   dma_unmap_addr(buff,
								  map_addr),
						   buff->addr_offset +
						   sizeof(*desc),
						   DMA_FROM_DEVICE);
			break;
		}

		buff->skb = NULL;

		dma_unmap_single(&sdev->pdev->dev,
				 dma_unmap_addr(buff, map_addr),
				 dma_unmap_len(buff, map_len),
				 DMA_FROM_DEVICE);

		/* skip rx descriptor that is placed before the frame data */
		skb_reserve(skb, SLIC_RX_BUFF_HDR_SIZE);

		if (unlikely(status & SLIC_IRHDDR_ERR)) {
			slic_handle_frame_error(sdev, skb);
			dev_kfree_skb_any(skb);
		} else {
			struct ethhdr *eh = (struct ethhdr *)skb->data;

			if (is_multicast_ether_addr(eh->h_dest))
				SLIC_INC_STATS_COUNTER(&sdev->stats, rx_mcasts);

			len = le32_to_cpu(desc->length) & SLIC_IRHDDR_FLEN_MSK;
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&sdev->napi, skb);

			bytes += len;
			frames++;
		}
		rxq->done_idx = slic_next_queue_idx(rxq->done_idx, rxq->len);
		todo--;
		(*done)++;
	}

	u64_stats_update_begin(&sdev->stats.syncp);
	sdev->stats.rx_bytes += bytes;
	sdev->stats.rx_packets += frames;
	u64_stats_update_end(&sdev->stats.syncp);

	slic_refill_rx_queue(sdev, GFP_ATOMIC);
}

static void slic_handle_link_irq(struct slic_device *sdev)
{
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;
	unsigned int duplex;
	int speed;
	u32 link;

	link = le32_to_cpu(sm_data->link);

	if (link & SLIC_GIG_LINKUP) {
		if (link & SLIC_GIG_SPEED_1000)
			speed = SPEED_1000;
		else if (link & SLIC_GIG_SPEED_100)
			speed = SPEED_100;
		else
			speed = SPEED_10;

		duplex = (link & SLIC_GIG_FULLDUPLEX) ? DUPLEX_FULL :
							DUPLEX_HALF;
	} else {
		duplex = DUPLEX_UNKNOWN;
		speed = SPEED_UNKNOWN;
	}
	slic_configure_link(sdev, speed, duplex);
}

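/* A completed utility processor request (UPR) is always the oldest entry in
 * the list, since slic_queue_upr()/slic_dequeue_upr() issue requests to the
 * hardware one at a time. Link status requests that completed with an error
 * are simply reissued.
 */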
static void slic_handle_upr_irq(struct slic_device *sdev, u32 irqs)
{
	struct slic_upr *upr;

	/* remove upr that caused this irq (always the first entry in list) */
	upr = slic_dequeue_upr(sdev);
	if (!upr) {
		netdev_warn(sdev->netdev, "no upr found on list\n");
		return;
	}

	if (upr->type == SLIC_UPR_LSTAT) {
		if (unlikely(irqs & SLIC_ISR_UPCERR_MASK)) {
			/* try again */
			slic_queue_upr(sdev, upr);
			return;
		}
		slic_handle_link_irq(sdev);
	}
	kfree(upr);
}

static int slic_handle_link_change(struct slic_device *sdev)
{
	return slic_new_upr(sdev, SLIC_UPR_LSTAT, sdev->shmem.link_paddr);
}

static void slic_handle_err_irq(struct slic_device *sdev, u32 isr)
{
	struct slic_stats *stats = &sdev->stats;

	if (isr & SLIC_ISR_RMISS)
		SLIC_INC_STATS_COUNTER(stats, rx_buff_miss);
	if (isr & SLIC_ISR_XDROP)
		SLIC_INC_STATS_COUNTER(stats, tx_dropped);
	if (!(isr & (SLIC_ISR_RMISS | SLIC_ISR_XDROP)))
		SLIC_INC_STATS_COUNTER(stats, irq_errs);
}

static void slic_handle_irq(struct slic_device *sdev, u32 isr,
			    unsigned int todo, unsigned int *done)
{
	if (isr & SLIC_ISR_ERR)
		slic_handle_err_irq(sdev, isr);

	if (isr & SLIC_ISR_LEVENT)
		slic_handle_link_change(sdev);

	if (isr & SLIC_ISR_UPC_MASK)
		slic_handle_upr_irq(sdev, isr);

	if (isr & SLIC_ISR_RCV)
		slic_handle_receive(sdev, todo, done);

	if (isr & SLIC_ISR_CMD)
		slic_xmit_complete(sdev);
}

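/* Interrupt handling works on a shared memory word: the card DMAs its
 * interrupt status into sm_data->isr and raises the irq line. The hard irq
 * handler only masks further interrupts via SLIC_REG_ICR and schedules NAPI;
 * slic_poll() then acts on the saved status, clears it and reenables
 * interrupts by writing 0 to the SLIC_REG_ISR register.
 */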
static int slic_poll(struct napi_struct *napi, int todo)
{
	struct slic_device *sdev = container_of(napi, struct slic_device, napi);
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;
	u32 isr = le32_to_cpu(sm_data->isr);
	int done = 0;

	slic_handle_irq(sdev, isr, todo, &done);

	if (done < todo) {
		napi_complete_done(napi, done);
		/* reenable irqs */
		sm_data->isr = 0;
		/* make sure sm_data->isr is cleared before irqs are reenabled */
		wmb();
		slic_write(sdev, SLIC_REG_ISR, 0);
		slic_flush_write(sdev);
	}

	return done;
}

static irqreturn_t slic_irq(int irq, void *dev_id)
{
	struct slic_device *sdev = dev_id;
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;

	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_MASK);
	slic_flush_write(sdev);
	/* make sure sm_data->isr is read after ICR_INT_MASK is set */
	wmb();

	if (!sm_data->isr) {
		dma_rmb();
		/* spurious interrupt */
		slic_write(sdev, SLIC_REG_ISR, 0);
		slic_flush_write(sdev);
		return IRQ_NONE;
	}

	napi_schedule_irqoff(&sdev->napi);

	return IRQ_HANDLED;
}

static void slic_card_reset(struct slic_device *sdev)
{
	u16 cmd;

	slic_write(sdev, SLIC_REG_RESET, SLIC_RESET_MAGIC);
	/* flush write by means of config space */
	pci_read_config_word(sdev->pdev, PCI_COMMAND, &cmd);
	mdelay(1);
}

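/* The status queue consists of SLIC_NUM_STAT_DESC_ARRAYS descriptor arrays
 * that the hardware works through in round robin fashion: whenever
 * slic_next_compl_idx() wraps around the end of an array, that array is
 * handed back to the card via SLIC_REG_RBAR and the next one becomes the
 * active array. The arrays must be SLIC_STATS_DESC_ALIGN byte aligned, so
 * each allocation is padded and adjusted accordingly below.
 */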
static int slic_init_stat_queue(struct slic_device *sdev)
{
	const unsigned int DESC_ALIGN_MASK = SLIC_STATS_DESC_ALIGN - 1;
	struct slic_stat_queue *stq = &sdev->stq;
	struct slic_stat_desc *descs;
	unsigned int misalign;
	unsigned int offset;
	dma_addr_t paddr;
	size_t size;
	int err;
	int i;

	stq->len = SLIC_NUM_STAT_DESCS;
	stq->active_array = 0;
	stq->done_idx = 0;

	size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;

	for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
		descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr,
					   GFP_KERNEL);
		if (!descs) {
			netdev_err(sdev->netdev,
				   "failed to allocate status descriptors\n");
			err = -ENOMEM;
			goto free_descs;
		}
		/* ensure correct alignment (offset is in bytes) */
		offset = 0;
		misalign = paddr & DESC_ALIGN_MASK;
		if (misalign) {
			offset = SLIC_STATS_DESC_ALIGN - misalign;
			descs = (struct slic_stat_desc *)((u8 *)descs + offset);
			paddr += offset;
		}

		slic_write(sdev, SLIC_REG_RBAR, lower_32_bits(paddr) |
			   stq->len);
		stq->descs[i] = descs;
		stq->paddr[i] = paddr;
		stq->addr_offset[i] = offset;
	}

	stq->mem_size = size;

	return 0;

free_descs:
	while (i--) {
		dma_free_coherent(&sdev->pdev->dev, size,
				  (u8 *)stq->descs[i] - stq->addr_offset[i],
				  stq->paddr[i] - stq->addr_offset[i]);
	}

	return err;
}

static void slic_free_stat_queue(struct slic_device *sdev)
{
	struct slic_stat_queue *stq = &sdev->stq;
	int i;

	for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
		dma_free_coherent(&sdev->pdev->dev, stq->mem_size,
				  (u8 *)stq->descs[i] - stq->addr_offset[i],
				  stq->paddr[i] - stq->addr_offset[i]);
	}
}

static int slic_init_tx_queue(struct slic_device *sdev)
{
	struct slic_tx_queue *txq = &sdev->txq;
	struct slic_tx_buffer *buff;
	struct slic_tx_desc *desc;
	unsigned int i;
	int err;

	txq->len = SLIC_NUM_TX_DESCS;
	txq->put_idx = 0;
	txq->done_idx = 0;

	txq->txbuffs = kcalloc(txq->len, sizeof(*buff), GFP_KERNEL);
	if (!txq->txbuffs)
		return -ENOMEM;

	txq->dma_pool = dma_pool_create("slic_pool", &sdev->pdev->dev,
					sizeof(*desc), SLIC_TX_DESC_ALIGN,
					4096);
	if (!txq->dma_pool) {
		err = -ENOMEM;
		netdev_err(sdev->netdev, "failed to create dma pool\n");
		goto free_buffs;
	}

	for (i = 0; i < txq->len; i++) {
		buff = &txq->txbuffs[i];
		desc = dma_pool_zalloc(txq->dma_pool, GFP_KERNEL,
				       &buff->desc_paddr);
		if (!desc) {
			netdev_err(sdev->netdev,
				   "failed to alloc pool chunk (%i)\n", i);
			err = -ENOMEM;
			goto free_descs;
		}

		desc->hnd = cpu_to_le32((u32)(i + 1));
		desc->cmd = SLIC_CMD_XMT_REQ;
		desc->flags = 0;
		desc->type = cpu_to_le32(SLIC_CMD_TYPE_DUMB);
		buff->desc = desc;
	}

	return 0;

free_descs:
	while (i--) {
		buff = &txq->txbuffs[i];
		dma_pool_free(txq->dma_pool, buff->desc, buff->desc_paddr);
	}
	dma_pool_destroy(txq->dma_pool);

free_buffs:
	kfree(txq->txbuffs);

	return err;
}

static void slic_free_tx_queue(struct slic_device *sdev)
{
	struct slic_tx_queue *txq = &sdev->txq;
	struct slic_tx_buffer *buff;
	unsigned int i;

	for (i = 0; i < txq->len; i++) {
		buff = &txq->txbuffs[i];
		dma_pool_free(txq->dma_pool, buff->desc, buff->desc_paddr);
		if (!buff->skb)
			continue;

		dma_unmap_single(&sdev->pdev->dev,
				 dma_unmap_addr(buff, map_addr),
				 dma_unmap_len(buff, map_len), DMA_TO_DEVICE);
		consume_skb(buff->skb);
	}
	dma_pool_destroy(txq->dma_pool);

	kfree(txq->txbuffs);
}

static int slic_init_rx_queue(struct slic_device *sdev)
{
	struct slic_rx_queue *rxq = &sdev->rxq;
	struct slic_rx_buffer *buff;

	rxq->len = SLIC_NUM_RX_LES;
	rxq->done_idx = 0;
	rxq->put_idx = 0;

	buff = kcalloc(rxq->len, sizeof(*buff), GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	rxq->rxbuffs = buff;
	slic_refill_rx_queue(sdev, GFP_KERNEL);

	return 0;
}

static void slic_free_rx_queue(struct slic_device *sdev)
{
	struct slic_rx_queue *rxq = &sdev->rxq;
	struct slic_rx_buffer *buff;
	unsigned int i;

	/* free rx buffers */
	for (i = 0; i < rxq->len; i++) {
		buff = &rxq->rxbuffs[i];

		if (!buff->skb)
			continue;

		dma_unmap_single(&sdev->pdev->dev,
				 dma_unmap_addr(buff, map_addr),
				 dma_unmap_len(buff, map_len),
				 DMA_FROM_DEVICE);
		consume_skb(buff->skb);
	}
	kfree(rxq->rxbuffs);
}

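/* All PHY setup below goes through the single SLIC_REG_WPHY register;
 * judging by the values written, the upper half-word selects the MII
 * register and the lower half-word carries the data, e.g.
 * (MII_BMCR << 16 | BMCR_ANENABLE | BMCR_ANRESTART) restarts autoneg.
 */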
static void slic_set_link_autoneg(struct slic_device *sdev)
{
	unsigned int subid = sdev->pdev->subsystem_device;
	u32 val;

	if (sdev->is_fiber) {
		/* We've got a fiber gigabit interface, and register 4 is
		 * different in fiber mode than in copper mode.
		 */
		/* advertise FD only @1000 Mb */
		val = MII_ADVERTISE << 16 | ADVERTISE_1000XFULL |
		      ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		/* enable PAUSE frames */
		slic_write(sdev, SLIC_REG_WPHY, val);
		/* reset phy, enable auto-neg */
		val = MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
		      BMCR_ANRESTART;
		slic_write(sdev, SLIC_REG_WPHY, val);
	} else { /* copper gigabit */
		/* We've got a copper gigabit interface, and register 4 is
		 * different in copper mode than in fiber mode.
		 */
		/* advertise 10/100 Mb modes */
		val = MII_ADVERTISE << 16 | ADVERTISE_100FULL |
		      ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF;
		/* enable PAUSE frames */
		val |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		/* required by the Cicada PHY */
		val |= ADVERTISE_CSMA;
		slic_write(sdev, SLIC_REG_WPHY, val);

		/* advertise FD only @1000 Mb */
		val = MII_CTRL1000 << 16 | ADVERTISE_1000FULL;
		slic_write(sdev, SLIC_REG_WPHY, val);

		if (subid != PCI_SUBDEVICE_ID_ALACRITECH_CICADA) {
			/* if a Marvell PHY enable auto crossover */
			val = SLIC_MIICR_REG_16 | SLIC_MRV_REG16_XOVERON;
			slic_write(sdev, SLIC_REG_WPHY, val);

			/* reset phy, enable auto-neg */
			val = MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
			      BMCR_ANRESTART;
			slic_write(sdev, SLIC_REG_WPHY, val);
		} else {
			/* enable and restart auto-neg (don't reset) */
			val = MII_BMCR << 16 | BMCR_ANENABLE | BMCR_ANRESTART;
			slic_write(sdev, SLIC_REG_WPHY, val);
		}
	}
}

static void slic_set_mac_address(struct slic_device *sdev)
{
	u8 *addr = sdev->netdev->dev_addr;
	u32 val;

	val = addr[5] | addr[4] << 8 | addr[3] << 16 | addr[2] << 24;

	slic_write(sdev, SLIC_REG_WRADDRAL, val);
	slic_write(sdev, SLIC_REG_WRADDRBL, val);

	val = addr[0] << 8 | addr[1];

	slic_write(sdev, SLIC_REG_WRADDRAH, val);
	slic_write(sdev, SLIC_REG_WRADDRBH, val);
	slic_flush_write(sdev);
}

static u32 slic_read_dword_from_firmware(const struct firmware *fw, int *offset)
{
	int idx = *offset;
	__le32 val;

	memcpy(&val, fw->data + *offset, sizeof(val));
	idx += 4;
	*offset = idx;

	return le32_to_cpu(val);
}

MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_MOJAVE);
MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_OASIS);

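/* The receive sequencer firmware image starts with a dword holding the
 * number of instructions, followed by the instructions themselves: for each
 * instruction a dword with the low 32 bits and a single byte with the high
 * bits, which is how the download loop below consumes the data.
 */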
static int slic_load_rcvseq_firmware(struct slic_device *sdev)
{
	const struct firmware *fw;
	const char *file;
	u32 codelen;
	int idx = 0;
	u32 instr;
	u32 addr;
	int err;

	file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_RCV_FIRMWARE_OASIS :
						   SLIC_RCV_FIRMWARE_MOJAVE;
	err = request_firmware(&fw, file, &sdev->pdev->dev);
	if (err) {
		dev_err(&sdev->pdev->dev,
			"failed to load receive sequencer firmware %s\n", file);
		return err;
	}
	/* Do an initial sanity check concerning firmware size now. A further
	 * check follows below.
	 */
	if (fw->size < SLIC_FIRMWARE_MIN_SIZE) {
		dev_err(&sdev->pdev->dev,
			"invalid firmware size %zu (min %u expected)\n",
			fw->size, SLIC_FIRMWARE_MIN_SIZE);
		err = -EINVAL;
		goto release;
	}

	codelen = slic_read_dword_from_firmware(fw, &idx);

	/* do another sanity check against firmware size */
	if ((codelen + 4) > fw->size) {
		dev_err(&sdev->pdev->dev,
			"invalid rcv-sequencer firmware size %zu\n", fw->size);
		err = -EINVAL;
		goto release;
	}

	/* download sequencer code to card */
	slic_write(sdev, SLIC_REG_RCV_WCS, SLIC_RCVWCS_BEGIN);
	for (addr = 0; addr < codelen; addr++) {
		__le32 val;
		/* write out instruction address */
		slic_write(sdev, SLIC_REG_RCV_WCS, addr);

		instr = slic_read_dword_from_firmware(fw, &idx);
		/* write out the instruction data low addr */
		slic_write(sdev, SLIC_REG_RCV_WCS, instr);

		val = (__le32)fw->data[idx];
		instr = le32_to_cpu(val);
		idx++;
		/* write out the instruction data high addr */
		slic_write(sdev, SLIC_REG_RCV_WCS, instr);
	}
	/* finish download */
	slic_write(sdev, SLIC_REG_RCV_WCS, SLIC_RCVWCS_FINISH);
	slic_flush_write(sdev);
release:
	release_firmware(fw);

	return err;
}

MODULE_FIRMWARE(SLIC_FIRMWARE_MOJAVE);
MODULE_FIRMWARE(SLIC_FIRMWARE_OASIS);

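/* The main firmware image is laid out as follows: a dword with the number
 * of sections, then one dword per section with its size in bytes, then one
 * dword per section with its start address, followed by the instruction
 * stream itself (eight bytes per instruction, written out as two register
 * writes each).
 */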
static int slic_load_firmware(struct slic_device *sdev)
{
	u32 sectstart[SLIC_FIRMWARE_MAX_SECTIONS];
	u32 sectsize[SLIC_FIRMWARE_MAX_SECTIONS];
	const struct firmware *fw;
	unsigned int datalen;
	const char *file;
	int code_start;
	unsigned int i;
	u32 numsects;
	int idx = 0;
	u32 sect;
	u32 instr;
	u32 addr;
	u32 base;
	int err;

	file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_FIRMWARE_OASIS :
						   SLIC_FIRMWARE_MOJAVE;
	err = request_firmware(&fw, file, &sdev->pdev->dev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to load firmware %s\n", file);
		return err;
	}
	/* Do an initial sanity check concerning firmware size now. A further
	 * check follows below.
	 */
	if (fw->size < SLIC_FIRMWARE_MIN_SIZE) {
		dev_err(&sdev->pdev->dev,
			"invalid firmware size %zu (min is %u)\n", fw->size,
			SLIC_FIRMWARE_MIN_SIZE);
		err = -EINVAL;
		goto release;
	}

	numsects = slic_read_dword_from_firmware(fw, &idx);
	if (numsects == 0 || numsects > SLIC_FIRMWARE_MAX_SECTIONS) {
		dev_err(&sdev->pdev->dev,
			"invalid number of sections in firmware: %u", numsects);
		err = -EINVAL;
		goto release;
	}

	datalen = numsects * 8 + 4;
	for (i = 0; i < numsects; i++) {
		sectsize[i] = slic_read_dword_from_firmware(fw, &idx);
		datalen += sectsize[i];
	}

	/* do another sanity check against firmware size */
	if (datalen > fw->size) {
		dev_err(&sdev->pdev->dev,
			"invalid firmware size %zu (expected >= %u)\n",
			fw->size, datalen);
		err = -EINVAL;
		goto release;
	}
	/* get sections */
	for (i = 0; i < numsects; i++)
		sectstart[i] = slic_read_dword_from_firmware(fw, &idx);

	code_start = idx;
	instr = slic_read_dword_from_firmware(fw, &idx);

	for (sect = 0; sect < numsects; sect++) {
		unsigned int ssize = sectsize[sect] >> 3;

		base = sectstart[sect];

		for (addr = 0; addr < ssize; addr++) {
			/* write out instruction address */
			slic_write(sdev, SLIC_REG_WCS, base + addr);
			/* write out instruction to low addr */
			slic_write(sdev, SLIC_REG_WCS, instr);
			instr = slic_read_dword_from_firmware(fw, &idx);
			/* write out instruction to high addr */
			slic_write(sdev, SLIC_REG_WCS, instr);
			instr = slic_read_dword_from_firmware(fw, &idx);
		}
	}

	idx = code_start;

	for (sect = 0; sect < numsects; sect++) {
		unsigned int ssize = sectsize[sect] >> 3;

		instr = slic_read_dword_from_firmware(fw, &idx);
		base = sectstart[sect];
		if (base < 0x8000)
			continue;

		for (addr = 0; addr < ssize; addr++) {
			/* write out instruction address */
			slic_write(sdev, SLIC_REG_WCS,
				   SLIC_WCS_COMPARE | (base + addr));
			/* write out instruction to low addr */
			slic_write(sdev, SLIC_REG_WCS, instr);
			instr = slic_read_dword_from_firmware(fw, &idx);
			/* write out instruction to high addr */
			slic_write(sdev, SLIC_REG_WCS, instr);
			instr = slic_read_dword_from_firmware(fw, &idx);
		}
	}
	slic_flush_write(sdev);
	mdelay(10);
	/* everything OK, kick off the card */
	slic_write(sdev, SLIC_REG_WCS, SLIC_WCS_START);
	slic_flush_write(sdev);
	/* wait long enough for ucode to init card and reach the mainloop */
	mdelay(20);
release:
	release_firmware(fw);

	return err;
}

static int slic_init_shmem(struct slic_device *sdev)
{
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data;
	dma_addr_t paddr;

	sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
				     &paddr, GFP_KERNEL);
	if (!sm_data) {
		dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
		return -ENOMEM;
	}

	sm->shmem_data = sm_data;
	sm->isr_paddr = paddr;
	sm->link_paddr = paddr + offsetof(struct slic_shmem_data, link);

	return 0;
}

static void slic_free_shmem(struct slic_device *sdev)
{
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;

	dma_free_coherent(&sdev->pdev->dev, sizeof(*sm_data), sm_data,
			  sm->isr_paddr);
}

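/* Bring the interface up: allocate shared memory, download both firmware
 * images and set up the rx, tx and status queues before the irq handler is
 * installed and interrupts are enabled. The initial link state is requested
 * last, via the same UPR mechanism that is used at runtime.
 */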
static int slic_init_iface(struct slic_device *sdev)
{
	struct slic_shmem *sm = &sdev->shmem;
	int err;

	sdev->upr_list.pending = false;

	err = slic_init_shmem(sdev);
	if (err) {
		netdev_err(sdev->netdev, "failed to init shared memory\n");
		return err;
	}

	err = slic_load_firmware(sdev);
	if (err) {
		netdev_err(sdev->netdev, "failed to load firmware\n");
		goto free_sm;
	}

	err = slic_load_rcvseq_firmware(sdev);
	if (err) {
		netdev_err(sdev->netdev,
			   "failed to load firmware for receive sequencer\n");
		goto free_sm;
	}

	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
	slic_flush_write(sdev);
	mdelay(1);

	err = slic_init_rx_queue(sdev);
	if (err) {
		netdev_err(sdev->netdev, "failed to init rx queue: %i\n", err);
		goto free_sm;
	}

	err = slic_init_tx_queue(sdev);
	if (err) {
		netdev_err(sdev->netdev, "failed to init tx queue: %i\n", err);
		goto free_rxq;
	}

	err = slic_init_stat_queue(sdev);
	if (err) {
		netdev_err(sdev->netdev, "failed to init status queue: %i\n",
			   err);
		goto free_txq;
	}

	slic_write(sdev, SLIC_REG_ISP, lower_32_bits(sm->isr_paddr));
	napi_enable(&sdev->napi);
	/* disable irq mitigation */
	slic_write(sdev, SLIC_REG_INTAGG, 0);
	slic_write(sdev, SLIC_REG_ISR, 0);
	slic_flush_write(sdev);

	slic_set_mac_address(sdev);

	spin_lock_bh(&sdev->link_lock);
	sdev->duplex = DUPLEX_UNKNOWN;
	sdev->speed = SPEED_UNKNOWN;
	spin_unlock_bh(&sdev->link_lock);

	slic_set_link_autoneg(sdev);

	err = request_irq(sdev->pdev->irq, slic_irq, IRQF_SHARED, DRV_NAME,
			  sdev);
	if (err) {
		netdev_err(sdev->netdev, "failed to request irq: %i\n", err);
		goto disable_napi;
	}

	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_ON);
	slic_flush_write(sdev);
	/* request initial link status */
	err = slic_handle_link_change(sdev);
	if (err)
		netdev_warn(sdev->netdev,
			    "failed to set initial link state: %i\n", err);
	return 0;

disable_napi:
	napi_disable(&sdev->napi);
	slic_free_stat_queue(sdev);
free_txq:
	slic_free_tx_queue(sdev);
free_rxq:
	slic_free_rx_queue(sdev);
free_sm:
	slic_free_shmem(sdev);
	slic_card_reset(sdev);

	return err;
}

static int slic_open(struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = slic_init_iface(sdev);
	if (err) {
		netdev_err(dev, "failed to initialize interface: %i\n", err);
		return err;
	}

	netif_start_queue(dev);

	return 0;
}

static int slic_close(struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	u32 val;

	netif_stop_queue(dev);

	/* stop irq handling */
	napi_disable(&sdev->napi);
	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
	slic_write(sdev, SLIC_REG_ISR, 0);
	slic_flush_write(sdev);

	free_irq(sdev->pdev->irq, sdev);
	/* turn off RCV and XMT and power down PHY */
	val = SLIC_GXCR_RESET | SLIC_GXCR_PAUSEEN;
	slic_write(sdev, SLIC_REG_WXCFG, val);

	val = SLIC_GRCR_RESET | SLIC_GRCR_CTLEN | SLIC_GRCR_ADDRAEN |
	      SLIC_GRCR_HASHSIZE << SLIC_GRCR_HASHSIZE_SHIFT;
	slic_write(sdev, SLIC_REG_WRCFG, val);

	val = MII_BMCR << 16 | BMCR_PDOWN;
	slic_write(sdev, SLIC_REG_WPHY, val);
	slic_flush_write(sdev);

	slic_clear_upr_list(&sdev->upr_list);
	slic_write(sdev, SLIC_REG_QUIESCE, 0);

	slic_free_stat_queue(sdev);
	slic_free_tx_queue(sdev);
	slic_free_rx_queue(sdev);
	slic_free_shmem(sdev);

	slic_card_reset(sdev);
	netif_carrier_off(dev);

	return 0;
}

static netdev_tx_t slic_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct slic_tx_queue *txq = &sdev->txq;
	struct slic_tx_buffer *buff;
	struct slic_tx_desc *desc;
	dma_addr_t paddr;
	u32 cbar_val;
	u32 maplen;

	if (unlikely(slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)) {
		netdev_err(dev, "BUG! not enough tx LEs left: %u\n",
			   slic_get_free_tx_descs(txq));
		return NETDEV_TX_BUSY;
	}

	maplen = skb_headlen(skb);
	paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
			       DMA_TO_DEVICE);
	if (dma_mapping_error(&sdev->pdev->dev, paddr)) {
		netdev_err(dev, "failed to map tx buffer\n");
		goto drop_skb;
	}

	buff = &txq->txbuffs[txq->put_idx];
	buff->skb = skb;
	dma_unmap_addr_set(buff, map_addr, paddr);
	dma_unmap_len_set(buff, map_len, maplen);

	desc = buff->desc;
	desc->totlen = cpu_to_le32(maplen);
	desc->paddrl = cpu_to_le32(lower_32_bits(paddr));
	desc->paddrh = cpu_to_le32(upper_32_bits(paddr));
	desc->len = cpu_to_le32(maplen);

	txq->put_idx = slic_next_queue_idx(txq->put_idx, txq->len);

	cbar_val = lower_32_bits(buff->desc_paddr) | 1;
	/* complete writes to RAM and DMA before hardware is informed */
	wmb();

	slic_write(sdev, SLIC_REG_CBAR, cbar_val);

	if (slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
drop_skb:
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

static void slic_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *lst)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct slic_stats *stats = &sdev->stats;

	SLIC_GET_STATS_COUNTER(lst->rx_packets, stats, rx_packets);
	SLIC_GET_STATS_COUNTER(lst->tx_packets, stats, tx_packets);
	SLIC_GET_STATS_COUNTER(lst->rx_bytes, stats, rx_bytes);
	SLIC_GET_STATS_COUNTER(lst->tx_bytes, stats, tx_bytes);
	SLIC_GET_STATS_COUNTER(lst->rx_errors, stats, rx_errors);
	SLIC_GET_STATS_COUNTER(lst->rx_dropped, stats, rx_buff_miss);
	SLIC_GET_STATS_COUNTER(lst->tx_dropped, stats, tx_dropped);
	SLIC_GET_STATS_COUNTER(lst->multicast, stats, rx_mcasts);
	SLIC_GET_STATS_COUNTER(lst->rx_over_errors, stats, rx_buffoflow);
	SLIC_GET_STATS_COUNTER(lst->rx_crc_errors, stats, rx_crc);
	SLIC_GET_STATS_COUNTER(lst->rx_fifo_errors, stats, rx_oflow802);
	SLIC_GET_STATS_COUNTER(lst->tx_carrier_errors, stats, tx_carrier);
}

static int slic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(slic_stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

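/* The order of the counters below must match the order of the names in
 * slic_stats_strings[], since ethtool correlates both arrays by index.
 */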
static void slic_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *eth_stats, u64 *data)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct slic_stats *stats = &sdev->stats;

	SLIC_GET_STATS_COUNTER(data[0], stats, rx_packets);
	SLIC_GET_STATS_COUNTER(data[1], stats, rx_bytes);
	SLIC_GET_STATS_COUNTER(data[2], stats, rx_mcasts);
	SLIC_GET_STATS_COUNTER(data[3], stats, rx_errors);
	SLIC_GET_STATS_COUNTER(data[4], stats, rx_buff_miss);
	SLIC_GET_STATS_COUNTER(data[5], stats, rx_tpcsum);
	SLIC_GET_STATS_COUNTER(data[6], stats, rx_tpoflow);
	SLIC_GET_STATS_COUNTER(data[7], stats, rx_tphlen);
	SLIC_GET_STATS_COUNTER(data[8], stats, rx_ipcsum);
	SLIC_GET_STATS_COUNTER(data[9], stats, rx_iplen);
	SLIC_GET_STATS_COUNTER(data[10], stats, rx_iphlen);
	SLIC_GET_STATS_COUNTER(data[11], stats, rx_early);
	SLIC_GET_STATS_COUNTER(data[12], stats, rx_buffoflow);
	SLIC_GET_STATS_COUNTER(data[13], stats, rx_lcode);
	SLIC_GET_STATS_COUNTER(data[14], stats, rx_drbl);
	SLIC_GET_STATS_COUNTER(data[15], stats, rx_crc);
	SLIC_GET_STATS_COUNTER(data[16], stats, rx_oflow802);
	SLIC_GET_STATS_COUNTER(data[17], stats, rx_uflow802);
	SLIC_GET_STATS_COUNTER(data[18], stats, tx_packets);
	SLIC_GET_STATS_COUNTER(data[19], stats, tx_bytes);
	SLIC_GET_STATS_COUNTER(data[20], stats, tx_carrier);
	SLIC_GET_STATS_COUNTER(data[21], stats, tx_dropped);
	SLIC_GET_STATS_COUNTER(data[22], stats, irq_errs);
}

static void slic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS) {
		memcpy(data, slic_stats_strings, sizeof(slic_stats_strings));
		data += sizeof(slic_stats_strings);
	}
}

static void slic_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct slic_device *sdev = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
}

static const struct ethtool_ops slic_ethtool_ops = {
	.get_drvinfo = slic_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = slic_get_strings,
	.get_ethtool_stats = slic_get_ethtool_stats,
	.get_sset_count = slic_get_sset_count,
};

static const struct net_device_ops slic_netdev_ops = {
	.ndo_open = slic_open,
	.ndo_stop = slic_close,
	.ndo_start_xmit = slic_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_get_stats64 = slic_get_stats,
	.ndo_set_rx_mode = slic_set_rx_mode,
	.ndo_validate_addr = eth_validate_addr,
};

static u16 slic_eeprom_csum(unsigned char *eeprom, unsigned int len)
{
	unsigned char *ptr = eeprom;
	u32 csum = 0;
	__le16 data;

	while (len > 1) {
		memcpy(&data, ptr, sizeof(data));
		csum += le16_to_cpu(data);
		ptr += 2;
		len -= 2;
	}
	if (len > 0)
		csum += *(u8 *)ptr;
	while (csum >> 16)
		csum = (csum & 0xFFFF) + ((csum >> 16) & 0xFFFF);
	return ~csum;
}

/* check eeprom size, magic and checksum */
static bool slic_eeprom_valid(unsigned char *eeprom, unsigned int size)
{
	const unsigned int MAX_SIZE = 128;
	const unsigned int MIN_SIZE = 98;
	__le16 magic;
	__le16 csum;

	if (size < MIN_SIZE || size > MAX_SIZE)
		return false;
	memcpy(&magic, eeprom, sizeof(magic));
	if (le16_to_cpu(magic) != SLIC_EEPROM_MAGIC)
		return false;
	/* cut checksum bytes */
	size -= 2;
	memcpy(&csum, eeprom + size, sizeof(csum));

	return (le16_to_cpu(csum) == slic_eeprom_csum(eeprom, size));
}

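/* Reading the EEPROM works like any other utility processor request: the
 * card DMAs the EEPROM contents into a coherent buffer and signals
 * completion through the shared memory ISR word, which is polled here since
 * interrupts are not set up yet at probe time.
 */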
static int slic_read_eeprom(struct slic_device *sdev)
{
	unsigned int devfn = PCI_FUNC(sdev->pdev->devfn);
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;
	const unsigned int MAX_LOOPS = 5000;
	unsigned int codesize;
	unsigned char *eeprom;
	struct slic_upr *upr;
	unsigned int i = 0;
	dma_addr_t paddr;
	int err = 0;
	u8 *mac[2];

	eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
				    &paddr, GFP_KERNEL);
	if (!eeprom)
		return -ENOMEM;

	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
	/* setup ISP temporarily */
	slic_write(sdev, SLIC_REG_ISP, lower_32_bits(sm->isr_paddr));

	err = slic_new_upr(sdev, SLIC_UPR_CONFIG, paddr);
	if (!err) {
		for (i = 0; i < MAX_LOOPS; i++) {
			if (le32_to_cpu(sm_data->isr) & SLIC_ISR_UPC)
				break;
			mdelay(1);
		}
		if (i == MAX_LOOPS) {
			dev_err(&sdev->pdev->dev,
				"timed out while waiting for eeprom data\n");
			err = -ETIMEDOUT;
		}
		upr = slic_dequeue_upr(sdev);
		kfree(upr);
	}

	slic_write(sdev, SLIC_REG_ISP, 0);
	slic_write(sdev, SLIC_REG_ISR, 0);
	slic_flush_write(sdev);

	if (err)
		goto free_eeprom;

	if (sdev->model == SLIC_MODEL_OASIS) {
		struct slic_oasis_eeprom *oee;

		oee = (struct slic_oasis_eeprom *)eeprom;
		mac[0] = oee->mac;
		mac[1] = oee->mac2;
		codesize = le16_to_cpu(oee->eeprom_code_size);
	} else {
		struct slic_mojave_eeprom *mee;

		mee = (struct slic_mojave_eeprom *)eeprom;
		mac[0] = mee->mac;
		mac[1] = mee->mac2;
		codesize = le16_to_cpu(mee->eeprom_code_size);
	}

	if (!slic_eeprom_valid(eeprom, codesize)) {
		dev_err(&sdev->pdev->dev, "invalid checksum in eeprom\n");
		err = -EINVAL;
		goto free_eeprom;
	}
	/* set mac address */
	ether_addr_copy(sdev->netdev->dev_addr, mac[devfn]);
free_eeprom:
	dma_free_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, eeprom, paddr);

	return err;
}

static int slic_init(struct slic_device *sdev)
{
	int err;

	spin_lock_init(&sdev->upper_lock);
	spin_lock_init(&sdev->link_lock);
	INIT_LIST_HEAD(&sdev->upr_list.list);
	spin_lock_init(&sdev->upr_list.lock);
	u64_stats_init(&sdev->stats.syncp);

	slic_card_reset(sdev);

	err = slic_load_firmware(sdev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to load firmware\n");
		return err;
	}

	/* we need the shared memory to read EEPROM so set it up temporarily */
	err = slic_init_shmem(sdev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to init shared memory\n");
		return err;
	}

	err = slic_read_eeprom(sdev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to read eeprom\n");
		goto free_sm;
	}

	slic_card_reset(sdev);
	slic_free_shmem(sdev);

	return 0;
free_sm:
	slic_free_shmem(sdev);

	return err;
}

static bool slic_is_fiber(unsigned short subdev)
{
	switch (subdev) {
	/* Mojave */
	case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F:	/* fallthrough */
	case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F:	/* fallthrough */
	/* Oasis */
	case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF:	/* fallthrough */
	case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF:	/* fallthrough */
	case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF:	/* fallthrough */
	case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF:	/* fallthrough */
		return true;
	}
	return false;
}

static void slic_configure_pci(struct pci_dev *pdev)
{
	u16 old;
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &old);

	cmd = old | PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
	if (old != cmd)
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
}

static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct slic_device *sdev;
	struct net_device *dev;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return err;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	slic_configure_pci(pdev);

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "failed to setup DMA\n");
		goto disable;
	}

	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "failed to obtain PCI regions\n");
		goto disable;
	}

	dev = alloc_etherdev(sizeof(*sdev));
	if (!dev) {
		dev_err(&pdev->dev, "failed to alloc ethernet device\n");
		err = -ENOMEM;
		goto free_regions;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);
	dev->irq = pdev->irq;
	dev->netdev_ops = &slic_netdev_ops;
	dev->hw_features = NETIF_F_RXCSUM;
	dev->features |= dev->hw_features;

	dev->ethtool_ops = &slic_ethtool_ops;

	sdev = netdev_priv(dev);
	sdev->model = (pdev->device == PCI_DEVICE_ID_ALACRITECH_OASIS) ?
		      SLIC_MODEL_OASIS : SLIC_MODEL_MOJAVE;
	sdev->is_fiber = slic_is_fiber(pdev->subsystem_device);
	sdev->pdev = pdev;
	sdev->netdev = dev;
	sdev->regs = ioremap_nocache(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!sdev->regs) {
		dev_err(&pdev->dev, "failed to map registers\n");
		err = -ENOMEM;
		goto free_netdev;
	}

	err = slic_init(sdev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize driver\n");
		goto unmap;
	}

	netif_napi_add(dev, &sdev->napi, slic_poll, SLIC_NAPI_WEIGHT);
	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to register net device: %i\n", err);
		goto unmap;
	}

	return 0;

unmap:
	iounmap(sdev->regs);
free_netdev:
	free_netdev(dev);
free_regions:
	pci_release_regions(pdev);
disable:
	pci_disable_device(pdev);

	return err;
}

static void slic_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct slic_device *sdev = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(sdev->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver slic_driver = {
	.name = DRV_NAME,
	.id_table = slic_id_tbl,
	.probe = slic_probe,
	.remove = slic_remove,
};

module_pci_driver(slic_driver);

MODULE_DESCRIPTION("Alacritech non-accelerated SLIC driver");
MODULE_AUTHOR("Lino Sanfilippo <LinoSanfilippo@gmx.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);