/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");

/*
 * Global variables
 */
static u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_GET_MBOX_IRQ(_bnad)		\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?
\ 67 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \ 68 ((_bnad)->pcidev->irq)) 69 70 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \ 71 do { \ 72 (_res_info)->res_type = BNA_RES_T_MEM; \ 73 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \ 74 (_res_info)->res_u.mem_info.num = (_num); \ 75 (_res_info)->res_u.mem_info.len = (_size); \ 76 } while (0) 77 78 static void 79 bnad_add_to_list(struct bnad *bnad) 80 { 81 mutex_lock(&bnad_list_mutex); 82 list_add_tail(&bnad->list_entry, &bnad_list); 83 bnad->id = bna_id++; 84 mutex_unlock(&bnad_list_mutex); 85 } 86 87 static void 88 bnad_remove_from_list(struct bnad *bnad) 89 { 90 mutex_lock(&bnad_list_mutex); 91 list_del(&bnad->list_entry); 92 mutex_unlock(&bnad_list_mutex); 93 } 94 95 /* 96 * Reinitialize completions in CQ, once Rx is taken down 97 */ 98 static void 99 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb) 100 { 101 struct bna_cq_entry *cmpl; 102 int i; 103 104 for (i = 0; i < ccb->q_depth; i++) { 105 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i]; 106 cmpl->valid = 0; 107 } 108 } 109 110 /* Tx Datapath functions */ 111 112 113 /* Caller should ensure that the entry at unmap_q[index] is valid */ 114 static u32 115 bnad_tx_buff_unmap(struct bnad *bnad, 116 struct bnad_tx_unmap *unmap_q, 117 u32 q_depth, u32 index) 118 { 119 struct bnad_tx_unmap *unmap; 120 struct sk_buff *skb; 121 int vector, nvecs; 122 123 unmap = &unmap_q[index]; 124 nvecs = unmap->nvecs; 125 126 skb = unmap->skb; 127 unmap->skb = NULL; 128 unmap->nvecs = 0; 129 dma_unmap_single(&bnad->pcidev->dev, 130 dma_unmap_addr(&unmap->vectors[0], dma_addr), 131 skb_headlen(skb), DMA_TO_DEVICE); 132 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0); 133 nvecs--; 134 135 vector = 0; 136 while (nvecs) { 137 vector++; 138 if (vector == BFI_TX_MAX_VECTORS_PER_WI) { 139 vector = 0; 140 BNA_QE_INDX_INC(index, q_depth); 141 unmap = &unmap_q[index]; 142 } 143 144 dma_unmap_page(&bnad->pcidev->dev, 145 dma_unmap_addr(&unmap->vectors[vector], dma_addr), 146 dma_unmap_len(&unmap->vectors[vector], dma_len), 147 DMA_TO_DEVICE); 148 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0); 149 nvecs--; 150 } 151 152 BNA_QE_INDX_INC(index, q_depth); 153 154 return index; 155 } 156 157 /* 158 * Frees all pending Tx Bufs 159 * At this point no activity is expected on the Q, 160 * so DMA unmap & freeing is fine. 
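 * (Invoked from the tx_cleanup_work handler via bnad_tx_cleanup(), i.e.
 * from workqueue context, once completions have stopped.)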
161 */ 162 static void 163 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb) 164 { 165 struct bnad_tx_unmap *unmap_q = tcb->unmap_q; 166 struct sk_buff *skb; 167 int i; 168 169 for (i = 0; i < tcb->q_depth; i++) { 170 skb = unmap_q[i].skb; 171 if (!skb) 172 continue; 173 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); 174 175 dev_kfree_skb_any(skb); 176 } 177 } 178 179 /* 180 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion 181 * Can be called in a) Interrupt context 182 * b) Sending context 183 */ 184 static u32 185 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) 186 { 187 u32 sent_packets = 0, sent_bytes = 0; 188 u32 wis, unmap_wis, hw_cons, cons, q_depth; 189 struct bnad_tx_unmap *unmap_q = tcb->unmap_q; 190 struct bnad_tx_unmap *unmap; 191 struct sk_buff *skb; 192 193 /* Just return if TX is stopped */ 194 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) 195 return 0; 196 197 hw_cons = *(tcb->hw_consumer_index); 198 cons = tcb->consumer_index; 199 q_depth = tcb->q_depth; 200 201 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth); 202 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth))); 203 204 while (wis) { 205 unmap = &unmap_q[cons]; 206 207 skb = unmap->skb; 208 209 sent_packets++; 210 sent_bytes += skb->len; 211 212 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs); 213 wis -= unmap_wis; 214 215 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons); 216 dev_kfree_skb_any(skb); 217 } 218 219 /* Update consumer pointers. */ 220 tcb->consumer_index = hw_cons; 221 222 tcb->txq->tx_packets += sent_packets; 223 tcb->txq->tx_bytes += sent_bytes; 224 225 return sent_packets; 226 } 227 228 static u32 229 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb) 230 { 231 struct net_device *netdev = bnad->netdev; 232 u32 sent = 0; 233 234 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) 235 return 0; 236 237 sent = bnad_txcmpl_process(bnad, tcb); 238 if (sent) { 239 if (netif_queue_stopped(netdev) && 240 netif_carrier_ok(netdev) && 241 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= 242 BNAD_NETIF_WAKE_THRESHOLD) { 243 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) { 244 netif_wake_queue(netdev); 245 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); 246 } 247 } 248 } 249 250 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 251 bna_ib_ack(tcb->i_dbell, sent); 252 253 smp_mb__before_atomic(); 254 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 255 256 return sent; 257 } 258 259 /* MSIX Tx Completion Handler */ 260 static irqreturn_t 261 bnad_msix_tx(int irq, void *data) 262 { 263 struct bna_tcb *tcb = (struct bna_tcb *)data; 264 struct bnad *bnad = tcb->bnad; 265 266 bnad_tx_complete(bnad, tcb); 267 268 return IRQ_HANDLED; 269 } 270 271 static inline void 272 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb) 273 { 274 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; 275 276 unmap_q->reuse_pi = -1; 277 unmap_q->alloc_order = -1; 278 unmap_q->map_size = 0; 279 unmap_q->type = BNAD_RXBUF_NONE; 280 } 281 282 /* Default is page-based allocation. 
Multi-buffer support - TBD */ 283 static int 284 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb) 285 { 286 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; 287 int order; 288 289 bnad_rxq_alloc_uninit(bnad, rcb); 290 291 order = get_order(rcb->rxq->buffer_size); 292 293 unmap_q->type = BNAD_RXBUF_PAGE; 294 295 if (bna_is_small_rxq(rcb->id)) { 296 unmap_q->alloc_order = 0; 297 unmap_q->map_size = rcb->rxq->buffer_size; 298 } else { 299 if (rcb->rxq->multi_buffer) { 300 unmap_q->alloc_order = 0; 301 unmap_q->map_size = rcb->rxq->buffer_size; 302 unmap_q->type = BNAD_RXBUF_MULTI_BUFF; 303 } else { 304 unmap_q->alloc_order = order; 305 unmap_q->map_size = 306 (rcb->rxq->buffer_size > 2048) ? 307 PAGE_SIZE << order : 2048; 308 } 309 } 310 311 BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size)); 312 313 return 0; 314 } 315 316 static inline void 317 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap) 318 { 319 if (!unmap->page) 320 return; 321 322 dma_unmap_page(&bnad->pcidev->dev, 323 dma_unmap_addr(&unmap->vector, dma_addr), 324 unmap->vector.len, DMA_FROM_DEVICE); 325 put_page(unmap->page); 326 unmap->page = NULL; 327 dma_unmap_addr_set(&unmap->vector, dma_addr, 0); 328 unmap->vector.len = 0; 329 } 330 331 static inline void 332 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap) 333 { 334 if (!unmap->skb) 335 return; 336 337 dma_unmap_single(&bnad->pcidev->dev, 338 dma_unmap_addr(&unmap->vector, dma_addr), 339 unmap->vector.len, DMA_FROM_DEVICE); 340 dev_kfree_skb_any(unmap->skb); 341 unmap->skb = NULL; 342 dma_unmap_addr_set(&unmap->vector, dma_addr, 0); 343 unmap->vector.len = 0; 344 } 345 346 static void 347 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) 348 { 349 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; 350 int i; 351 352 for (i = 0; i < rcb->q_depth; i++) { 353 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; 354 355 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) 356 bnad_rxq_cleanup_skb(bnad, unmap); 357 else 358 bnad_rxq_cleanup_page(bnad, unmap); 359 } 360 bnad_rxq_alloc_uninit(bnad, rcb); 361 } 362 363 static u32 364 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) 365 { 366 u32 alloced, prod, q_depth; 367 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; 368 struct bnad_rx_unmap *unmap, *prev; 369 struct bna_rxq_entry *rxent; 370 struct page *page; 371 u32 page_offset, alloc_size; 372 dma_addr_t dma_addr; 373 374 prod = rcb->producer_index; 375 q_depth = rcb->q_depth; 376 377 alloc_size = PAGE_SIZE << unmap_q->alloc_order; 378 alloced = 0; 379 380 while (nalloc--) { 381 unmap = &unmap_q->unmap[prod]; 382 383 if (unmap_q->reuse_pi < 0) { 384 page = alloc_pages(GFP_ATOMIC | __GFP_COMP, 385 unmap_q->alloc_order); 386 page_offset = 0; 387 } else { 388 prev = &unmap_q->unmap[unmap_q->reuse_pi]; 389 page = prev->page; 390 page_offset = prev->page_offset + unmap_q->map_size; 391 get_page(page); 392 } 393 394 if (unlikely(!page)) { 395 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); 396 rcb->rxq->rxbuf_alloc_failed++; 397 goto finishing; 398 } 399 400 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, 401 unmap_q->map_size, DMA_FROM_DEVICE); 402 403 unmap->page = page; 404 unmap->page_offset = page_offset; 405 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); 406 unmap->vector.len = unmap_q->map_size; 407 page_offset += unmap_q->map_size; 408 409 if (page_offset < alloc_size) 410 unmap_q->reuse_pi = prod; 411 else 412 unmap_q->reuse_pi = -1; 413 414 rxent = &((struct bna_rxq_entry 
*)rcb->sw_q)[prod]; 415 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); 416 BNA_QE_INDX_INC(prod, q_depth); 417 alloced++; 418 } 419 420 finishing: 421 if (likely(alloced)) { 422 rcb->producer_index = prod; 423 smp_mb(); 424 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) 425 bna_rxq_prod_indx_doorbell(rcb); 426 } 427 428 return alloced; 429 } 430 431 static u32 432 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) 433 { 434 u32 alloced, prod, q_depth, buff_sz; 435 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; 436 struct bnad_rx_unmap *unmap; 437 struct bna_rxq_entry *rxent; 438 struct sk_buff *skb; 439 dma_addr_t dma_addr; 440 441 buff_sz = rcb->rxq->buffer_size; 442 prod = rcb->producer_index; 443 q_depth = rcb->q_depth; 444 445 alloced = 0; 446 while (nalloc--) { 447 unmap = &unmap_q->unmap[prod]; 448 449 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz); 450 451 if (unlikely(!skb)) { 452 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); 453 rcb->rxq->rxbuf_alloc_failed++; 454 goto finishing; 455 } 456 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, 457 buff_sz, DMA_FROM_DEVICE); 458 459 unmap->skb = skb; 460 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); 461 unmap->vector.len = buff_sz; 462 463 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; 464 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); 465 BNA_QE_INDX_INC(prod, q_depth); 466 alloced++; 467 } 468 469 finishing: 470 if (likely(alloced)) { 471 rcb->producer_index = prod; 472 smp_mb(); 473 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) 474 bna_rxq_prod_indx_doorbell(rcb); 475 } 476 477 return alloced; 478 } 479 480 static inline void 481 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) 482 { 483 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; 484 u32 to_alloc; 485 486 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth); 487 if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) 488 return; 489 490 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) 491 bnad_rxq_refill_skb(bnad, rcb, to_alloc); 492 else 493 bnad_rxq_refill_page(bnad, rcb, to_alloc); 494 } 495 496 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ 497 BNA_CQ_EF_IPV6 | \ 498 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \ 499 BNA_CQ_EF_L4_CKSUM_OK) 500 501 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ 502 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK) 503 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \ 504 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK) 505 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ 506 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) 507 #define flags_udp6 (BNA_CQ_EF_IPV6 | \ 508 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) 509 510 static void 511 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb, 512 u32 sop_ci, u32 nvecs) 513 { 514 struct bnad_rx_unmap_q *unmap_q; 515 struct bnad_rx_unmap *unmap; 516 u32 ci, vec; 517 518 unmap_q = rcb->unmap_q; 519 for (vec = 0, ci = sop_ci; vec < nvecs; vec++) { 520 unmap = &unmap_q->unmap[ci]; 521 BNA_QE_INDX_INC(ci, rcb->q_depth); 522 523 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) 524 bnad_rxq_cleanup_skb(bnad, unmap); 525 else 526 bnad_rxq_cleanup_page(bnad, unmap); 527 } 528 } 529 530 static void 531 bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb, 532 u32 sop_ci, u32 nvecs, u32 last_fraglen) 533 { 534 struct bnad *bnad; 535 u32 ci, vec, len, totlen = 0; 536 struct bnad_rx_unmap_q *unmap_q; 537 struct bnad_rx_unmap *unmap; 538 539 unmap_q = rcb->unmap_q; 540 bnad = rcb->bnad; 541 542 /* prefetch header */ 543 
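	/* warm the cache lines holding the packet header before the
	 * stack touches them
	 */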
prefetch(page_address(unmap_q->unmap[sop_ci].page) + 544 unmap_q->unmap[sop_ci].page_offset); 545 546 for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) { 547 unmap = &unmap_q->unmap[ci]; 548 BNA_QE_INDX_INC(ci, rcb->q_depth); 549 550 dma_unmap_page(&bnad->pcidev->dev, 551 dma_unmap_addr(&unmap->vector, dma_addr), 552 unmap->vector.len, DMA_FROM_DEVICE); 553 554 len = (vec == nvecs) ? 555 last_fraglen : unmap->vector.len; 556 skb->truesize += unmap->vector.len; 557 totlen += len; 558 559 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 560 unmap->page, unmap->page_offset, len); 561 562 unmap->page = NULL; 563 unmap->vector.len = 0; 564 } 565 566 skb->len += totlen; 567 skb->data_len += totlen; 568 } 569 570 static inline void 571 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb, 572 struct bnad_rx_unmap *unmap, u32 len) 573 { 574 prefetch(skb->data); 575 576 dma_unmap_single(&bnad->pcidev->dev, 577 dma_unmap_addr(&unmap->vector, dma_addr), 578 unmap->vector.len, DMA_FROM_DEVICE); 579 580 skb_put(skb, len); 581 skb->protocol = eth_type_trans(skb, bnad->netdev); 582 583 unmap->skb = NULL; 584 unmap->vector.len = 0; 585 } 586 587 static u32 588 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) 589 { 590 struct bna_cq_entry *cq, *cmpl, *next_cmpl; 591 struct bna_rcb *rcb = NULL; 592 struct bnad_rx_unmap_q *unmap_q; 593 struct bnad_rx_unmap *unmap = NULL; 594 struct sk_buff *skb = NULL; 595 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; 596 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl; 597 u32 packets = 0, len = 0, totlen = 0; 598 u32 pi, vec, sop_ci = 0, nvecs = 0; 599 u32 flags, masked_flags; 600 601 prefetch(bnad->netdev); 602 603 cq = ccb->sw_q; 604 605 while (packets < budget) { 606 cmpl = &cq[ccb->producer_index]; 607 if (!cmpl->valid) 608 break; 609 /* The 'valid' field is set by the adapter, only after writing 610 * the other fields of completion entry. Hence, do not load 611 * other fields of completion entry *before* the 'valid' is 612 * loaded. Adding the rmb() here prevents the compiler and/or 613 * CPU from reordering the reads which would potentially result 614 * in reading stale values in completion entry. 615 */ 616 rmb(); 617 618 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); 619 620 if (bna_is_small_rxq(cmpl->rxq_id)) 621 rcb = ccb->rcb[1]; 622 else 623 rcb = ccb->rcb[0]; 624 625 unmap_q = rcb->unmap_q; 626 627 /* start of packet ci */ 628 sop_ci = rcb->consumer_index; 629 630 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) { 631 unmap = &unmap_q->unmap[sop_ci]; 632 skb = unmap->skb; 633 } else { 634 skb = napi_get_frags(&rx_ctrl->napi); 635 if (unlikely(!skb)) 636 break; 637 } 638 prefetch(skb); 639 640 flags = ntohl(cmpl->flags); 641 len = ntohs(cmpl->length); 642 totlen = len; 643 nvecs = 1; 644 645 /* Check all the completions for this frame. 646 * busy-wait doesn't help much, break here. 647 */ 648 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) && 649 (flags & BNA_CQ_EF_EOP) == 0) { 650 pi = ccb->producer_index; 651 do { 652 BNA_QE_INDX_INC(pi, ccb->q_depth); 653 next_cmpl = &cq[pi]; 654 655 if (!next_cmpl->valid) 656 break; 657 /* The 'valid' field is set by the adapter, only 658 * after writing the other fields of completion 659 * entry. Hence, do not load other fields of 660 * completion entry *before* the 'valid' is 661 * loaded. Adding the rmb() here prevents the 662 * compiler and/or CPU from reordering the reads 663 * which would potentially result in reading 664 * stale values in completion entry. 
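 * (Same ordering requirement as for the first completion of the
 * frame, handled above.)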
665 */ 666 rmb(); 667 668 len = ntohs(next_cmpl->length); 669 flags = ntohl(next_cmpl->flags); 670 671 nvecs++; 672 totlen += len; 673 } while ((flags & BNA_CQ_EF_EOP) == 0); 674 675 if (!next_cmpl->valid) 676 break; 677 } 678 679 /* TODO: BNA_CQ_EF_LOCAL ? */ 680 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | 681 BNA_CQ_EF_FCS_ERROR | 682 BNA_CQ_EF_TOO_LONG))) { 683 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs); 684 rcb->rxq->rx_packets_with_error++; 685 686 goto next; 687 } 688 689 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) 690 bnad_cq_setup_skb(bnad, skb, unmap, len); 691 else 692 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); 693 694 packets++; 695 rcb->rxq->rx_packets++; 696 rcb->rxq->rx_bytes += totlen; 697 ccb->bytes_per_intr += totlen; 698 699 masked_flags = flags & flags_cksum_prot_mask; 700 701 if (likely 702 ((bnad->netdev->features & NETIF_F_RXCSUM) && 703 ((masked_flags == flags_tcp4) || 704 (masked_flags == flags_udp4) || 705 (masked_flags == flags_tcp6) || 706 (masked_flags == flags_udp6)))) 707 skb->ip_summed = CHECKSUM_UNNECESSARY; 708 else 709 skb_checksum_none_assert(skb); 710 711 if ((flags & BNA_CQ_EF_VLAN) && 712 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 713 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); 714 715 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) 716 netif_receive_skb(skb); 717 else 718 napi_gro_frags(&rx_ctrl->napi); 719 720 next: 721 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth); 722 for (vec = 0; vec < nvecs; vec++) { 723 cmpl = &cq[ccb->producer_index]; 724 cmpl->valid = 0; 725 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); 726 } 727 cmpl = &cq[ccb->producer_index]; 728 } 729 730 napi_gro_flush(&rx_ctrl->napi, false); 731 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) 732 bna_ib_ack_disable_irq(ccb->i_dbell, packets); 733 734 bnad_rxq_post(bnad, ccb->rcb[0]); 735 if (ccb->rcb[1]) 736 bnad_rxq_post(bnad, ccb->rcb[1]); 737 738 return packets; 739 } 740 741 static void 742 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) 743 { 744 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); 745 struct napi_struct *napi = &rx_ctrl->napi; 746 747 if (likely(napi_schedule_prep(napi))) { 748 __napi_schedule(napi); 749 rx_ctrl->rx_schedule++; 750 } 751 } 752 753 /* MSIX Rx Path Handler */ 754 static irqreturn_t 755 bnad_msix_rx(int irq, void *data) 756 { 757 struct bna_ccb *ccb = (struct bna_ccb *)data; 758 759 if (ccb) { 760 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++; 761 bnad_netif_rx_schedule_poll(ccb->bnad, ccb); 762 } 763 764 return IRQ_HANDLED; 765 } 766 767 /* Interrupt handlers */ 768 769 /* Mbox Interrupt Handlers */ 770 static irqreturn_t 771 bnad_msix_mbox_handler(int irq, void *data) 772 { 773 u32 intr_status; 774 unsigned long flags; 775 struct bnad *bnad = (struct bnad *)data; 776 777 spin_lock_irqsave(&bnad->bna_lock, flags); 778 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { 779 spin_unlock_irqrestore(&bnad->bna_lock, flags); 780 return IRQ_HANDLED; 781 } 782 783 bna_intr_status_get(&bnad->bna, intr_status); 784 785 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) 786 bna_mbox_handler(&bnad->bna, intr_status); 787 788 spin_unlock_irqrestore(&bnad->bna_lock, flags); 789 790 return IRQ_HANDLED; 791 } 792 793 static irqreturn_t 794 bnad_isr(int irq, void *data) 795 { 796 int i, j; 797 u32 intr_status; 798 unsigned long flags; 799 struct bnad *bnad = (struct bnad *)data; 800 struct bnad_rx_info *rx_info; 801 struct 
bnad_rx_ctrl *rx_ctrl; 802 struct bna_tcb *tcb = NULL; 803 804 spin_lock_irqsave(&bnad->bna_lock, flags); 805 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { 806 spin_unlock_irqrestore(&bnad->bna_lock, flags); 807 return IRQ_NONE; 808 } 809 810 bna_intr_status_get(&bnad->bna, intr_status); 811 812 if (unlikely(!intr_status)) { 813 spin_unlock_irqrestore(&bnad->bna_lock, flags); 814 return IRQ_NONE; 815 } 816 817 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) 818 bna_mbox_handler(&bnad->bna, intr_status); 819 820 spin_unlock_irqrestore(&bnad->bna_lock, flags); 821 822 if (!BNA_IS_INTX_DATA_INTR(intr_status)) 823 return IRQ_HANDLED; 824 825 /* Process data interrupts */ 826 /* Tx processing */ 827 for (i = 0; i < bnad->num_tx; i++) { 828 for (j = 0; j < bnad->num_txq_per_tx; j++) { 829 tcb = bnad->tx_info[i].tcb[j]; 830 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) 831 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]); 832 } 833 } 834 /* Rx processing */ 835 for (i = 0; i < bnad->num_rx; i++) { 836 rx_info = &bnad->rx_info[i]; 837 if (!rx_info->rx) 838 continue; 839 for (j = 0; j < bnad->num_rxp_per_rx; j++) { 840 rx_ctrl = &rx_info->rx_ctrl[j]; 841 if (rx_ctrl->ccb) 842 bnad_netif_rx_schedule_poll(bnad, 843 rx_ctrl->ccb); 844 } 845 } 846 return IRQ_HANDLED; 847 } 848 849 /* 850 * Called in interrupt / callback context 851 * with bna_lock held, so cfg_flags access is OK 852 */ 853 static void 854 bnad_enable_mbox_irq(struct bnad *bnad) 855 { 856 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); 857 858 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); 859 } 860 861 /* 862 * Called with bnad->bna_lock held b'cos of 863 * bnad->cfg_flags access. 864 */ 865 static void 866 bnad_disable_mbox_irq(struct bnad *bnad) 867 { 868 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); 869 870 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); 871 } 872 873 static void 874 bnad_set_netdev_perm_addr(struct bnad *bnad) 875 { 876 struct net_device *netdev = bnad->netdev; 877 878 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len); 879 if (is_zero_ether_addr(netdev->dev_addr)) 880 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len); 881 } 882 883 /* Control Path Handlers */ 884 885 /* Callbacks */ 886 void 887 bnad_cb_mbox_intr_enable(struct bnad *bnad) 888 { 889 bnad_enable_mbox_irq(bnad); 890 } 891 892 void 893 bnad_cb_mbox_intr_disable(struct bnad *bnad) 894 { 895 bnad_disable_mbox_irq(bnad); 896 } 897 898 void 899 bnad_cb_ioceth_ready(struct bnad *bnad) 900 { 901 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; 902 complete(&bnad->bnad_completions.ioc_comp); 903 } 904 905 void 906 bnad_cb_ioceth_failed(struct bnad *bnad) 907 { 908 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL; 909 complete(&bnad->bnad_completions.ioc_comp); 910 } 911 912 void 913 bnad_cb_ioceth_disabled(struct bnad *bnad) 914 { 915 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; 916 complete(&bnad->bnad_completions.ioc_comp); 917 } 918 919 static void 920 bnad_cb_enet_disabled(void *arg) 921 { 922 struct bnad *bnad = (struct bnad *)arg; 923 924 netif_carrier_off(bnad->netdev); 925 complete(&bnad->bnad_completions.enet_comp); 926 } 927 928 void 929 bnad_cb_ethport_link_status(struct bnad *bnad, 930 enum bna_link_status link_status) 931 { 932 bool link_up = false; 933 934 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP); 935 936 if (link_status == BNA_CEE_UP) { 937 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) 938 BNAD_UPDATE_CTR(bnad, 
cee_toggle); 939 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); 940 } else { 941 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) 942 BNAD_UPDATE_CTR(bnad, cee_toggle); 943 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); 944 } 945 946 if (link_up) { 947 if (!netif_carrier_ok(bnad->netdev)) { 948 uint tx_id, tcb_id; 949 printk(KERN_WARNING "bna: %s link up\n", 950 bnad->netdev->name); 951 netif_carrier_on(bnad->netdev); 952 BNAD_UPDATE_CTR(bnad, link_toggle); 953 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) { 954 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx; 955 tcb_id++) { 956 struct bna_tcb *tcb = 957 bnad->tx_info[tx_id].tcb[tcb_id]; 958 u32 txq_id; 959 if (!tcb) 960 continue; 961 962 txq_id = tcb->id; 963 964 if (test_bit(BNAD_TXQ_TX_STARTED, 965 &tcb->flags)) { 966 /* 967 * Force an immediate 968 * Transmit Schedule */ 969 printk(KERN_INFO "bna: %s %d " 970 "TXQ_STARTED\n", 971 bnad->netdev->name, 972 txq_id); 973 netif_wake_subqueue( 974 bnad->netdev, 975 txq_id); 976 BNAD_UPDATE_CTR(bnad, 977 netif_queue_wakeup); 978 } else { 979 netif_stop_subqueue( 980 bnad->netdev, 981 txq_id); 982 BNAD_UPDATE_CTR(bnad, 983 netif_queue_stop); 984 } 985 } 986 } 987 } 988 } else { 989 if (netif_carrier_ok(bnad->netdev)) { 990 printk(KERN_WARNING "bna: %s link down\n", 991 bnad->netdev->name); 992 netif_carrier_off(bnad->netdev); 993 BNAD_UPDATE_CTR(bnad, link_toggle); 994 } 995 } 996 } 997 998 static void 999 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx) 1000 { 1001 struct bnad *bnad = (struct bnad *)arg; 1002 1003 complete(&bnad->bnad_completions.tx_comp); 1004 } 1005 1006 static void 1007 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb) 1008 { 1009 struct bnad_tx_info *tx_info = 1010 (struct bnad_tx_info *)tcb->txq->tx->priv; 1011 1012 tcb->priv = tcb; 1013 tx_info->tcb[tcb->id] = tcb; 1014 } 1015 1016 static void 1017 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) 1018 { 1019 struct bnad_tx_info *tx_info = 1020 (struct bnad_tx_info *)tcb->txq->tx->priv; 1021 1022 tx_info->tcb[tcb->id] = NULL; 1023 tcb->priv = NULL; 1024 } 1025 1026 static void 1027 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) 1028 { 1029 struct bnad_rx_info *rx_info = 1030 (struct bnad_rx_info *)ccb->cq->rx->priv; 1031 1032 rx_info->rx_ctrl[ccb->id].ccb = ccb; 1033 ccb->ctrl = &rx_info->rx_ctrl[ccb->id]; 1034 } 1035 1036 static void 1037 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb) 1038 { 1039 struct bnad_rx_info *rx_info = 1040 (struct bnad_rx_info *)ccb->cq->rx->priv; 1041 1042 rx_info->rx_ctrl[ccb->id].ccb = NULL; 1043 } 1044 1045 static void 1046 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx) 1047 { 1048 struct bnad_tx_info *tx_info = 1049 (struct bnad_tx_info *)tx->priv; 1050 struct bna_tcb *tcb; 1051 u32 txq_id; 1052 int i; 1053 1054 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { 1055 tcb = tx_info->tcb[i]; 1056 if (!tcb) 1057 continue; 1058 txq_id = tcb->id; 1059 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); 1060 netif_stop_subqueue(bnad->netdev, txq_id); 1061 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n", 1062 bnad->netdev->name, txq_id); 1063 } 1064 } 1065 1066 static void 1067 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) 1068 { 1069 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; 1070 struct bna_tcb *tcb; 1071 u32 txq_id; 1072 int i; 1073 1074 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { 1075 tcb = tx_info->tcb[i]; 1076 if (!tcb) 1077 continue; 1078 txq_id = tcb->id; 1079 1080 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)); 
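		/* The queue was stalled, so it must not be marked started
		 * yet; mark it started and expect a freshly reset HW
		 * consumer index.
		 */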
1081 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); 1082 BUG_ON(*(tcb->hw_consumer_index) != 0); 1083 1084 if (netif_carrier_ok(bnad->netdev)) { 1085 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n", 1086 bnad->netdev->name, txq_id); 1087 netif_wake_subqueue(bnad->netdev, txq_id); 1088 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); 1089 } 1090 } 1091 1092 /* 1093 * Workaround for first ioceth enable failure & we 1094 * get a 0 MAC address. We try to get the MAC address 1095 * again here. 1096 */ 1097 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) { 1098 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr); 1099 bnad_set_netdev_perm_addr(bnad); 1100 } 1101 } 1102 1103 /* 1104 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm. 1105 */ 1106 static void 1107 bnad_tx_cleanup(struct delayed_work *work) 1108 { 1109 struct bnad_tx_info *tx_info = 1110 container_of(work, struct bnad_tx_info, tx_cleanup_work); 1111 struct bnad *bnad = NULL; 1112 struct bna_tcb *tcb; 1113 unsigned long flags; 1114 u32 i, pending = 0; 1115 1116 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { 1117 tcb = tx_info->tcb[i]; 1118 if (!tcb) 1119 continue; 1120 1121 bnad = tcb->bnad; 1122 1123 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { 1124 pending++; 1125 continue; 1126 } 1127 1128 bnad_txq_cleanup(bnad, tcb); 1129 1130 smp_mb__before_atomic(); 1131 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 1132 } 1133 1134 if (pending) { 1135 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 1136 msecs_to_jiffies(1)); 1137 return; 1138 } 1139 1140 spin_lock_irqsave(&bnad->bna_lock, flags); 1141 bna_tx_cleanup_complete(tx_info->tx); 1142 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1143 } 1144 1145 static void 1146 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) 1147 { 1148 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; 1149 struct bna_tcb *tcb; 1150 int i; 1151 1152 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { 1153 tcb = tx_info->tcb[i]; 1154 if (!tcb) 1155 continue; 1156 } 1157 1158 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0); 1159 } 1160 1161 static void 1162 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) 1163 { 1164 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; 1165 struct bna_ccb *ccb; 1166 struct bnad_rx_ctrl *rx_ctrl; 1167 int i; 1168 1169 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { 1170 rx_ctrl = &rx_info->rx_ctrl[i]; 1171 ccb = rx_ctrl->ccb; 1172 if (!ccb) 1173 continue; 1174 1175 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags); 1176 1177 if (ccb->rcb[1]) 1178 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags); 1179 } 1180 } 1181 1182 /* 1183 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm. 
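 * Runs from bnad->work_q (queued by bnad_cb_rx_cleanup()), so it may
 * sleep, e.g. in napi_disable().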
1184 */ 1185 static void 1186 bnad_rx_cleanup(void *work) 1187 { 1188 struct bnad_rx_info *rx_info = 1189 container_of(work, struct bnad_rx_info, rx_cleanup_work); 1190 struct bnad_rx_ctrl *rx_ctrl; 1191 struct bnad *bnad = NULL; 1192 unsigned long flags; 1193 u32 i; 1194 1195 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { 1196 rx_ctrl = &rx_info->rx_ctrl[i]; 1197 1198 if (!rx_ctrl->ccb) 1199 continue; 1200 1201 bnad = rx_ctrl->ccb->bnad; 1202 1203 /* 1204 * Wait till the poll handler has exited 1205 * and nothing can be scheduled anymore 1206 */ 1207 napi_disable(&rx_ctrl->napi); 1208 1209 bnad_cq_cleanup(bnad, rx_ctrl->ccb); 1210 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]); 1211 if (rx_ctrl->ccb->rcb[1]) 1212 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]); 1213 } 1214 1215 spin_lock_irqsave(&bnad->bna_lock, flags); 1216 bna_rx_cleanup_complete(rx_info->rx); 1217 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1218 } 1219 1220 static void 1221 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) 1222 { 1223 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; 1224 struct bna_ccb *ccb; 1225 struct bnad_rx_ctrl *rx_ctrl; 1226 int i; 1227 1228 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { 1229 rx_ctrl = &rx_info->rx_ctrl[i]; 1230 ccb = rx_ctrl->ccb; 1231 if (!ccb) 1232 continue; 1233 1234 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); 1235 1236 if (ccb->rcb[1]) 1237 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); 1238 } 1239 1240 queue_work(bnad->work_q, &rx_info->rx_cleanup_work); 1241 } 1242 1243 static void 1244 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) 1245 { 1246 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; 1247 struct bna_ccb *ccb; 1248 struct bna_rcb *rcb; 1249 struct bnad_rx_ctrl *rx_ctrl; 1250 int i, j; 1251 1252 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { 1253 rx_ctrl = &rx_info->rx_ctrl[i]; 1254 ccb = rx_ctrl->ccb; 1255 if (!ccb) 1256 continue; 1257 1258 napi_enable(&rx_ctrl->napi); 1259 1260 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) { 1261 rcb = ccb->rcb[j]; 1262 if (!rcb) 1263 continue; 1264 1265 bnad_rxq_alloc_init(bnad, rcb); 1266 set_bit(BNAD_RXQ_STARTED, &rcb->flags); 1267 set_bit(BNAD_RXQ_POST_OK, &rcb->flags); 1268 bnad_rxq_post(bnad, rcb); 1269 } 1270 } 1271 } 1272 1273 static void 1274 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx) 1275 { 1276 struct bnad *bnad = (struct bnad *)arg; 1277 1278 complete(&bnad->bnad_completions.rx_comp); 1279 } 1280 1281 static void 1282 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx) 1283 { 1284 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS; 1285 complete(&bnad->bnad_completions.mcast_comp); 1286 } 1287 1288 void 1289 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, 1290 struct bna_stats *stats) 1291 { 1292 if (status == BNA_CB_SUCCESS) 1293 BNAD_UPDATE_CTR(bnad, hw_stats_updates); 1294 1295 if (!netif_running(bnad->netdev) || 1296 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) 1297 return; 1298 1299 mod_timer(&bnad->stats_timer, 1300 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); 1301 } 1302 1303 static void 1304 bnad_cb_enet_mtu_set(struct bnad *bnad) 1305 { 1306 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS; 1307 complete(&bnad->bnad_completions.mtu_comp); 1308 } 1309 1310 void 1311 bnad_cb_completion(void *arg, enum bfa_status status) 1312 { 1313 struct bnad_iocmd_comp *iocmd_comp = 1314 (struct bnad_iocmd_comp *)arg; 1315 1316 iocmd_comp->comp_status = (u32) status; 1317 complete(&iocmd_comp->comp); 1318 } 1319 1320 /* 
Resource allocation, free functions */ 1321 1322 static void 1323 bnad_mem_free(struct bnad *bnad, 1324 struct bna_mem_info *mem_info) 1325 { 1326 int i; 1327 dma_addr_t dma_pa; 1328 1329 if (mem_info->mdl == NULL) 1330 return; 1331 1332 for (i = 0; i < mem_info->num; i++) { 1333 if (mem_info->mdl[i].kva != NULL) { 1334 if (mem_info->mem_type == BNA_MEM_T_DMA) { 1335 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma), 1336 dma_pa); 1337 dma_free_coherent(&bnad->pcidev->dev, 1338 mem_info->mdl[i].len, 1339 mem_info->mdl[i].kva, dma_pa); 1340 } else 1341 kfree(mem_info->mdl[i].kva); 1342 } 1343 } 1344 kfree(mem_info->mdl); 1345 mem_info->mdl = NULL; 1346 } 1347 1348 static int 1349 bnad_mem_alloc(struct bnad *bnad, 1350 struct bna_mem_info *mem_info) 1351 { 1352 int i; 1353 dma_addr_t dma_pa; 1354 1355 if ((mem_info->num == 0) || (mem_info->len == 0)) { 1356 mem_info->mdl = NULL; 1357 return 0; 1358 } 1359 1360 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr), 1361 GFP_KERNEL); 1362 if (mem_info->mdl == NULL) 1363 return -ENOMEM; 1364 1365 if (mem_info->mem_type == BNA_MEM_T_DMA) { 1366 for (i = 0; i < mem_info->num; i++) { 1367 mem_info->mdl[i].len = mem_info->len; 1368 mem_info->mdl[i].kva = 1369 dma_alloc_coherent(&bnad->pcidev->dev, 1370 mem_info->len, &dma_pa, 1371 GFP_KERNEL); 1372 if (mem_info->mdl[i].kva == NULL) 1373 goto err_return; 1374 1375 BNA_SET_DMA_ADDR(dma_pa, 1376 &(mem_info->mdl[i].dma)); 1377 } 1378 } else { 1379 for (i = 0; i < mem_info->num; i++) { 1380 mem_info->mdl[i].len = mem_info->len; 1381 mem_info->mdl[i].kva = kzalloc(mem_info->len, 1382 GFP_KERNEL); 1383 if (mem_info->mdl[i].kva == NULL) 1384 goto err_return; 1385 } 1386 } 1387 1388 return 0; 1389 1390 err_return: 1391 bnad_mem_free(bnad, mem_info); 1392 return -ENOMEM; 1393 } 1394 1395 /* Free IRQ for Mailbox */ 1396 static void 1397 bnad_mbox_irq_free(struct bnad *bnad) 1398 { 1399 int irq; 1400 unsigned long flags; 1401 1402 spin_lock_irqsave(&bnad->bna_lock, flags); 1403 bnad_disable_mbox_irq(bnad); 1404 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1405 1406 irq = BNAD_GET_MBOX_IRQ(bnad); 1407 free_irq(irq, bnad); 1408 } 1409 1410 /* 1411 * Allocates IRQ for Mailbox, but keep it disabled 1412 * This will be enabled once we get the mbox enable callback 1413 * from bna 1414 */ 1415 static int 1416 bnad_mbox_irq_alloc(struct bnad *bnad) 1417 { 1418 int err = 0; 1419 unsigned long irq_flags, flags; 1420 u32 irq; 1421 irq_handler_t irq_handler; 1422 1423 spin_lock_irqsave(&bnad->bna_lock, flags); 1424 if (bnad->cfg_flags & BNAD_CF_MSIX) { 1425 irq_handler = (irq_handler_t)bnad_msix_mbox_handler; 1426 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; 1427 irq_flags = 0; 1428 } else { 1429 irq_handler = (irq_handler_t)bnad_isr; 1430 irq = bnad->pcidev->irq; 1431 irq_flags = IRQF_SHARED; 1432 } 1433 1434 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1435 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME); 1436 1437 /* 1438 * Set the Mbox IRQ disable flag, so that the IRQ handler 1439 * called from request_irq() for SHARED IRQs do not execute 1440 */ 1441 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); 1442 1443 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); 1444 1445 err = request_irq(irq, irq_handler, irq_flags, 1446 bnad->mbox_irq_name, bnad); 1447 1448 return err; 1449 } 1450 1451 static void 1452 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info) 1453 { 1454 kfree(intr_info->idl); 1455 intr_info->idl = NULL; 1456 } 1457 1458 /* Allocates Interrupt Descriptor List for 
MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
				       (bnad->num_tx * bnad->num_txq_per_tx) +
				       txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}

/* NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/* NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
			tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/* NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info
*rx_info, 1593 u32 rx_id, int num_rxps) 1594 { 1595 int i; 1596 int err; 1597 int vector_num; 1598 1599 for (i = 0; i < num_rxps; i++) { 1600 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; 1601 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d", 1602 bnad->netdev->name, 1603 rx_id + rx_info->rx_ctrl[i].ccb->id); 1604 err = request_irq(bnad->msix_table[vector_num].vector, 1605 (irq_handler_t)bnad_msix_rx, 0, 1606 rx_info->rx_ctrl[i].ccb->name, 1607 rx_info->rx_ctrl[i].ccb); 1608 if (err) 1609 goto err_return; 1610 } 1611 1612 return 0; 1613 1614 err_return: 1615 if (i > 0) 1616 bnad_rx_msix_unregister(bnad, rx_info, (i - 1)); 1617 return -1; 1618 } 1619 1620 /* Free Tx object Resources */ 1621 static void 1622 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info) 1623 { 1624 int i; 1625 1626 for (i = 0; i < BNA_TX_RES_T_MAX; i++) { 1627 if (res_info[i].res_type == BNA_RES_T_MEM) 1628 bnad_mem_free(bnad, &res_info[i].res_u.mem_info); 1629 else if (res_info[i].res_type == BNA_RES_T_INTR) 1630 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); 1631 } 1632 } 1633 1634 /* Allocates memory and interrupt resources for Tx object */ 1635 static int 1636 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, 1637 u32 tx_id) 1638 { 1639 int i, err = 0; 1640 1641 for (i = 0; i < BNA_TX_RES_T_MAX; i++) { 1642 if (res_info[i].res_type == BNA_RES_T_MEM) 1643 err = bnad_mem_alloc(bnad, 1644 &res_info[i].res_u.mem_info); 1645 else if (res_info[i].res_type == BNA_RES_T_INTR) 1646 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id, 1647 &res_info[i].res_u.intr_info); 1648 if (err) 1649 goto err_return; 1650 } 1651 return 0; 1652 1653 err_return: 1654 bnad_tx_res_free(bnad, res_info); 1655 return err; 1656 } 1657 1658 /* Free Rx object Resources */ 1659 static void 1660 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info) 1661 { 1662 int i; 1663 1664 for (i = 0; i < BNA_RX_RES_T_MAX; i++) { 1665 if (res_info[i].res_type == BNA_RES_T_MEM) 1666 bnad_mem_free(bnad, &res_info[i].res_u.mem_info); 1667 else if (res_info[i].res_type == BNA_RES_T_INTR) 1668 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); 1669 } 1670 } 1671 1672 /* Allocates memory and interrupt resources for Rx object */ 1673 static int 1674 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, 1675 uint rx_id) 1676 { 1677 int i, err = 0; 1678 1679 /* All memory needs to be allocated before setup_ccbs */ 1680 for (i = 0; i < BNA_RX_RES_T_MAX; i++) { 1681 if (res_info[i].res_type == BNA_RES_T_MEM) 1682 err = bnad_mem_alloc(bnad, 1683 &res_info[i].res_u.mem_info); 1684 else if (res_info[i].res_type == BNA_RES_T_INTR) 1685 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id, 1686 &res_info[i].res_u.intr_info); 1687 if (err) 1688 goto err_return; 1689 } 1690 return 0; 1691 1692 err_return: 1693 bnad_rx_res_free(bnad, res_info); 1694 return err; 1695 } 1696 1697 /* Timer callbacks */ 1698 /* a) IOC timer */ 1699 static void 1700 bnad_ioc_timeout(unsigned long data) 1701 { 1702 struct bnad *bnad = (struct bnad *)data; 1703 unsigned long flags; 1704 1705 spin_lock_irqsave(&bnad->bna_lock, flags); 1706 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc); 1707 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1708 } 1709 1710 static void 1711 bnad_ioc_hb_check(unsigned long data) 1712 { 1713 struct bnad *bnad = (struct bnad *)data; 1714 unsigned long flags; 1715 1716 spin_lock_irqsave(&bnad->bna_lock, flags); 1717 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc); 1718 
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
	    !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
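 * The RUNNING flag is cleared under bna_lock, but del_timer_sync() is
 * called only after dropping the lock: the timer handler takes bna_lock
 * itself, so waiting for it with the lock held would deadlock.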
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}

/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
		       ETH_ALEN);
		i++;
	}
}

static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bnad *bnad = rx_ctrl->bnad;
	int rcvd = 0;

	rx_ctrl->rx_poll_ctr++;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
	if (rcvd >= budget)
		return rcvd;

poll_exit:
	napi_complete(napi);

	rx_ctrl->rx_complete++;

	if (rx_ctrl->ccb)
		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);

	return rcvd;
}

#define BNAD_NAPI_POLL_QUOTA		64
static void
bnad_napi_add(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
	}
}

static void
bnad_napi_delete(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
}

/* Should be called with conf_lock held */
void
bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;
	tx_info->tx_id = 0;

	bnad_tx_res_free(bnad, res_info);
}

/* Should be called with conf_lock held */
int
bnad_setup_tx(struct bnad *bnad, u32 tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	static const struct bna_tx_event_cbfn tx_cbfn = {
		.tcb_setup_cbfn =
bnad_cb_tcb_setup, 1967 .tcb_destroy_cbfn = bnad_cb_tcb_destroy, 1968 .tx_stall_cbfn = bnad_cb_tx_stall, 1969 .tx_resume_cbfn = bnad_cb_tx_resume, 1970 .tx_cleanup_cbfn = bnad_cb_tx_cleanup, 1971 }; 1972 1973 struct bna_tx *tx; 1974 unsigned long flags; 1975 1976 tx_info->tx_id = tx_id; 1977 1978 /* Initialize the Tx object configuration */ 1979 tx_config->num_txq = bnad->num_txq_per_tx; 1980 tx_config->txq_depth = bnad->txq_depth; 1981 tx_config->tx_type = BNA_TX_T_REGULAR; 1982 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo; 1983 1984 /* Get BNA's resource requirement for one tx object */ 1985 spin_lock_irqsave(&bnad->bna_lock, flags); 1986 bna_tx_res_req(bnad->num_txq_per_tx, 1987 bnad->txq_depth, res_info); 1988 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1989 1990 /* Fill Unmap Q memory requirements */ 1991 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ], 1992 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) * 1993 bnad->txq_depth)); 1994 1995 /* Allocate resources */ 1996 err = bnad_tx_res_alloc(bnad, res_info, tx_id); 1997 if (err) 1998 return err; 1999 2000 /* Ask BNA to create one Tx object, supplying required resources */ 2001 spin_lock_irqsave(&bnad->bna_lock, flags); 2002 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, 2003 tx_info); 2004 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2005 if (!tx) { 2006 err = -ENOMEM; 2007 goto err_return; 2008 } 2009 tx_info->tx = tx; 2010 2011 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, 2012 (work_func_t)bnad_tx_cleanup); 2013 2014 /* Register ISR for the Tx object */ 2015 if (intr_info->intr_type == BNA_INTR_T_MSIX) { 2016 err = bnad_tx_msix_register(bnad, tx_info, 2017 tx_id, bnad->num_txq_per_tx); 2018 if (err) 2019 goto cleanup_tx; 2020 } 2021 2022 spin_lock_irqsave(&bnad->bna_lock, flags); 2023 bna_tx_enable(tx); 2024 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2025 2026 return 0; 2027 2028 cleanup_tx: 2029 spin_lock_irqsave(&bnad->bna_lock, flags); 2030 bna_tx_destroy(tx_info->tx); 2031 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2032 tx_info->tx = NULL; 2033 tx_info->tx_id = 0; 2034 err_return: 2035 bnad_tx_res_free(bnad, res_info); 2036 return err; 2037 } 2038 2039 /* Setup the rx config for bna_rx_create */ 2040 /* bnad decides the configuration */ 2041 static void 2042 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) 2043 { 2044 memset(rx_config, 0, sizeof(*rx_config)); 2045 rx_config->rx_type = BNA_RX_T_REGULAR; 2046 rx_config->num_paths = bnad->num_rxp_per_rx; 2047 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; 2048 2049 if (bnad->num_rxp_per_rx > 1) { 2050 rx_config->rss_status = BNA_STATUS_T_ENABLED; 2051 rx_config->rss_config.hash_type = 2052 (BFI_ENET_RSS_IPV6 | 2053 BFI_ENET_RSS_IPV6_TCP | 2054 BFI_ENET_RSS_IPV4 | 2055 BFI_ENET_RSS_IPV4_TCP); 2056 rx_config->rss_config.hash_mask = 2057 bnad->num_rxp_per_rx - 1; 2058 netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key, 2059 sizeof(rx_config->rss_config.toeplitz_hash_key)); 2060 } else { 2061 rx_config->rss_status = BNA_STATUS_T_DISABLED; 2062 memset(&rx_config->rss_config, 0, 2063 sizeof(rx_config->rss_config)); 2064 } 2065 2066 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu); 2067 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED; 2068 2069 /* BNA_RXP_SINGLE - one data-buffer queue 2070 * BNA_RXP_SLR - one small-buffer and one large-buffer queues 2071 * BNA_RXP_HDS - one header-buffer and one data-buffer queues 2072 */ 2073 /* TODO: configurable param for queue type 
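 * (BNA_RXP_SLR is used unconditionally below: q1 is the small-buffer
 * queue, q0 the large/data-buffer queue.)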
*/ 2074 rx_config->rxp_type = BNA_RXP_SLR; 2075 2076 if (BNAD_PCI_DEV_IS_CAT2(bnad) && 2077 rx_config->frame_size > 4096) { 2078 /* though size_routing_enable is set in SLR, 2079 * small packets may get routed to same rxq. 2080 * set buf_size to 2048 instead of PAGE_SIZE. 2081 */ 2082 rx_config->q0_buf_size = 2048; 2083 /* this should be in multiples of 2 */ 2084 rx_config->q0_num_vecs = 4; 2085 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs; 2086 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED; 2087 } else { 2088 rx_config->q0_buf_size = rx_config->frame_size; 2089 rx_config->q0_num_vecs = 1; 2090 rx_config->q0_depth = bnad->rxq_depth; 2091 } 2092 2093 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */ 2094 if (rx_config->rxp_type == BNA_RXP_SLR) { 2095 rx_config->q1_depth = bnad->rxq_depth; 2096 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; 2097 } 2098 2099 rx_config->vlan_strip_status = 2100 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? 2101 BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; 2102 } 2103 2104 static void 2105 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) 2106 { 2107 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; 2108 int i; 2109 2110 for (i = 0; i < bnad->num_rxp_per_rx; i++) 2111 rx_info->rx_ctrl[i].bnad = bnad; 2112 } 2113 2114 /* Called with mutex_lock(&bnad->conf_mutex) held */ 2115 static u32 2116 bnad_reinit_rx(struct bnad *bnad) 2117 { 2118 struct net_device *netdev = bnad->netdev; 2119 u32 err = 0, current_err = 0; 2120 u32 rx_id = 0, count = 0; 2121 unsigned long flags; 2122 2123 /* destroy and create new rx objects */ 2124 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { 2125 if (!bnad->rx_info[rx_id].rx) 2126 continue; 2127 bnad_destroy_rx(bnad, rx_id); 2128 } 2129 2130 spin_lock_irqsave(&bnad->bna_lock, flags); 2131 bna_enet_mtu_set(&bnad->bna.enet, 2132 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); 2133 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2134 2135 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { 2136 count++; 2137 current_err = bnad_setup_rx(bnad, rx_id); 2138 if (current_err && !err) { 2139 err = current_err; 2140 pr_err("RXQ:%u setup failed\n", rx_id); 2141 } 2142 } 2143 2144 /* restore rx configuration */ 2145 if (bnad->rx_info[0].rx && !err) { 2146 bnad_restore_vlans(bnad, 0); 2147 bnad_enable_default_bcast(bnad); 2148 spin_lock_irqsave(&bnad->bna_lock, flags); 2149 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); 2150 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2151 bnad_set_rx_mode(netdev); 2152 } 2153 2154 return count; 2155 } 2156 2157 /* Called with bnad_conf_lock() held */ 2158 void 2159 bnad_destroy_rx(struct bnad *bnad, u32 rx_id) 2160 { 2161 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; 2162 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; 2163 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; 2164 unsigned long flags; 2165 int to_del = 0; 2166 2167 if (!rx_info->rx) 2168 return; 2169 2170 if (0 == rx_id) { 2171 spin_lock_irqsave(&bnad->bna_lock, flags); 2172 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && 2173 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { 2174 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); 2175 to_del = 1; 2176 } 2177 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2178 if (to_del) 2179 del_timer_sync(&bnad->dim_timer); 2180 } 2181 2182 init_completion(&bnad->bnad_completions.rx_comp); 2183 spin_lock_irqsave(&bnad->bna_lock, flags); 2184 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled); 2185 
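	/* bnad_cb_rx_disabled() signals rx_comp, which is waited on below */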
spin_unlock_irqrestore(&bnad->bna_lock, flags); 2186 wait_for_completion(&bnad->bnad_completions.rx_comp); 2187 2188 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) 2189 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); 2190 2191 bnad_napi_delete(bnad, rx_id); 2192 2193 spin_lock_irqsave(&bnad->bna_lock, flags); 2194 bna_rx_destroy(rx_info->rx); 2195 2196 rx_info->rx = NULL; 2197 rx_info->rx_id = 0; 2198 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2199 2200 bnad_rx_res_free(bnad, res_info); 2201 } 2202 2203 /* Called with mutex_lock(&bnad->conf_mutex) held */ 2204 int 2205 bnad_setup_rx(struct bnad *bnad, u32 rx_id) 2206 { 2207 int err; 2208 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; 2209 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; 2210 struct bna_intr_info *intr_info = 2211 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; 2212 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; 2213 static const struct bna_rx_event_cbfn rx_cbfn = { 2214 .rcb_setup_cbfn = NULL, 2215 .rcb_destroy_cbfn = NULL, 2216 .ccb_setup_cbfn = bnad_cb_ccb_setup, 2217 .ccb_destroy_cbfn = bnad_cb_ccb_destroy, 2218 .rx_stall_cbfn = bnad_cb_rx_stall, 2219 .rx_cleanup_cbfn = bnad_cb_rx_cleanup, 2220 .rx_post_cbfn = bnad_cb_rx_post, 2221 }; 2222 struct bna_rx *rx; 2223 unsigned long flags; 2224 2225 rx_info->rx_id = rx_id; 2226 2227 /* Initialize the Rx object configuration */ 2228 bnad_init_rx_config(bnad, rx_config); 2229 2230 /* Get BNA's resource requirement for one Rx object */ 2231 spin_lock_irqsave(&bnad->bna_lock, flags); 2232 bna_rx_res_req(rx_config, res_info); 2233 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2234 2235 /* Fill Unmap Q memory requirements */ 2236 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ], 2237 rx_config->num_paths, 2238 (rx_config->q0_depth * 2239 sizeof(struct bnad_rx_unmap)) + 2240 sizeof(struct bnad_rx_unmap_q)); 2241 2242 if (rx_config->rxp_type != BNA_RXP_SINGLE) { 2243 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ], 2244 rx_config->num_paths, 2245 (rx_config->q1_depth * 2246 sizeof(struct bnad_rx_unmap) + 2247 sizeof(struct bnad_rx_unmap_q))); 2248 } 2249 /* Allocate resource */ 2250 err = bnad_rx_res_alloc(bnad, res_info, rx_id); 2251 if (err) 2252 return err; 2253 2254 bnad_rx_ctrl_init(bnad, rx_id); 2255 2256 /* Ask BNA to create one Rx object, supplying required resources */ 2257 spin_lock_irqsave(&bnad->bna_lock, flags); 2258 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info, 2259 rx_info); 2260 if (!rx) { 2261 err = -ENOMEM; 2262 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2263 goto err_return; 2264 } 2265 rx_info->rx = rx; 2266 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2267 2268 INIT_WORK(&rx_info->rx_cleanup_work, 2269 (work_func_t)(bnad_rx_cleanup)); 2270 2271 /* 2272 * Init NAPI, so that state is set to NAPI_STATE_SCHED, 2273 * so that IRQ handler cannot schedule NAPI at this point. 
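 * NAPI is enabled only later, from the rx_post callback, once
 * bna_rx_enable() has brought the Rx object up (see bnad_cb_rx_post).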
2274 */ 2275 bnad_napi_add(bnad, rx_id); 2276 2277 /* Register ISR for the Rx object */ 2278 if (intr_info->intr_type == BNA_INTR_T_MSIX) { 2279 err = bnad_rx_msix_register(bnad, rx_info, rx_id, 2280 rx_config->num_paths); 2281 if (err) 2282 goto err_return; 2283 } 2284 2285 spin_lock_irqsave(&bnad->bna_lock, flags); 2286 if (0 == rx_id) { 2287 /* Set up Dynamic Interrupt Moderation Vector */ 2288 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) 2289 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector); 2290 2291 /* Enable VLAN filtering only on the default Rx */ 2292 bna_rx_vlanfilter_enable(rx); 2293 2294 /* Start the DIM timer */ 2295 bnad_dim_timer_start(bnad); 2296 } 2297 2298 bna_rx_enable(rx); 2299 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2300 2301 return 0; 2302 2303 err_return: 2304 bnad_destroy_rx(bnad, rx_id); 2305 return err; 2306 } 2307 2308 /* Called with conf_lock & bnad->bna_lock held */ 2309 void 2310 bnad_tx_coalescing_timeo_set(struct bnad *bnad) 2311 { 2312 struct bnad_tx_info *tx_info; 2313 2314 tx_info = &bnad->tx_info[0]; 2315 if (!tx_info->tx) 2316 return; 2317 2318 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo); 2319 } 2320 2321 /* Called with conf_lock & bnad->bna_lock held */ 2322 void 2323 bnad_rx_coalescing_timeo_set(struct bnad *bnad) 2324 { 2325 struct bnad_rx_info *rx_info; 2326 int i; 2327 2328 for (i = 0; i < bnad->num_rx; i++) { 2329 rx_info = &bnad->rx_info[i]; 2330 if (!rx_info->rx) 2331 continue; 2332 bna_rx_coalescing_timeo_set(rx_info->rx, 2333 bnad->rx_coalescing_timeo); 2334 } 2335 } 2336 2337 /* 2338 * Called with bnad->bna_lock held 2339 */ 2340 int 2341 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr) 2342 { 2343 int ret; 2344 2345 if (!is_valid_ether_addr(mac_addr)) 2346 return -EADDRNOTAVAIL; 2347 2348 /* If datapath is down, pretend everything went through */ 2349 if (!bnad->rx_info[0].rx) 2350 return 0; 2351 2352 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL); 2353 if (ret != BNA_CB_SUCCESS) 2354 return -EADDRNOTAVAIL; 2355 2356 return 0; 2357 } 2358 2359 /* Should be called with conf_lock held */ 2360 int 2361 bnad_enable_default_bcast(struct bnad *bnad) 2362 { 2363 struct bnad_rx_info *rx_info = &bnad->rx_info[0]; 2364 int ret; 2365 unsigned long flags; 2366 2367 init_completion(&bnad->bnad_completions.mcast_comp); 2368 2369 spin_lock_irqsave(&bnad->bna_lock, flags); 2370 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr, 2371 bnad_cb_rx_mcast_add); 2372 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2373 2374 if (ret == BNA_CB_SUCCESS) 2375 wait_for_completion(&bnad->bnad_completions.mcast_comp); 2376 else 2377 return -ENODEV; 2378 2379 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS) 2380 return -ENODEV; 2381 2382 return 0; 2383 } 2384 2385 /* Called with mutex_lock(&bnad->conf_mutex) held */ 2386 void 2387 bnad_restore_vlans(struct bnad *bnad, u32 rx_id) 2388 { 2389 u16 vid; 2390 unsigned long flags; 2391 2392 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) { 2393 spin_lock_irqsave(&bnad->bna_lock, flags); 2394 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid); 2395 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2396 } 2397 } 2398 2399 /* Statistics utilities */ 2400 void 2401 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) 2402 { 2403 int i, j; 2404 2405 for (i = 0; i < bnad->num_rx; i++) { 2406 for (j = 0; j < bnad->num_rxp_per_rx; j++) { 2407 if (bnad->rx_info[i].rx_ctrl[j].ccb) { 2408 stats->rx_packets += bnad->rx_info[i]. 
2409 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets; 2410 stats->rx_bytes += bnad->rx_info[i]. 2411 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes; 2412 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && 2413 bnad->rx_info[i].rx_ctrl[j].ccb-> 2414 rcb[1]->rxq) { 2415 stats->rx_packets += 2416 bnad->rx_info[i].rx_ctrl[j]. 2417 ccb->rcb[1]->rxq->rx_packets; 2418 stats->rx_bytes += 2419 bnad->rx_info[i].rx_ctrl[j]. 2420 ccb->rcb[1]->rxq->rx_bytes; 2421 } 2422 } 2423 } 2424 } 2425 for (i = 0; i < bnad->num_tx; i++) { 2426 for (j = 0; j < bnad->num_txq_per_tx; j++) { 2427 if (bnad->tx_info[i].tcb[j]) { 2428 stats->tx_packets += 2429 bnad->tx_info[i].tcb[j]->txq->tx_packets; 2430 stats->tx_bytes += 2431 bnad->tx_info[i].tcb[j]->txq->tx_bytes; 2432 } 2433 } 2434 } 2435 } 2436 2437 /* 2438 * Must be called with the bna_lock held. 2439 */ 2440 void 2441 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) 2442 { 2443 struct bfi_enet_stats_mac *mac_stats; 2444 u32 bmap; 2445 int i; 2446 2447 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats; 2448 stats->rx_errors = 2449 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error + 2450 mac_stats->rx_frame_length_error + mac_stats->rx_code_error + 2451 mac_stats->rx_undersize; 2452 stats->tx_errors = mac_stats->tx_fcs_error + 2453 mac_stats->tx_undersize; 2454 stats->rx_dropped = mac_stats->rx_drop; 2455 stats->tx_dropped = mac_stats->tx_drop; 2456 stats->multicast = mac_stats->rx_multicast; 2457 stats->collisions = mac_stats->tx_total_collision; 2458 2459 stats->rx_length_errors = mac_stats->rx_frame_length_error; 2460 2461 /* receive ring buffer overflow ?? */ 2462 2463 stats->rx_crc_errors = mac_stats->rx_fcs_error; 2464 stats->rx_frame_errors = mac_stats->rx_alignment_error; 2465 /* recv'r fifo overrun */ 2466 bmap = bna_rx_rid_mask(&bnad->bna); 2467 for (i = 0; bmap; i++) { 2468 if (bmap & 1) { 2469 stats->rx_fifo_errors += 2470 bnad->stats.bna_stats-> 2471 hw_stats.rxf_stats[i].frame_drops; 2472 break; 2473 } 2474 bmap >>= 1; 2475 } 2476 } 2477 2478 static void 2479 bnad_mbox_irq_sync(struct bnad *bnad) 2480 { 2481 u32 irq; 2482 unsigned long flags; 2483 2484 spin_lock_irqsave(&bnad->bna_lock, flags); 2485 if (bnad->cfg_flags & BNAD_CF_MSIX) 2486 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; 2487 else 2488 irq = bnad->pcidev->irq; 2489 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2490 2491 synchronize_irq(irq); 2492 } 2493 2494 /* Utility used by bnad_start_xmit, for doing TSO */ 2495 static int 2496 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) 2497 { 2498 int err; 2499 2500 err = skb_cow_head(skb, 0); 2501 if (err < 0) { 2502 BNAD_UPDATE_CTR(bnad, tso_err); 2503 return err; 2504 } 2505 2506 /* 2507 * For TSO, the TCP checksum field is seeded with pseudo-header sum 2508 * excluding the length field. 2509 */ 2510 if (vlan_get_protocol(skb) == htons(ETH_P_IP)) { 2511 struct iphdr *iph = ip_hdr(skb); 2512 2513 /* Do we really need these? */ 2514 iph->tot_len = 0; 2515 iph->check = 0; 2516 2517 tcp_hdr(skb)->check = 2518 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, 2519 IPPROTO_TCP, 0); 2520 BNAD_UPDATE_CTR(bnad, tso4); 2521 } else { 2522 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 2523 2524 ipv6h->payload_len = 0; 2525 tcp_hdr(skb)->check = 2526 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0, 2527 IPPROTO_TCP, 0); 2528 BNAD_UPDATE_CTR(bnad, tso6); 2529 } 2530 2531 return 0; 2532 } 2533 2534 /* 2535 * Initialize Q numbers depending on Rx Paths 2536 * Called with bnad->bna_lock held, because of cfg_flags 2537 * access. 
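 *
 * Result: one Tx object with BNAD_TXQ_NUM TxQs and one Rx object with
 * min(num_online_cpus(), BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX) paths,
 * forced down to a single Rx path when running in INTx mode.
 * Example (assuming BNAD_TXQ_NUM == 1 and BNAD_MAILBOX_MSIX_VECTORS == 1):
 * with 8 online CPUs this leads to an MSI-X budget of 1 TxQ + 8 RxPs +
 * 1 mailbox = 10 vectors in bnad_init().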
2538 */ 2539 static void 2540 bnad_q_num_init(struct bnad *bnad) 2541 { 2542 int rxps; 2543 2544 rxps = min((uint)num_online_cpus(), 2545 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX)); 2546 2547 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) 2548 rxps = 1; /* INTx */ 2549 2550 bnad->num_rx = 1; 2551 bnad->num_tx = 1; 2552 bnad->num_rxp_per_rx = rxps; 2553 bnad->num_txq_per_tx = BNAD_TXQ_NUM; 2554 } 2555 2556 /* 2557 * Adjusts the Q numbers, given a number of msix vectors 2558 * Give preference to RSS as opposed to Tx priority Queues, 2559 * in such a case, just use 1 Tx Q 2560 * Called with bnad->bna_lock held b'cos of cfg_flags access 2561 */ 2562 static void 2563 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp) 2564 { 2565 bnad->num_txq_per_tx = 1; 2566 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) + 2567 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) && 2568 (bnad->cfg_flags & BNAD_CF_MSIX)) { 2569 bnad->num_rxp_per_rx = msix_vectors - 2570 (bnad->num_tx * bnad->num_txq_per_tx) - 2571 BNAD_MAILBOX_MSIX_VECTORS; 2572 } else 2573 bnad->num_rxp_per_rx = 1; 2574 } 2575 2576 /* Enable / disable ioceth */ 2577 static int 2578 bnad_ioceth_disable(struct bnad *bnad) 2579 { 2580 unsigned long flags; 2581 int err = 0; 2582 2583 spin_lock_irqsave(&bnad->bna_lock, flags); 2584 init_completion(&bnad->bnad_completions.ioc_comp); 2585 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP); 2586 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2587 2588 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, 2589 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); 2590 2591 err = bnad->bnad_completions.ioc_comp_status; 2592 return err; 2593 } 2594 2595 static int 2596 bnad_ioceth_enable(struct bnad *bnad) 2597 { 2598 int err = 0; 2599 unsigned long flags; 2600 2601 spin_lock_irqsave(&bnad->bna_lock, flags); 2602 init_completion(&bnad->bnad_completions.ioc_comp); 2603 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING; 2604 bna_ioceth_enable(&bnad->bna.ioceth); 2605 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2606 2607 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, 2608 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); 2609 2610 err = bnad->bnad_completions.ioc_comp_status; 2611 2612 return err; 2613 } 2614 2615 /* Free BNA resources */ 2616 static void 2617 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info, 2618 u32 res_val_max) 2619 { 2620 int i; 2621 2622 for (i = 0; i < res_val_max; i++) 2623 bnad_mem_free(bnad, &res_info[i].res_u.mem_info); 2624 } 2625 2626 /* Allocates memory and interrupt resources for BNA */ 2627 static int 2628 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, 2629 u32 res_val_max) 2630 { 2631 int i, err; 2632 2633 for (i = 0; i < res_val_max; i++) { 2634 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info); 2635 if (err) 2636 goto err_return; 2637 } 2638 return 0; 2639 2640 err_return: 2641 bnad_res_free(bnad, res_info, res_val_max); 2642 return err; 2643 } 2644 2645 /* Interrupt enable / disable */ 2646 static void 2647 bnad_enable_msix(struct bnad *bnad) 2648 { 2649 int i, ret; 2650 unsigned long flags; 2651 2652 spin_lock_irqsave(&bnad->bna_lock, flags); 2653 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { 2654 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2655 return; 2656 } 2657 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2658 2659 if (bnad->msix_table) 2660 return; 2661 2662 bnad->msix_table = 2663 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL); 2664 2665 if (!bnad->msix_table) 2666 goto intx_mode; 
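	/*
	 * pci_enable_msix_range() below may grant fewer vectors than
	 * msix_num.  In that case the TxQ/RxP counts are re-adjusted to fit
	 * what was granted; if even the reduced budget cannot be met, MSI-X
	 * is released again and the driver falls back to INTx mode.
	 */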
2667 2668 for (i = 0; i < bnad->msix_num; i++) 2669 bnad->msix_table[i].entry = i; 2670 2671 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table, 2672 1, bnad->msix_num); 2673 if (ret < 0) { 2674 goto intx_mode; 2675 } else if (ret < bnad->msix_num) { 2676 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n", 2677 ret, bnad->msix_num); 2678 2679 spin_lock_irqsave(&bnad->bna_lock, flags); 2680 /* ret = #of vectors that we got */ 2681 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2, 2682 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2); 2683 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2684 2685 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + 2686 BNAD_MAILBOX_MSIX_VECTORS; 2687 2688 if (bnad->msix_num > ret) { 2689 pci_disable_msix(bnad->pcidev); 2690 goto intx_mode; 2691 } 2692 } 2693 2694 pci_intx(bnad->pcidev, 0); 2695 2696 return; 2697 2698 intx_mode: 2699 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n"); 2700 2701 kfree(bnad->msix_table); 2702 bnad->msix_table = NULL; 2703 bnad->msix_num = 0; 2704 spin_lock_irqsave(&bnad->bna_lock, flags); 2705 bnad->cfg_flags &= ~BNAD_CF_MSIX; 2706 bnad_q_num_init(bnad); 2707 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2708 } 2709 2710 static void 2711 bnad_disable_msix(struct bnad *bnad) 2712 { 2713 u32 cfg_flags; 2714 unsigned long flags; 2715 2716 spin_lock_irqsave(&bnad->bna_lock, flags); 2717 cfg_flags = bnad->cfg_flags; 2718 if (bnad->cfg_flags & BNAD_CF_MSIX) 2719 bnad->cfg_flags &= ~BNAD_CF_MSIX; 2720 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2721 2722 if (cfg_flags & BNAD_CF_MSIX) { 2723 pci_disable_msix(bnad->pcidev); 2724 kfree(bnad->msix_table); 2725 bnad->msix_table = NULL; 2726 } 2727 } 2728 2729 /* Netdev entry points */ 2730 static int 2731 bnad_open(struct net_device *netdev) 2732 { 2733 int err; 2734 struct bnad *bnad = netdev_priv(netdev); 2735 struct bna_pause_config pause_config; 2736 unsigned long flags; 2737 2738 mutex_lock(&bnad->conf_mutex); 2739 2740 /* Tx */ 2741 err = bnad_setup_tx(bnad, 0); 2742 if (err) 2743 goto err_return; 2744 2745 /* Rx */ 2746 err = bnad_setup_rx(bnad, 0); 2747 if (err) 2748 goto cleanup_tx; 2749 2750 /* Port */ 2751 pause_config.tx_pause = 0; 2752 pause_config.rx_pause = 0; 2753 2754 spin_lock_irqsave(&bnad->bna_lock, flags); 2755 bna_enet_mtu_set(&bnad->bna.enet, 2756 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); 2757 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); 2758 bna_enet_enable(&bnad->bna.enet); 2759 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2760 2761 /* Enable broadcast */ 2762 bnad_enable_default_bcast(bnad); 2763 2764 /* Restore VLANs, if any */ 2765 bnad_restore_vlans(bnad, 0); 2766 2767 /* Set the UCAST address */ 2768 spin_lock_irqsave(&bnad->bna_lock, flags); 2769 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); 2770 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2771 2772 /* Start the stats timer */ 2773 bnad_stats_timer_start(bnad); 2774 2775 mutex_unlock(&bnad->conf_mutex); 2776 2777 return 0; 2778 2779 cleanup_tx: 2780 bnad_destroy_tx(bnad, 0); 2781 2782 err_return: 2783 mutex_unlock(&bnad->conf_mutex); 2784 return err; 2785 } 2786 2787 static int 2788 bnad_stop(struct net_device *netdev) 2789 { 2790 struct bnad *bnad = netdev_priv(netdev); 2791 unsigned long flags; 2792 2793 mutex_lock(&bnad->conf_mutex); 2794 2795 /* Stop the stats timer */ 2796 bnad_stats_timer_stop(bnad); 2797 2798 init_completion(&bnad->bnad_completions.enet_comp); 2799 2800 spin_lock_irqsave(&bnad->bna_lock, flags); 2801 
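	/*
	 * bna_enet_disable() is asynchronous; bnad_cb_enet_disabled signals
	 * bnad_completions.enet_comp, which is waited on below after the
	 * lock is released.
	 */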
bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP, 2802 bnad_cb_enet_disabled); 2803 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2804 2805 wait_for_completion(&bnad->bnad_completions.enet_comp); 2806 2807 bnad_destroy_tx(bnad, 0); 2808 bnad_destroy_rx(bnad, 0); 2809 2810 /* Synchronize mailbox IRQ */ 2811 bnad_mbox_irq_sync(bnad); 2812 2813 mutex_unlock(&bnad->conf_mutex); 2814 2815 return 0; 2816 } 2817 2818 /* TX */ 2819 /* Returns 0 for success */ 2820 static int 2821 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, 2822 struct sk_buff *skb, struct bna_txq_entry *txqent) 2823 { 2824 u16 flags = 0; 2825 u32 gso_size; 2826 u16 vlan_tag = 0; 2827 2828 if (skb_vlan_tag_present(skb)) { 2829 vlan_tag = (u16)skb_vlan_tag_get(skb); 2830 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); 2831 } 2832 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { 2833 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT) 2834 | (vlan_tag & 0x1fff); 2835 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); 2836 } 2837 txqent->hdr.wi.vlan_tag = htons(vlan_tag); 2838 2839 if (skb_is_gso(skb)) { 2840 gso_size = skb_shinfo(skb)->gso_size; 2841 if (unlikely(gso_size > bnad->netdev->mtu)) { 2842 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long); 2843 return -EINVAL; 2844 } 2845 if (unlikely((gso_size + skb_transport_offset(skb) + 2846 tcp_hdrlen(skb)) >= skb->len)) { 2847 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); 2848 txqent->hdr.wi.lso_mss = 0; 2849 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); 2850 } else { 2851 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO); 2852 txqent->hdr.wi.lso_mss = htons(gso_size); 2853 } 2854 2855 if (bnad_tso_prepare(bnad, skb)) { 2856 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare); 2857 return -EINVAL; 2858 } 2859 2860 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM); 2861 txqent->hdr.wi.l4_hdr_size_n_offset = 2862 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET( 2863 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb))); 2864 } else { 2865 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); 2866 txqent->hdr.wi.lso_mss = 0; 2867 2868 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) { 2869 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); 2870 return -EINVAL; 2871 } 2872 2873 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2874 __be16 net_proto = vlan_get_protocol(skb); 2875 u8 proto = 0; 2876 2877 if (net_proto == htons(ETH_P_IP)) 2878 proto = ip_hdr(skb)->protocol; 2879 #ifdef NETIF_F_IPV6_CSUM 2880 else if (net_proto == htons(ETH_P_IPV6)) { 2881 /* nexthdr may not be TCP immediately. 
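 * It may instead be an IPv6 extension header (hop-by-hop, routing,
 * fragment, ...); in that case the checks below match neither TCP nor
 * UDP and the skb is rejected with tx_skb_csum_err incremented.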
*/ 2882 proto = ipv6_hdr(skb)->nexthdr; 2883 } 2884 #endif 2885 if (proto == IPPROTO_TCP) { 2886 flags |= BNA_TXQ_WI_CF_TCP_CKSUM; 2887 txqent->hdr.wi.l4_hdr_size_n_offset = 2888 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET 2889 (0, skb_transport_offset(skb))); 2890 2891 BNAD_UPDATE_CTR(bnad, tcpcsum_offload); 2892 2893 if (unlikely(skb_headlen(skb) < 2894 skb_transport_offset(skb) + 2895 tcp_hdrlen(skb))) { 2896 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr); 2897 return -EINVAL; 2898 } 2899 } else if (proto == IPPROTO_UDP) { 2900 flags |= BNA_TXQ_WI_CF_UDP_CKSUM; 2901 txqent->hdr.wi.l4_hdr_size_n_offset = 2902 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET 2903 (0, skb_transport_offset(skb))); 2904 2905 BNAD_UPDATE_CTR(bnad, udpcsum_offload); 2906 if (unlikely(skb_headlen(skb) < 2907 skb_transport_offset(skb) + 2908 sizeof(struct udphdr))) { 2909 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr); 2910 return -EINVAL; 2911 } 2912 } else { 2913 2914 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err); 2915 return -EINVAL; 2916 } 2917 } else 2918 txqent->hdr.wi.l4_hdr_size_n_offset = 0; 2919 } 2920 2921 txqent->hdr.wi.flags = htons(flags); 2922 txqent->hdr.wi.frame_length = htonl(skb->len); 2923 2924 return 0; 2925 } 2926 2927 /* 2928 * bnad_start_xmit : Netdev entry point for Transmit 2929 * Called under lock held by net_device 2930 */ 2931 static netdev_tx_t 2932 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) 2933 { 2934 struct bnad *bnad = netdev_priv(netdev); 2935 u32 txq_id = 0; 2936 struct bna_tcb *tcb = NULL; 2937 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap; 2938 u32 prod, q_depth, vect_id; 2939 u32 wis, vectors, len; 2940 int i; 2941 dma_addr_t dma_addr; 2942 struct bna_txq_entry *txqent; 2943 2944 len = skb_headlen(skb); 2945 2946 /* Sanity checks for the skb */ 2947 2948 if (unlikely(skb->len <= ETH_HLEN)) { 2949 dev_kfree_skb_any(skb); 2950 BNAD_UPDATE_CTR(bnad, tx_skb_too_short); 2951 return NETDEV_TX_OK; 2952 } 2953 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) { 2954 dev_kfree_skb_any(skb); 2955 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); 2956 return NETDEV_TX_OK; 2957 } 2958 if (unlikely(len == 0)) { 2959 dev_kfree_skb_any(skb); 2960 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); 2961 return NETDEV_TX_OK; 2962 } 2963 2964 tcb = bnad->tx_info[0].tcb[txq_id]; 2965 2966 /* 2967 * Takes care of the Tx that is scheduled between clearing the flag 2968 * and the netif_tx_stop_all_queues() call. 
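 * If the TxQ is not started (no tcb, or BNAD_TXQ_TX_STARTED already
 * cleared), the skb is dropped here and NETDEV_TX_OK is returned so the
 * stack does not requeue it.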
2969 */ 2970 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { 2971 dev_kfree_skb_any(skb); 2972 BNAD_UPDATE_CTR(bnad, tx_skb_stopping); 2973 return NETDEV_TX_OK; 2974 } 2975 2976 q_depth = tcb->q_depth; 2977 prod = tcb->producer_index; 2978 unmap_q = tcb->unmap_q; 2979 2980 vectors = 1 + skb_shinfo(skb)->nr_frags; 2981 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ 2982 2983 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { 2984 dev_kfree_skb_any(skb); 2985 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); 2986 return NETDEV_TX_OK; 2987 } 2988 2989 /* Check for available TxQ resources */ 2990 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { 2991 if ((*tcb->hw_consumer_index != tcb->consumer_index) && 2992 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { 2993 u32 sent; 2994 sent = bnad_txcmpl_process(bnad, tcb); 2995 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 2996 bna_ib_ack(tcb->i_dbell, sent); 2997 smp_mb__before_atomic(); 2998 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 2999 } else { 3000 netif_stop_queue(netdev); 3001 BNAD_UPDATE_CTR(bnad, netif_queue_stop); 3002 } 3003 3004 smp_mb(); 3005 /* 3006 * Check again to deal with race condition between 3007 * netif_stop_queue here, and netif_wake_queue in 3008 * interrupt handler which is not inside netif tx lock. 3009 */ 3010 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { 3011 BNAD_UPDATE_CTR(bnad, netif_queue_stop); 3012 return NETDEV_TX_BUSY; 3013 } else { 3014 netif_wake_queue(netdev); 3015 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); 3016 } 3017 } 3018 3019 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; 3020 head_unmap = &unmap_q[prod]; 3021 3022 /* Program the opcode, flags, frame_len, num_vectors in WI */ 3023 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) { 3024 dev_kfree_skb_any(skb); 3025 return NETDEV_TX_OK; 3026 } 3027 txqent->hdr.wi.reserved = 0; 3028 txqent->hdr.wi.num_vectors = vectors; 3029 3030 head_unmap->skb = skb; 3031 head_unmap->nvecs = 0; 3032 3033 /* Program the vectors */ 3034 unmap = head_unmap; 3035 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, 3036 len, DMA_TO_DEVICE); 3037 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); 3038 txqent->vector[0].length = htons(len); 3039 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); 3040 head_unmap->nvecs++; 3041 3042 for (i = 0, vect_id = 0; i < vectors - 1; i++) { 3043 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 3044 u32 size = skb_frag_size(frag); 3045 3046 if (unlikely(size == 0)) { 3047 /* Undo the changes starting at tcb->producer_index */ 3048 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, 3049 tcb->producer_index); 3050 dev_kfree_skb_any(skb); 3051 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); 3052 return NETDEV_TX_OK; 3053 } 3054 3055 len += size; 3056 3057 vect_id++; 3058 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) { 3059 vect_id = 0; 3060 BNA_QE_INDX_INC(prod, q_depth); 3061 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; 3062 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION); 3063 unmap = &unmap_q[prod]; 3064 } 3065 3066 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, 3067 0, size, DMA_TO_DEVICE); 3068 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); 3069 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 3070 txqent->vector[vect_id].length = htons(size); 3071 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr, 3072 dma_addr); 3073 head_unmap->nvecs++; 3074 } 3075 3076 if (unlikely(len != skb->len)) { 3077 /* Undo the 
changes starting at tcb->producer_index */ 3078 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); 3079 dev_kfree_skb_any(skb); 3080 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); 3081 return NETDEV_TX_OK; 3082 } 3083 3084 BNA_QE_INDX_INC(prod, q_depth); 3085 tcb->producer_index = prod; 3086 3087 smp_mb(); 3088 3089 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 3090 return NETDEV_TX_OK; 3091 3092 skb_tx_timestamp(skb); 3093 3094 bna_txq_prod_indx_doorbell(tcb); 3095 smp_mb(); 3096 3097 return NETDEV_TX_OK; 3098 } 3099 3100 /* 3101 * Used spin_lock to synchronize reading of stats structures, which 3102 * is written by BNA under the same lock. 3103 */ 3104 static struct rtnl_link_stats64 * 3105 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 3106 { 3107 struct bnad *bnad = netdev_priv(netdev); 3108 unsigned long flags; 3109 3110 spin_lock_irqsave(&bnad->bna_lock, flags); 3111 3112 bnad_netdev_qstats_fill(bnad, stats); 3113 bnad_netdev_hwstats_fill(bnad, stats); 3114 3115 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3116 3117 return stats; 3118 } 3119 3120 static void 3121 bnad_set_rx_ucast_fltr(struct bnad *bnad) 3122 { 3123 struct net_device *netdev = bnad->netdev; 3124 int uc_count = netdev_uc_count(netdev); 3125 enum bna_cb_status ret; 3126 u8 *mac_list; 3127 struct netdev_hw_addr *ha; 3128 int entry; 3129 3130 if (netdev_uc_empty(bnad->netdev)) { 3131 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL); 3132 return; 3133 } 3134 3135 if (uc_count > bna_attr(&bnad->bna)->num_ucmac) 3136 goto mode_default; 3137 3138 mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC); 3139 if (mac_list == NULL) 3140 goto mode_default; 3141 3142 entry = 0; 3143 netdev_for_each_uc_addr(ha, netdev) { 3144 memcpy(&mac_list[entry * ETH_ALEN], 3145 &ha->addr[0], ETH_ALEN); 3146 entry++; 3147 } 3148 3149 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, 3150 mac_list, NULL); 3151 kfree(mac_list); 3152 3153 if (ret != BNA_CB_SUCCESS) 3154 goto mode_default; 3155 3156 return; 3157 3158 /* ucast packets not in UCAM are routed to default function */ 3159 mode_default: 3160 bnad->cfg_flags |= BNAD_CF_DEFAULT; 3161 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL); 3162 } 3163 3164 static void 3165 bnad_set_rx_mcast_fltr(struct bnad *bnad) 3166 { 3167 struct net_device *netdev = bnad->netdev; 3168 int mc_count = netdev_mc_count(netdev); 3169 enum bna_cb_status ret; 3170 u8 *mac_list; 3171 3172 if (netdev->flags & IFF_ALLMULTI) 3173 goto mode_allmulti; 3174 3175 if (netdev_mc_empty(netdev)) 3176 return; 3177 3178 if (mc_count > bna_attr(&bnad->bna)->num_mcmac) 3179 goto mode_allmulti; 3180 3181 mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC); 3182 3183 if (mac_list == NULL) 3184 goto mode_allmulti; 3185 3186 memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN); 3187 3188 /* copy rest of the MCAST addresses */ 3189 bnad_netdev_mc_list_get(netdev, mac_list); 3190 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, 3191 mac_list, NULL); 3192 kfree(mac_list); 3193 3194 if (ret != BNA_CB_SUCCESS) 3195 goto mode_allmulti; 3196 3197 return; 3198 3199 mode_allmulti: 3200 bnad->cfg_flags |= BNAD_CF_ALLMULTI; 3201 bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL); 3202 } 3203 3204 void 3205 bnad_set_rx_mode(struct net_device *netdev) 3206 { 3207 struct bnad *bnad = netdev_priv(netdev); 3208 enum bna_rxmode new_mode, mode_mask; 3209 unsigned long flags; 3210 3211 spin_lock_irqsave(&bnad->bna_lock, flags); 3212 3213 if (bnad->rx_info[0].rx == 
NULL) { 3214 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3215 return; 3216 } 3217 3218 /* clear bnad flags to update it with new settings */ 3219 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT | 3220 BNAD_CF_ALLMULTI); 3221 3222 new_mode = 0; 3223 if (netdev->flags & IFF_PROMISC) { 3224 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT; 3225 bnad->cfg_flags |= BNAD_CF_PROMISC; 3226 } else { 3227 bnad_set_rx_mcast_fltr(bnad); 3228 3229 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) 3230 new_mode |= BNA_RXMODE_ALLMULTI; 3231 3232 bnad_set_rx_ucast_fltr(bnad); 3233 3234 if (bnad->cfg_flags & BNAD_CF_DEFAULT) 3235 new_mode |= BNA_RXMODE_DEFAULT; 3236 } 3237 3238 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT | 3239 BNA_RXMODE_ALLMULTI; 3240 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL); 3241 3242 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3243 } 3244 3245 /* 3246 * bna_lock is used to sync writes to netdev->addr 3247 * conf_lock cannot be used since this call may be made 3248 * in a non-blocking context. 3249 */ 3250 static int 3251 bnad_set_mac_address(struct net_device *netdev, void *mac_addr) 3252 { 3253 int err; 3254 struct bnad *bnad = netdev_priv(netdev); 3255 struct sockaddr *sa = (struct sockaddr *)mac_addr; 3256 unsigned long flags; 3257 3258 spin_lock_irqsave(&bnad->bna_lock, flags); 3259 3260 err = bnad_mac_addr_set_locked(bnad, sa->sa_data); 3261 3262 if (!err) 3263 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len); 3264 3265 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3266 3267 return err; 3268 } 3269 3270 static int 3271 bnad_mtu_set(struct bnad *bnad, int frame_size) 3272 { 3273 unsigned long flags; 3274 3275 init_completion(&bnad->bnad_completions.mtu_comp); 3276 3277 spin_lock_irqsave(&bnad->bna_lock, flags); 3278 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set); 3279 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3280 3281 wait_for_completion(&bnad->bnad_completions.mtu_comp); 3282 3283 return bnad->bnad_completions.mtu_comp_status; 3284 } 3285 3286 static int 3287 bnad_change_mtu(struct net_device *netdev, int new_mtu) 3288 { 3289 int err, mtu; 3290 struct bnad *bnad = netdev_priv(netdev); 3291 u32 rx_count = 0, frame, new_frame; 3292 3293 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU) 3294 return -EINVAL; 3295 3296 mutex_lock(&bnad->conf_mutex); 3297 3298 mtu = netdev->mtu; 3299 netdev->mtu = new_mtu; 3300 3301 frame = BNAD_FRAME_SIZE(mtu); 3302 new_frame = BNAD_FRAME_SIZE(new_mtu); 3303 3304 /* check if multi-buffer needs to be enabled */ 3305 if (BNAD_PCI_DEV_IS_CAT2(bnad) && 3306 netif_running(bnad->netdev)) { 3307 /* only when transition is over 4K */ 3308 if ((frame <= 4096 && new_frame > 4096) || 3309 (frame > 4096 && new_frame <= 4096)) 3310 rx_count = bnad_reinit_rx(bnad); 3311 } 3312 3313 /* rx_count > 0 - new rx created 3314 * - Linux set err = 0 and return 3315 */ 3316 err = bnad_mtu_set(bnad, new_frame); 3317 if (err) 3318 err = -EBUSY; 3319 3320 mutex_unlock(&bnad->conf_mutex); 3321 return err; 3322 } 3323 3324 static int 3325 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) 3326 { 3327 struct bnad *bnad = netdev_priv(netdev); 3328 unsigned long flags; 3329 3330 if (!bnad->rx_info[0].rx) 3331 return 0; 3332 3333 mutex_lock(&bnad->conf_mutex); 3334 3335 spin_lock_irqsave(&bnad->bna_lock, flags); 3336 bna_rx_vlan_add(bnad->rx_info[0].rx, vid); 3337 set_bit(vid, bnad->active_vlans); 3338 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3339 3340 
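	/*
	 * The vid recorded in active_vlans above lets bnad_restore_vlans()
	 * re-program the VLAN table whenever the Rx objects are rebuilt,
	 * e.g. after an MTU-driven bnad_reinit_rx().
	 */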
mutex_unlock(&bnad->conf_mutex); 3341 3342 return 0; 3343 } 3344 3345 static int 3346 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) 3347 { 3348 struct bnad *bnad = netdev_priv(netdev); 3349 unsigned long flags; 3350 3351 if (!bnad->rx_info[0].rx) 3352 return 0; 3353 3354 mutex_lock(&bnad->conf_mutex); 3355 3356 spin_lock_irqsave(&bnad->bna_lock, flags); 3357 clear_bit(vid, bnad->active_vlans); 3358 bna_rx_vlan_del(bnad->rx_info[0].rx, vid); 3359 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3360 3361 mutex_unlock(&bnad->conf_mutex); 3362 3363 return 0; 3364 } 3365 3366 static int bnad_set_features(struct net_device *dev, netdev_features_t features) 3367 { 3368 struct bnad *bnad = netdev_priv(dev); 3369 netdev_features_t changed = features ^ dev->features; 3370 3371 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) { 3372 unsigned long flags; 3373 3374 spin_lock_irqsave(&bnad->bna_lock, flags); 3375 3376 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3377 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); 3378 else 3379 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); 3380 3381 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3382 } 3383 3384 return 0; 3385 } 3386 3387 #ifdef CONFIG_NET_POLL_CONTROLLER 3388 static void 3389 bnad_netpoll(struct net_device *netdev) 3390 { 3391 struct bnad *bnad = netdev_priv(netdev); 3392 struct bnad_rx_info *rx_info; 3393 struct bnad_rx_ctrl *rx_ctrl; 3394 u32 curr_mask; 3395 int i, j; 3396 3397 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { 3398 bna_intx_disable(&bnad->bna, curr_mask); 3399 bnad_isr(bnad->pcidev->irq, netdev); 3400 bna_intx_enable(&bnad->bna, curr_mask); 3401 } else { 3402 /* 3403 * Tx processing may happen in sending context, so no need 3404 * to explicitly process completions here 3405 */ 3406 3407 /* Rx processing */ 3408 for (i = 0; i < bnad->num_rx; i++) { 3409 rx_info = &bnad->rx_info[i]; 3410 if (!rx_info->rx) 3411 continue; 3412 for (j = 0; j < bnad->num_rxp_per_rx; j++) { 3413 rx_ctrl = &rx_info->rx_ctrl[j]; 3414 if (rx_ctrl->ccb) 3415 bnad_netif_rx_schedule_poll(bnad, 3416 rx_ctrl->ccb); 3417 } 3418 } 3419 } 3420 } 3421 #endif 3422 3423 static const struct net_device_ops bnad_netdev_ops = { 3424 .ndo_open = bnad_open, 3425 .ndo_stop = bnad_stop, 3426 .ndo_start_xmit = bnad_start_xmit, 3427 .ndo_get_stats64 = bnad_get_stats64, 3428 .ndo_set_rx_mode = bnad_set_rx_mode, 3429 .ndo_validate_addr = eth_validate_addr, 3430 .ndo_set_mac_address = bnad_set_mac_address, 3431 .ndo_change_mtu = bnad_change_mtu, 3432 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, 3433 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, 3434 .ndo_set_features = bnad_set_features, 3435 #ifdef CONFIG_NET_POLL_CONTROLLER 3436 .ndo_poll_controller = bnad_netpoll 3437 #endif 3438 }; 3439 3440 static void 3441 bnad_netdev_init(struct bnad *bnad, bool using_dac) 3442 { 3443 struct net_device *netdev = bnad->netdev; 3444 3445 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | 3446 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 3447 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX | 3448 NETIF_F_HW_VLAN_CTAG_RX; 3449 3450 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | 3451 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 3452 NETIF_F_TSO | NETIF_F_TSO6; 3453 3454 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; 3455 3456 if (using_dac) 3457 netdev->features |= NETIF_F_HIGHDMA; 3458 3459 netdev->mem_start = bnad->mmio_start; 3460 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; 3461 3462 netdev->netdev_ops = &bnad_netdev_ops; 3463 
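	/*
	 * NETIF_F_HIGHDMA was added above only if bnad_pci_init() accepted
	 * the 64-bit DMA mask (using_dac == true).
	 */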
	bnad_set_ethtool_ops(netdev);
}

/*
 * 1. Initialize the bnad structure
 * 2. Set up netdev pointer in pci_dev
 * 3. Initialize no. of TxQ & CQs & MSIX vectors
 * 4. Initialize work queue.
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
		(unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
		BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
	if (!bnad->work_q) {
		iounmap(bnad->bar0);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
3526 */ 3527 static void 3528 bnad_uninit(struct bnad *bnad) 3529 { 3530 if (bnad->work_q) { 3531 flush_workqueue(bnad->work_q); 3532 destroy_workqueue(bnad->work_q); 3533 bnad->work_q = NULL; 3534 } 3535 3536 if (bnad->bar0) 3537 iounmap(bnad->bar0); 3538 } 3539 3540 /* 3541 * Initialize locks 3542 a) Per ioceth mutes used for serializing configuration 3543 changes from OS interface 3544 b) spin lock used to protect bna state machine 3545 */ 3546 static void 3547 bnad_lock_init(struct bnad *bnad) 3548 { 3549 spin_lock_init(&bnad->bna_lock); 3550 mutex_init(&bnad->conf_mutex); 3551 mutex_init(&bnad_list_mutex); 3552 } 3553 3554 static void 3555 bnad_lock_uninit(struct bnad *bnad) 3556 { 3557 mutex_destroy(&bnad->conf_mutex); 3558 mutex_destroy(&bnad_list_mutex); 3559 } 3560 3561 /* PCI Initialization */ 3562 static int 3563 bnad_pci_init(struct bnad *bnad, 3564 struct pci_dev *pdev, bool *using_dac) 3565 { 3566 int err; 3567 3568 err = pci_enable_device(pdev); 3569 if (err) 3570 return err; 3571 err = pci_request_regions(pdev, BNAD_NAME); 3572 if (err) 3573 goto disable_device; 3574 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { 3575 *using_dac = true; 3576 } else { 3577 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3578 if (err) 3579 goto release_regions; 3580 *using_dac = false; 3581 } 3582 pci_set_master(pdev); 3583 return 0; 3584 3585 release_regions: 3586 pci_release_regions(pdev); 3587 disable_device: 3588 pci_disable_device(pdev); 3589 3590 return err; 3591 } 3592 3593 static void 3594 bnad_pci_uninit(struct pci_dev *pdev) 3595 { 3596 pci_release_regions(pdev); 3597 pci_disable_device(pdev); 3598 } 3599 3600 static int 3601 bnad_pci_probe(struct pci_dev *pdev, 3602 const struct pci_device_id *pcidev_id) 3603 { 3604 bool using_dac; 3605 int err; 3606 struct bnad *bnad; 3607 struct bna *bna; 3608 struct net_device *netdev; 3609 struct bfa_pcidev pcidev_info; 3610 unsigned long flags; 3611 3612 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n", 3613 pdev, pcidev_id, PCI_FUNC(pdev->devfn)); 3614 3615 mutex_lock(&bnad_fwimg_mutex); 3616 if (!cna_get_firmware_buf(pdev)) { 3617 mutex_unlock(&bnad_fwimg_mutex); 3618 pr_warn("Failed to load Firmware Image!\n"); 3619 return -ENODEV; 3620 } 3621 mutex_unlock(&bnad_fwimg_mutex); 3622 3623 /* 3624 * Allocates sizeof(struct net_device + struct bnad) 3625 * bnad = netdev->priv 3626 */ 3627 netdev = alloc_etherdev(sizeof(struct bnad)); 3628 if (!netdev) { 3629 err = -ENOMEM; 3630 return err; 3631 } 3632 bnad = netdev_priv(netdev); 3633 bnad_lock_init(bnad); 3634 bnad_add_to_list(bnad); 3635 3636 mutex_lock(&bnad->conf_mutex); 3637 /* 3638 * PCI initialization 3639 * Output : using_dac = 1 for 64 bit DMA 3640 * = 0 for 32 bit DMA 3641 */ 3642 using_dac = false; 3643 err = bnad_pci_init(bnad, pdev, &using_dac); 3644 if (err) 3645 goto unlock_mutex; 3646 3647 /* 3648 * Initialize bnad structure 3649 * Setup relation between pci_dev & netdev 3650 */ 3651 err = bnad_init(bnad, pdev, netdev); 3652 if (err) 3653 goto pci_uninit; 3654 3655 /* Initialize netdev structure, set up ethtool ops */ 3656 bnad_netdev_init(bnad, using_dac); 3657 3658 /* Set link to down state */ 3659 netif_carrier_off(netdev); 3660 3661 /* Setup the debugfs node for this bfad */ 3662 if (bna_debugfs_enable) 3663 bnad_debugfs_init(bnad); 3664 3665 /* Get resource requirement form bna */ 3666 spin_lock_irqsave(&bnad->bna_lock, flags); 3667 bna_res_req(&bnad->res_info[0]); 3668 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3669 3670 /* 
Allocate resources from bna */ 3671 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX); 3672 if (err) 3673 goto drv_uninit; 3674 3675 bna = &bnad->bna; 3676 3677 /* Setup pcidev_info for bna_init() */ 3678 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn); 3679 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn); 3680 pcidev_info.device_id = bnad->pcidev->device; 3681 pcidev_info.pci_bar_kva = bnad->bar0; 3682 3683 spin_lock_irqsave(&bnad->bna_lock, flags); 3684 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]); 3685 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3686 3687 bnad->stats.bna_stats = &bna->stats; 3688 3689 bnad_enable_msix(bnad); 3690 err = bnad_mbox_irq_alloc(bnad); 3691 if (err) 3692 goto res_free; 3693 3694 /* Set up timers */ 3695 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 3696 ((unsigned long)bnad)); 3697 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 3698 ((unsigned long)bnad)); 3699 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 3700 ((unsigned long)bnad)); 3701 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, 3702 ((unsigned long)bnad)); 3703 3704 /* Now start the timer before calling IOC */ 3705 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer, 3706 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); 3707 3708 /* 3709 * Start the chip 3710 * If the call back comes with error, we bail out. 3711 * This is a catastrophic error. 3712 */ 3713 err = bnad_ioceth_enable(bnad); 3714 if (err) { 3715 pr_err("BNA: Initialization failed err=%d\n", 3716 err); 3717 goto probe_success; 3718 } 3719 3720 spin_lock_irqsave(&bnad->bna_lock, flags); 3721 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) || 3722 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) { 3723 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1, 3724 bna_attr(bna)->num_rxp - 1); 3725 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) || 3726 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) 3727 err = -EIO; 3728 } 3729 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3730 if (err) 3731 goto disable_ioceth; 3732 3733 spin_lock_irqsave(&bnad->bna_lock, flags); 3734 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]); 3735 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3736 3737 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); 3738 if (err) { 3739 err = -EIO; 3740 goto disable_ioceth; 3741 } 3742 3743 spin_lock_irqsave(&bnad->bna_lock, flags); 3744 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]); 3745 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3746 3747 /* Get the burnt-in mac */ 3748 spin_lock_irqsave(&bnad->bna_lock, flags); 3749 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr); 3750 bnad_set_netdev_perm_addr(bnad); 3751 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3752 3753 mutex_unlock(&bnad->conf_mutex); 3754 3755 /* Finally, reguister with net_device layer */ 3756 err = register_netdev(netdev); 3757 if (err) { 3758 pr_err("BNA : Registering with netdev failed\n"); 3759 goto probe_uninit; 3760 } 3761 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags); 3762 3763 return 0; 3764 3765 probe_success: 3766 mutex_unlock(&bnad->conf_mutex); 3767 return 0; 3768 3769 probe_uninit: 3770 mutex_lock(&bnad->conf_mutex); 3771 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); 3772 disable_ioceth: 3773 bnad_ioceth_disable(bnad); 3774 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); 3775 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); 3776 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); 3777 spin_lock_irqsave(&bnad->bna_lock, 
flags); 3778 bna_uninit(bna); 3779 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3780 bnad_mbox_irq_free(bnad); 3781 bnad_disable_msix(bnad); 3782 res_free: 3783 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); 3784 drv_uninit: 3785 /* Remove the debugfs node for this bnad */ 3786 kfree(bnad->regdata); 3787 bnad_debugfs_uninit(bnad); 3788 bnad_uninit(bnad); 3789 pci_uninit: 3790 bnad_pci_uninit(pdev); 3791 unlock_mutex: 3792 mutex_unlock(&bnad->conf_mutex); 3793 bnad_remove_from_list(bnad); 3794 bnad_lock_uninit(bnad); 3795 free_netdev(netdev); 3796 return err; 3797 } 3798 3799 static void 3800 bnad_pci_remove(struct pci_dev *pdev) 3801 { 3802 struct net_device *netdev = pci_get_drvdata(pdev); 3803 struct bnad *bnad; 3804 struct bna *bna; 3805 unsigned long flags; 3806 3807 if (!netdev) 3808 return; 3809 3810 pr_info("%s bnad_pci_remove\n", netdev->name); 3811 bnad = netdev_priv(netdev); 3812 bna = &bnad->bna; 3813 3814 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags)) 3815 unregister_netdev(netdev); 3816 3817 mutex_lock(&bnad->conf_mutex); 3818 bnad_ioceth_disable(bnad); 3819 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); 3820 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); 3821 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); 3822 spin_lock_irqsave(&bnad->bna_lock, flags); 3823 bna_uninit(bna); 3824 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3825 3826 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); 3827 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); 3828 bnad_mbox_irq_free(bnad); 3829 bnad_disable_msix(bnad); 3830 bnad_pci_uninit(pdev); 3831 mutex_unlock(&bnad->conf_mutex); 3832 bnad_remove_from_list(bnad); 3833 bnad_lock_uninit(bnad); 3834 /* Remove the debugfs node for this bnad */ 3835 kfree(bnad->regdata); 3836 bnad_debugfs_uninit(bnad); 3837 bnad_uninit(bnad); 3838 free_netdev(netdev); 3839 } 3840 3841 static const struct pci_device_id bnad_pci_id_table[] = { 3842 { 3843 PCI_DEVICE(PCI_VENDOR_ID_BROCADE, 3844 PCI_DEVICE_ID_BROCADE_CT), 3845 .class = PCI_CLASS_NETWORK_ETHERNET << 8, 3846 .class_mask = 0xffff00 3847 }, 3848 { 3849 PCI_DEVICE(PCI_VENDOR_ID_BROCADE, 3850 BFA_PCI_DEVICE_ID_CT2), 3851 .class = PCI_CLASS_NETWORK_ETHERNET << 8, 3852 .class_mask = 0xffff00 3853 }, 3854 {0, }, 3855 }; 3856 3857 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table); 3858 3859 static struct pci_driver bnad_pci_driver = { 3860 .name = BNAD_NAME, 3861 .id_table = bnad_pci_id_table, 3862 .probe = bnad_pci_probe, 3863 .remove = bnad_pci_remove, 3864 }; 3865 3866 static int __init 3867 bnad_module_init(void) 3868 { 3869 int err; 3870 3871 pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n", 3872 BNAD_VERSION); 3873 3874 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); 3875 3876 err = pci_register_driver(&bnad_pci_driver); 3877 if (err < 0) { 3878 pr_err("bna : PCI registration failed in module init " 3879 "(%d)\n", err); 3880 return err; 3881 } 3882 3883 return 0; 3884 } 3885 3886 static void __exit 3887 bnad_module_exit(void) 3888 { 3889 pci_unregister_driver(&bnad_pci_driver); 3890 release_firmware(bfi_fw); 3891 } 3892 3893 module_init(bnad_module_init); 3894 module_exit(bnad_module_exit); 3895 3896 MODULE_AUTHOR("Brocade"); 3897 MODULE_LICENSE("GPL"); 3898 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver"); 3899 MODULE_VERSION(BNAD_VERSION); 3900 MODULE_FIRMWARE(CNA_FW_FILE_CT); 3901 MODULE_FIRMWARE(CNA_FW_FILE_CT2); 3902