/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

#define SGE_RX_COPY_THRES 256
#define SGE_RX_PULL_LEN 128

#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE 2048
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)

#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 16U
/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK 64U
#define TX_RECLAIM_CHUNK 16U

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	u8 eop;			/* set if last descriptor for packet */
	u8 addr_idx;		/* buffer index of first SGL entry in descriptor */
	u8 fragidx;		/* first page fragment associated with descriptor */
	s8 sflit;		/* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
	};
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 * refill_rspq - replenish an SGE response queue
 * @adapter: the adapter
 * @q: the response queue to replenish
 * @credits: how many new responses to make available
 *
 * Replenishes a response queue by making the supplied number of responses
 * available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	rmb();
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}
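/*
 * Illustrative sketch, not driver code: the flit_desc_map[] table above
 * should agree with the formula quoted in its comment.  Assuming WR_FLITS
 * is 16 with one generation bit and 15 with two (as the row lengths of the
 * table suggest), a hypothetical self-check could look like this:
 */
#if 0
static void check_flit_desc_map(void)
{
	unsigned int n;

	for (n = 2; n < ARRAY_SIZE(flit_desc_map); n++)
		WARN_ON(flit_desc_map[n] != 1 + (n - 2) / (WR_FLITS - 1));
}
#endif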
/**
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping.  Because this is
 * a compile-time constant, the compiler optimizes away the unmapping code
 * when it returns false.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}

/**
 * unmap_skb - unmap a packet main body and its page fragments
 * @skb: the packet
 * @q: the Tx queue containing Tx descriptors for the packet
 * @cidx: index of Tx descriptor
 * @pdev: the PCI device
 *
 * Unmap the main body of an sk_buff and its page fragments, if any.
 * Because of the fairly complicated structure of our SGLs and the desire
 * to conserve space for metadata, the information necessary to unmap an
 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 * descriptors (the physical addresses of the various data buffers), and
 * the SW descriptor state (assorted indices).  The send functions
 * initialize the indices for the first packet descriptor so we can unmap
 * the buffers held in the first Tx descriptor here, and we have enough
 * information at this point to set the state for the next Tx descriptor.
 *
 * Note that it is possible to clean up the first descriptor of a packet
 * before the send routines have written the next descriptors, but this
 * race does not cause any problem.  We just end up writing the unmapping
 * info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct tx_sw_desc *d = &q->sdesc[cidx];
	int nfrags, frag_idx, curflit, j = d->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
	frag_idx = d->fragidx;

	if (frag_idx == 0 && skb_headlen(skb)) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		j = 1;
	}

	curflit = d->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {	/* SGL continues into next Tx descriptor */
		d = cidx + 1 == q->size ? q->sdesc : d + 1;
		d->fragidx = frag_idx;
		d->addr_idx = j;
		d->sflit = curflit - WR_FLITS - j;	/* sflit can be -1 */
	}
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adapter: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->eop) {
				dev_consume_skb_any(d->skb);
				d->skb = NULL;
			}
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @adapter: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 * @chunk: maximum number of descriptors to reclaim
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue's lock held.
 */
static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
						struct sge_txq *q,
						unsigned int chunk)
{
	unsigned int reclaim = q->processed - q->cleaned;

	reclaim = min(chunk, reclaim);
	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
	return q->processed - q->cleaned;
}

/**
 * should_restart_tx - are there enough resources to restart a Tx queue?
 * @q: the Tx queue
 *
 * Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
			  struct rx_sw_desc *d)
{
	if (q->use_pages && d->pg_chunk.page) {
		(*d->pg_chunk.p_cnt)--;
		if (!*d->pg_chunk.p_cnt)
			pci_unmap_page(pdev,
				       d->pg_chunk.mapping,
				       q->alloc_size, PCI_DMA_FROMDEVICE);

		put_page(d->pg_chunk.page);
		d->pg_chunk.page = NULL;
	} else {
		pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		kfree_skb(d->skb);
		d->skb = NULL;
	}
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @pdev: the PCI device associated with the adapter
 * @q: the SGE free list to clean up
 *
 * Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 * this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		clear_rx_desc(pdev, q, d);
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->pg_chunk.page) {
		__free_pages(q->pg_chunk.page, q->order);
		q->pg_chunk.page = NULL;
	}
}
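/*
 * Illustrative sketch, not driver code: should_restart_tx() above resumes
 * a suspended queue only once at least half of it would be free after
 * reclaiming everything the hardware has already processed.  With
 * hypothetical numbers:
 */
#if 0
static void example_restart_threshold(void)
{
	struct sge_txq q = {
		.size = 1024,
		.in_use = 600,
		.processed = 1200,
		.cleaned = 1000,	/* 200 descriptors are reclaimable */
	};

	/* 600 in use - 200 reclaimable = 400 < 512, so restart is allowed */
	WARN_ON(!should_restart_tx(&q));
}
#endif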
/**
 * add_one_rx_buf - add a packet buffer to a free-buffer list
 * @va: buffer start VA
 * @len: the buffer length
 * @d: the HW Rx descriptor to write
 * @sd: the SW Rx descriptor to write
 * @gen: the generation bit value
 * @pdev: the PCI device associated with the adapter
 *
 * Add a buffer of the given length to the supplied HW and SW Rx
 * descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
		return -ENOMEM;

	dma_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
				   unsigned int gen)
{
	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
			  struct rx_sw_desc *sd, gfp_t gfp,
			  unsigned int order)
{
	if (!q->pg_chunk.page) {
		dma_addr_t mapping;

		q->pg_chunk.page = alloc_pages(gfp, order);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
				    SGE_PG_RSVD;
		q->pg_chunk.offset = 0;
		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
			__free_pages(q->pg_chunk.page, order);
			q->pg_chunk.page = NULL;
			return -EIO;
		}
		q->pg_chunk.mapping = mapping;
	}
	sd->pg_chunk = q->pg_chunk;

	prefetch(sd->pg_chunk.p_cnt);

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == (PAGE_SIZE << order))
		q->pg_chunk.page = NULL;
	else {
		q->pg_chunk.va += q->buf_size;
		get_page(q->pg_chunk.page);
	}

	if (sd->pg_chunk.offset == 0)
		*sd->pg_chunk.p_cnt = 1;
	else
		*sd->pg_chunk.p_cnt += 1;

	return 0;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		wmb();
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}
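/*
 * Illustrative sketch, not driver code: the free-list producers below
 * (refill_fl() and recycle_rx_buf()) advance the producer index with the
 * same pattern, flipping the queue's generation bit on every wrap.  The
 * gen bits written into each descriptor let the hardware distinguish
 * descriptors of the current lap from stale ones of the previous lap.
 */
#if 0
static void fl_advance_pidx(struct sge_fl *q)
{
	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;	/* descriptors of the new lap carry the new gen */
	}
}
#endif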
/**
 * refill_fl - refill an SGE free-buffer list
 * @adap: the adapter
 * @q: the free-list to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for allocating new buffers
 *
 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		dma_addr_t mapping;
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
						    q->order))) {
nomem:				q->alloc_failed++;
				break;
			}
			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
			dma_unmap_addr_set(sd, dma_addr, mapping);

			add_one_rx_chunk(mapping, d, q->gen);
			pci_dma_sync_single_for_device(adap->pdev, mapping,
						q->buf_size - SGE_PG_RSVD,
						PCI_DMA_FROMDEVICE);
		} else {
			void *buf_start;

			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
			if (!skb)
				goto nomem;

			sd->skb = skb;
			buf_start = skb->data;
			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
					     q->gen, adap->pdev);
			if (unlikely(err)) {
				clear_rx_desc(adap->pdev, q, sd);
				break;
			}
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		count++;
	}

	q->credits += count;
	q->pend_cred += count;
	ring_fl_db(adap, q);

	return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
		  GFP_ATOMIC | __GFP_COMP);
}

/**
 * recycle_rx_buf - recycle a receive buffer
 * @adap: the adapter
 * @q: the SGE free list
 * @idx: index of buffer to recycle
 *
 * Recycles the specified buffer on the given free list by adding it at
 * the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	dma_wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}

	q->credits++;
	q->pend_cred++;
	ring_fl_db(adap, q);
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @pdev: the PCI device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the physical address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size && metadata) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
		*(void **)metadata = s;
	}
	return p;
}

/**
 * t3_reset_qset - reset a sge qset
 * @q: the queue set
 *
 * Reset the qset structure.  The NAPI structure is preserved in the
 * event of the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
	if (q->adap &&
	    !(q->adap->flags & NAPI_INIT)) {
		memset(q, 0, sizeof(*q));
		return;
	}

	q->adap = NULL;
	memset(&q->rspq, 0, sizeof(q->rspq));
	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
	q->txq_stopped = 0;
	q->tx_reclaim_timer.function = NULL;	/* for t3_stop_sge_timers() */
	q->rx_reclaim_timer.function = NULL;
	q->nomem = 0;
	napi_free_frags(&q->napi);
}

/**
 * t3_free_qset - free the resources of an SGE queue set
 * @adapter: the adapter owning the queue set
 * @q: the queue set
 *
 * Release the HW and SW resources associated with an SGE queue set, such
 * as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 * queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock_irq(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock_irq(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock_irq(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock_irq(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	t3_reset_qset(q);
}
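/*
 * Illustrative sketch, not driver code: init_qset_cntxt() below lays out
 * context ids deterministically from the qset id.  For a hypothetical
 * qset id of 2, the response queue gets context 2, the free lists get 4
 * and 5, and the Tx queues sit at fixed firmware-defined bases:
 */
#if 0
static void example_cntxt_layout(struct sge_qset *qs)
{
	init_qset_cntxt(qs, 2);
	WARN_ON(qs->fl[0].cntxt_id != 4 || qs->fl[1].cntxt_id != 5);
	WARN_ON(qs->txq[TXQ_ETH].cntxt_id != FW_TUNNEL_SGEEC_START + 2);
}
#endif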
/**
 * init_qset_cntxt - initialize an SGE queue set context info
 * @qs: the queue set
 * @id: the queue set id
 *
 * Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Calculates the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}
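/*
 * Illustrative sketch, not driver code: each struct sg_ent packs two
 * address/length pairs into 3 flits, which is where sgl_len() comes from.
 * A few worked values:
 */
#if 0
static void example_sgl_sizing(void)
{
	WARN_ON(sgl_len(1) != 2);	/* half an sg_ent: 1 len flit + 1 addr */
	WARN_ON(sgl_len(2) != 3);	/* one full sg_ent */
	WARN_ON(sgl_len(5) != 8);	/* two and a half sg_ents */
	/* 11 flits still fit in a single descriptor in either genbit config */
	WARN_ON(flits_to_desc(11) != 1);
}
#endif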
/**
 * get_packet - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list and complete setup of the
 * sk_buff.  If the packet is small we make a copy and recycle the
 * original buffer, otherwise we use the original buffer itself.  If a
 * positive drop threshold is supplied packets are dropped and their
 * buffers recycled if (a) the number of remaining buffers is under the
 * threshold and the packet is too big to copy, or (b) the packet should
 * be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);
	fl->credits--;

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres) &&
	    refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
		      GFP_ATOMIC | __GFP_COMP) == 0)
		goto recycle;

use_orig_buf:
	pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

/**
 * get_packet_pg - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @q: the response queue servicing the packet
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list populated with page chunks.
 * If the packet is small we make a copy and recycle the original buffer,
 * otherwise we attach the original buffer as a page fragment to a fresh
 * sk_buff.  If a positive drop threshold is supplied packets are dropped
 * and their buffers recycled if (a) the number of remaining buffers is
 * under the threshold and the packet is too big to copy, or (b) there's
 * no system memory.
 *
 * Note: this function is similar to get_packet() but deals with Rx
 * buffers that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
				     struct sge_rspq *q, unsigned int len,
				     unsigned int drop_thres)
{
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);

	newskb = skb = q->pg_skb;
	if (!skb && (len <= SGE_RX_COPY_THRES)) {
		newskb = alloc_skb(len, GFP_ATOMIC);
		if (likely(newskb != NULL)) {
			__skb_put(newskb, len);
			pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
					    PCI_DMA_FROMDEVICE);
			memcpy(newskb->data, sd->pg_chunk.va, len);
			pci_dma_sync_single_for_device(adap->pdev, dma_addr,
					    len,
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			return NULL;
recycle:
		fl->credits--;
		recycle_rx_buf(adap, fl, fl->cidx);
		q->rx_recycle_buf++;
		return newskb;
	}

	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
		goto recycle;

	prefetch(sd->pg_chunk.p_cnt);

	if (!skb)
		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);

	if (unlikely(!newskb)) {
		if (!drop_thres)
			return NULL;
		goto recycle;
	}

	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
				    PCI_DMA_FROMDEVICE);
	(*sd->pg_chunk.p_cnt)--;
	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
		pci_unmap_page(adap->pdev,
			       sd->pg_chunk.mapping,
			       fl->alloc_size,
			       PCI_DMA_FROMDEVICE);
	if (!skb) {
		__skb_put(newskb, SGE_RX_PULL_LEN);
		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
				   len - SGE_RX_PULL_LEN);
		newskb->len = len;
		newskb->data_len = len - SGE_RX_PULL_LEN;
		newskb->truesize += newskb->data_len;
	} else {
		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
				   sd->pg_chunk.page,
				   sd->pg_chunk.offset, len);
		newskb->len += len;
		newskb->data_len += len;
		newskb->truesize += len;
	}

	fl->credits--;
	/*
	 * We do not refill FLs here, we let the caller do it to overlap a
	 * prefetch.
	 */
	return newskb;
}

/**
 * get_imm_packet - return the next ingress packet buffer from a response
 * @resp: the response descriptor containing the packet data
 *
 * Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}

/* map_skb - map a packet main body and its page fragments
 * @pdev: the PCI device
 * @skb: the packet
 * @addr: placeholder to save the mapped addresses
 *
 * map the main body of an sk_buff and its page fragments, if any.
 */
static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	if (skb_headlen(skb)) {
		*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
				       PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, *addr))
			goto out_err;
		addr++;
	}

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
					 DMA_TO_DEVICE);
		if (pci_dma_mapping_error(pdev, *addr))
			goto unwind;
		addr++;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
			       DMA_TO_DEVICE);

	pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
out_err:
	return -ENOMEM;
}

/**
 * write_sgl - populate a scatter/gather list for a packet
 * @skb: the packet
 * @sgp: the SGL to populate
 * @start: start address of skb main body data to include in the SGL
 * @len: length of skb main body data to include in the SGL
 * @addr: the list of the mapped addresses
 *
 * Copies the scatter/gather list for the buffers that make up a packet
 * and returns the SGL size in 8-byte words.  The caller must size the SGL
 * appropriately.
 */
static inline unsigned int write_sgl(const struct sk_buff *skb,
				     struct sg_ent *sgp, unsigned char *start,
				     unsigned int len, const dma_addr_t *addr)
{
	unsigned int i, j = 0, k = 0, nfrags;

	if (len) {
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[j++] = cpu_to_be64(addr[k++]);
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
		sgp->addr[j] = cpu_to_be64(addr[k++]);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}
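/*
 * Illustrative sketch, not driver code: write_sgl() above toggles j
 * between the two slots of an sg_ent and advances to the next sg_ent
 * after filling the second slot.  A packet with a linear head and two
 * page fragments therefore fills sg_ent[0] and half of sg_ent[1], and
 * write_sgl() returns (3 * 3) / 2 + 1 = 5 flits, matching sgl_len(3).
 */
#if 0
static unsigned int example_sgl_packing(const struct sk_buff *skb,
					const dma_addr_t *addr)
{
	struct sg_ent sgl[MAX_SKB_FRAGS / 2 + 1];

	/* assumes skb_headlen(skb) > 0 and exactly two fragments */
	return write_sgl(skb, sgl, skb->data, skb_headlen(skb), addr);
}
#endif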
/**
 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 *
 * Ring the doorbell if a Tx queue is asleep.  There is a natural race
 * where the HW may go to sleep just after our check; in that case the
 * interrupt handler will detect the outstanding Tx packet and ring the
 * doorbell for us.
 *
 * When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}

/**
 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
 * @ndesc: number of Tx descriptors spanned by the SGL
 * @skb: the packet corresponding to the WR
 * @d: first Tx descriptor to be written
 * @pidx: index of above descriptors
 * @q: the SGE Tx queue
 * @sgl: the SGL
 * @flits: number of flits to the start of the SGL in the first descriptor
 * @sgl_flits: the SGL size in flits
 * @gen: the Tx descriptor generation
 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 * Write a work request header and an associated SGL.  If the SGL is
 * small enough to fit into one Tx descriptor it has already been written
 * and we just need to write the WR header.  Otherwise we distribute the
 * SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
			     struct tx_desc *d, unsigned int pidx,
			     const struct sge_txq *q,
			     const struct sg_ent *sgl,
			     unsigned int flits, unsigned int sgl_flits,
			     unsigned int gen, __be32 wr_hi,
			     __be32 wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
	struct tx_sw_desc *sd = &q->sdesc[pidx];

	sd->skb = skb;
	if (need_skb_unmap()) {
		sd->fragidx = 0;
		sd->addr_idx = 0;
		sd->sflit = flits;
	}

	if (likely(ndesc == 1)) {
		sd->eop = 1;
		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;
		dma_wmb();
		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
				   V_WR_GEN(gen)) | wr_lo;
		wr_gen2(d, gen);
	} else {
		unsigned int ogen = gen;
		const u64 *fp = (const u64 *)sgl;
		struct work_request_hdr *wp = wrp;

		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			ndesc--;
			if (!sgl_flits)
				break;

			fp += avail;
			d++;
			sd->eop = 0;
			sd++;
			if (++pidx == q->size) {
				pidx = 0;
				gen ^= 1;
				d = q->desc;
				sd = q->sdesc;
			}

			sd->skb = skb;
			wrp = (struct work_request_hdr *)d;
			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
					   V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
							sgl_flits + 1)) |
					   V_WR_GEN(gen)) | wr_lo;
			wr_gen2(d, gen);
			flits = 1;
		}
		sd->eop = 1;
		wrp->wr_hi |= htonl(F_WR_EOP);
		dma_wmb();
		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
		WARN_ON(ndesc != 0);
	}
}
/**
 * write_tx_pkt_wr - write a TX_PKT work request
 * @adap: the adapter
 * @skb: the packet to send
 * @pi: the egress interface
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @q: the Tx queue
 * @ndesc: number of descriptors the packet will occupy
 * @compl: the value of the COMPL bit to use
 *
 * Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
			    const struct port_info *pi,
			    unsigned int pidx, unsigned int gen,
			    struct sge_txq *q, unsigned int ndesc,
			    unsigned int compl, const dma_addr_t *addr)
{
	unsigned int flits, sgl_flits, cntrl, tso_info;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];
	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

	cpl->len = htonl(skb->len);
	cntrl = V_TXPKT_INTF(pi->port_id);

	if (skb_vlan_tag_present(skb))
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));

	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
	if (tso_info) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

		d->flit[2] = 0;
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
		hdr->cntrl = htonl(cntrl);
		eth_type = skb_network_offset(skb) == ETH_HLEN ?
		    CPL_ETH_II : CPL_ETH_II_VLAN;
		tso_info |= V_LSO_ETH_TYPE(eth_type) |
		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
		hdr->lso_info = htonl(tso_info);
		flits = 3;
	} else {
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
		cpl->cntrl = htonl(cntrl);

		if (skb->len <= WR_LEN - sizeof(*cpl)) {
			q->sdesc[pidx].skb = NULL;
			if (!skb->data_len)
				skb_copy_from_linear_data(skb, &d->flit[2],
							  skb->len);
			else
				skb_copy_bits(skb, 0, &d->flit[2], skb->len);

			flits = (skb->len + 7) / 8 + 2;
			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
					      | F_WR_SOP | F_WR_EOP | compl);
			dma_wmb();
			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
					      V_WR_TID(q->token));
			wr_gen2(d, gen);
			dev_consume_skb_any(skb);
			return;
		}

		flits = 2;
	}

	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
			 htonl(V_WR_TID(q->token)));
}

static inline void t3_stop_tx_queue(struct netdev_queue *txq,
				    struct sge_qset *qs, struct sge_txq *q)
{
	netif_tx_stop_queue(txq);
	set_bit(TXQ_ETH, &qs->txq_stopped);
	q->stops++;
}
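/*
 * Illustrative sketch, not driver code: write_tx_pkt_wr() above inlines
 * small packets into the descriptor itself instead of building an SGL.
 * A packet qualifies as immediate data when the CPL header plus payload
 * fit in a single work request:
 */
#if 0
static bool example_eth_fits_immediate(const struct sk_buff *skb)
{
	/* WR_LEN bytes per WR, minus the CPL_TX_PKT header */
	return skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt);
}
#endif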
/**
 * t3_eth_xmit - add a packet to the Ethernet Tx queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int qidx;
	unsigned int ndesc, pidx, credits, gen, compl;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct netdev_queue *txq;
	struct sge_qset *qs;
	struct sge_txq *q;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];

	/*
	 * The chip min packet length is 9 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	qidx = skb_get_queue_mapping(skb);
	qs = &pi->qs[qidx];
	q = &qs->txq[TXQ_ETH];
	txq = netdev_get_tx_queue(dev, qidx);

	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	credits = q->size - q->in_use;
	ndesc = calc_tx_descs(skb);

	if (unlikely(credits < ndesc)) {
		t3_stop_tx_queue(txq, qs, q);
		dev_err(&adap->pdev->dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, q->cntxt_id & 7);
		return NETDEV_TX_BUSY;
	}

	/* Check if ethernet packet can't be sent as immediate data */
	if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) {
		if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	q->in_use += ndesc;
	if (unlikely(credits - ndesc < q->stop_thres)) {
		t3_stop_tx_queue(txq, qs, q);

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
			q->restarts++;
			netif_tx_start_queue(txq);
		}
	}

	gen = q->gen;
	q->unacked += ndesc;
	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
	q->unacked &= 7;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}

	/* update port statistics */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
	if (skb_shinfo(skb)->gso_size)
		qs->port_stats[SGE_PSTAT_TSO]++;
	if (skb_vlan_tag_present(skb))
		qs->port_stats[SGE_PSTAT_VLANINS]++;

	/*
	 * We do not use Tx completion interrupts to free DMAd Tx packets.
	 * This is good for performance but means that we rely on new Tx
	 * packets arriving to run the destructors of completed packets,
	 * which open up space in their sockets' send queues.  Sometimes
	 * we do not get such new packets causing Tx to stall.  A single
	 * UDP transmitter is a good example of this situation.  We have
	 * a clean up timer that periodically reclaims completed packets
	 * but it doesn't run often enough (nor do we want it to) to prevent
	 * lengthy stalls.  A solution to this problem is to run the
	 * destructor early, after the packet is queued but before it's DMAd.
	 * A con is that we lie to socket memory accounting, but the amount
	 * of extra memory is reasonable (limited by the number of Tx
	 * descriptors), the packets do actually get freed quickly by new
	 * packets almost always, and for protocols like TCP that wait for
	 * acks to really free up the data the extra memory is even less.
	 * On the positive side we run the destructors on the sending CPU
	 * rather than on a potentially different completing CPU, usually a
	 * good thing.  We also run them without holding our Tx queue lock,
	 * unlike what reclaim_completed_tx() would otherwise do.
	 *
	 * Run the destructor before telling the DMA engine about the packet
	 * to make sure it doesn't complete and get freed prematurely.
	 */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
	check_ring_tx_db(adap, q);
	return NETDEV_TX_OK;
}

/**
 * write_imm - write a packet into a Tx descriptor as immediate data
 * @d: the Tx descriptor to write
 * @skb: the packet
 * @len: the length of packet data to write as immediate data
 * @gen: the generation bit value to write
 *
 * Writes a packet as immediate data into a Tx descriptor.  The packet
 * contains a work request at its beginning.  We must write the packet
 * carefully so the SGE doesn't read it accidentally before it's written
 * in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
			     unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
	struct work_request_hdr *to = (struct work_request_hdr *)d;

	if (likely(!skb->data_len))
		memcpy(&to[1], &from[1], len - sizeof(*from));
	else
		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));

	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
					V_WR_BCNTLFLT(len & 7));
	dma_wmb();
	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
					V_WR_LEN((len + 7) / 8));
	wr_gen2(d, gen);
	kfree_skb(skb);
}

/**
 * check_desc_avail - check descriptor availability on a send queue
 * @adap: the adapter
 * @q: the send queue
 * @skb: the packet needing the descriptors
 * @ndesc: the number of Tx descriptors needed
 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 * Checks if the requested number of Tx descriptors is available on an
 * SGE send queue.  If the queue is already suspended or not enough
 * descriptors are available the packet is queued for later transmission.
 * Must be called with the Tx queue locked.
 *
 * Returns 0 if enough descriptors are available, 1 if there aren't
 * enough descriptors and the packet has been queued, and 2 if the caller
 * needs to retry because there weren't enough descriptors at the
 * beginning of the call but some freed up in the mean time.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	if (unlikely(!skb_queue_empty(&q->sendq))) {
addq_exit:
		__skb_queue_tail(&q->sendq, skb);
		return 1;
	}
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_atomic();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		q->stops++;
		goto addq_exit;
	}
	return 0;
}

/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
 *
 * This is a variant of reclaim_completed_tx() that is used for Tx queues
 * that send only immediate data (presently just the control queues) and
 * thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

static inline int immediate(const struct sk_buff *skb)
{
	return skb->len <= WR_LEN;
}

/**
 * ctrl_xmit - send a packet through an SGE control Tx queue
 * @adap: the adapter
 * @q: the control queue
 * @skb: the packet
 *
 * Send a packet through an SGE control Tx queue.  Packets sent through
 * a control queue must fit entirely as immediate data in a single Tx
 * descriptor and have no page fragments.
 */
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;

	if (unlikely(!immediate(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
	wrp->wr_lo = htonl(V_WR_TID(q->token));

	spin_lock(&q->lock);
again:
	reclaim_completed_tx_imm(q);

	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
	if (unlikely(ret)) {
		if (ret == 1) {
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

	q->in_use++;
	if (++q->pidx >= q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	return NET_XMIT_SUCCESS;
}

/**
 * restart_ctrlq - restart a suspended control queue
 * @data: the queue set containing the control queue
 *
 * Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_CTRL];

	spin_lock(&q->lock);
again:
	reclaim_completed_tx_imm(q);

	while (q->in_use < q->size &&
	       (skb = __skb_dequeue(&q->sendq)) != NULL) {

		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

		if (++q->pidx >= q->size) {
			q->pidx = 0;
			q->gen ^= 1;
		}
		q->in_use++;
	}

	if (!skb_queue_empty(&q->sendq)) {
		set_bit(TXQ_CTRL, &qs->txq_stopped);
		smp_mb__after_atomic();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
			goto again;
		q->stops++;
	}

	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(qs->adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/*
 * Send a management message through control queue 0
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
	local_bh_enable();

	return ret;
}
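/*
 * Illustrative sketch, not driver code: a t3_mgmt_tx() caller hands in an
 * skb that already starts with a work_request_hdr; ctrl_xmit() then fills
 * in the SOP/EOP flags and the queue token.  The opcode below is chosen
 * purely for illustration:
 */
#if 0
static int example_send_mgmt_wr(struct adapter *adap)
{
	struct work_request_hdr *wrp;
	struct sk_buff *skb = alloc_skb(sizeof(*wrp), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	wrp = (struct work_request_hdr *)__skb_put(skb, sizeof(*wrp));
	memset(wrp, 0, sizeof(*wrp));
	wrp->wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));	/* hypothetical */
	return t3_mgmt_tx(adap, skb);
}
#endif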
/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 * @skb: the packet
 *
 * This is the packet destructor used for Tx packets that need to remain
 * mapped until they are freed rather than until their Tx descriptors are
 * freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	int i;
	const dma_addr_t *p;
	const struct skb_shared_info *si;
	const struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	p = dui->addr;

	if (skb_tail_pointer(skb) - skb_transport_header(skb))
		pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
				 skb_transport_header(skb), PCI_DMA_TODEVICE);

	si = skb_shinfo(skb);
	for (i = 0; i < si->nr_frags; i++)
		pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
			       PCI_DMA_TODEVICE);
}

static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
				     const struct sg_ent *sgl, int sgl_flits)
{
	dma_addr_t *p;
	struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	dui->pdev = pdev;
	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
		*p++ = be64_to_cpu(sgl->addr[0]);
		*p++ = be64_to_cpu(sgl->addr[1]);
	}
	if (sgl_flits)
		*p = be64_to_cpu(sgl->addr[0]);
}

/**
 * write_ofld_wr - write an offload work request
 * @adap: the adapter
 * @skb: the packet to send
 * @q: the Tx queue
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @ndesc: number of descriptors the packet will occupy
 *
 * Write an offload work request to send the supplied packet.  The packet
 * data already carry the work request with most fields populated.
 */
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
			  struct sge_txq *q, unsigned int pidx,
			  unsigned int gen, unsigned int ndesc,
			  const dma_addr_t *addr)
{
	unsigned int sgl_flits, flits;
	struct work_request_hdr *from;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];

	if (immediate(skb)) {
		q->sdesc[pidx].skb = NULL;
		write_imm(d, skb, skb->len, gen);
		return;
	}

	/* Only TX_DATA builds SGLs */

	from = (struct work_request_hdr *)skb->data;
	memcpy(&d->flit[1], &from[1],
	       skb_transport_offset(skb) - sizeof(*from));

	flits = skb_transport_offset(skb) / 8;
	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
			      skb_tail_pointer(skb) - skb_transport_header(skb),
			      addr);
	if (need_skb_unmap()) {
		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
		skb->destructor = deferred_unmap_destructor;
	}

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
			 gen, from->wr_hi, from->wr_lo);
}

/**
 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given offload
 * packet.  These packets are already fully constructed.
 */
static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (skb->len <= WR_LEN)
		return 1;	/* packet fits as immediate data */

	flits = skb_transport_offset(skb) / 8;	/* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits_to_desc(flits + sgl_len(cnt));
}

/**
 * ofld_xmit - send a packet through an offload queue
 * @adap: the adapter
 * @q: the Tx offload queue
 * @skb: the packet
 *
 * Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;

	spin_lock(&q->lock);
again:
	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
	if (unlikely(ret)) {
		if (ret == 1) {
			skb->priority = ndesc;	/* save for restart */
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	if (!immediate(skb) &&
	    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
		spin_unlock(&q->lock);
		return NET_XMIT_SUCCESS;
	}

	gen = q->gen;
	q->in_use += ndesc;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);

	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
	check_ring_tx_db(adap, q);
	return NET_XMIT_SUCCESS;
}
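/*
 * Illustrative sketch, not driver code: for a hypothetical offload packet
 * with 40 bytes of headers (5 flits), a non-empty linear tail and three
 * page fragments, calc_tx_descs_ofld() counts 5 + sgl_len(4) = 11 flits,
 * which still fits in a single Tx descriptor:
 */
#if 0
static void example_ofld_desc_count(void)
{
	/* 5 header flits + 6 SGL flits for 4 entries -> 11 flits -> 1 desc */
	WARN_ON(flits_to_desc(5 + sgl_len(4)) != 1);
}
#endif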
/**
 * restart_offloadq - restart a suspended offload queue
 * @data: the queue set containing the offload queue
 *
 * Resumes transmission on a suspended Tx offload queue.
 */
static void restart_offloadq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_OFLD];
	const struct port_info *pi = netdev_priv(qs->netdev);
	struct adapter *adap = pi->adapter;
	unsigned int written = 0;

	spin_lock(&q->lock);
again:
	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	while ((skb = skb_peek(&q->sendq)) != NULL) {
		unsigned int gen, pidx;
		unsigned int ndesc = skb->priority;

		if (unlikely(q->size - q->in_use < ndesc)) {
			set_bit(TXQ_OFLD, &qs->txq_stopped);
			smp_mb__after_atomic();

			if (should_restart_tx(q) &&
			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
				goto again;
			q->stops++;
			break;
		}

		if (!immediate(skb) &&
		    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
			break;

		gen = q->gen;
		q->in_use += ndesc;
		pidx = q->pidx;
		q->pidx += ndesc;
		written += ndesc;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->gen ^= 1;
		}
		__skb_unlink(skb, &q->sendq);
		spin_unlock(&q->lock);

		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
			      (dma_addr_t *)skb->head);
		spin_lock(&q->lock);
	}
	spin_unlock(&q->lock);

#if USE_GTS
	set_bit(TXQ_RUNNING, &q->flags);
	set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
	wmb();
	if (likely(written))
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/**
 * queue_set - return the queue set a packet should use
 * @skb: the packet
 *
 * Maps a packet to the SGE queue set it should use.  The desired queue
 * set is carried in bits 1-3 in the packet's priority.
 */
static inline int queue_set(const struct sk_buff *skb)
{
	return skb->priority >> 1;
}

/**
 * is_ctrl_pkt - return whether an offload packet is a control packet
 * @skb: the packet
 *
 * Determines whether an offload packet should use an OFLD or a CTRL
 * Tx queue.  This is indicated by bit 0 in the packet's priority.
 */
static inline int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->priority & 1;
}

/**
 * t3_offload_tx - send an offload packet
 * @tdev: the offload device to send to
 * @skb: the packet
 *
 * Sends an offload packet.  We use the packet priority to select the
 * appropriate Tx queue as follows: bit 0 indicates whether the packet
 * should be sent as regular or control, bits 1-3 select the queue set.
 */
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct adapter *adap = tdev2adap(tdev);
	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];

	if (unlikely(is_ctrl_pkt(skb)))
		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);

	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
}
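/*
 * Illustrative sketch, not driver code: an offload sender encodes its
 * queue choice in skb->priority exactly as queue_set() and is_ctrl_pkt()
 * above decode it: bit 0 selects CTRL vs OFLD, bits 1-3 the queue set.
 */
#if 0
static void example_set_offload_queue(struct sk_buff *skb,
				      unsigned int qset, bool ctrl)
{
	skb->priority = (qset << 1) | ctrl;
}
#endif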
1851  */
1852 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1853 {
1854 	int was_empty = skb_queue_empty(&q->rx_queue);
1855 
1856 	__skb_queue_tail(&q->rx_queue, skb);
1857 
1858 	if (was_empty) {
1859 		struct sge_qset *qs = rspq_to_qset(q);
1860 
1861 		napi_schedule(&qs->napi);
1862 	}
1863 }
1864 
1865 /**
1866  * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1867  * @tdev: the offload device that will be receiving the packets
1868  * @q: the SGE response queue that assembled the bundle
1869  * @skbs: the partial bundle
1870  * @n: the number of packets in the bundle
1871  *
1872  * Delivers a (partial) bundle of Rx offload packets to an offload device.
1873  */
1874 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1875 					  struct sge_rspq *q,
1876 					  struct sk_buff *skbs[], int n)
1877 {
1878 	if (n) {
1879 		q->offload_bundles++;
1880 		tdev->recv(tdev, skbs, n);
1881 	}
1882 }
1883 
1884 /**
1885  * ofld_poll - NAPI handler for offload packets in interrupt mode
1886  * @napi: the napi instance doing the polling
1887  * @budget: polling budget
1888  *
1889  * The NAPI handler for offload packets when a response queue is serviced
1890  * by the hard interrupt handler, i.e., when it's operating in non-polling
1891  * mode. Creates small packet batches and sends them through the offload
1892  * receive handler. Batches need to be of modest size as we do prefetches
1893  * on the packets in each.
1894  */
1895 static int ofld_poll(struct napi_struct *napi, int budget)
1896 {
1897 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1898 	struct sge_rspq *q = &qs->rspq;
1899 	struct adapter *adapter = qs->adap;
1900 	int work_done = 0;
1901 
1902 	while (work_done < budget) {
1903 		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1904 		struct sk_buff_head queue;
1905 		int ngathered;
1906 
1907 		spin_lock_irq(&q->lock);
1908 		__skb_queue_head_init(&queue);
1909 		skb_queue_splice_init(&q->rx_queue, &queue);
1910 		if (skb_queue_empty(&queue)) {
1911 			napi_complete_done(napi, work_done);
1912 			spin_unlock_irq(&q->lock);
1913 			return work_done;
1914 		}
1915 		spin_unlock_irq(&q->lock);
1916 
1917 		ngathered = 0;
1918 		skb_queue_walk_safe(&queue, skb, tmp) {
1919 			if (work_done >= budget)
1920 				break;
1921 			work_done++;
1922 
1923 			__skb_unlink(skb, &queue);
1924 			prefetch(skb->data);
1925 			skbs[ngathered] = skb;
1926 			if (++ngathered == RX_BUNDLE_SIZE) {
1927 				q->offload_bundles++;
1928 				adapter->tdev.recv(&adapter->tdev, skbs,
1929 						   ngathered);
1930 				ngathered = 0;
1931 			}
1932 		}
1933 		if (!skb_queue_empty(&queue)) {
1934 			/* splice remaining packets back onto Rx queue */
1935 			spin_lock_irq(&q->lock);
1936 			skb_queue_splice(&queue, &q->rx_queue);
1937 			spin_unlock_irq(&q->lock);
1938 		}
1939 		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1940 	}
1941 
1942 	return work_done;
1943 }
1944 
1945 /**
1946  * rx_offload - process a received offload packet
1947  * @tdev: the offload device receiving the packet
1948  * @rq: the response queue that received the packet
1949  * @skb: the packet
1950  * @rx_gather: a gather list of packets if we are building a bundle
1951  * @gather_idx: index of the next available slot in the bundle
1952  *
1953  * Process an ingress offload packet and add it to the offload ingress
1954  * queue. Returns the index of the next available slot in the bundle.
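 *
 * In polling (NAPI) mode the packet is appended to the caller's
 * rx_gather[] array and the bundle is flushed to tdev->recv() once it
 * reaches RX_BUNDLE_SIZE packets; in interrupt mode the packet is instead
 * queued for later delivery by ofld_poll().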
1955  */
1956 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1957 			     struct sk_buff *skb, struct sk_buff *rx_gather[],
1958 			     unsigned int gather_idx)
1959 {
1960 	skb_reset_mac_header(skb);
1961 	skb_reset_network_header(skb);
1962 	skb_reset_transport_header(skb);
1963 
1964 	if (rq->polling) {
1965 		rx_gather[gather_idx++] = skb;
1966 		if (gather_idx == RX_BUNDLE_SIZE) {
1967 			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1968 			gather_idx = 0;
1969 			rq->offload_bundles++;
1970 		}
1971 	} else
1972 		offload_enqueue(rq, skb);
1973 
1974 	return gather_idx;
1975 }
1976 
1977 /**
1978  * restart_tx - check whether to restart suspended Tx queues
1979  * @qs: the queue set to resume
1980  *
1981  * Restarts suspended Tx queues of an SGE queue set if they have enough
1982  * free resources to resume operation.
1983  */
1984 static void restart_tx(struct sge_qset *qs)
1985 {
1986 	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1987 	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
1988 	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1989 		qs->txq[TXQ_ETH].restarts++;
1990 		if (netif_running(qs->netdev))
1991 			netif_tx_wake_queue(qs->tx_q);
1992 	}
1993 
1994 	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1995 	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1996 	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1997 		qs->txq[TXQ_OFLD].restarts++;
1998 		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1999 	}
2000 	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
2001 	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2002 	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2003 		qs->txq[TXQ_CTRL].restarts++;
2004 		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
2005 	}
2006 }
2007 
2008 /**
2009  * cxgb3_arp_process - process an ARP request probing a private IP address
2010  * @pi: the port info of the interface that received the ARP request
2011  * @skb: the skbuff containing the ARP request
2012  *
2013  * Check if the ARP request is probing the private IP address
2014  * dedicated to iSCSI, generate an ARP reply if so.
2015  */
2016 static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
2017 {
2018 	struct net_device *dev = skb->dev;
2019 	struct arphdr *arp;
2020 	unsigned char *arp_ptr;
2021 	unsigned char *sha;
2022 	__be32 sip, tip;
2023 
2024 	if (!dev)
2025 		return;
2026 
2027 	skb_reset_network_header(skb);
2028 	arp = arp_hdr(skb);
2029 
2030 	if (arp->ar_op != htons(ARPOP_REQUEST))
2031 		return;
2032 
2033 	arp_ptr = (unsigned char *)(arp + 1);
2034 	sha = arp_ptr;
2035 	arp_ptr += dev->addr_len;
2036 	memcpy(&sip, arp_ptr, sizeof(sip));
2037 	arp_ptr += sizeof(sip);
2038 	arp_ptr += dev->addr_len;
2039 	memcpy(&tip, arp_ptr, sizeof(tip));
2040 
2041 	if (tip != pi->iscsi_ipv4addr)
2042 		return;
2043 
2044 	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
2045 		 pi->iscsic.mac_addr, sha);
2046 
2047 }
2048 
2049 static inline int is_arp(struct sk_buff *skb)
2050 {
2051 	return skb->protocol == htons(ETH_P_ARP);
2052 }
2053 
2054 static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
2055 					  struct sk_buff *skb)
2056 {
2057 	if (is_arp(skb)) {
2058 		cxgb3_arp_process(pi, skb);
2059 		return;
2060 	}
2061 
2062 	if (pi->iscsic.recv)
2063 		pi->iscsic.recv(pi, skb);
2064 
2065 }
2066 
2067 /**
2068  * rx_eth - process an ingress ethernet packet
2069  * @adap: the adapter
2070  * @rq: the response queue that received the packet
2071  * @skb: the packet
2072  * @pad: amount of padding at the start of the buffer
 * @lro: whether to hand the packet to GRO (set when the netdev has
 *	NETIF_F_GRO enabled)
2073  *
2074  * Process an ingress ethernet packet and deliver it to the stack.
2075  * The padding is 2 if the packet was delivered in an Rx buffer and 0
2076  * if it was immediate data in a response.
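 *
 * Checksums the hardware verified in full on unfragmented packets
 * (csum_valid set and csum equal to 0xffff) are reported as
 * CHECKSUM_UNNECESSARY, and VLAN tags the hardware extracted are handed
 * to the stack via __vlan_hwaccel_put_tag().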
2077 */ 2078 static void rx_eth(struct adapter *adap, struct sge_rspq *rq, 2079 struct sk_buff *skb, int pad, int lro) 2080 { 2081 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad); 2082 struct sge_qset *qs = rspq_to_qset(rq); 2083 struct port_info *pi; 2084 2085 skb_pull(skb, sizeof(*p) + pad); 2086 skb->protocol = eth_type_trans(skb, adap->port[p->iff]); 2087 pi = netdev_priv(skb->dev); 2088 if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid && 2089 p->csum == htons(0xffff) && !p->fragment) { 2090 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; 2091 skb->ip_summed = CHECKSUM_UNNECESSARY; 2092 } else 2093 skb_checksum_none_assert(skb); 2094 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); 2095 2096 if (p->vlan_valid) { 2097 qs->port_stats[SGE_PSTAT_VLANEX]++; 2098 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan)); 2099 } 2100 if (rq->polling) { 2101 if (lro) 2102 napi_gro_receive(&qs->napi, skb); 2103 else { 2104 if (unlikely(pi->iscsic.flags)) 2105 cxgb3_process_iscsi_prov_pack(pi, skb); 2106 netif_receive_skb(skb); 2107 } 2108 } else 2109 netif_rx(skb); 2110 } 2111 2112 static inline int is_eth_tcp(u32 rss) 2113 { 2114 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE; 2115 } 2116 2117 /** 2118 * lro_add_page - add a page chunk to an LRO session 2119 * @adap: the adapter 2120 * @qs: the associated queue set 2121 * @fl: the free list containing the page chunk to add 2122 * @len: packet length 2123 * @complete: Indicates the last fragment of a frame 2124 * 2125 * Add a received packet contained in a page chunk to an existing LRO 2126 * session. 2127 */ 2128 static void lro_add_page(struct adapter *adap, struct sge_qset *qs, 2129 struct sge_fl *fl, int len, int complete) 2130 { 2131 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2132 struct port_info *pi = netdev_priv(qs->netdev); 2133 struct sk_buff *skb = NULL; 2134 struct cpl_rx_pkt *cpl; 2135 struct skb_frag_struct *rx_frag; 2136 int nr_frags; 2137 int offset = 0; 2138 2139 if (!qs->nomem) { 2140 skb = napi_get_frags(&qs->napi); 2141 qs->nomem = !skb; 2142 } 2143 2144 fl->credits--; 2145 2146 pci_dma_sync_single_for_cpu(adap->pdev, 2147 dma_unmap_addr(sd, dma_addr), 2148 fl->buf_size - SGE_PG_RSVD, 2149 PCI_DMA_FROMDEVICE); 2150 2151 (*sd->pg_chunk.p_cnt)--; 2152 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) 2153 pci_unmap_page(adap->pdev, 2154 sd->pg_chunk.mapping, 2155 fl->alloc_size, 2156 PCI_DMA_FROMDEVICE); 2157 2158 if (!skb) { 2159 put_page(sd->pg_chunk.page); 2160 if (complete) 2161 qs->nomem = 0; 2162 return; 2163 } 2164 2165 rx_frag = skb_shinfo(skb)->frags; 2166 nr_frags = skb_shinfo(skb)->nr_frags; 2167 2168 if (!nr_frags) { 2169 offset = 2 + sizeof(struct cpl_rx_pkt); 2170 cpl = qs->lro_va = sd->pg_chunk.va + 2; 2171 2172 if ((qs->netdev->features & NETIF_F_RXCSUM) && 2173 cpl->csum_valid && cpl->csum == htons(0xffff)) { 2174 skb->ip_summed = CHECKSUM_UNNECESSARY; 2175 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; 2176 } else 2177 skb->ip_summed = CHECKSUM_NONE; 2178 } else 2179 cpl = qs->lro_va; 2180 2181 len -= offset; 2182 2183 rx_frag += nr_frags; 2184 __skb_frag_set_page(rx_frag, sd->pg_chunk.page); 2185 rx_frag->page_offset = sd->pg_chunk.offset + offset; 2186 skb_frag_size_set(rx_frag, len); 2187 2188 skb->len += len; 2189 skb->data_len += len; 2190 skb->truesize += len; 2191 skb_shinfo(skb)->nr_frags++; 2192 2193 if (!complete) 2194 return; 2195 2196 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); 2197 2198 if (cpl->vlan_valid) { 2199 
qs->port_stats[SGE_PSTAT_VLANEX]++; 2200 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan)); 2201 } 2202 napi_gro_frags(&qs->napi); 2203 } 2204 2205 /** 2206 * handle_rsp_cntrl_info - handles control information in a response 2207 * @qs: the queue set corresponding to the response 2208 * @flags: the response control flags 2209 * 2210 * Handles the control information of an SGE response, such as GTS 2211 * indications and completion credits for the queue set's Tx queues. 2212 * HW coalesces credits, we don't do any extra SW coalescing. 2213 */ 2214 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags) 2215 { 2216 unsigned int credits; 2217 2218 #if USE_GTS 2219 if (flags & F_RSPD_TXQ0_GTS) 2220 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); 2221 #endif 2222 2223 credits = G_RSPD_TXQ0_CR(flags); 2224 if (credits) 2225 qs->txq[TXQ_ETH].processed += credits; 2226 2227 credits = G_RSPD_TXQ2_CR(flags); 2228 if (credits) 2229 qs->txq[TXQ_CTRL].processed += credits; 2230 2231 # if USE_GTS 2232 if (flags & F_RSPD_TXQ1_GTS) 2233 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); 2234 # endif 2235 credits = G_RSPD_TXQ1_CR(flags); 2236 if (credits) 2237 qs->txq[TXQ_OFLD].processed += credits; 2238 } 2239 2240 /** 2241 * check_ring_db - check if we need to ring any doorbells 2242 * @adapter: the adapter 2243 * @qs: the queue set whose Tx queues are to be examined 2244 * @sleeping: indicates which Tx queue sent GTS 2245 * 2246 * Checks if some of a queue set's Tx queues need to ring their doorbells 2247 * to resume transmission after idling while they still have unprocessed 2248 * descriptors. 2249 */ 2250 static void check_ring_db(struct adapter *adap, struct sge_qset *qs, 2251 unsigned int sleeping) 2252 { 2253 if (sleeping & F_RSPD_TXQ0_GTS) { 2254 struct sge_txq *txq = &qs->txq[TXQ_ETH]; 2255 2256 if (txq->cleaned + txq->in_use != txq->processed && 2257 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { 2258 set_bit(TXQ_RUNNING, &txq->flags); 2259 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | 2260 V_EGRCNTX(txq->cntxt_id)); 2261 } 2262 } 2263 2264 if (sleeping & F_RSPD_TXQ1_GTS) { 2265 struct sge_txq *txq = &qs->txq[TXQ_OFLD]; 2266 2267 if (txq->cleaned + txq->in_use != txq->processed && 2268 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { 2269 set_bit(TXQ_RUNNING, &txq->flags); 2270 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | 2271 V_EGRCNTX(txq->cntxt_id)); 2272 } 2273 } 2274 } 2275 2276 /** 2277 * is_new_response - check if a response is newly written 2278 * @r: the response descriptor 2279 * @q: the response queue 2280 * 2281 * Returns true if a response descriptor contains a yet unprocessed 2282 * response. 2283 */ 2284 static inline int is_new_response(const struct rsp_desc *r, 2285 const struct sge_rspq *q) 2286 { 2287 return (r->intr_gen & F_RSPD_GEN2) == q->gen; 2288 } 2289 2290 static inline void clear_rspq_bufstate(struct sge_rspq * const q) 2291 { 2292 q->pg_skb = NULL; 2293 q->rx_recycle_buf = 0; 2294 } 2295 2296 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS) 2297 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \ 2298 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \ 2299 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \ 2300 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR)) 2301 2302 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. 
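 * The value below is 2500 such ticks, i.e. a 250 us holdoff.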
*/ 2303 #define NOMEM_INTR_DELAY 2500 2304 2305 /** 2306 * process_responses - process responses from an SGE response queue 2307 * @adap: the adapter 2308 * @qs: the queue set to which the response queue belongs 2309 * @budget: how many responses can be processed in this round 2310 * 2311 * Process responses from an SGE response queue up to the supplied budget. 2312 * Responses include received packets as well as credits and other events 2313 * for the queues that belong to the response queue's queue set. 2314 * A negative budget is effectively unlimited. 2315 * 2316 * Additionally choose the interrupt holdoff time for the next interrupt 2317 * on this queue. If the system is under memory shortage use a fairly 2318 * long delay to help recovery. 2319 */ 2320 static int process_responses(struct adapter *adap, struct sge_qset *qs, 2321 int budget) 2322 { 2323 struct sge_rspq *q = &qs->rspq; 2324 struct rsp_desc *r = &q->desc[q->cidx]; 2325 int budget_left = budget; 2326 unsigned int sleeping = 0; 2327 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE]; 2328 int ngathered = 0; 2329 2330 q->next_holdoff = q->holdoff_tmr; 2331 2332 while (likely(budget_left && is_new_response(r, q))) { 2333 int packet_complete, eth, ethpad = 2; 2334 int lro = !!(qs->netdev->features & NETIF_F_GRO); 2335 struct sk_buff *skb = NULL; 2336 u32 len, flags; 2337 __be32 rss_hi, rss_lo; 2338 2339 dma_rmb(); 2340 eth = r->rss_hdr.opcode == CPL_RX_PKT; 2341 rss_hi = *(const __be32 *)r; 2342 rss_lo = r->rss_hdr.rss_hash_val; 2343 flags = ntohl(r->flags); 2344 2345 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) { 2346 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC); 2347 if (!skb) 2348 goto no_mem; 2349 2350 __skb_put_data(skb, r, AN_PKT_SIZE); 2351 skb->data[0] = CPL_ASYNC_NOTIF; 2352 rss_hi = htonl(CPL_ASYNC_NOTIF << 24); 2353 q->async_notif++; 2354 } else if (flags & F_RSPD_IMM_DATA_VALID) { 2355 skb = get_imm_packet(r); 2356 if (unlikely(!skb)) { 2357 no_mem: 2358 q->next_holdoff = NOMEM_INTR_DELAY; 2359 q->nomem++; 2360 /* consume one credit since we tried */ 2361 budget_left--; 2362 break; 2363 } 2364 q->imm_data++; 2365 ethpad = 0; 2366 } else if ((len = ntohl(r->len_cq)) != 0) { 2367 struct sge_fl *fl; 2368 2369 lro &= eth && is_eth_tcp(rss_hi); 2370 2371 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; 2372 if (fl->use_pages) { 2373 void *addr = fl->sdesc[fl->cidx].pg_chunk.va; 2374 2375 prefetch(addr); 2376 #if L1_CACHE_BYTES < 128 2377 prefetch(addr + L1_CACHE_BYTES); 2378 #endif 2379 __refill_fl(adap, fl); 2380 if (lro > 0) { 2381 lro_add_page(adap, qs, fl, 2382 G_RSPD_LEN(len), 2383 flags & F_RSPD_EOP); 2384 goto next_fl; 2385 } 2386 2387 skb = get_packet_pg(adap, fl, q, 2388 G_RSPD_LEN(len), 2389 eth ? 2390 SGE_RX_DROP_THRES : 0); 2391 q->pg_skb = skb; 2392 } else 2393 skb = get_packet(adap, fl, G_RSPD_LEN(len), 2394 eth ? 
SGE_RX_DROP_THRES : 0); 2395 if (unlikely(!skb)) { 2396 if (!eth) 2397 goto no_mem; 2398 q->rx_drops++; 2399 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT)) 2400 __skb_pull(skb, 2); 2401 next_fl: 2402 if (++fl->cidx == fl->size) 2403 fl->cidx = 0; 2404 } else 2405 q->pure_rsps++; 2406 2407 if (flags & RSPD_CTRL_MASK) { 2408 sleeping |= flags & RSPD_GTS_MASK; 2409 handle_rsp_cntrl_info(qs, flags); 2410 } 2411 2412 r++; 2413 if (unlikely(++q->cidx == q->size)) { 2414 q->cidx = 0; 2415 q->gen ^= 1; 2416 r = q->desc; 2417 } 2418 prefetch(r); 2419 2420 if (++q->credits >= (q->size / 4)) { 2421 refill_rspq(adap, q, q->credits); 2422 q->credits = 0; 2423 } 2424 2425 packet_complete = flags & 2426 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID | 2427 F_RSPD_ASYNC_NOTIF); 2428 2429 if (skb != NULL && packet_complete) { 2430 if (eth) 2431 rx_eth(adap, q, skb, ethpad, lro); 2432 else { 2433 q->offload_pkts++; 2434 /* Preserve the RSS info in csum & priority */ 2435 skb->csum = rss_hi; 2436 skb->priority = rss_lo; 2437 ngathered = rx_offload(&adap->tdev, q, skb, 2438 offload_skbs, 2439 ngathered); 2440 } 2441 2442 if (flags & F_RSPD_EOP) 2443 clear_rspq_bufstate(q); 2444 } 2445 --budget_left; 2446 } 2447 2448 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); 2449 2450 if (sleeping) 2451 check_ring_db(adap, qs, sleeping); 2452 2453 smp_mb(); /* commit Tx queue .processed updates */ 2454 if (unlikely(qs->txq_stopped != 0)) 2455 restart_tx(qs); 2456 2457 budget -= budget_left; 2458 return budget; 2459 } 2460 2461 static inline int is_pure_response(const struct rsp_desc *r) 2462 { 2463 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID); 2464 2465 return (n | r->len_cq) == 0; 2466 } 2467 2468 /** 2469 * napi_rx_handler - the NAPI handler for Rx processing 2470 * @napi: the napi instance 2471 * @budget: how many packets we can process in this round 2472 * 2473 * Handler for new data events when using NAPI. 2474 */ 2475 static int napi_rx_handler(struct napi_struct *napi, int budget) 2476 { 2477 struct sge_qset *qs = container_of(napi, struct sge_qset, napi); 2478 struct adapter *adap = qs->adap; 2479 int work_done = process_responses(adap, qs, budget); 2480 2481 if (likely(work_done < budget)) { 2482 napi_complete_done(napi, work_done); 2483 2484 /* 2485 * Because we don't atomically flush the following 2486 * write it is possible that in very rare cases it can 2487 * reach the device in a way that races with a new 2488 * response being written plus an error interrupt 2489 * causing the NAPI interrupt handler below to return 2490 * unhandled status to the OS. To protect against 2491 * this would require flushing the write and doing 2492 * both the write and the flush with interrupts off. 2493 * Way too expensive and unjustifiable given the 2494 * rarity of the race. 2495 * 2496 * The race cannot happen at all with MSI-X. 2497 */ 2498 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | 2499 V_NEWTIMER(qs->rspq.next_holdoff) | 2500 V_NEWINDEX(qs->rspq.cidx)); 2501 } 2502 return work_done; 2503 } 2504 2505 /* 2506 * Returns true if the device is already scheduled for polling. 
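 * This just tests NAPI_STATE_SCHED, the bit napi_schedule() sets and the
 * core clears when polling of the instance completes.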
2507  */
2508 static inline int napi_is_scheduled(struct napi_struct *napi)
2509 {
2510 	return test_bit(NAPI_STATE_SCHED, &napi->state);
2511 }
2512 
2513 /**
2514  * process_pure_responses - process pure responses from a response queue
2515  * @adap: the adapter
2516  * @qs: the queue set owning the response queue
2517  * @r: the first pure response to process
2518  *
2519  * A simpler version of process_responses() that handles only pure (i.e.,
2520  * non data-carrying) responses. Such responses are too light-weight to
2521  * justify calling a softirq under NAPI, so we handle them specially in
2522  * the interrupt handler. The function is called with a pointer to a
2523  * response, which the caller must ensure is a valid pure response.
2524  *
2525  * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2526  */
2527 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2528 				  struct rsp_desc *r)
2529 {
2530 	struct sge_rspq *q = &qs->rspq;
2531 	unsigned int sleeping = 0;
2532 
2533 	do {
2534 		u32 flags = ntohl(r->flags);
2535 
2536 		r++;
2537 		if (unlikely(++q->cidx == q->size)) {
2538 			q->cidx = 0;
2539 			q->gen ^= 1;
2540 			r = q->desc;
2541 		}
2542 		prefetch(r);
2543 
2544 		if (flags & RSPD_CTRL_MASK) {
2545 			sleeping |= flags & RSPD_GTS_MASK;
2546 			handle_rsp_cntrl_info(qs, flags);
2547 		}
2548 
2549 		q->pure_rsps++;
2550 		if (++q->credits >= (q->size / 4)) {
2551 			refill_rspq(adap, q, q->credits);
2552 			q->credits = 0;
2553 		}
2554 		if (!is_new_response(r, q))
2555 			break;
2556 		dma_rmb();
2557 	} while (is_pure_response(r));
2558 
2559 	if (sleeping)
2560 		check_ring_db(adap, qs, sleeping);
2561 
2562 	smp_mb();		/* commit Tx queue .processed updates */
2563 	if (unlikely(qs->txq_stopped != 0))
2564 		restart_tx(qs);
2565 
2566 	return is_new_response(r, q);
2567 }
2568 
2569 /**
2570  * handle_responses - decide what to do with new responses in NAPI mode
2571  * @adap: the adapter
2572  * @q: the response queue
2573  *
2574  * This is used by the NAPI interrupt handlers to decide what to do with
2575  * new SGE responses. If there are no new responses it returns -1. If
2576  * there are new responses and they are pure (i.e., non-data carrying)
2577  * it handles them straight in hard interrupt context as they are very
2578  * cheap and don't deliver any packets. Finally, if there are any data
2579  * signaling responses it schedules the NAPI handler. Returns 1 if it
2580  * schedules NAPI, 0 if all new responses were pure.
2581  *
2582  * The caller must ascertain NAPI is not already running.
2583  */
2584 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2585 {
2586 	struct sge_qset *qs = rspq_to_qset(q);
2587 	struct rsp_desc *r = &q->desc[q->cidx];
2588 
2589 	if (!is_new_response(r, q))
2590 		return -1;
2591 	dma_rmb();
2592 	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2593 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2594 			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2595 		return 0;
2596 	}
2597 	napi_schedule(&qs->napi);
2598 	return 1;
2599 }
2600 
2601 /*
2602  * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2603  * (i.e., response queue serviced in hard interrupt).
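 * Each queue set has its own MSI-X vector in this mode, so the cookie
 * identifies the qset directly and no interrupt cause register needs to
 * be read.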
2604 */ 2605 static irqreturn_t t3_sge_intr_msix(int irq, void *cookie) 2606 { 2607 struct sge_qset *qs = cookie; 2608 struct adapter *adap = qs->adap; 2609 struct sge_rspq *q = &qs->rspq; 2610 2611 spin_lock(&q->lock); 2612 if (process_responses(adap, qs, -1) == 0) 2613 q->unhandled_irqs++; 2614 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | 2615 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); 2616 spin_unlock(&q->lock); 2617 return IRQ_HANDLED; 2618 } 2619 2620 /* 2621 * The MSI-X interrupt handler for an SGE response queue for the NAPI case 2622 * (i.e., response queue serviced by NAPI polling). 2623 */ 2624 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) 2625 { 2626 struct sge_qset *qs = cookie; 2627 struct sge_rspq *q = &qs->rspq; 2628 2629 spin_lock(&q->lock); 2630 2631 if (handle_responses(qs->adap, q) < 0) 2632 q->unhandled_irqs++; 2633 spin_unlock(&q->lock); 2634 return IRQ_HANDLED; 2635 } 2636 2637 /* 2638 * The non-NAPI MSI interrupt handler. This needs to handle data events from 2639 * SGE response queues as well as error and other async events as they all use 2640 * the same MSI vector. We use one SGE response queue per port in this mode 2641 * and protect all response queues with queue 0's lock. 2642 */ 2643 static irqreturn_t t3_intr_msi(int irq, void *cookie) 2644 { 2645 int new_packets = 0; 2646 struct adapter *adap = cookie; 2647 struct sge_rspq *q = &adap->sge.qs[0].rspq; 2648 2649 spin_lock(&q->lock); 2650 2651 if (process_responses(adap, &adap->sge.qs[0], -1)) { 2652 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | 2653 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); 2654 new_packets = 1; 2655 } 2656 2657 if (adap->params.nports == 2 && 2658 process_responses(adap, &adap->sge.qs[1], -1)) { 2659 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; 2660 2661 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) | 2662 V_NEWTIMER(q1->next_holdoff) | 2663 V_NEWINDEX(q1->cidx)); 2664 new_packets = 1; 2665 } 2666 2667 if (!new_packets && t3_slow_intr_handler(adap) == 0) 2668 q->unhandled_irqs++; 2669 2670 spin_unlock(&q->lock); 2671 return IRQ_HANDLED; 2672 } 2673 2674 static int rspq_check_napi(struct sge_qset *qs) 2675 { 2676 struct sge_rspq *q = &qs->rspq; 2677 2678 if (!napi_is_scheduled(&qs->napi) && 2679 is_new_response(&q->desc[q->cidx], q)) { 2680 napi_schedule(&qs->napi); 2681 return 1; 2682 } 2683 return 0; 2684 } 2685 2686 /* 2687 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced 2688 * by NAPI polling). Handles data events from SGE response queues as well as 2689 * error and other async events as they all use the same MSI vector. We use 2690 * one SGE response queue per port in this mode and protect all response 2691 * queues with queue 0's lock. 2692 */ 2693 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie) 2694 { 2695 int new_packets; 2696 struct adapter *adap = cookie; 2697 struct sge_rspq *q = &adap->sge.qs[0].rspq; 2698 2699 spin_lock(&q->lock); 2700 2701 new_packets = rspq_check_napi(&adap->sge.qs[0]); 2702 if (adap->params.nports == 2) 2703 new_packets += rspq_check_napi(&adap->sge.qs[1]); 2704 if (!new_packets && t3_slow_intr_handler(adap) == 0) 2705 q->unhandled_irqs++; 2706 2707 spin_unlock(&q->lock); 2708 return IRQ_HANDLED; 2709 } 2710 2711 /* 2712 * A helper function that processes responses and issues GTS. 
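 * "Issuing GTS" means writing the A_SG_GTS register to return the updated
 * response queue index to the hardware and arm the next holdoff timer.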
2713 */ 2714 static inline int process_responses_gts(struct adapter *adap, 2715 struct sge_rspq *rq) 2716 { 2717 int work; 2718 2719 work = process_responses(adap, rspq_to_qset(rq), -1); 2720 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) | 2721 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx)); 2722 return work; 2723 } 2724 2725 /* 2726 * The legacy INTx interrupt handler. This needs to handle data events from 2727 * SGE response queues as well as error and other async events as they all use 2728 * the same interrupt pin. We use one SGE response queue per port in this mode 2729 * and protect all response queues with queue 0's lock. 2730 */ 2731 static irqreturn_t t3_intr(int irq, void *cookie) 2732 { 2733 int work_done, w0, w1; 2734 struct adapter *adap = cookie; 2735 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; 2736 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; 2737 2738 spin_lock(&q0->lock); 2739 2740 w0 = is_new_response(&q0->desc[q0->cidx], q0); 2741 w1 = adap->params.nports == 2 && 2742 is_new_response(&q1->desc[q1->cidx], q1); 2743 2744 if (likely(w0 | w1)) { 2745 t3_write_reg(adap, A_PL_CLI, 0); 2746 t3_read_reg(adap, A_PL_CLI); /* flush */ 2747 2748 if (likely(w0)) 2749 process_responses_gts(adap, q0); 2750 2751 if (w1) 2752 process_responses_gts(adap, q1); 2753 2754 work_done = w0 | w1; 2755 } else 2756 work_done = t3_slow_intr_handler(adap); 2757 2758 spin_unlock(&q0->lock); 2759 return IRQ_RETVAL(work_done != 0); 2760 } 2761 2762 /* 2763 * Interrupt handler for legacy INTx interrupts for T3B-based cards. 2764 * Handles data events from SGE response queues as well as error and other 2765 * async events as they all use the same interrupt pin. We use one SGE 2766 * response queue per port in this mode and protect all response queues with 2767 * queue 0's lock. 2768 */ 2769 static irqreturn_t t3b_intr(int irq, void *cookie) 2770 { 2771 u32 map; 2772 struct adapter *adap = cookie; 2773 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; 2774 2775 t3_write_reg(adap, A_PL_CLI, 0); 2776 map = t3_read_reg(adap, A_SG_DATA_INTR); 2777 2778 if (unlikely(!map)) /* shared interrupt, most likely */ 2779 return IRQ_NONE; 2780 2781 spin_lock(&q0->lock); 2782 2783 if (unlikely(map & F_ERRINTR)) 2784 t3_slow_intr_handler(adap); 2785 2786 if (likely(map & 1)) 2787 process_responses_gts(adap, q0); 2788 2789 if (map & 2) 2790 process_responses_gts(adap, &adap->sge.qs[1].rspq); 2791 2792 spin_unlock(&q0->lock); 2793 return IRQ_HANDLED; 2794 } 2795 2796 /* 2797 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards. 2798 * Handles data events from SGE response queues as well as error and other 2799 * async events as they all use the same interrupt pin. We use one SGE 2800 * response queue per port in this mode and protect all response queues with 2801 * queue 0's lock. 
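 * The A_SG_DATA_INTR cause map says which response queues have new
 * entries; an empty map most likely means a shared interrupt line, so
 * IRQ_NONE is returned without taking any locks.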
2802 */ 2803 static irqreturn_t t3b_intr_napi(int irq, void *cookie) 2804 { 2805 u32 map; 2806 struct adapter *adap = cookie; 2807 struct sge_qset *qs0 = &adap->sge.qs[0]; 2808 struct sge_rspq *q0 = &qs0->rspq; 2809 2810 t3_write_reg(adap, A_PL_CLI, 0); 2811 map = t3_read_reg(adap, A_SG_DATA_INTR); 2812 2813 if (unlikely(!map)) /* shared interrupt, most likely */ 2814 return IRQ_NONE; 2815 2816 spin_lock(&q0->lock); 2817 2818 if (unlikely(map & F_ERRINTR)) 2819 t3_slow_intr_handler(adap); 2820 2821 if (likely(map & 1)) 2822 napi_schedule(&qs0->napi); 2823 2824 if (map & 2) 2825 napi_schedule(&adap->sge.qs[1].napi); 2826 2827 spin_unlock(&q0->lock); 2828 return IRQ_HANDLED; 2829 } 2830 2831 /** 2832 * t3_intr_handler - select the top-level interrupt handler 2833 * @adap: the adapter 2834 * @polling: whether using NAPI to service response queues 2835 * 2836 * Selects the top-level interrupt handler based on the type of interrupts 2837 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the 2838 * response queues. 2839 */ 2840 irq_handler_t t3_intr_handler(struct adapter *adap, int polling) 2841 { 2842 if (adap->flags & USING_MSIX) 2843 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix; 2844 if (adap->flags & USING_MSI) 2845 return polling ? t3_intr_msi_napi : t3_intr_msi; 2846 if (adap->params.rev > 0) 2847 return polling ? t3b_intr_napi : t3b_intr; 2848 return t3_intr; 2849 } 2850 2851 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ 2852 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ 2853 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ 2854 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ 2855 F_HIRCQPARITYERROR) 2856 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR) 2857 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \ 2858 F_RSPQDISABLED) 2859 2860 /** 2861 * t3_sge_err_intr_handler - SGE async event interrupt handler 2862 * @adapter: the adapter 2863 * 2864 * Interrupt handler for SGE asynchronous (non-data) events. 2865 */ 2866 void t3_sge_err_intr_handler(struct adapter *adapter) 2867 { 2868 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) & 2869 ~F_FLEMPTY; 2870 2871 if (status & SGE_PARERR) 2872 CH_ALERT(adapter, "SGE parity error (0x%x)\n", 2873 status & SGE_PARERR); 2874 if (status & SGE_FRAMINGERR) 2875 CH_ALERT(adapter, "SGE framing error (0x%x)\n", 2876 status & SGE_FRAMINGERR); 2877 2878 if (status & F_RSPQCREDITOVERFOW) 2879 CH_ALERT(adapter, "SGE response queue credit overflow\n"); 2880 2881 if (status & F_RSPQDISABLED) { 2882 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS); 2883 2884 CH_ALERT(adapter, 2885 "packet delivered to disabled response queue " 2886 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff); 2887 } 2888 2889 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) 2890 queue_work(cxgb3_wq, &adapter->db_drop_task); 2891 2892 if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL)) 2893 queue_work(cxgb3_wq, &adapter->db_full_task); 2894 2895 if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY)) 2896 queue_work(cxgb3_wq, &adapter->db_empty_task); 2897 2898 t3_write_reg(adapter, A_SG_INT_CAUSE, status); 2899 if (status & SGE_FATALERR) 2900 t3_fatal_err(adapter); 2901 } 2902 2903 /** 2904 * sge_timer_tx - perform periodic maintenance of an SGE qset 2905 * @data: the SGE queue set to maintain 2906 * 2907 * Runs periodically from a timer to perform maintenance of an SGE queue 2908 * set. 
It performs two tasks: 2909 * 2910 * Cleans up any completed Tx descriptors that may still be pending. 2911 * Normal descriptor cleanup happens when new packets are added to a Tx 2912 * queue so this timer is relatively infrequent and does any cleanup only 2913 * if the Tx queue has not seen any new packets in a while. We make a 2914 * best effort attempt to reclaim descriptors, in that we don't wait 2915 * around if we cannot get a queue's lock (which most likely is because 2916 * someone else is queueing new packets and so will also handle the clean 2917 * up). Since control queues use immediate data exclusively we don't 2918 * bother cleaning them up here. 2919 * 2920 */ 2921 static void sge_timer_tx(struct timer_list *t) 2922 { 2923 struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer); 2924 struct port_info *pi = netdev_priv(qs->netdev); 2925 struct adapter *adap = pi->adapter; 2926 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0}; 2927 unsigned long next_period; 2928 2929 if (__netif_tx_trylock(qs->tx_q)) { 2930 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH], 2931 TX_RECLAIM_TIMER_CHUNK); 2932 __netif_tx_unlock(qs->tx_q); 2933 } 2934 2935 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) { 2936 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD], 2937 TX_RECLAIM_TIMER_CHUNK); 2938 spin_unlock(&qs->txq[TXQ_OFLD].lock); 2939 } 2940 2941 next_period = TX_RECLAIM_PERIOD >> 2942 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) / 2943 TX_RECLAIM_TIMER_CHUNK); 2944 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period); 2945 } 2946 2947 /** 2948 * sge_timer_rx - perform periodic maintenance of an SGE qset 2949 * @data: the SGE queue set to maintain 2950 * 2951 * a) Replenishes Rx queues that have run out due to memory shortage. 2952 * Normally new Rx buffers are added when existing ones are consumed but 2953 * when out of memory a queue can become empty. We try to add only a few 2954 * buffers here, the queue will be replenished fully as these new buffers 2955 * are used up if memory shortage has subsided. 2956 * 2957 * b) Return coalesced response queue credits in case a response queue is 2958 * starved. 2959 * 2960 */ 2961 static void sge_timer_rx(struct timer_list *t) 2962 { 2963 spinlock_t *lock; 2964 struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer); 2965 struct port_info *pi = netdev_priv(qs->netdev); 2966 struct adapter *adap = pi->adapter; 2967 u32 status; 2968 2969 lock = adap->params.rev > 0 ? 2970 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock; 2971 2972 if (!spin_trylock_irq(lock)) 2973 goto out; 2974 2975 if (napi_is_scheduled(&qs->napi)) 2976 goto unlock; 2977 2978 if (adap->params.rev < 4) { 2979 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS); 2980 2981 if (status & (1 << qs->rspq.cntxt_id)) { 2982 qs->rspq.starved++; 2983 if (qs->rspq.credits) { 2984 qs->rspq.credits--; 2985 refill_rspq(adap, &qs->rspq, 1); 2986 qs->rspq.restarted++; 2987 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS, 2988 1 << qs->rspq.cntxt_id); 2989 } 2990 } 2991 } 2992 2993 if (qs->fl[0].credits < qs->fl[0].size) 2994 __refill_fl(adap, &qs->fl[0]); 2995 if (qs->fl[1].credits < qs->fl[1].size) 2996 __refill_fl(adap, &qs->fl[1]); 2997 2998 unlock: 2999 spin_unlock_irq(lock); 3000 out: 3001 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); 3002 } 3003 3004 /** 3005 * t3_update_qset_coalesce - update coalescing settings for a queue set 3006 * @qs: the SGE queue set 3007 * @p: new queue set parameters 3008 * 3009 * Update the coalescing settings for an SGE queue set. 
Nothing is done 3010 * if the queue set is not initialized yet. 3011 */ 3012 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) 3013 { 3014 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ 3015 qs->rspq.polling = p->polling; 3016 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll; 3017 } 3018 3019 /** 3020 * t3_sge_alloc_qset - initialize an SGE queue set 3021 * @adapter: the adapter 3022 * @id: the queue set id 3023 * @nports: how many Ethernet ports will be using this queue set 3024 * @irq_vec_idx: the IRQ vector index for response queue interrupts 3025 * @p: configuration parameters for this queue set 3026 * @ntxq: number of Tx queues for the queue set 3027 * @netdev: net device associated with this queue set 3028 * @netdevq: net device TX queue associated with this queue set 3029 * 3030 * Allocate resources and initialize an SGE queue set. A queue set 3031 * comprises a response queue, two Rx free-buffer queues, and up to 3 3032 * Tx queues. The Tx queues are assigned roles in the order Ethernet 3033 * queue, offload queue, and control queue. 3034 */ 3035 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, 3036 int irq_vec_idx, const struct qset_params *p, 3037 int ntxq, struct net_device *dev, 3038 struct netdev_queue *netdevq) 3039 { 3040 int i, avail, ret = -ENOMEM; 3041 struct sge_qset *q = &adapter->sge.qs[id]; 3042 3043 init_qset_cntxt(q, id); 3044 timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0); 3045 timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0); 3046 3047 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, 3048 sizeof(struct rx_desc), 3049 sizeof(struct rx_sw_desc), 3050 &q->fl[0].phys_addr, &q->fl[0].sdesc); 3051 if (!q->fl[0].desc) 3052 goto err; 3053 3054 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, 3055 sizeof(struct rx_desc), 3056 sizeof(struct rx_sw_desc), 3057 &q->fl[1].phys_addr, &q->fl[1].sdesc); 3058 if (!q->fl[1].desc) 3059 goto err; 3060 3061 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, 3062 sizeof(struct rsp_desc), 0, 3063 &q->rspq.phys_addr, NULL); 3064 if (!q->rspq.desc) 3065 goto err; 3066 3067 for (i = 0; i < ntxq; ++i) { 3068 /* 3069 * The control queue always uses immediate data so does not 3070 * need to keep track of any sk_buffs. 3071 */ 3072 size_t sz = i == TXQ_CTRL ? 
0 : sizeof(struct tx_sw_desc); 3073 3074 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], 3075 sizeof(struct tx_desc), sz, 3076 &q->txq[i].phys_addr, 3077 &q->txq[i].sdesc); 3078 if (!q->txq[i].desc) 3079 goto err; 3080 3081 q->txq[i].gen = 1; 3082 q->txq[i].size = p->txq_size[i]; 3083 spin_lock_init(&q->txq[i].lock); 3084 skb_queue_head_init(&q->txq[i].sendq); 3085 } 3086 3087 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq, 3088 (unsigned long)q); 3089 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq, 3090 (unsigned long)q); 3091 3092 q->fl[0].gen = q->fl[1].gen = 1; 3093 q->fl[0].size = p->fl_size; 3094 q->fl[1].size = p->jumbo_size; 3095 3096 q->rspq.gen = 1; 3097 q->rspq.size = p->rspq_size; 3098 spin_lock_init(&q->rspq.lock); 3099 skb_queue_head_init(&q->rspq.rx_queue); 3100 3101 q->txq[TXQ_ETH].stop_thres = nports * 3102 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); 3103 3104 #if FL0_PG_CHUNK_SIZE > 0 3105 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; 3106 #else 3107 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); 3108 #endif 3109 #if FL1_PG_CHUNK_SIZE > 0 3110 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; 3111 #else 3112 q->fl[1].buf_size = is_offload(adapter) ? 3113 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : 3114 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt); 3115 #endif 3116 3117 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; 3118 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; 3119 q->fl[0].order = FL0_PG_ORDER; 3120 q->fl[1].order = FL1_PG_ORDER; 3121 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; 3122 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; 3123 3124 spin_lock_irq(&adapter->sge.reg_lock); 3125 3126 /* FL threshold comparison uses < */ 3127 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, 3128 q->rspq.phys_addr, q->rspq.size, 3129 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); 3130 if (ret) 3131 goto err_unlock; 3132 3133 for (i = 0; i < SGE_RXQ_PER_SET; ++i) { 3134 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, 3135 q->fl[i].phys_addr, q->fl[i].size, 3136 q->fl[i].buf_size - SGE_PG_RSVD, 3137 p->cong_thres, 1, 0); 3138 if (ret) 3139 goto err_unlock; 3140 } 3141 3142 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, 3143 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, 3144 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, 3145 1, 0); 3146 if (ret) 3147 goto err_unlock; 3148 3149 if (ntxq > 1) { 3150 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, 3151 USE_GTS, SGE_CNTXT_OFLD, id, 3152 q->txq[TXQ_OFLD].phys_addr, 3153 q->txq[TXQ_OFLD].size, 0, 1, 0); 3154 if (ret) 3155 goto err_unlock; 3156 } 3157 3158 if (ntxq > 2) { 3159 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, 3160 SGE_CNTXT_CTRL, id, 3161 q->txq[TXQ_CTRL].phys_addr, 3162 q->txq[TXQ_CTRL].size, 3163 q->txq[TXQ_CTRL].token, 1, 0); 3164 if (ret) 3165 goto err_unlock; 3166 } 3167 3168 spin_unlock_irq(&adapter->sge.reg_lock); 3169 3170 q->adap = adapter; 3171 q->netdev = dev; 3172 q->tx_q = netdevq; 3173 t3_update_qset_coalesce(q, p); 3174 3175 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, 3176 GFP_KERNEL | __GFP_COMP); 3177 if (!avail) { 3178 CH_ALERT(adapter, "free list queue 0 initialization failed\n"); 3179 goto err; 3180 } 3181 if (avail < q->fl[0].size) 3182 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n", 3183 avail); 3184 3185 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, 3186 GFP_KERNEL | __GFP_COMP); 3187 if (avail < q->fl[1].size) 3188 CH_WARN(adapter, "free list queue 1 
enabled with %d credits\n",
3189 			avail);
3190 	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3191 
3192 	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3193 		     V_NEWTIMER(q->rspq.holdoff_tmr));
3194 
3195 	return 0;
3196 
3197 err_unlock:
3198 	spin_unlock_irq(&adapter->sge.reg_lock);
3199 err:
3200 	t3_free_qset(adapter, q);
3201 	return ret;
3202 }
3203 
3204 /**
3205  * t3_start_sge_timers - start SGE timer callbacks
3206  * @adap: the adapter
3207  *
3208  * Starts each SGE queue set's timer callback
3209  */
3210 void t3_start_sge_timers(struct adapter *adap)
3211 {
3212 	int i;
3213 
3214 	for (i = 0; i < SGE_QSETS; ++i) {
3215 		struct sge_qset *q = &adap->sge.qs[i];
3216 
3217 		if (q->tx_reclaim_timer.function)
3218 			mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3219 
3220 		if (q->rx_reclaim_timer.function)
3221 			mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3222 	}
3223 }
3224 
3225 /**
3226  * t3_stop_sge_timers - stop SGE timer callbacks
3227  * @adap: the adapter
3228  *
3229  * Stops each SGE queue set's timer callback
3230  */
3231 void t3_stop_sge_timers(struct adapter *adap)
3232 {
3233 	int i;
3234 
3235 	for (i = 0; i < SGE_QSETS; ++i) {
3236 		struct sge_qset *q = &adap->sge.qs[i];
3237 
3238 		if (q->tx_reclaim_timer.function)
3239 			del_timer_sync(&q->tx_reclaim_timer);
3240 		if (q->rx_reclaim_timer.function)
3241 			del_timer_sync(&q->rx_reclaim_timer);
3242 	}
3243 }
3244 
3245 /**
3246  * t3_free_sge_resources - free SGE resources
3247  * @adap: the adapter
3248  *
3249  * Frees resources used by the SGE queue sets.
3250  */
3251 void t3_free_sge_resources(struct adapter *adap)
3252 {
3253 	int i;
3254 
3255 	for (i = 0; i < SGE_QSETS; ++i)
3256 		t3_free_qset(adap, &adap->sge.qs[i]);
3257 }
3258 
3259 /**
3260  * t3_sge_start - enable SGE
3261  * @adap: the adapter
3262  *
3263  * Enables the SGE for DMAs. This is the last step in starting packet
3264  * transfers.
3265  */
3266 void t3_sge_start(struct adapter *adap)
3267 {
3268 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3269 }
3270 
3271 /**
3272  * t3_sge_stop - disable SGE operation
3273  * @adap: the adapter
3274  *
3275  * Disables the DMA engine. This can be called in emergencies (e.g.,
3276  * from error interrupts) or from normal process context. In the latter
3277  * case it also disables any pending queue restart tasklets. Note that
3278  * if it is called in interrupt context it cannot disable the restart
3279  * tasklets as it cannot wait; however, the tasklets will have no effect
3280  * since the doorbells are disabled and the driver will call this again
3281  * later from process context, at which time the tasklets will be stopped
3282  * if they are still running.
3283  */
3284 void t3_sge_stop(struct adapter *adap)
3285 {
3286 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3287 	if (!in_interrupt()) {
3288 		int i;
3289 
3290 		for (i = 0; i < SGE_QSETS; ++i) {
3291 			struct sge_qset *qs = &adap->sge.qs[i];
3292 
3293 			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3294 			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
3295 		}
3296 	}
3297 }
3298 
3299 /**
3300  * t3_sge_init - initialize SGE
3301  * @adap: the adapter
3302  * @p: the SGE parameters
3303  *
3304  * Performs SGE initialization needed every time after a chip reset.
3305  * We do not initialize any of the queue sets here; instead the driver
3306  * top-level must request those individually. We also do not enable DMA
3307  * here; that should be done after the queues have been set up.
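 *
 * The bring-up order this implies is t3_sge_prep() once, t3_sge_init()
 * after every chip reset, t3_sge_alloc_qset() per queue set, and finally
 * t3_sge_start() to enable DMA.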
3308 */ 3309 void t3_sge_init(struct adapter *adap, struct sge_params *p) 3310 { 3311 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12); 3312 3313 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL | 3314 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN | 3315 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS | 3316 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING; 3317 #if SGE_NUM_GENBITS == 1 3318 ctrl |= F_EGRGENCTRL; 3319 #endif 3320 if (adap->params.rev > 0) { 3321 if (!(adap->flags & (USING_MSIX | USING_MSI))) 3322 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ; 3323 } 3324 t3_write_reg(adap, A_SG_CONTROL, ctrl); 3325 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) | 3326 V_LORCQDRBTHRSH(512)); 3327 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10); 3328 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) | 3329 V_TIMEOUT(200 * core_ticks_per_usec(adap))); 3330 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 3331 adap->params.rev < T3_REV_C ? 1000 : 500); 3332 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256); 3333 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000); 3334 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256); 3335 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff)); 3336 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024); 3337 } 3338 3339 /** 3340 * t3_sge_prep - one-time SGE initialization 3341 * @adap: the associated adapter 3342 * @p: SGE parameters 3343 * 3344 * Performs one-time initialization of SGE SW state. Includes determining 3345 * defaults for the assorted SGE parameters, which admins can change until 3346 * they are used to initialize the SGE. 3347 */ 3348 void t3_sge_prep(struct adapter *adap, struct sge_params *p) 3349 { 3350 int i; 3351 3352 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) - 3353 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3354 3355 for (i = 0; i < SGE_QSETS; ++i) { 3356 struct qset_params *q = p->qset + i; 3357 3358 q->polling = adap->params.rev > 0; 3359 q->coalesce_usecs = 5; 3360 q->rspq_size = 1024; 3361 q->fl_size = 1024; 3362 q->jumbo_size = 512; 3363 q->txq_size[TXQ_ETH] = 1024; 3364 q->txq_size[TXQ_OFLD] = 1024; 3365 q->txq_size[TXQ_CTRL] = 256; 3366 q->cong_thres = 0; 3367 } 3368 3369 spin_lock_init(&adap->sge.reg_lock); 3370 } 3371
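
/*
 * Usage sketch (illustrative only, not part of the driver): the bring-up
 * order implied by the comments above, for a hypothetical already-probed
 * adapter "adap" whose SGE parameters live in adap->params.sge; nqsets,
 * nports, irq_vec_idx, ntxq, and dev are placeholders for values the
 * top-level driver would supply:
 *
 *	t3_sge_prep(adap, &adap->params.sge);      one-time SW defaults
 *	t3_sge_init(adap, &adap->params.sge);      after each chip reset
 *	for (i = 0; i < nqsets; i++)               per-qset resources
 *		t3_sge_alloc_qset(adap, i, nports, irq_vec_idx,
 *				  &adap->params.sge.qset[i], ntxq,
 *				  dev, netdev_get_tx_queue(dev, i));
 *	t3_start_sge_timers(adap);                 reclaim/starvation timers
 *	t3_sge_start(adap);                        enable DMA, begin traffic
 */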