/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"

/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif
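/*
 * Worked example (editorial): with a common 4KB page configuration,
 * PAGE_SHIFT == 12, so FL_PG_ORDER is 16 - 12 = 4 and large Free List
 * buffers are PAGE_SIZE << 4 = 64KB allocations.  On systems whose pages
 * are already >= 64KB (PAGE_SHIFT >= 16) we use order-0 pages directly.
 */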
/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512

/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive, we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
 */
#define MAX_TX_RECLAIM 16

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
 * (in RX_QCHECK_PERIOD multiples).  If we find one of the SGE Ingress DMA
 * State Machines in the same state for this amount of time (in HZ) then we'll
 * issue a warning about a potential hang.  We'll repeat the warning as the
 * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
 * the situation clears.  If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH (1 * HZ)
#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
 * attempt to refill it.
 */
#define FL_STARVE_THRES 4

/*
 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
 * This is the same as calc_tx_descs() for a TSO packet with
 * nr_frags == MAX_SKB_FRAGS.
 */
#define ETHTXQ_STOP_THRES \
	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
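/*
 * Worked example (editorial, assuming MAX_SKB_FRAGS == 17, as is typical
 * for 4KB pages): the SGL portion spans (3 * 17) / 2 + (17 & 1) = 25 + 1 =
 * 26 flits, which rounds up to DIV_ROUND_UP(26, 8) = 4 descriptors, plus
 * 1 descriptor for the WR/CPL/LSO headers, giving ETHTXQ_STOP_THRES == 5.
 */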
/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 128

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

struct tx_sw_desc {                /* SW state per Tx descriptor */
	struct sk_buff *skb;
	struct ulptx_sgl *sgl;
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
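/*
 * Worked example (editorial, assuming a 2-byte pktshift and 32-byte
 * fl_align, both plausible defaults): a 1500-byte MTU needs
 * ALIGN(2 + 14 + 4 + 1500, 32) = ALIGN(1520, 32) = 1536 bytes, and a
 * 9000-byte MTU needs ALIGN(9020, 32) = 9024 bytes.  The actual values
 * depend on the adapter's programmed pktshift and fl_align.
 */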
/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4 to mark when the buffer has been
 * unmapped for DMA, but this is of course never sent to the hardware and is
 * only used to prevent double unmappings.  All of the above requires that the
 * Free List Buffers which we allocate have the bottom 5 bits free (0) -- i.e.
 * are 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
 * Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}
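/*
 * Illustration (editorial): because Free List buffers are at least 32-byte
 * aligned, a mapped large-page buffer at bus address 0x12340000 is stored
 * in rx_sw_desc.dma_addr as 0x12340000 | RX_LARGE_PG_BUF == 0x12340001;
 * get_buf_addr() masks the low five bits back off, and is_buf_mapped()
 * tests bit 4 (RX_UNMAPPED_BUF).
 */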
/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write new
 *	packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 *	fl_cap - return the capacity of a free-buffer list
 *	@fl: the FL
 *
 *	Returns the capacity of a free-buffer list.  The capacity is less than
 *	the size because one descriptor needs to be left unpopulated, otherwise
 *	HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}

static inline bool fl_starving(const struct sge_fl *fl)
{
	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
}

static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}

/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
			 unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
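/*
 * Example (editorial): with q->size == 1024, a SW consumer index of 1000
 * and a HW consumer index that has wrapped around to 10, the subtraction
 * above yields -990, so we return -990 + 1024 = 34 reclaimable descriptors.
 */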
/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue locked.
 */
static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
					bool unmap)
{
	int avail = reclaimable(q);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adap, q, avail, unmap);
		q->in_use -= avail;
	}
}

static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG_ON(1);
	}

	return buf_size;
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE free-buffer Rx queue.  The
 *	buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(adap, d),
				       PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}

/**
 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list
 *
 *	Unmap the current buffer on an SGE free-buffer Rx queue.  The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	u32 val;

	if (q->pend_cred >= 8) {
		val = PIDX(q->pend_cred / 8);
		if (!is_t4(adap->params.chip))
			val |= DBTYPE(1);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
			     QID(q->cntxt_id) | val);
		q->pend_cred &= 7;
	}
}
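/*
 * Note (editorial): the Free List doorbell works in units of 8 buffers per
 * credit (one descriptor covers 8 buffers), hence the PIDX value of
 * pend_cred / 8 above.  E.g. with 27 pending buffer credits we advertise
 * PIDX(3) to the hardware and carry the remaining 27 & 7 == 3 buffers over
 * in pend_cred until at least 8 have accumulated again.
 */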
static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}

/**
 *	refill_fl - refill an SGE Rx buffer ring
 *	@adap: the adapter
 *	@q: the ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low mark it as starving in the bitmap of starving FLs.
 *
 *	Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct sge *s = &adap->sge;
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];

	gfp |= __GFP_NOWARN | __GFP_COLD;

	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << s->fl_pg_order,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, s->fl_pg_order);
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_PG_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		pg = __skb_alloc_page(gfp, NULL);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			goto out;
		}
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(q))) {
		smp_wmb();
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *	@node: preferred node for memory allocations
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the bus address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
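/*
 * Derivation (editorial): a ULP_TX SGL starts with a command/len0/addr0
 * header that occupies 2 flits, and each subsequent pair of addresses plus
 * their two 32-bit lengths packs into 3 flits (24 bytes).  So for n = 3
 * entries: after folding addr0 into the header, 2 entries remain, giving
 * (3 * 2) / 2 + (2 & 1) + 2 = 5 flits in total.
 */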
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit as
 *	immediate data.  Return value corresponds to headroom required.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	int hdrlen = skb_shinfo(skb)->gso_size ?
		     sizeof(struct cpl_tx_pkt_lso_core) : 0;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}

/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb);

	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
	if (skb_shinfo(skb)->gso_size)
		flits += 2;
	return flits;
}
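/*
 * Example (editorial): a non-TSO packet with 2 page fragments plus linear
 * data uses sgl_len(2 + 1) + 4 = 5 + 4 = 9 flits (the 4 covers the firmware
 * WR header and CPL_TX_PKT headers); a TSO packet adds 2 more flits for the
 * LSO CPL.
 */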
/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	return flits_to_desc(calc_tx_flits(skb));
}

/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@q: the Tx queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of bus addresses for the SGL elements
 *
 *	Generates a gather list for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a Tx descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @sgl.
 */
static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*end = 0;
}

/*
 * This function copies a 64-byte coalesced work request to memory-mapped
 * BAR2 space (user space writes).  For a coalesced WR the SGE fetches data
 * from the FIFO instead of from the host.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}

/**
 *	ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a Tx queue.
 */
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	unsigned int *wr, index;
	unsigned long flags;

	wmb();            /* write descriptors before telling HW */
	spin_lock_irqsave(&q->db_lock, flags);
	if (!q->db_disabled) {
		if (is_t4(adap->params.chip)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
				     QID(q->cntxt_id) | PIDX(n));
		} else {
			if (n == 1) {
				index = q->pidx ? (q->pidx - 1) : (q->size - 1);
				wr = (unsigned int *)&q->desc[index];
				cxgb_pio_copy((u64 __iomem *)
					      (adap->bar2 + q->udb + 64),
					      (u64 *)wr);
			} else
				writel(n, adap->bar2 + q->udb + 8);
			wmb();
		}
	} else
		q->db_pidx_inc += n;
	q->db_pidx = q->pidx;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

/**
 *	inline_tx_skb - inline a packet's data into Tx descriptors
 *	@skb: the packet
 *	@q: the Tx queue where the packet will be inlined
 *	@pos: starting position in the Tx queue where to inline the packet
 *
 *	Inline a packet's contents directly into Tx descriptors, starting at
 *	the given position within the Tx DMA ring.
 *	Most of the complexity of this operation is dealing with wrap arounds
 *	in the middle of the packet we want to inline.
 */
static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
			  void *pos)
{
	u64 *p;
	int left = (void *)q->stat - pos;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}

	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(const struct sk_buff *skb)
{
	int csum_type;
	const struct iphdr *iph = ip_hdr(skb);

	if (iph->version == 4) {
		if (iph->protocol == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (iph->protocol == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;

		if (ip6h->nexthdr == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (ip6h->nexthdr == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP))
		return TXPKT_CSUM_TYPE(csum_type) |
			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
	else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
			TXPKT_CSUM_LOC(start + skb->csum_offset);
	}
}
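/*
 * Note (editorial): all csum_type values assigned above are >= TX_CSUM_TCPIP,
 * so the likely branch applies for them; the else branch is kept for checksum
 * types where the engine needs explicit start/location values.  E.g. a
 * transport header at offset 34 with csum_offset 16 would yield
 * TXPKT_CSUM_START(34) | TXPKT_CSUM_LOC(50).
 */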
static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

/**
 *	t4_eth_xmit - add a packet to an Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len;
	u32 wr_mid;
	u64 cntrl, *end;
	int qidx, credits;
	unsigned int flits, ndesc;
	struct adapter *adap;
	struct sge_eth_txq *q;
	const struct port_info *pi;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct skb_shared_info *ssi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	bool immediate = false;

	/*
	 * The chip min packet length is 10 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
out_free:	dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (is_eth_imm(skb))
		immediate = true;

	if (!immediate &&
	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
		q->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		eth_txq_stop(q);
		wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
	}

	wr = (void *)&q->q.desc[q->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = cpu_to_be64(0);
	end = (u64 *)wr + flits;

	len = immediate ? skb->len : 0;
	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso *lso = (void *)wr;
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		len += sizeof(*lso);
		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN(len));
		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
					LSO_FIRST_SLICE | LSO_LAST_SLICE |
					LSO_IPV6(v6) |
					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
					LSO_IPHDR_LEN(l3hdr_len / 4) |
					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
		lso->c.ipid_ofst = htons(0);
		lso->c.mss = htons(ssi->gso_size);
		lso->c.seqno_offset = htonl(0);
		lso->c.len = htonl(skb->len);
		cpl = (void *)(lso + 1);
		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			TXPKT_IPHDR_LEN(l3hdr_len) |
			TXPKT_ETHHDR_LEN(eth_xtra_len);
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		len += sizeof(*cpl);
		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN(len));
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
			q->tx_cso++;
		} else
			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
	}

	if (vlan_tx_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
	}

	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	if (immediate) {
		inline_tx_skb(skb, &q->q, cpl + 1);
		dev_consume_skb_any(skb);
	} else {
		int last_desc;

		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
			  addr);
		skb_orphan(skb);

		last_desc = q->q.pidx + ndesc - 1;
		if (last_desc >= q->q.size)
			last_desc -= q->q.size;
		q->q.sdesc[last_desc].skb = skb;
		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
	}

	txq_advance(&q->q, ndesc);

	ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}
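/*
 * Descriptor layout recap (editorial), as assembled above: each Ethernet WR
 * begins with a struct fw_eth_tx_pkt_wr, optionally followed by a
 * CPL_TX_PKT_LSO header for TSO, then a struct cpl_tx_pkt_core, and finally
 * either the inlined packet bytes (immediate case) or a ULP_TX SGL pointing
 * at the DMA-mapped fragments.
 */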
/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
 *	that send only immediate data (presently just the control queues) and
 *	thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);
	int reclaim = hw_cidx - q->cidx;

	if (reclaim < 0)
		reclaim += q->size;

	q->in_use -= reclaim;
	q->cidx = hw_cidx;
}

/**
 *	is_imm - check whether a packet can be sent as immediate data
 *	@skb: the packet
 *
 *	Returns true if a packet can be sent as a WR with immediate data.
 */
static inline int is_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_CTRL_WR_LEN;
}

/**
 *	ctrlq_check_stop - check if a control queue is full and should stop
 *	@q: the queue
 *	@wr: most recent WR written to the queue
 *
 *	Check if a control queue has become full and should be stopped.
 *	We clean up control queue descriptors very lazily, only when we are out.
 *	If the queue is still full after reclaiming any completed descriptors
 *	we suspend it and have the last WR wake it up.
 */
static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
{
	reclaim_completed_tx_imm(&q->q);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
		wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
		q->q.stops++;
		q->full = 1;
	}
}

/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data.
 */
static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
{
	unsigned int ndesc;
	struct fw_wr_hdr *wr;

	if (unlikely(!is_imm(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
	spin_lock(&q->sendq.lock);

	if (unlikely(q->full)) {
		skb->priority = ndesc;                  /* save for restart */
		__skb_queue_tail(&q->sendq, skb);
		spin_unlock(&q->sendq.lock);
		return NET_XMIT_CN;
	}

	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
	inline_tx_skb(skb, &q->q, wr);

	txq_advance(&q->q, ndesc);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
		ctrlq_check_stop(q, wr);

	ring_tx_db(q->adap, &q->q, ndesc);
	spin_unlock(&q->sendq.lock);

	kfree_skb(skb);
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_ctrlq - restart a suspended control queue
 *	@data: the control queue to restart
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	unsigned int written = 0;
	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;

	spin_lock(&q->sendq.lock);
	reclaim_completed_tx_imm(&q->q);
	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */

	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
		struct fw_wr_hdr *wr;
		unsigned int ndesc = skb->priority;     /* previously saved */

		/*
		 * Write descriptors and free skbs outside the lock to limit
		 * wait times.  q->full is still set so new skbs will be queued.
		 */
		spin_unlock(&q->sendq.lock);

		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
		inline_tx_skb(skb, &q->q, wr);
		kfree_skb(skb);

		written += ndesc;
		txq_advance(&q->q, ndesc);
		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
			unsigned long old = q->q.stops;

			ctrlq_check_stop(q, wr);
			if (q->q.stops != old) {     /* suspended anew */
				spin_lock(&q->sendq.lock);
				goto ringdb;
			}
		}
		if (written > 16) {
			ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}
		spin_lock(&q->sendq.lock);
	}
	q->full = 0;
ringdb: if (written)
		ring_tx_db(q->adap, &q->q, written);
	spin_unlock(&q->sendq.lock);
}

/**
 *	t4_mgmt_tx - send a management message
 *	@adap: the adapter
 *	@skb: the packet containing the management message
 *
 *	Send a management message through control queue 0.
 */
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
	local_bh_enable();
	return ret;
}

/**
 *	is_ofld_imm - check whether a packet can be sent as immediate data
 *	@skb: the packet
 *
 *	Returns true if a packet can be sent as an offload WR with immediate
 *	data.  We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_IMM_TX_PKT_LEN;
}

/**
 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
 *	@skb: the packet
 *
 *	Returns the number of flits needed for the given offload packet.
 *	These packets are already fully constructed and no additional headers
 *	will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8U;   /* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
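/*
 * Example (editorial): an offload WR with 40 bytes of preconstructed
 * headers, one payload fragment, and no linear data past the headers needs
 * 40 / 8 = 5 header flits plus sgl_len(1) == 2 flits for the gather list,
 * i.e. 7 flits, which fits in a single 64-byte (8-flit) Tx descriptor.
 */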
/**
 *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 *	@q: the queue to stop
 *
 *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
 *	inability to map packets.  A periodic timer attempts to restart
 *	queues so marked.
 */
static void txq_stop_maperr(struct sge_ofld_txq *q)
{
	q->mapping_err++;
	q->q.stops++;
	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
		q->adap->sge.txq_maperr);
}

/**
 *	ofldtxq_stop - stop an offload Tx queue that has become full
 *	@q: the queue to stop
 *	@skb: the packet causing the queue to become full
 *
 *	Stops an offload Tx queue that has become full and modifies the packet
 *	being written to request a wakeup.
 */
static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
{
	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;

	wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
	q->q.stops++;
	q->full = 1;
}

/**
 *	service_ofldq - restart a suspended offload queue
 *	@q: the offload queue
 *
 *	Services an offload Tx queue by moving packets from its packet queue
 *	to the HW Tx ring.  The function starts and ends with the queue locked.
 */
static void service_ofldq(struct sge_ofld_txq *q)
{
	u64 *pos;
	int credits;
	struct sk_buff *skb;
	unsigned int written = 0;
	unsigned int flits, ndesc;

	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
		/*
		 * We drop the lock but leave skb on sendq, thus retaining
		 * exclusive access to the state of the queue.
		 */
		spin_unlock(&q->sendq.lock);

		reclaim_completed_tx(q->adap, &q->q, false);

		flits = skb->priority;                /* previously saved */
		ndesc = flits_to_desc(flits);
		credits = txq_avail(&q->q) - ndesc;
		BUG_ON(credits < 0);
		if (unlikely(credits < TXQ_STOP_THRES))
			ofldtxq_stop(q, skb);

		pos = (u64 *)&q->q.desc[q->q.pidx];
		if (is_ofld_imm(skb))
			inline_tx_skb(skb, &q->q, pos);
		else if (map_skb(q->adap->pdev_dev, skb,
				 (dma_addr_t *)skb->head)) {
			txq_stop_maperr(q);
			spin_lock(&q->sendq.lock);
			break;
		} else {
			int last_desc, hdr_len = skb_transport_offset(skb);

			memcpy(pos, skb->data, hdr_len);
			write_sgl(skb, &q->q, (void *)pos + hdr_len,
				  pos + flits, hdr_len,
				  (dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
			skb->dev = q->adap->port[0];
			skb->destructor = deferred_unmap_destructor;
#endif
			last_desc = q->q.pidx + ndesc - 1;
			if (last_desc >= q->q.size)
				last_desc -= q->q.size;
			q->q.sdesc[last_desc].skb = skb;
		}

		txq_advance(&q->q, ndesc);
		written += ndesc;
		if (unlikely(written > 32)) {
			ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}

		spin_lock(&q->sendq.lock);
		__skb_unlink(skb, &q->sendq);
		if (is_ofld_imm(skb))
			kfree_skb(skb);
	}
	if (likely(written))
		ring_tx_db(q->adap, &q->q, written);
}

/**
 *	ofld_xmit - send a packet through an offload queue
 *	@q: the Tx offload queue
 *	@skb: the packet
 *
 *	Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
{
	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
	spin_lock(&q->sendq.lock);
	__skb_queue_tail(&q->sendq, skb);
	if (q->sendq.qlen == 1)
		service_ofldq(q);
	spin_unlock(&q->sendq.lock);
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_ofldq - restart a suspended offload queue
 *	@data: the offload queue to restart
 *
 *	Resumes transmission on a suspended Tx offload queue.
 */
static void restart_ofldq(unsigned long data)
{
	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;

	spin_lock(&q->sendq.lock);
	q->full = 0;            /* the queue actually is completely empty now */
	service_ofldq(q);
	spin_unlock(&q->sendq.lock);
}

/**
 *	skb_txq - return the Tx queue an offload packet should use
 *	@skb: the packet
 *
 *	Returns the Tx queue an offload packet should use as indicated by bits
 *	1-15 in the packet's queue_mapping.
 */
static inline unsigned int skb_txq(const struct sk_buff *skb)
{
	return skb->queue_mapping >> 1;
}

/**
 *	is_ctrl_pkt - return whether an offload packet is a control packet
 *	@skb: the packet
 *
 *	Returns whether an offload packet should use an OFLD or a CTRL
 *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
 */
static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->queue_mapping & 1;
}
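/*
 * Encoding example (editorial): a ULD directing a packet at offload queue 5
 * would set queue_mapping to (5 << 1) == 10, while a control message for
 * ctrl queue 0 would set it to (0 << 1) | 1 == 1; skb_txq() and
 * is_ctrl_pkt() above simply undo this packing.
 */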
static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
{
	unsigned int idx = skb_txq(skb);

	if (unlikely(is_ctrl_pkt(skb))) {
		/* Single ctrl queue is a requirement for LE workaround path */
		if (adap->tids.nsftids)
			idx = 0;
		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
	}
	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
}

/**
 *	t4_ofld_send - send an offload packet
 *	@adap: the adapter
 *	@skb: the packet
 *
 *	Sends an offload packet.  We use the packet queue_mapping to select the
 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
 *	should be sent as regular or control, bits 1-15 select the queue.
 */
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ofld_send(adap, skb);
	local_bh_enable();
	return ret;
}

/**
 *	cxgb4_ofld_send - send an offload packet
 *	@dev: the net device
 *	@skb: the packet
 *
 *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
 *	intended for ULDs.
 */
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
{
	return t4_ofld_send(netdev2adap(dev), skb);
}
EXPORT_SYMBOL(cxgb4_ofld_send);

static inline void copy_frags(struct sk_buff *skb,
			      const struct pkt_gl *gl, unsigned int offset)
{
	int i;

	/* usually there's just one frag */
	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	skb_shinfo(skb)->nr_frags = gl->nfrags;
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, i, gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[gl->nfrags - 1].page);
}

/**
 *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
 *	@gl: the gather list
 *	@skb_len: size of sk_buff main body if it carries fragments
 *	@pull_len: amount of data to move to the sk_buff's main body
 *
 *	Builds an sk_buff from the given packet gather list.  Returns the
 *	sk_buff or %NULL if sk_buff allocation failed.
 */
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
				   unsigned int skb_len, unsigned int pull_len)
{
	struct sk_buff *skb;

	/*
	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
	 * size, which is expected since buffers are at least PAGE_SIZEd.
	 * In this case packets up to RX_COPY_THRES have only one fragment.
	 */
	if (gl->tot_len <= RX_COPY_THRES) {
		skb = dev_alloc_skb(gl->tot_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {
		skb = dev_alloc_skb(skb_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, pull_len);
		skb_copy_to_linear_data(skb, gl->va, pull_len);

		copy_frags(skb, gl, pull_len);
		skb->len = gl->tot_len;
		skb->data_len = skb->len - pull_len;
		skb->truesize += skb->data_len;
	}
out:	return skb;
}
EXPORT_SYMBOL(cxgb4_pktgl_to_skb);

/**
 *	t4_pktgl_free - free a packet gather list
 *	@gl: the gather list
 *
 *	Releases the pages of a packet gather list.  We do not own the last
 *	page on the list and do not free it.
 */
static void t4_pktgl_free(const struct pkt_gl *gl)
{
	int n;
	const struct page_frag *p;

	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
		put_page(p->page);
}

/*
 * Process an MPS trace packet.  Give it an unused protocol number so it won't
 * be delivered to anyone and send it to the stack for capture.
 */
static noinline int handle_trace_pkt(struct adapter *adap,
				     const struct pkt_gl *gl)
{
	struct sk_buff *skb;

	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		return 0;
	}

	if (is_t4(adap->params.chip))
		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
	else
		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));

	skb_reset_mac_header(skb);
	skb->protocol = htons(0xffff);
	skb->dev = adap->port[0];
	netif_receive_skb(skb);
	return 0;
}

static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
		   const struct cpl_rx_pkt *pkt)
{
	struct adapter *adapter = rxq->rspq.adap;
	struct sge *s = &adapter->sge;
	int ret;
	struct sk_buff *skb;

	skb = napi_get_frags(&rxq->rspq.napi);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		rxq->stats.rx_drops++;
		return;
	}

	copy_frags(skb, gl, s->pktshift);
	skb->len = gl->tot_len - s->pktshift;
	skb->data_len = skb->len;
	skb->truesize += skb->data_len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxq->rspq.idx);
	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
			     PKT_HASH_TYPE_L3);

	if (unlikely(pkt->vlan_ex)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	ret = napi_gro_frags(&rxq->rspq.napi);
	if (ret == GRO_HELD)
		rxq->stats.lro_pkts++;
	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
		rxq->stats.lro_merged++;
	rxq->stats.pkts++;
	rxq->stats.rx_cso++;
}

/**
 *	t4_ethrx_handler - process an ingress ethernet packet
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@si: the gather list of packet fragments
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *si)
{
	bool csum_ok;
	struct sk_buff *skb;
	const struct cpl_rx_pkt *pkt;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
	struct sge *s = &q->adap->sge;
	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;

	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
		return handle_trace_pkt(q->adap, si);

	pkt = (const struct cpl_rx_pkt *)rsp;
	csum_ok = pkt->csum_calc && !pkt->err_vec &&
		  (q->netdev->features & NETIF_F_RXCSUM);
	if ((pkt->l2info & htonl(RXF_TCP)) &&
	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
		do_gro(rxq, si, pkt);
		return 0;
	}

	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4_pktgl_free(si);
		rxq->stats.rx_drops++;
		return 0;
	}

	__skb_pull(skb, s->pktshift);      /* remove ethernet header padding */
	skb->protocol = eth_type_trans(skb, q->netdev);
	skb_record_rx_queue(skb, q->idx);
	if (skb->dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
			     PKT_HASH_TYPE_L3);

	rxq->stats.pkts++;

	if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
		if (!pkt->ip_frag) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			rxq->stats.rx_cso++;
		} else if (pkt->l2info & htonl(RXF_IP)) {
			__sum16 c = (__force __sum16)pkt->csum;
			skb->csum = csum_unfold(c);
			skb->ip_summed = CHECKSUM_COMPLETE;
			rxq->stats.rx_cso++;
		}
	} else
		skb_checksum_none_assert(skb);

	if (unlikely(pkt->vlan_ex)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	netif_receive_skb(skb);
	return 0;
}

/**
 *	restore_rx_bufs - put back a packet's Rx buffers
 *	@si: the packet gather list
 *	@q: the SGE free list
 *	@frags: number of FL buffers to restore
 *
 *	Puts back on an FL the Rx buffers associated with @si.  The buffers
 *	have already been unmapped and are left unmapped, we mark them as such
 *	to prevent further unmapping attempts.
 *
 *	This function undoes a series of @unmap_rx_buf calls when we find out
 *	that the current packet can't be processed right away after all and we
 *	need to come back to it later.  This is a very rare event and there's
 *	no effort to make this particularly efficient.
 */
static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
			    int frags)
{
	struct rx_sw_desc *d;

	while (frags--) {
		if (q->cidx == 0)
			q->cidx = q->size - 1;
		else
			q->cidx--;
		d = &q->sdesc[q->cidx];
		d->page = si->frags[frags].page;
		d->dma_addr |= RX_UNMAPPED_BUF;
		q->avail++;
	}
}

/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static inline bool is_new_response(const struct rsp_ctrl *r,
				   const struct sge_rspq *q)
{
	return RSPD_GEN(r->type_gen) == q->gen;
}
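/*
 * Generation-bit example (editorial, assuming the queue is initialized with
 * gen == 1 at allocation time): the hardware writes gen=1 into each response
 * of the first pass through the ring.  When the SW index wraps, rspq_next()
 * below flips q->gen to 0, so stale first-pass entries no longer compare as
 * new.
 */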
/**
 *	rspq_next - advance to the next entry in a response queue
 *	@q: the queue
 *
 *	Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *q)
{
	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;
		q->cur_desc = q->desc;
	}
}
/**
 *	process_responses - process responses from an SGE response queue
 *	@q: the ingress queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as control messages from FW
 *	or HW.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
static int process_responses(struct sge_rspq *q, int budget)
{
	int ret, rsp_type;
	int budget_left = budget;
	const struct rsp_ctrl *rc;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
	struct adapter *adapter = q->adap;
	struct sge *s = &adapter->sge;

	while (likely(budget_left)) {
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		rmb();
		rsp_type = RSPD_TYPE(rc->type_gen);
		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
			struct page_frag *fp;
			struct pkt_gl si;
			const struct rx_sw_desc *rsd;
			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;

			if (len & RSPD_NEWBUF) {
				if (likely(q->offset > 0)) {
					free_rx_bufs(q->adap, &rxq->fl, 1);
					q->offset = 0;
				}
				len = RSPD_LEN(len);
			}
			si.tot_len = len;

			/* gather packet fragments */
			for (frags = 0, fp = si.frags; ; frags++, fp++) {
				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
				bufsz = get_buf_size(adapter, rsd);
				fp->page = rsd->page;
				fp->offset = q->offset;
				fp->size = min(bufsz, len);
				len -= fp->size;
				if (!len)
					break;
				unmap_rx_buf(q->adap, &rxq->fl);
			}

			/*
			 * Last buffer remains mapped so explicitly make it
			 * coherent for CPU access.
			 */
			dma_sync_single_for_cpu(q->adap->pdev_dev,
						get_buf_addr(rsd),
						fp->size, DMA_FROM_DEVICE);

			si.va = page_address(si.frags[0].page) +
				si.frags[0].offset;
			prefetch(si.va);

			si.nfrags = frags + 1;
			ret = q->handler(q, q->cur_desc, &si);
			if (likely(ret == 0))
				q->offset += ALIGN(fp->size, s->fl_align);
			else
				restore_rx_bufs(&si, &rxq->fl, frags);
		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
			ret = q->handler(q, q->cur_desc, NULL);
		} else {
			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
		}

		if (unlikely(ret)) {
			/* couldn't process descriptor, back off for recovery */
			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
			break;
		}

		rspq_next(q);
		budget_left--;
	}

	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
		__refill_fl(q->adap, &rxq->fl);
	return budget - budget_left;
}
/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere (the latter
 *	is not a concern at all with MSI-X as non-data interrupts then have
 *	a separate handler).
 */
static int napi_rx_handler(struct napi_struct *napi, int budget)
{
	unsigned int params;
	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
	int work_done = process_responses(q, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
		params = q->next_intr_params;
		q->next_intr_params = q->intr_params;
	} else
		params = QINTR_TIMER_IDX(7);

	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
		     INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
	return work_done;
}
/*
 * The MSI-X interrupt handler for an SGE response queue.
 */
irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
{
	struct sge_rspq *q = cookie;

	napi_schedule(&q->napi);
	return IRQ_HANDLED;
}
/*
 * Process the indirect interrupt entries in the interrupt queue and kick off
 * NAPI for each queue that has generated an entry.
 */
static unsigned int process_intrq(struct adapter *adap)
{
	unsigned int credits;
	const struct rsp_ctrl *rc;
	struct sge_rspq *q = &adap->sge.intrq;

	spin_lock(&adap->sge.intrq_lock);
	for (credits = 0; ; credits++) {
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		rmb();
		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
			unsigned int qid = ntohl(rc->pldbuflen_qid);

			qid -= adap->sge.ingr_start;
			napi_schedule(&adap->sge.ingr_map[qid]->napi);
		}

		rspq_next(q);
	}

	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
		     INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
	spin_unlock(&adap->sge.intrq_lock);
	return credits;
}
 *	This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere (the latter
 *	is not a concern at all with MSI-X as non-data interrupts then have
 *	a separate handler).
 */
static int napi_rx_handler(struct napi_struct *napi, int budget)
{
	unsigned int params;
	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
	int work_done = process_responses(q, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
		params = q->next_intr_params;
		q->next_intr_params = q->intr_params;
	} else
		params = QINTR_TIMER_IDX(7);

	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
		     INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
	return work_done;
}

/*
 * The MSI-X interrupt handler for an SGE response queue.
 */
irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
{
	struct sge_rspq *q = cookie;

	napi_schedule(&q->napi);
	return IRQ_HANDLED;
}

/*
 * Process the indirect interrupt entries in the interrupt queue and kick off
 * NAPI for each queue that has generated an entry.
 */
static unsigned int process_intrq(struct adapter *adap)
{
	unsigned int credits;
	const struct rsp_ctrl *rc;
	struct sge_rspq *q = &adap->sge.intrq;

	spin_lock(&adap->sge.intrq_lock);
	for (credits = 0; ; credits++) {
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		rmb();
		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
			unsigned int qid = ntohl(rc->pldbuflen_qid);

			qid -= adap->sge.ingr_start;
			napi_schedule(&adap->sge.ingr_map[qid]->napi);
		}

		rspq_next(q);
	}

	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
		     INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
	spin_unlock(&adap->sge.intrq_lock);
	return credits;
}

/*
 * The MSI interrupt handler, which handles data events from SGE response
 * queues as well as error and other async events as they all use the same
 * MSI vector.
 */
static irqreturn_t t4_intr_msi(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	t4_slow_intr_handler(adap);
	process_intrq(adap);
	return IRQ_HANDLED;
}

/*
 * Interrupt handler for legacy INTx interrupts.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt line.
 */
static irqreturn_t t4_intr_intx(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
	if (t4_slow_intr_handler(adap) | process_intrq(adap))
		return IRQ_HANDLED;
	return IRQ_NONE;             /* probably shared interrupt */
}

/**
 *	t4_intr_handler - select the top-level interrupt handler
 *	@adap: the adapter
 *
 *	Selects the top-level interrupt handler based on the type of interrupts
 *	(MSI-X, MSI, or INTx).
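 *
 *	A caller would typically hand the result straight to request_irq().
 *	Rough sketch only (the real registration lives in the main driver and
 *	supplies its own flags and name string):
 *
 *		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
 *				  0, name, adap);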
 */
irq_handler_t t4_intr_handler(struct adapter *adap)
{
	if (adap->flags & USING_MSIX)
		return t4_sge_intr_msix;
	if (adap->flags & USING_MSI)
		return t4_intr_msi;
	return t4_intr_intx;
}

static void sge_rx_timer_cb(unsigned long data)
{
	unsigned long m;
	unsigned int i, idma_same_state_cnt[2];
	struct adapter *adap = (struct adapter *)data;
	struct sge *s = &adap->sge;

	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
		for (m = s->starving_fl[i]; m; m &= m - 1) {
			struct sge_eth_rxq *rxq;
			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_fl *fl = s->egr_map[id];

			clear_bit(id, s->starving_fl);
			smp_mb__after_atomic();

			if (fl_starving(fl)) {
				rxq = container_of(fl, struct sge_eth_rxq, fl);
				if (napi_reschedule(&rxq->rspq.napi))
					fl->starving++;
				else
					set_bit(id, s->starving_fl);
			}
		}

	t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
			if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
				CH_WARN(adap, "SGE idma%d, queue%u, resumed after %d sec\n",
					i, s->idma_qid[i],
					s->idma_stalled[i] / HZ);
			s->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (s->idma_stalled[i] == 0)
			s->idma_stalled[i] = HZ;
		else
			s->idma_stalled[i] += RX_QCHECK_PERIOD;

		if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
		if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
			continue;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
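		 *
		 * (SGE_DEBUG_INDEX selects which debug word shows up in the
		 * SGE_DEBUG_DATA_* registers: word 0 carries the per-channel
		 * state machine values at 9-bit strides, word 11 the two
		 * 16-bit queue IDs, as decoded below.)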
		 */
		t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
		s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
		s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state %u for %d secs (debug0=%#x, debug11=%#x)\n",
			i, s->idma_qid[i], s->idma_state[i],
			s->idma_stalled[i] / HZ, debug0, debug11);
		t4_sge_decode_idma_state(adap, s->idma_state[i]);
	}

	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}

static void sge_tx_timer_cb(unsigned long data)
{
	unsigned long m;
	unsigned int i, budget;
	struct adapter *adap = (struct adapter *)data;
	struct sge *s = &adap->sge;

	for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
		for (m = s->txq_maperr[i]; m; m &= m - 1) {
			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_ofld_txq *txq = s->egr_map[id];

			clear_bit(id, s->txq_maperr);
			tasklet_schedule(&txq->qresume_tsk);
		}

	budget = MAX_TIMER_TX_RECLAIM;
	i = s->ethtxq_rover;
	do {
		struct sge_eth_txq *q = &s->ethtxq[i];

		if (q->q.in_use &&
		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
		    __netif_tx_trylock(q->txq)) {
			int avail = reclaimable(&q->q);

			if (avail) {
				if (avail > budget)
					avail = budget;

				free_tx_desc(adap, &q->q, avail, true);
				q->q.in_use -= avail;
				budget -= avail;
			}
			__netif_tx_unlock(q->txq);
		}

		if (++i >= s->ethqsets)
			i = 0;
	} while (budget && i != s->ethtxq_rover);
	s->ethtxq_rover = i;
	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
}

int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd)
{
	int ret, flsz = 0;
	struct fw_iq_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = netdev_priv(dev);

	/* Size needs to be a multiple of 16, including the status entry. */
	iq->size = roundup(iq->size, 16);

	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
	if (!iq->desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
				 FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
		FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
		FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
		FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ?
				       intr_idx :
				       -intr_idx - 1));
	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
		FW_IQ_CMD_IQGTSMODE |
		FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
		FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
	c.iqsize = htons(iq->size);
	c.iqaddr = cpu_to_be64(iq->phys_addr);

	if (fl) {
		fl->size = roundup(fl->size, 8);
		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
				      sizeof(struct rx_sw_desc), &fl->addr,
				      &fl->sdesc, s->stat_len, NUMA_NO_NODE);
		if (!fl->desc)
			goto fl_nomem;

		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) |
					    FW_IQ_CMD_FL0FETCHRO(1) |
					    FW_IQ_CMD_FL0DATARO(1) |
					    FW_IQ_CMD_FL0PADEN(1));
		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
						     FW_IQ_CMD_FL0FBMAX(3));
		c.fl0size = htons(flsz);
		c.fl0addr = cpu_to_be64(fl->addr);
	}

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret)
		goto err;

	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
	iq->cur_desc = iq->desc;
	iq->cidx = 0;
	iq->gen = 1;
	iq->next_intr_params = iq->intr_params;
	iq->cntxt_id = ntohs(c.iqid);
	iq->abs_id = ntohs(c.physiqid);
	iq->size--;                           /* subtract status entry */
	iq->netdev = dev;
	iq->handler = hnd;

	/* set offset to -1 to distinguish ingress queues without FL */
	iq->offset = fl ? 0 : -1;

	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;

	if (fl) {
		fl->cntxt_id = ntohs(c.fl0id);
		fl->avail = fl->pend_cred = 0;
		fl->pidx = fl->cidx = 0;
		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
	}
	return 0;

fl_nomem:
	ret = -ENOMEM;
err:
	if (iq->desc) {
		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
				  iq->desc, iq->phys_addr);
		iq->desc = NULL;
	}
	if (fl && fl->desc) {
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
				  fl->desc, fl->addr);
		fl->desc = NULL;
	}
	return ret;
}

static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
	q->cntxt_id = id;
	if (!is_t4(adap->params.chip)) {
		unsigned int s_qpp;
		unsigned short udb_density;
		unsigned long qpshift;
		int page;

		s_qpp = QUEUESPERPAGEPF1 * adap->fn;
		udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap,
				SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp));
		qpshift = PAGE_SHIFT - ilog2(udb_density);
		q->udb = q->cntxt_id << qpshift;
		q->udb &= PAGE_MASK;
		page = q->udb / PAGE_SIZE;
		q->udb += (q->cntxt_id - (page * udb_density)) * 128;
	}

	q->in_use = 0;
	q->cidx = q->pidx = 0;
	q->stops = q->restarts = 0;
	q->stat = (void *)&q->desc[q->size];
	spin_lock_init(&q->db_lock);
	adap->sge.egr_map[id - adap->sge.egr_start] = q;
}

int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_eth_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
			netdev_queue_numa_node_read(netdevq));
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
				 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
				   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_ETH_CMD_FETCHRO(1) |
				   FW_EQ_ETH_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
				  FW_EQ_ETH_CMD_FBMAX(3) |
				  FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
				  FW_EQ_ETH_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
	txq->txq = netdevq;
	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
	txq->mapping_err = 0;
	return 0;
}

int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid)
{
	int ret, nentries;
	struct fw_eq_ctrl_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
				 NULL, 0, NUMA_NO_NODE);
	if (!txq->q.desc)
		return -ENOMEM;

	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_CTRL_CMD_PFN(adap->fn) |
			    FW_EQ_CTRL_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
				 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
	c.physeqid_pkd = htonl(0);
	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
				   FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_CTRL_CMD_FETCHRO |
				   FW_EQ_CTRL_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
				  FW_EQ_CTRL_CMD_FBMAX(3) |
				  FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
				  FW_EQ_CTRL_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
	txq->full = 0;
	return 0;
}

int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
			  struct net_device *dev, unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_ofld_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
			NUMA_NO_NODE);
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_OFLD_CMD_PFN(adap->fn) |
			    FW_EQ_OFLD_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
				 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
				   FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_OFLD_CMD_FETCHRO(1) |
				   FW_EQ_OFLD_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
				  FW_EQ_OFLD_CMD_FBMAX(3) |
				  FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
				  FW_EQ_OFLD_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
	txq->full = 0;
	txq->mapping_err = 0;
	return 0;
}

static void free_txq(struct adapter *adap, struct sge_txq *q)
{
	struct sge *s = &adap->sge;

	dma_free_coherent(adap->pdev_dev,
			  q->size * sizeof(struct tx_desc) + s->stat_len,
			  q->desc, q->phys_addr);
	q->cntxt_id = 0;
	q->sdesc = NULL;
	q->desc = NULL;
}

static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
			 struct sge_fl *fl)
{
	struct sge *s = &adap->sge;
	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
		   rq->cntxt_id, fl_id, 0xffff);
	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
			  rq->desc, rq->phys_addr);
	netif_napi_del(&rq->napi);
	rq->netdev = NULL;
	rq->cntxt_id = rq->abs_id = 0;
	rq->desc = NULL;

	if (fl) {
		free_rx_bufs(adap, fl, fl->avail);
		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
				  fl->desc, fl->addr);
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}

/**
 *	t4_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
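 *	Resume tasklets are killed before their queues are freed, and the
 *	reverse egress queue map is cleared once everything is gone.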
 */
void t4_free_sge_resources(struct adapter *adap)
{
	int i;
	struct sge_eth_rxq *eq = adap->sge.ethrxq;
	struct sge_eth_txq *etq = adap->sge.ethtxq;
	struct sge_ofld_rxq *oq = adap->sge.ofldrxq;

	/* clean up Ethernet Tx/Rx queues */
	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
		if (eq->rspq.desc)
			free_rspq_fl(adap, &eq->rspq, &eq->fl);
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
				       etq->q.cntxt_id);
			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
			kfree(etq->q.sdesc);
			free_txq(adap, &etq->q);
		}
	}

	/* clean up RDMA and iSCSI Rx queues */
	for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
		if (oq->rspq.desc)
			free_rspq_fl(adap, &oq->rspq, &oq->fl);
	}
	for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
		if (oq->rspq.desc)
			free_rspq_fl(adap, &oq->rspq, &oq->fl);
	}
	for (i = 0, oq = adap->sge.rdmaciq; i < adap->sge.rdmaciqs; i++, oq++) {
		if (oq->rspq.desc)
			free_rspq_fl(adap, &oq->rspq, &oq->fl);
	}

	/* clean up offload Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];

		if (q->q.desc) {
			tasklet_kill(&q->qresume_tsk);
			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
					q->q.cntxt_id);
			free_tx_desc(adap, &q->q, q->q.in_use, false);
			kfree(q->q.sdesc);
			__skb_queue_purge(&q->sendq);
			free_txq(adap, &q->q);
		}
	}

	/* clean up control Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];

		if (cq->q.desc) {
			tasklet_kill(&cq->qresume_tsk);
			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
					cq->q.cntxt_id);
			__skb_queue_purge(&cq->sendq);
			free_txq(adap, &cq->q);
		}
	}

	if (adap->sge.fw_evtq.desc)
		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);

	if (adap->sge.intrq.desc)
		free_rspq_fl(adap, &adap->sge.intrq, NULL);

	/* clear the reverse egress queue map */
	memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
}

void t4_sge_start(struct adapter *adap)
{
	adap->sge.ethtxq_rover = 0;
	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
}

/**
 *	t4_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Stop tasklets and timers associated with the DMA engine.  Note that
 *	this is effective only if measures have been taken to disable any HW
 *	events that may restart them.
 */
void t4_sge_stop(struct adapter *adap)
{
	int i;
	struct sge *s = &adap->sge;

	if (in_interrupt())  /* actions below require waiting */
		return;

	if (s->rx_timer.function)
		del_timer_sync(&s->rx_timer);
	if (s->tx_timer.function)
		del_timer_sync(&s->tx_timer);

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
		struct sge_ofld_txq *q = &s->ofldtxq[i];

		if (q->q.desc)
			tasklet_kill(&q->qresume_tsk);
	}
	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
		struct sge_ctrl_txq *cq = &s->ctrlq[i];

		if (cq->q.desc)
			tasklet_kill(&cq->qresume_tsk);
	}
}

/**
 *	t4_sge_init - initialize SGE
 *	@adap: the adapter
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queues here; instead the top-level
 *	driver must request them individually.
 *
 *	Called in two different modes:
 *
 *	 1. Perform actual hardware initialization and record hard-coded
 *	    parameters which were used.  This gets used when we're the
 *	    Master PF and the Firmware Configuration File support didn't
 *	    work for some reason.
 *
 *	 2. We're not the Master PF or initialization was performed with
 *	    a Firmware Configuration File.  In this case we need to grab
 *	    any of the SGE operating parameters that we need to have in
 *	    order to do our job and make sure we can live with them ...
 */

static int t4_sge_init_soft(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	/*
	 * Verify that CPL messages are going to the Ingress Queue for
	 * process_responses() and that only packet data is going to the
	 * Free Lists.
	 */
	if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
	    RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
		return -EINVAL;
	}

	/*
	 * Validate the Host Buffer Register Array indices that we want to
	 * use ...
	 *
	 * XXX Note that we should really read through the Host Buffer Size
	 * XXX register array and find the indices of the Buffer Sizes which
	 * XXX meet our needs!
	 */
#define READ_FL_BUF(x) \
	t4_read_reg(adap, SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32))

	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);

	/* We only bother using the Large Page logic if the Large Page Buffer
	 * is larger than our Page Size Buffer.
	 */
	if (fl_large_pg <= fl_small_pg)
		fl_large_pg = 0;

#undef READ_FL_BUF

	/* The Page Size Buffer must be exactly equal to our Page Size and the
	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
	 */
	if (fl_small_pg != PAGE_SIZE ||
	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
			fl_small_pg, fl_large_pg);
		return -EINVAL;
	}
	if (fl_large_pg)
		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;

	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
			fl_small_mtu, fl_large_mtu);
		return -EINVAL;
	}

	/*
	 * Retrieve our RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.
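	 * Each SGE_TIMER_VALUE_* register packs two holdoff timers expressed
	 * in core clock ticks; core_ticks_to_us() converts them to the
	 * microsecond values the rest of the driver expects.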
	 */
	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
	s->timer_val[0] = core_ticks_to_us(adap,
		TIMERVALUE0_GET(timer_value_0_and_1));
	s->timer_val[1] = core_ticks_to_us(adap,
		TIMERVALUE1_GET(timer_value_0_and_1));
	s->timer_val[2] = core_ticks_to_us(adap,
		TIMERVALUE2_GET(timer_value_2_and_3));
	s->timer_val[3] = core_ticks_to_us(adap,
		TIMERVALUE3_GET(timer_value_2_and_3));
	s->timer_val[4] = core_ticks_to_us(adap,
		TIMERVALUE4_GET(timer_value_4_and_5));
	s->timer_val[5] = core_ticks_to_us(adap,
		TIMERVALUE5_GET(timer_value_4_and_5));

	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
	s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
	s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
	s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
	s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);

	return 0;
}

static int t4_sge_init_hard(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	/*
	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
	 * Queue and Packet Data to the Free List.
	 */
	t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
			 RXPKTCPLMODE_MASK);

	/*
	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
	 * and generate an interrupt when this occurs so we can recover.
	 */
	if (is_t4(adap->params.chip)) {
		t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
				 V_HP_INT_THRESH(M_HP_INT_THRESH) |
				 V_LP_INT_THRESH(M_LP_INT_THRESH),
				 V_HP_INT_THRESH(dbfifo_int_thresh) |
				 V_LP_INT_THRESH(dbfifo_int_thresh));
	} else {
		t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
				 V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
				 V_LP_INT_THRESH_T5(dbfifo_int_thresh));
		t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
				 V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
				 V_HP_INT_THRESH_T5(dbfifo_int_thresh));
	}
	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
			 F_ENABLE_DROP);

	/*
	 * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
	 * t4_fixup_host_params().
	 */
	s->fl_pg_order = FL_PG_ORDER;
	if (s->fl_pg_order)
		t4_write_reg(adap,
			     SGE_FL_BUFFER_SIZE0 + RX_LARGE_PG_BUF * sizeof(u32),
			     PAGE_SIZE << FL_PG_ORDER);
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0 + RX_SMALL_MTU_BUF * sizeof(u32),
		     FL_MTU_SMALL_BUFSIZE(adap));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0 + RX_LARGE_MTU_BUF * sizeof(u32),
		     FL_MTU_LARGE_BUFSIZE(adap));

	/*
	 * Note that the SGE Ingress Packet Count Interrupt Threshold and
	 * Timer Holdoff values must be supplied by our caller.
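	 * That is, s->counter_val[] and s->timer_val[] act as inputs here:
	 * we write them to the hardware (converting microseconds back to
	 * core clock ticks) rather than reading them from it the way
	 * t4_sge_init_soft() does.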
	 */
	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
		     THRESHOLD_0(s->counter_val[0]) |
		     THRESHOLD_1(s->counter_val[1]) |
		     THRESHOLD_2(s->counter_val[2]) |
		     THRESHOLD_3(s->counter_val[3]));
	t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
		     TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
		     TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
		     TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
		     TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));

	return 0;
}

int t4_sge_init(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 sge_control, sge_conm_ctrl;
	int ret, egress_threshold;

	/*
	 * Ingress Padding Boundary and Egress Status Page Size are set up by
	 * t4_fixup_host_params().
	 */
	sge_control = t4_read_reg(adap, SGE_CONTROL);
	s->pktshift = PKTSHIFT_GET(sge_control);
	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
	s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
			    X_INGPADBOUNDARY_SHIFT);

	if (adap->flags & USING_SOFT_PARAMS)
		ret = t4_sge_init_soft(adap);
	else
		ret = t4_sge_init_hard(adap);
	if (ret < 0)
		return ret;

	/*
	 * An FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it.  This needs to be larger than the
	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
	 * stuck waiting for new packets while the SGE is waiting for us to
	 * give it more Free List entries.  (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.)  For T4,
	 * there was only a single field to control this.  For T5 there's the
	 * original field which now only applies to Unpacked Mode Free List
	 * buffers and a new field which only applies to Packed Mode Free List
	 * buffers.
	 */
	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
	if (is_t4(adap->params.chip))
		egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
	else
		egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
	s->fl_starve_thres = 2 * egress_threshold + 1;

	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
	s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
	s->idma_stalled[0] = 0;
	s->idma_stalled[1] = 0;
	spin_lock_init(&s->intrq_lock);

	return 0;
}
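
/*
 * Illustrative sketch (not compiled) of the bring-up order a caller is
 * expected to follow.  Here "dev", "msix" and "fwevtq_handler" stand in for
 * the caller's net_device, MSI-X vector index and firmware-event handler;
 * they are assumptions, not definitions from this file:
 *
 *	err = t4_sge_init(adap);		// record SGE operating params
 *	err = t4_sge_alloc_rxq(adap, &adap->sge.fw_evtq, true, dev,
 *			       msix, NULL, fwevtq_handler);
 *	err = t4_sge_alloc_eth_txq(adap, &adap->sge.ethtxq[0], dev,
 *				   netdev_get_tx_queue(dev, 0),
 *				   adap->sge.fw_evtq.cntxt_id);
 *	t4_sge_start(adap);			// arm the Rx/Tx check timers
 */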