/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"
#include "cxgb4_tc_mqprio.h"
#include "sched.h"

/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES 256
#define RX_PULL_LEN 128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN 512

/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive; we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.  It should
 * also match the CIDX Flush Threshold.
 */
#define MAX_TX_RECLAIM 32

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

struct rx_sw_desc {		/* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)

/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte aligned or a larger power of 2 in alignment.  Since the SGE's
 * minimal Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};
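
/*
 * Purely illustrative example (made-up bus address, not from the original
 * source): a large-page Free List buffer mapped at 0x12340000 would be
 * recorded as
 *
 *	sd->dma_addr = 0x12340000 | RX_LARGE_PG_BUF;	becomes 0x12340001
 *
 * get_buf_addr() below masks off RX_BUF_FLAGS to recover the bus address,
 * while (dma_addr & RX_BUF_SIZE) recovers the buffer size index (1 here)
 * that the hardware uses to look up the SGE Free List Buffer Size register.
 */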
static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK  1

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}

/**
 * txq_avail - return the number of available slots in a Tx queue
 * @q: the Tx queue
 *
 * Returns the number of descriptors in a Tx queue available to write new
 * packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 * fl_cap - return the capacity of a free-buffer list
 * @fl: the FL
 *
 * Returns the capacity of a free-buffer list.  The capacity is less than
 * the size because one descriptor needs to be left unpopulated, otherwise
 * HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}

/**
 * fl_starving - return whether a Free List is starving.
 * @adapter: pointer to the adapter
 * @fl: the Free List
 *
 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
 * threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
		  dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL(cxgb4_map_skb);

static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 * @skb: the packet
 *
 * This is the packet destructor used for Tx packets that need to remain
 * mapped until they are freed rather than until their Tx descriptors are
 * freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adap: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
		  unsigned int n, bool unmap)
{
	unsigned int cidx = q->cidx;
	struct tx_sw_desc *d;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {			/* an SGL is present */
			if (unmap && d->addr[0]) {
				unmap_skb(adap->pdev_dev, d->skb, d->addr);
				memset(d->addr, 0, sizeof(d->addr));
			}
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}

/**
 * reclaim_completed_tx - reclaims completed TX Descriptors
 * @adap: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx Descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  If @maxreclaim == -1,
 * we'll use a default maximum.  Called with the TX Queue locked.
 */
static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				       int maxreclaim, bool unmap)
{
	int reclaim = reclaimable(q);

	if (reclaim) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (maxreclaim < 0)
			maxreclaim = MAX_TX_RECLAIM;
		if (reclaim > maxreclaim)
			reclaim = maxreclaim;

		free_tx_desc(adap, q, reclaim, unmap);
		q->in_use -= reclaim;
	}

	return reclaim;
}
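
/*
 * A quick sanity example of the wrap-around arithmetic in reclaimable()
 * above (values are made up): on a 1024-entry queue with hw_cidx == 10 and
 * q->cidx == 1000, 10 - 1000 == -990, which wraps to -990 + 1024 == 34
 * descriptors that the hardware has finished with and we may now reclaim.
 */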
/**
 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
 * @adap: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue locked.
 */
void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				bool unmap)
{
	(void)reclaim_completed_tx(adap, q, -1, unmap);
}
EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);

static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG();
	}

	return buf_size;
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @adap: the adapter
 * @q: the SGE free list to free buffers from
 * @n: how many buffers to free
 *
 * Release the next @n buffers on an SGE free-buffer Rx queue.  The
 * buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(adap, d),
				       PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}

/**
 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 * @adap: the adapter
 * @q: the SGE free list
 *
 * Unmap the current buffer on an SGE free-buffer Rx queue.  The
 * buffer must be made inaccessible to HW before calling this function.
 *
 * This is similar to @free_rx_bufs above but does not free the buffer.
 * Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= PIDX_V(q->pend_cred / 8);
		else
			val |= PIDX_T5_V(q->pend_cred / 8);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(q->bar2_addr == NULL)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write Memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}
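
/*
 * The Free List doorbell above works in units of 8 buffers (one Egress
 * Queue Unit).  As a made-up example: with q->pend_cred == 37 we would
 * write a PIDX increment of 37 / 8 == 4 to the doorbell and keep the
 * remaining 37 & 7 == 5 credits pending until at least 8 accumulate again.
 */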
static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}

/**
 * refill_fl - refill an SGE Rx buffer ring
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for the allocations
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must ensure that
 * @n does not exceed the queue's capacity.  If afterwards the queue is
 * found critically low mark it as starving in the bitmap of starving FLs.
 *
 * Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct sge *s = &adap->sge;
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	int node;

#ifdef CONFIG_DEBUG_FS
	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
		goto out;
#endif

	gfp |= __GFP_NOWARN;
	node = dev_to_node(adap->pdev_dev);

	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << s->fl_pg_order,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, s->fl_pg_order);
			q->mapping_err++;
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_PG_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		pg = alloc_pages_node(node, gfp, 0);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			q->mapping_err++;
			goto out;
		}
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		smp_wmb();
		q->low++;
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}
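
/*
 * For instance (numbers invented for illustration): if fl_cap(fl) is 1016
 * and fl->avail is 1010, __refill_fl() asks refill_fl() for
 * min(MAX_RX_REFILL, 1016 - 1010) == 6 buffers, so steady-state top-ups
 * stay small while a badly depleted list is refilled MAX_RX_REFILL (16)
 * buffers at a time.
 */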
/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @dev: the PCI device's core device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 * @stat_size: extra space in HW ring for status information
 * @node: preferred node for memory allocations
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the bus address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	return p;
}

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
	 * (n-1) is odd ...
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
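
/*
 * Worked example (not part of the original comment): for n == 5 SGL
 * entries, after n-- the expression evaluates as (3 * 4) / 2 + (4 & 1) + 2
 * == 6 + 0 + 2 == 8 flits, i.e. 64 bytes: one flit for the ULPTX header
 * plus Length0, one flit for Address0, and two 3-flit { Length[i],
 * Length[i+1], Address[i], Address[i+1] } pairs for the remaining four
 * entries.
 */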
/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

/**
 * is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @skb: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit as
 * immediate data.  Return value corresponds to headroom required.
 */
static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
	int hdrlen = 0;

	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
	    chip_ver > CHELSIO_T5) {
		hdrlen = sizeof(struct cpl_tx_tnl_lso);
		hdrlen += sizeof(struct cpl_tx_pkt_core);
	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		return 0;
	} else {
		hdrlen = skb_shinfo(skb)->gso_size ?
			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
		hdrlen += sizeof(struct cpl_tx_pkt);
	}
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}

/**
 * calc_tx_flits - calculate the number of flits for a packet Tx WR
 * @skb: the packet
 *
 * Returns the number of flits needed for a Tx WR for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb, chip_ver);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */

	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size) {
		if (skb->encapsulation && chip_ver > CHELSIO_T5) {
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_tnl_lso);
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			u32 pkt_hdrlen;

			pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
						     skb_headlen(skb));
			hdrlen = sizeof(struct fw_eth_tx_eo_wr) +
				 round_up(pkt_hdrlen, 16);
		} else {
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_pkt_lso_core);
		}

		hdrlen += sizeof(struct cpl_tx_pkt_core);
		flits += (hdrlen / sizeof(__be64));
	} else {
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	}
	return flits;
}

/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	return flits_to_desc(calc_tx_flits(skb, chip_ver));
}
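
/*
 * Rough illustration (assumed values, not from the original source): a
 * 128-byte non-GSO, non-encapsulated packet gets a non-zero hdrlen of
 * sizeof(struct cpl_tx_pkt) from is_eth_imm() because it fits under
 * MAX_IMM_TX_PKT_LEN, so it is inlined; calc_tx_flits() then simply
 * returns DIV_ROUND_UP(128 + hdrlen, 8) flits, and calc_tx_descs() rounds
 * that up to whole 64-byte (8-flit) Tx descriptors.
 */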
/**
 * cxgb4_write_sgl - populate a scatter/gather list for a packet
 * @skb: the packet
 * @q: the Tx queue we are writing into
 * @sgl: starting location for writing the SGL
 * @end: points right after the end of the SGL
 * @start: start offset into skb main-body data to include in the SGL
 * @addr: the list of bus addresses for the SGL elements
 *
 * Generates a gather list for the buffers that make up a packet.
 * The caller must provide adequate space for the SGL that will be written.
 * The SGL includes all of the packet's page fragments and the data in its
 * main body except for the first @start bytes.  @sgl must be 16-byte
 * aligned and within a Tx descriptor with available space.  @end points
 * right after the end of the SGL but does not account for any potential
 * wrap around, i.e., @end > @sgl.
 */
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		     const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*end = 0;
}
EXPORT_SYMBOL(cxgb4_write_sgl);

/* This function copies 64 byte coalesced work request to
 * memory mapped BAR2 space. For coalesced WR SGE fetches
 * data from the FIFO instead of from Host.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}

/**
 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 * @n: number of new descriptors to give to HW
 *
 * Ring the doorbell for a Tx queue.
 */
inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);
		unsigned long flags;

		/* For T4 we need to participate in the Doorbell Recovery
		 * mechanism.
		 */
		spin_lock_irqsave(&q->db_lock, flags);
		if (!q->db_disabled)
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     QID_V(q->cntxt_id) | val);
		else
			q->db_pidx_inc += n;
		q->db_pidx = q->pidx;
		spin_unlock_irqrestore(&q->db_lock, flags);
	} else {
		u32 val = PIDX_T5_V(n);

		/* T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & DBPRIO_F);

		/* If we're only writing a single TX Descriptor and we can use
		 * Inferred QID registers, we can use the Write Combining
		 * Gather Buffer; otherwise we use the simple doorbell.
		 */
		if (n == 1 && q->bar2_qid == 0) {
			int index = (q->pidx
				     ? (q->pidx - 1)
				     : (q->size - 1));
			u64 *wr = (u64 *)&q->desc[index];

			cxgb_pio_copy((u64 __iomem *)
				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
				      wr);
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);
		}

		/* This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		wmb();
	}
}
EXPORT_SYMBOL(cxgb4_ring_tx_db);

/**
 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
 * @skb: the packet
 * @q: the Tx queue where the packet will be inlined
 * @pos: starting position in the Tx queue where to inline the packet
 *
 * Inline a packet's contents directly into Tx descriptors, starting at
 * the given position within the Tx DMA ring.
 * Most of the complexity of this operation is dealing with wrap arounds
 * in the middle of the packet we want to inline.
 */
void cxgb4_inline_tx_skb(const struct sk_buff *skb,
			 const struct sge_txq *q, void *pos)
{
	int left = (void *)q->stat - pos;
	u64 *p;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}

	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}
EXPORT_SYMBOL(cxgb4_inline_tx_skb);

static void *inline_tx_skb_header(const struct sk_buff *skb,
				  const struct sge_txq *q, void *pos,
				  int length)
{
	u64 *p;
	int left = (void *)q->stat - pos;

	if (likely(length <= left)) {
		memcpy(pos, skb->data, length);
		pos += length;
	} else {
		memcpy(pos, skb->data, left);
		memcpy(q->desc, skb->data + left, length - left);
		pos = (void *)q->desc + (length - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8) {
		*p = 0;
		return p + 1;
	}
	return p;
}
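
/*
 * The zero-padding in the two helpers above keeps inlined data a multiple
 * of 16 bytes: pos is first rounded up to an 8-byte boundary, and if that
 * lands on an odd flit (bit 3 of the address set) one extra zero flit is
 * written.  E.g. (illustrative), a copy ending 24 bytes into a descriptor
 * gets one 8-byte pad to reach 32 bytes.
 */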
/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
	int csum_type;
	bool inner_hdr_csum = false;
	u16 proto, ver;

	if (skb->encapsulation &&
	    (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
		inner_hdr_csum = true;

	if (inner_hdr_csum) {
		ver = inner_ip_hdr(skb)->version;
		proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
			inner_ipv6_hdr(skb)->nexthdr;
	} else {
		ver = ip_hdr(skb)->version;
		proto = (ver == 4) ? ip_hdr(skb)->protocol :
			ipv6_hdr(skb)->nexthdr;
	}

	if (ver == 4) {
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS_F;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		int eth_hdr_len, l4_len;
		u64 hdr_len;

		if (inner_hdr_csum) {
			/* This allows checksum offload for all encapsulated
			 * packets like GRE etc..
			 */
			l4_len = skb_inner_network_header_len(skb);
			eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
		} else {
			l4_len = skb_network_header_len(skb);
			eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
		}
		hdr_len = TXPKT_IPHDR_LEN_V(l4_len);

		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		else
			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
	} else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE_V(csum_type) |
			TXPKT_CSUM_START_V(start) |
			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
	}
}
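
/*
 * As a concrete (hypothetical) case: for a plain TCP/IPv4 packet with a
 * 20-byte IP header on a T5, hwcsum() picks TX_CSUM_TCPIP and returns
 * TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN_V(20) |
 * TXPKT_ETHHDR_LEN_V(Ethernet header bytes beyond the standard 14), which
 * tells the SGE where the IP and TCP headers start so it can insert both
 * checksums.
 */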
static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

#ifdef CONFIG_CHELSIO_T4_FCOE
static inline int
cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
		  const struct port_info *pi, u64 *cntrl)
{
	const struct cxgb_fcoe *fcoe = &pi->fcoe;

	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
		return 0;

	if (skb->protocol != htons(ETH_P_FCOE))
		return 0;

	skb_reset_mac_header(skb);
	skb->mac_len = sizeof(struct ethhdr);

	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));

	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
		return -ENOTSUPP;

	/* FC CRC offload */
	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
		 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
		 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
		 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
		 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
	return 0;
}
#endif /* CONFIG_CHELSIO_T4_FCOE */

/* Returns the tunnel type if the hardware supports offloading it.
 * It is called only for T5 and onwards.
 */
enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
{
	u8 l4_hdr = 0;
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
	struct port_info *pi = netdev_priv(skb->dev);
	struct adapter *adapter = pi->adapter;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return tnl_type;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return tnl_type;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		if (adapter->vxlan_port == udp_hdr(skb)->dest)
			tnl_type = TX_TNL_TYPE_VXLAN;
		else if (adapter->geneve_port == udp_hdr(skb)->dest)
			tnl_type = TX_TNL_TYPE_GENEVE;
		break;
	default:
		return tnl_type;
	}

	return tnl_type;
}

static inline void t6_fill_tnl_lso(struct sk_buff *skb,
				   struct cpl_tx_tnl_lso *tnl_lso,
				   enum cpl_tx_tnl_lso_type tnl_type)
{
	u32 val;
	int in_eth_xtra_len;
	int l3hdr_len = skb_network_header_len(skb);
	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
	const struct skb_shared_info *ssi = skb_shinfo(skb);
	bool v6 = (ip_hdr(skb)->version == 6);

	val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
	      CPL_TX_TNL_LSO_FIRST_F |
	      CPL_TX_TNL_LSO_LAST_F |
	      (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
	      CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
	      CPL_TX_TNL_LSO_IPLENSETOUT_F |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
	tnl_lso->op_to_IpIdSplitOut = htonl(val);

	tnl_lso->IpIdOffsetOut = 0;

	/* Get the tunnel header length */
	val = skb_inner_mac_header(skb) - skb_mac_header(skb);
	in_eth_xtra_len = skb_inner_network_header(skb) -
			  skb_inner_mac_header(skb) - ETH_HLEN;

	switch (tnl_type) {
	case TX_TNL_TYPE_VXLAN:
	case TX_TNL_TYPE_GENEVE:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen =
			htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
			      CPL_TX_TNL_LSO_UDPLENSETOUT_F);
		break;
	default:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
		break;
	}

	tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
		htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
		      CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));

	tnl_lso->r1 = 0;

	val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
	      CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
	      CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
	tnl_lso->Flow_to_TcpHdrLen = htonl(val);

	tnl_lso->IpIdOffset = htons(0);

	tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
	tnl_lso->TCPSeqOffset = htonl(0);
	tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
}

static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
				 struct cpl_tx_pkt_lso_core *lso)
{
	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
	int l3hdr_len = skb_network_header_len(skb);
	const struct skb_shared_info *ssi;
	bool ipv6 = false;

	ssi = skb_shinfo(skb);
	if (ssi->gso_type & SKB_GSO_TCPV6)
		ipv6 = true;

	lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
			      LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
			      LSO_IPV6_V(ipv6) |
			      LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
			      LSO_IPHDR_LEN_V(l3hdr_len / 4) |
			      LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
	lso->ipid_ofst = htons(0);
	lso->mss = htons(ssi->gso_size);
	lso->seqno_offset = htonl(0);
	if (is_t4(adap->params.chip))
		lso->len = htonl(skb->len);
	else
		lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));

	return (void *)(lso + 1);
}

/**
 * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
 * @adap: the adapter
 * @eq: the Ethernet TX Queue
 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
 *
 * We're typically called here to update the state of an Ethernet TX
 * Queue with respect to the hardware's progress in consuming the TX
 * Work Requests that we've put on that Egress Queue.  This happens
 * when we get Egress Queue Update messages and also prophylactically
 * in regular timer-based Ethernet TX Queue maintenance.
 */
int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
				 int maxreclaim)
{
	unsigned int reclaimed, hw_cidx;
	struct sge_txq *q = &eq->q;
	int hw_in_use;

	if (!q->in_use || !__netif_tx_trylock(eq->txq))
		return 0;

	/* Reclaim pending completed TX Descriptors. */
	reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);

	hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
	hw_in_use = q->pidx - hw_cidx;
	if (hw_in_use < 0)
		hw_in_use += q->size;

	/* If the TX Queue is currently stopped and there's now more than half
	 * the queue available, restart it.  Otherwise bail out since the rest
	 * of what we want to do here is with the possibility of shipping any
	 * currently buffered Coalesced TX Work Request.
	 */
	if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
		netif_tx_wake_queue(eq->txq);
		eq->q.restarts++;
	}

	__netif_tx_unlock(eq->txq);
	return reclaimed;
}

static inline int cxgb4_validate_skb(struct sk_buff *skb,
				     struct net_device *dev,
				     u32 min_pkt_len)
{
	u32 max_pkt_len;

	/* The chip min packet length is 10 octets but some firmware
	 * commands have a minimum packet length requirement.  So, play
	 * safe and reject anything shorter than @min_pkt_len.
	 */
	if (unlikely(skb->len < min_pkt_len))
		return -EINVAL;

	/* Discard the packet if the length is greater than mtu */
	max_pkt_len = ETH_HLEN + dev->mtu;

	if (skb_vlan_tagged(skb))
		max_pkt_len += VLAN_HLEN;

	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
		return -EINVAL;

	return 0;
}

static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
			     u32 hdr_len)
{
	wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
	wr->u.udpseg.ethlen = skb_network_offset(skb);
	wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
	wr->u.udpseg.udplen = sizeof(struct udphdr);
	wr->u.udpseg.rtplen = 0;
	wr->u.udpseg.r4 = 0;
	if (skb_shinfo(skb)->gso_size)
		wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
	else
		wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len);
	wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
	wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);

	return (void *)(wr + 1);
}

/**
 * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
	bool ptp_enabled = is_ptp_enabled(skb, dev);
	unsigned int last_desc, flits, ndesc;
	u32 wr_mid, ctrl0, op, sgl_off = 0;
	const struct skb_shared_info *ssi;
	int len, qidx, credits, ret, left;
	struct tx_sw_desc *sgl_sdesc;
	struct fw_eth_tx_eo_wr *eowr;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct port_info *pi;
	bool immediate = false;
	u64 cntrl, *end, *sgl;
	struct sge_eth_txq *q;
	unsigned int chip_ver;
	struct adapter *adap;

	ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
	if (ret)
		goto out_free;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	ssi = skb_shinfo(skb);
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	if (xfrm_offload(skb) && !ssi->gso_size)
		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */

#ifdef CONFIG_CHELSIO_TLS_DEVICE
	if (skb->decrypted)
		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */

	qidx = skb_get_queue_mapping(skb);
	if (ptp_enabled) {
		spin_lock(&adap->ptp_lock);
		if (!(adap->ptp_tx_skb)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			adap->ptp_tx_skb = skb_get(skb);
		} else {
			spin_unlock(&adap->ptp_lock);
			goto out_free;
		}
		q = &adap->sge.ptptxq;
	} else {
		q = &adap->sge.ethtxq[qidx + pi->first_qset];
	}
	skb_tx_timestamp(skb);

	reclaim_completed_tx(adap, &q->q, -1, true);
	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;

#ifdef CONFIG_CHELSIO_T4_FCOE
	ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
	if (unlikely(ret == -ENOTSUPP)) {
		if (ptp_enabled)
			spin_unlock(&adap->ptp_lock);
		goto out_free;
	}
#endif /* CONFIG_CHELSIO_T4_FCOE */

	chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	flits = calc_tx_flits(skb, chip_ver);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		if (ptp_enabled)
			spin_unlock(&adap->ptp_lock);
		return NETDEV_TX_BUSY;
	}

	if (is_eth_imm(skb, chip_ver))
		immediate = true;

	if (skb->encapsulation && chip_ver > CHELSIO_T5)
		tnl_type = cxgb_encap_offload_supported(skb);

	last_desc = q->q.pidx + ndesc - 1;
	if (last_desc >= q->q.size)
		last_desc -= q->q.size;
	sgl_sdesc = &q->q.sdesc[last_desc];

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		q->mapping_err++;
		if (ptp_enabled)
			spin_unlock(&adap->ptp_lock);
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message.  The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		eth_txq_stop(q);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	wr = (void *)&q->q.desc[q->q.pidx];
	eowr = (void *)&q->q.desc[q->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = cpu_to_be64(0);
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		end = (u64 *)eowr + flits;
	else
		end = (u64 *)wr + flits;

	len = immediate ? skb->len : 0;
	len += sizeof(*cpl);
	if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);

		if (tnl_type)
			len += sizeof(*tnl_lso);
		else
			len += sizeof(*lso);

		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN_V(len));
		if (tnl_type) {
			struct iphdr *iph = ip_hdr(skb);

			t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
			cpl = (void *)(tnl_lso + 1);
			/* Driver is expected to compute partial checksum that
			 * does not include the IP Total Length.
			 */
			if (iph->version == 4) {
				iph->check = 0;
				iph->tot_len = 0;
				iph->check = (u16)(~ip_fast_csum((u8 *)iph,
								 iph->ihl));
			}
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				cntrl = hwcsum(adap->params.chip, skb);
		} else {
			cpl = write_tso_wr(adap, skb, lso);
			cntrl = hwcsum(adap->params.chip, skb);
		}
		sgl = (u64 *)(cpl + 1); /* sgl starts here */
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else if (ssi->gso_size) {
		u64 *start;
		u32 hdrlen;

		hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
		len += hdrlen;
		wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
					     FW_ETH_TX_EO_WR_IMMDLEN_V(len));
		cpl = write_eo_udp_wr(skb, eowr, hdrlen);
		cntrl = hwcsum(adap->params.chip, skb);

		start = (u64 *)(cpl + 1);
		sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
						  hdrlen);
		if (unlikely(start > sgl)) {
			left = (u8 *)end - (u8 *)q->q.stat;
			end = (void *)q->q.desc + left;
		}
		sgl_off = hdrlen;
		q->uso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		if (ptp_enabled)
			op = FW_PTP_TX_PKT_WR;
		else
			op = FW_ETH_TX_PKT_WR;
		wr->op_immdlen = htonl(FW_WR_OP_V(op) |
				       FW_WR_IMMDLEN_V(len));
		cpl = (void *)(wr + 1);
		sgl = (u64 *)(cpl + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adap->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			q->tx_cso++;
		}
	}

	if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
		/* If current position is already at the end of the
		 * txq, reset the current to point to start of the queue
		 * and update the end ptr as well.
		 */
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		sgl = (void *)q->q.desc;
	}

	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
#ifdef CONFIG_CHELSIO_T4_FCOE
		if (skb->protocol == htons(ETH_P_FCOE))
			cntrl |= TXPKT_VLAN_V(
				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
#endif /* CONFIG_CHELSIO_T4_FCOE */
	}

	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (ptp_enabled)
		ctrl0 |= TXPKT_TSTAMP_F;
#ifdef CONFIG_CHELSIO_T4_DCB
	if (is_t4(adap->params.chip))
		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
	else
		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
#endif
	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
				sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}

	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	if (ptp_enabled)
		spin_unlock(&adap->ptp_lock);
	return NETDEV_TX_OK;

out_free:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Constants ... */
enum {
	/* Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Context Units bytes.  Note that as far as the hardware is
	 * concerned, the free list is an Egress Queue (the host produces free
	 * buffers which the hardware consumes) and free list entries are
	 * 64-bit PCI DMA addresses.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			       sizeof(struct cpl_tx_pkt_lso_core) +
			       sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
};

/**
 * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @skb: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit completely as
 * immediate data.
 */
static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
{
	/* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
	 * which does not accommodate immediate data.  We could dike out all
	 * of the support code for immediate data but that would tie our hands
	 * too much if we ever want to enhance the firmware.  It would also
	 * create more differences between the PF and VF Drivers.
	 */
	return false;
}

/**
 * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
 * @skb: the packet
 *
 * Returns the number of flits needed for a TX Work Request for the
 * given Ethernet packet, including the needed WR and CPL headers.
 */
static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (t4vf_is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
				    sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}

/**
 * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
 */
static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	unsigned int last_desc, flits, ndesc;
	const struct skb_shared_info *ssi;
	struct fw_eth_tx_pkt_vm_wr *wr;
	struct tx_sw_desc *sgl_sdesc;
	struct cpl_tx_pkt_core *cpl;
	const struct port_info *pi;
	struct sge_eth_txq *txq;
	struct adapter *adapter;
	int qidx, credits, ret;
	size_t fw_hdr_copy_len;
	u64 cntrl, *end;
	u32 wr_mid;

	/* The chip minimum packet length is 10 octets but the firmware
	 * command that we are using requires that we copy the Ethernet header
	 * (including the VLAN tag) into the Work Request header, so we reject
	 * anything smaller than that ...
	 */
	fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
			  sizeof(wr->ethtype) + sizeof(wr->vlantci);
	ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
	if (ret)
		goto out_free;

	/* Figure out which TX Queue we're going to use. */
	pi = netdev_priv(dev);
	adapter = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	WARN_ON(qidx >= pi->nqsets);
	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];

	/* Take this opportunity to reclaim any TX Descriptors whose DMA
	 * transfers have completed.
	 */
	reclaim_completed_tx(adapter, &txq->q, -1, true);

	/* Calculate the number of flits and TX Descriptors we're going to
	 * need along with how many TX Descriptors will be left over after
	 * we inject our Work Request.
	 */
	flits = t4vf_calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		/* Not enough room for this packet's Work Request.  Stop the
		 * TX Queue and return a "busy" condition.  The queue will get
		 * started later on when the firmware informs us that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		dev_err(adapter->pdev_dev,
			"%s: TX ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	last_desc = txq->q.pidx + ndesc - 1;
	if (last_desc >= txq->q.size)
		last_desc -= txq->q.size;
	sgl_sdesc = &txq->q.sdesc[last_desc];

	if (!t4vf_is_eth_imm(skb) &&
	    unlikely(cxgb4_map_skb(adapter->pdev_dev, skb,
				   sgl_sdesc->addr) < 0)) {
		/* We need to map the skb into PCI DMA space (because it can't
		 * be in-lined directly into the Work Request) and the mapping
		 * operation failed.  Record the error and drop the packet.
		 */
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		txq->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message.  The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	/* Start filling in our Work Request.  Note that we do _not_ handle
	 * the WR Header wrapping around the TX Descriptor Ring.  If our
	 * maximum header size ever exceeds one TX Descriptor, we'll need to
	 * do something else here.
1817 */ 1818 WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); 1819 wr = (void *)&txq->q.desc[txq->q.pidx]; 1820 wr->equiq_to_len16 = cpu_to_be32(wr_mid); 1821 wr->r3[0] = cpu_to_be32(0); 1822 wr->r3[1] = cpu_to_be32(0); 1823 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); 1824 end = (u64 *)wr + flits; 1825 1826 /* If this is a Large Send Offload packet we'll put in an LSO CPL 1827 * message with an encapsulated TX Packet CPL message. Otherwise we 1828 * just use a TX Packet CPL message. 1829 */ 1830 ssi = skb_shinfo(skb); 1831 if (ssi->gso_size) { 1832 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 1833 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; 1834 int l3hdr_len = skb_network_header_len(skb); 1835 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; 1836 1837 wr->op_immdlen = 1838 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | 1839 FW_WR_IMMDLEN_V(sizeof(*lso) + 1840 sizeof(*cpl))); 1841 /* Fill in the LSO CPL message. */ 1842 lso->lso_ctrl = 1843 cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) | 1844 LSO_FIRST_SLICE_F | 1845 LSO_LAST_SLICE_F | 1846 LSO_IPV6_V(v6) | 1847 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | 1848 LSO_IPHDR_LEN_V(l3hdr_len / 4) | 1849 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); 1850 lso->ipid_ofst = cpu_to_be16(0); 1851 lso->mss = cpu_to_be16(ssi->gso_size); 1852 lso->seqno_offset = cpu_to_be32(0); 1853 if (is_t4(adapter->params.chip)) 1854 lso->len = cpu_to_be32(skb->len); 1855 else 1856 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); 1857 1858 /* Set up TX Packet CPL pointer, control word and perform 1859 * accounting. 1860 */ 1861 cpl = (void *)(lso + 1); 1862 1863 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) 1864 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); 1865 else 1866 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); 1867 1868 cntrl |= TXPKT_CSUM_TYPE_V(v6 ? 1869 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | 1870 TXPKT_IPHDR_LEN_V(l3hdr_len); 1871 txq->tso++; 1872 txq->tx_cso += ssi->gso_segs; 1873 } else { 1874 int len; 1875 1876 len = (t4vf_is_eth_imm(skb) 1877 ? skb->len + sizeof(*cpl) 1878 : sizeof(*cpl)); 1879 wr->op_immdlen = 1880 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | 1881 FW_WR_IMMDLEN_V(len)); 1882 1883 /* Set up TX Packet CPL pointer, control word and perform 1884 * accounting. 1885 */ 1886 cpl = (void *)(wr + 1); 1887 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1888 cntrl = hwcsum(adapter->params.chip, skb) | 1889 TXPKT_IPCSUM_DIS_F; 1890 txq->tx_cso++; 1891 } else { 1892 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; 1893 } 1894 } 1895 1896 /* If there's a VLAN tag present, add that to the list of things to 1897 * do in this Work Request. 1898 */ 1899 if (skb_vlan_tag_present(skb)) { 1900 txq->vlan_ins++; 1901 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); 1902 } 1903 1904 /* Fill in the TX Packet CPL message header. */ 1905 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | 1906 TXPKT_INTF_V(pi->port_id) | 1907 TXPKT_PF_V(0)); 1908 cpl->pack = cpu_to_be16(0); 1909 cpl->len = cpu_to_be16(skb->len); 1910 cpl->ctrl1 = cpu_to_be64(cntrl); 1911 1912 /* Fill in the body of the TX Packet CPL message with either in-lined 1913 * data or a Scatter/Gather List. 1914 */ 1915 if (t4vf_is_eth_imm(skb)) { 1916 /* In-line the packet's data and free the skb since we don't 1917 * need it any longer. 
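 * (With t4vf_is_eth_imm() currently hard-wired to return false this branch is never taken; it is kept to minimize differences from the VF driver.)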
1918 */ 1919 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); 1920 dev_consume_skb_any(skb); 1921 } else { 1922 /* Write the skb's Scatter/Gather list into the TX Packet CPL 1923 * message and retain a pointer to the skb so we can free it 1924 * later when its DMA completes. (We store the skb pointer 1925 * in the Software Descriptor corresponding to the last TX 1926 * Descriptor used by the Work Request.) 1927 * 1928 * The retained skb will be freed when the corresponding TX 1929 * Descriptors are reclaimed after their DMAs complete. 1930 * However, this could take quite a while since, in general, 1931 * the hardware is set up to be lazy about sending DMA 1932 * completion notifications to us and we mostly perform TX 1933 * reclaims in the transmit routine. 1934 * 1935 * This is good for performance but means that we rely on new 1936 * TX packets arriving to run the destructors of completed 1937 * packets, which open up space in their sockets' send queues. 1938 * Sometimes we do not get such new packets causing TX to 1939 * stall. A single UDP transmitter is a good example of this 1940 * situation. We have a clean up timer that periodically 1941 * reclaims completed packets but it doesn't run often enough 1942 * (nor do we want it to) to prevent lengthy stalls. A 1943 * solution to this problem is to run the destructor early, 1944 * after the packet is queued but before it's DMAd. A con is 1945 * that we lie to socket memory accounting, but the amount of 1946 * extra memory is reasonable (limited by the number of TX 1947 * descriptors), the packets do actually get freed quickly by 1948 * new packets almost always, and for protocols like TCP that 1949 * wait for acks to really free up the data the extra memory 1950 * is even less. On the positive side we run the destructors 1951 * on the sending CPU rather than on a potentially different 1952 * completing CPU, usually a good thing. 1953 * 1954 * Run the destructor before telling the DMA engine about the 1955 * packet to make sure it doesn't complete and get freed 1956 * prematurely. 1957 */ 1958 struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); 1959 struct sge_txq *tq = &txq->q; 1960 1961 /* If the Work Request header was an exact multiple of our TX 1962 * Descriptor length, then it's possible that the starting SGL 1963 * pointer lines up exactly with the end of our TX Descriptor 1964 * ring. If that's the case, wrap around to the beginning 1965 * here ... 1966 */ 1967 if (unlikely((void *)sgl == (void *)tq->stat)) { 1968 sgl = (void *)tq->desc; 1969 end = (void *)((void *)tq->desc + 1970 ((void *)end - (void *)tq->stat)); 1971 } 1972 1973 cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr); 1974 skb_orphan(skb); 1975 sgl_sdesc->skb = skb; 1976 } 1977 1978 /* Advance our internal TX Queue state, tell the hardware about 1979 * the new TX descriptors and return success. 1980 */ 1981 txq_advance(&txq->q, ndesc); 1982 1983 cxgb4_ring_tx_db(adapter, &txq->q, ndesc); 1984 return NETDEV_TX_OK; 1985 1986 out_free: 1987 /* An error of some sort happened. Free the TX skb and tell the 1988 * OS that we've "dealt" with the packet ... 1989 */ 1990 dev_kfree_skb_any(skb); 1991 return NETDEV_TX_OK; 1992 } 1993 1994 /** 1995 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs 1996 * @q: the SGE control Tx queue 1997 * 1998 * This is a variant of cxgb4_reclaim_completed_tx() that is used 1999 * for Tx queues that send only immediate data (presently just 2000 * the control queues) and thus do not have any sk_buffs to release.
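 * The hardware's consumer index is read from the queue's status page (q->stat->cidx); we simply advance our software cidx to match it and reduce the in-use count accordingly.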
2001 */ 2002 static inline void reclaim_completed_tx_imm(struct sge_txq *q) 2003 { 2004 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); 2005 int reclaim = hw_cidx - q->cidx; 2006 2007 if (reclaim < 0) 2008 reclaim += q->size; 2009 2010 q->in_use -= reclaim; 2011 q->cidx = hw_cidx; 2012 } 2013 2014 static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max) 2015 { 2016 u32 val = *idx + n; 2017 2018 if (val >= max) 2019 val -= max; 2020 2021 *idx = val; 2022 } 2023 2024 void cxgb4_eosw_txq_free_desc(struct adapter *adap, 2025 struct sge_eosw_txq *eosw_txq, u32 ndesc) 2026 { 2027 struct tx_sw_desc *d; 2028 2029 d = &eosw_txq->desc[eosw_txq->last_cidx]; 2030 while (ndesc--) { 2031 if (d->skb) { 2032 if (d->addr[0]) { 2033 unmap_skb(adap->pdev_dev, d->skb, d->addr); 2034 memset(d->addr, 0, sizeof(d->addr)); 2035 } 2036 dev_consume_skb_any(d->skb); 2037 d->skb = NULL; 2038 } 2039 eosw_txq_advance_index(&eosw_txq->last_cidx, 1, 2040 eosw_txq->ndesc); 2041 d = &eosw_txq->desc[eosw_txq->last_cidx]; 2042 } 2043 } 2044 2045 static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n) 2046 { 2047 eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc); 2048 eosw_txq->inuse += n; 2049 } 2050 2051 static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq, 2052 struct sk_buff *skb) 2053 { 2054 if (eosw_txq->inuse == eosw_txq->ndesc) 2055 return -ENOMEM; 2056 2057 eosw_txq->desc[eosw_txq->pidx].skb = skb; 2058 return 0; 2059 } 2060 2061 static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq) 2062 { 2063 return eosw_txq->desc[eosw_txq->last_pidx].skb; 2064 } 2065 2066 static inline u8 ethofld_calc_tx_flits(struct adapter *adap, 2067 struct sk_buff *skb, u32 hdr_len) 2068 { 2069 u8 flits, nsgl = 0; 2070 u32 wrlen; 2071 2072 wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core); 2073 if (skb_shinfo(skb)->gso_size && 2074 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) 2075 wrlen += sizeof(struct cpl_tx_pkt_lso_core); 2076 2077 wrlen += roundup(hdr_len, 16); 2078 2079 /* Packet headers + WR + CPLs */ 2080 flits = DIV_ROUND_UP(wrlen, 8); 2081 2082 if (skb_shinfo(skb)->nr_frags > 0) { 2083 if (skb_headlen(skb) - hdr_len) 2084 nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1); 2085 else 2086 nsgl = sgl_len(skb_shinfo(skb)->nr_frags); 2087 } else if (skb->len - hdr_len) { 2088 nsgl = sgl_len(1); 2089 } 2090 2091 return flits + nsgl; 2092 } 2093 2094 static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq, 2095 struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr, 2096 u32 hdr_len, u32 wrlen) 2097 { 2098 const struct skb_shared_info *ssi = skb_shinfo(skb); 2099 struct cpl_tx_pkt_core *cpl; 2100 u32 immd_len, wrlen16; 2101 bool compl = false; 2102 u8 ver, proto; 2103 2104 ver = ip_hdr(skb)->version; 2105 proto = (ver == 6) ? 
ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol; 2106 2107 wrlen16 = DIV_ROUND_UP(wrlen, 16); 2108 immd_len = sizeof(struct cpl_tx_pkt_core); 2109 if (skb_shinfo(skb)->gso_size && 2110 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) 2111 immd_len += sizeof(struct cpl_tx_pkt_lso_core); 2112 immd_len += hdr_len; 2113 2114 if (!eosw_txq->ncompl || 2115 (eosw_txq->last_compl + wrlen16) >= 2116 (adap->params.ofldq_wr_cred / 2)) { 2117 compl = true; 2118 eosw_txq->ncompl++; 2119 eosw_txq->last_compl = 0; 2120 } 2121 2122 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | 2123 FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) | 2124 FW_WR_COMPL_V(compl)); 2125 wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) | 2126 FW_WR_FLOWID_V(eosw_txq->hwtid)); 2127 wr->r3 = 0; 2128 if (proto == IPPROTO_UDP) { 2129 cpl = write_eo_udp_wr(skb, wr, hdr_len); 2130 } else { 2131 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; 2132 wr->u.tcpseg.ethlen = skb_network_offset(skb); 2133 wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); 2134 wr->u.tcpseg.tcplen = tcp_hdrlen(skb); 2135 wr->u.tcpseg.tsclk_tsoff = 0; 2136 wr->u.tcpseg.r4 = 0; 2137 wr->u.tcpseg.r5 = 0; 2138 wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len); 2139 2140 if (ssi->gso_size) { 2141 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 2142 2143 wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size); 2144 cpl = write_tso_wr(adap, skb, lso); 2145 } else { 2146 wr->u.tcpseg.mss = cpu_to_be16(0xffff); 2147 cpl = (void *)(wr + 1); 2148 } 2149 } 2150 2151 eosw_txq->cred -= wrlen16; 2152 eosw_txq->last_compl += wrlen16; 2153 return cpl; 2154 } 2155 2156 static int ethofld_hard_xmit(struct net_device *dev, 2157 struct sge_eosw_txq *eosw_txq) 2158 { 2159 struct port_info *pi = netdev2pinfo(dev); 2160 struct adapter *adap = netdev2adap(dev); 2161 u32 wrlen, wrlen16, hdr_len, data_len; 2162 enum sge_eosw_state next_state; 2163 u64 cntrl, *start, *end, *sgl; 2164 struct sge_eohw_txq *eohw_txq; 2165 struct cpl_tx_pkt_core *cpl; 2166 struct fw_eth_tx_eo_wr *wr; 2167 bool skip_eotx_wr = false; 2168 struct tx_sw_desc *d; 2169 struct sk_buff *skb; 2170 int left, ret = 0; 2171 u8 flits, ndesc; 2172 2173 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; 2174 spin_lock(&eohw_txq->lock); 2175 reclaim_completed_tx_imm(&eohw_txq->q); 2176 2177 d = &eosw_txq->desc[eosw_txq->last_pidx]; 2178 skb = d->skb; 2179 skb_tx_timestamp(skb); 2180 2181 wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx]; 2182 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && 2183 eosw_txq->last_pidx == eosw_txq->flowc_idx)) { 2184 hdr_len = skb->len; 2185 data_len = 0; 2186 flits = DIV_ROUND_UP(hdr_len, 8); 2187 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND) 2188 next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY; 2189 else 2190 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY; 2191 skip_eotx_wr = true; 2192 } else { 2193 hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb)); 2194 data_len = skb->len - hdr_len; 2195 flits = ethofld_calc_tx_flits(adap, skb, hdr_len); 2196 } 2197 ndesc = flits_to_desc(flits); 2198 wrlen = flits * 8; 2199 wrlen16 = DIV_ROUND_UP(wrlen, 16); 2200 2201 left = txq_avail(&eohw_txq->q) - ndesc; 2202 2203 /* If there are no descriptors left in hardware queues or no 2204 * CPL credits left in software queues, then wait for them 2205 * to come back and retry again. Note that we always request 2206 * for credits update via interrupt for every half credits 2207 * consumed. 
So, the interrupt will eventually restore the 2208 * credits and invoke the Tx path again. 2209 */ 2210 if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) { 2211 ret = -ENOMEM; 2212 goto out_unlock; 2213 } 2214 2215 if (unlikely(skip_eotx_wr)) { 2216 start = (u64 *)wr; 2217 eosw_txq->state = next_state; 2218 eosw_txq->cred -= wrlen16; 2219 eosw_txq->ncompl++; 2220 eosw_txq->last_compl = 0; 2221 goto write_wr_headers; 2222 } 2223 2224 cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen); 2225 cntrl = hwcsum(adap->params.chip, skb); 2226 if (skb_vlan_tag_present(skb)) 2227 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); 2228 2229 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | 2230 TXPKT_INTF_V(pi->tx_chan) | 2231 TXPKT_PF_V(adap->pf)); 2232 cpl->pack = 0; 2233 cpl->len = cpu_to_be16(skb->len); 2234 cpl->ctrl1 = cpu_to_be64(cntrl); 2235 2236 start = (u64 *)(cpl + 1); 2237 2238 write_wr_headers: 2239 sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start, 2240 hdr_len); 2241 if (data_len) { 2242 ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr); 2243 if (unlikely(ret)) { 2244 memset(d->addr, 0, sizeof(d->addr)); 2245 eohw_txq->mapping_err++; 2246 goto out_unlock; 2247 } 2248 2249 end = (u64 *)wr + flits; 2250 if (unlikely(start > sgl)) { 2251 left = (u8 *)end - (u8 *)eohw_txq->q.stat; 2252 end = (void *)eohw_txq->q.desc + left; 2253 } 2254 2255 if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) { 2256 /* If current position is already at the end of the 2257 * txq, reset the current to point to start of the queue 2258 * and update the end ptr as well. 2259 */ 2260 left = (u8 *)end - (u8 *)eohw_txq->q.stat; 2261 2262 end = (void *)eohw_txq->q.desc + left; 2263 sgl = (void *)eohw_txq->q.desc; 2264 } 2265 2266 cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len, 2267 d->addr); 2268 } 2269 2270 if (skb_shinfo(skb)->gso_size) { 2271 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) 2272 eohw_txq->uso++; 2273 else 2274 eohw_txq->tso++; 2275 eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs; 2276 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2277 eohw_txq->tx_cso++; 2278 } 2279 2280 if (skb_vlan_tag_present(skb)) 2281 eohw_txq->vlan_ins++; 2282 2283 txq_advance(&eohw_txq->q, ndesc); 2284 cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc); 2285 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); 2286 2287 out_unlock: 2288 spin_unlock(&eohw_txq->lock); 2289 return ret; 2290 } 2291 2292 static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq) 2293 { 2294 struct sk_buff *skb; 2295 int pktcount, ret; 2296 2297 switch (eosw_txq->state) { 2298 case CXGB4_EO_STATE_ACTIVE: 2299 case CXGB4_EO_STATE_FLOWC_OPEN_SEND: 2300 case CXGB4_EO_STATE_FLOWC_CLOSE_SEND: 2301 pktcount = eosw_txq->pidx - eosw_txq->last_pidx; 2302 if (pktcount < 0) 2303 pktcount += eosw_txq->ndesc; 2304 break; 2305 case CXGB4_EO_STATE_FLOWC_OPEN_REPLY: 2306 case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY: 2307 case CXGB4_EO_STATE_CLOSED: 2308 default: 2309 return; 2310 } 2311 2312 while (pktcount--) { 2313 skb = eosw_txq_peek(eosw_txq); 2314 if (!skb) { 2315 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, 2316 eosw_txq->ndesc); 2317 continue; 2318 } 2319 2320 ret = ethofld_hard_xmit(dev, eosw_txq); 2321 if (ret) 2322 break; 2323 } 2324 } 2325 2326 static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb, 2327 struct net_device *dev) 2328 { 2329 struct cxgb4_tc_port_mqprio *tc_port_mqprio; 2330 struct port_info *pi = netdev2pinfo(dev); 2331 struct adapter *adap = 
netdev2adap(dev); 2332 struct sge_eosw_txq *eosw_txq; 2333 u32 qid; 2334 int ret; 2335 2336 ret = cxgb4_validate_skb(skb, dev, ETH_HLEN); 2337 if (ret) 2338 goto out_free; 2339 2340 tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; 2341 qid = skb_get_queue_mapping(skb) - pi->nqsets; 2342 eosw_txq = &tc_port_mqprio->eosw_txq[qid]; 2343 spin_lock_bh(&eosw_txq->lock); 2344 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) 2345 goto out_unlock; 2346 2347 ret = eosw_txq_enqueue(eosw_txq, skb); 2348 if (ret) 2349 goto out_unlock; 2350 2351 /* SKB is queued for processing until credits are available. 2352 * So, call the destructor now and we'll free the skb later 2353 * after it has been successfully transmitted. 2354 */ 2355 skb_orphan(skb); 2356 2357 eosw_txq_advance(eosw_txq, 1); 2358 ethofld_xmit(dev, eosw_txq); 2359 spin_unlock_bh(&eosw_txq->lock); 2360 return NETDEV_TX_OK; 2361 2362 out_unlock: 2363 spin_unlock_bh(&eosw_txq->lock); 2364 out_free: 2365 dev_kfree_skb_any(skb); 2366 return NETDEV_TX_OK; 2367 } 2368 2369 netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) 2370 { 2371 struct port_info *pi = netdev_priv(dev); 2372 u16 qid = skb_get_queue_mapping(skb); 2373 2374 if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) 2375 return cxgb4_vf_eth_xmit(skb, dev); 2376 2377 if (unlikely(qid >= pi->nqsets)) 2378 return cxgb4_ethofld_xmit(skb, dev); 2379 2380 return cxgb4_eth_xmit(skb, dev); 2381 } 2382 2383 static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq) 2384 { 2385 int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; 2386 int pidx = eosw_txq->pidx; 2387 struct sk_buff *skb; 2388 2389 if (!pktcount) 2390 return; 2391 2392 if (pktcount < 0) 2393 pktcount += eosw_txq->ndesc; 2394 2395 while (pktcount--) { 2396 pidx--; 2397 if (pidx < 0) 2398 pidx += eosw_txq->ndesc; 2399 2400 skb = eosw_txq->desc[pidx].skb; 2401 if (skb) { 2402 dev_consume_skb_any(skb); 2403 eosw_txq->desc[pidx].skb = NULL; 2404 eosw_txq->inuse--; 2405 } 2406 } 2407 2408 eosw_txq->pidx = eosw_txq->last_pidx + 1; 2409 } 2410 2411 /** 2412 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc. 2413 * @dev - netdevice 2414 * @eotid - ETHOFLD tid to bind/unbind 2415 * @tc - traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid 2416 * 2417 * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class. 2418 * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from 2419 * a traffic class. 
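 * Returns 0 on success or a negative errno if the eotid lookup, skb allocation, or FLOWC enqueue fails.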
2420 */ 2421 int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc) 2422 { 2423 struct port_info *pi = netdev2pinfo(dev); 2424 struct adapter *adap = netdev2adap(dev); 2425 enum sge_eosw_state next_state; 2426 struct sge_eosw_txq *eosw_txq; 2427 u32 len, len16, nparams = 6; 2428 struct fw_flowc_wr *flowc; 2429 struct eotid_entry *entry; 2430 struct sge_ofld_rxq *rxq; 2431 struct sk_buff *skb; 2432 int ret = 0; 2433 2434 len = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval) * nparams; 2435 len16 = DIV_ROUND_UP(len, 16); 2436 2437 entry = cxgb4_lookup_eotid(&adap->tids, eotid); 2438 if (!entry) 2439 return -ENOMEM; 2440 2441 eosw_txq = (struct sge_eosw_txq *)entry->data; 2442 if (!eosw_txq) 2443 return -ENOMEM; 2444 2445 skb = alloc_skb(len, GFP_KERNEL); 2446 if (!skb) 2447 return -ENOMEM; 2448 2449 spin_lock_bh(&eosw_txq->lock); 2450 if (tc != FW_SCHED_CLS_NONE) { 2451 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED) 2452 goto out_unlock; 2453 2454 next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND; 2455 } else { 2456 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) 2457 goto out_unlock; 2458 2459 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND; 2460 } 2461 2462 flowc = __skb_put(skb, len); 2463 memset(flowc, 0, len); 2464 2465 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid]; 2466 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) | 2467 FW_WR_FLOWID_V(eosw_txq->hwtid)); 2468 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | 2469 FW_FLOWC_WR_NPARAMS_V(nparams) | 2470 FW_WR_COMPL_V(1)); 2471 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 2472 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf)); 2473 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 2474 flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan); 2475 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 2476 flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan); 2477 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 2478 flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id); 2479 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 2480 flowc->mnemval[4].val = cpu_to_be32(tc); 2481 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE; 2482 flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ? 2483 FW_FLOWC_MNEM_EOSTATE_CLOSING : 2484 FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); 2485 2486 /* Free up any pending skbs to ensure there's room for 2487 * termination FLOWC. 2488 */ 2489 if (tc == FW_SCHED_CLS_NONE) 2490 eosw_txq_flush_pending_skbs(eosw_txq); 2491 2492 ret = eosw_txq_enqueue(eosw_txq, skb); 2493 if (ret) { 2494 dev_consume_skb_any(skb); 2495 goto out_unlock; 2496 } 2497 2498 eosw_txq->state = next_state; 2499 eosw_txq->flowc_idx = eosw_txq->pidx; 2500 eosw_txq_advance(eosw_txq, 1); 2501 ethofld_xmit(dev, eosw_txq); 2502 2503 out_unlock: 2504 spin_unlock_bh(&eosw_txq->lock); 2505 return ret; 2506 } 2507 2508 /** 2509 * is_imm - check whether a packet can be sent as immediate data 2510 * @skb: the packet 2511 * 2512 * Returns true if a packet can be sent as a WR with immediate data. 2513 */ 2514 static inline int is_imm(const struct sk_buff *skb) 2515 { 2516 return skb->len <= MAX_CTRL_WR_LEN; 2517 } 2518 2519 /** 2520 * ctrlq_check_stop - check if a control queue is full and should stop 2521 * @q: the queue 2522 * @wr: most recent WR written to the queue 2523 * 2524 * Check if a control queue has become full and should be stopped. 2525 * We clean up control queue descriptors very lazily, only when we are out. 
2526 * If the queue is still full after reclaiming any completed descriptors 2527 * we suspend it and have the last WR wake it up. 2528 */ 2529 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) 2530 { 2531 reclaim_completed_tx_imm(&q->q); 2532 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { 2533 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); 2534 q->q.stops++; 2535 q->full = 1; 2536 } 2537 } 2538 2539 /** 2540 * ctrl_xmit - send a packet through an SGE control Tx queue 2541 * @q: the control queue 2542 * @skb: the packet 2543 * 2544 * Send a packet through an SGE control Tx queue. Packets sent through 2545 * a control queue must fit entirely as immediate data. 2546 */ 2547 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) 2548 { 2549 unsigned int ndesc; 2550 struct fw_wr_hdr *wr; 2551 2552 if (unlikely(!is_imm(skb))) { 2553 WARN_ON(1); 2554 dev_kfree_skb(skb); 2555 return NET_XMIT_DROP; 2556 } 2557 2558 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); 2559 spin_lock(&q->sendq.lock); 2560 2561 if (unlikely(q->full)) { 2562 skb->priority = ndesc; /* save for restart */ 2563 __skb_queue_tail(&q->sendq, skb); 2564 spin_unlock(&q->sendq.lock); 2565 return NET_XMIT_CN; 2566 } 2567 2568 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; 2569 cxgb4_inline_tx_skb(skb, &q->q, wr); 2570 2571 txq_advance(&q->q, ndesc); 2572 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) 2573 ctrlq_check_stop(q, wr); 2574 2575 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); 2576 spin_unlock(&q->sendq.lock); 2577 2578 kfree_skb(skb); 2579 return NET_XMIT_SUCCESS; 2580 } 2581 2582 /** 2583 * restart_ctrlq - restart a suspended control queue 2584 * @data: the control queue to restart 2585 * 2586 * Resumes transmission on a suspended Tx control queue. 2587 */ 2588 static void restart_ctrlq(unsigned long data) 2589 { 2590 struct sk_buff *skb; 2591 unsigned int written = 0; 2592 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; 2593 2594 spin_lock(&q->sendq.lock); 2595 reclaim_completed_tx_imm(&q->q); 2596 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ 2597 2598 while ((skb = __skb_dequeue(&q->sendq)) != NULL) { 2599 struct fw_wr_hdr *wr; 2600 unsigned int ndesc = skb->priority; /* previously saved */ 2601 2602 written += ndesc; 2603 /* Write descriptors and free skbs outside the lock to limit 2604 * wait times. q->full is still set so new skbs will be queued. 2605 */ 2606 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; 2607 txq_advance(&q->q, ndesc); 2608 spin_unlock(&q->sendq.lock); 2609 2610 cxgb4_inline_tx_skb(skb, &q->q, wr); 2611 kfree_skb(skb); 2612 2613 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { 2614 unsigned long old = q->q.stops; 2615 2616 ctrlq_check_stop(q, wr); 2617 if (q->q.stops != old) { /* suspended anew */ 2618 spin_lock(&q->sendq.lock); 2619 goto ringdb; 2620 } 2621 } 2622 if (written > 16) { 2623 cxgb4_ring_tx_db(q->adap, &q->q, written); 2624 written = 0; 2625 } 2626 spin_lock(&q->sendq.lock); 2627 } 2628 q->full = 0; 2629 ringdb: 2630 if (written) 2631 cxgb4_ring_tx_db(q->adap, &q->q, written); 2632 spin_unlock(&q->sendq.lock); 2633 } 2634 2635 /** 2636 * t4_mgmt_tx - send a management message 2637 * @adap: the adapter 2638 * @skb: the packet containing the management message 2639 * 2640 * Send a management message through control queue 0. 
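 * The message must fit entirely as immediate data (see is_imm()); if the control queue is temporarily full, the skb is queued and sent once the queue is restarted. Returns a NET_XMIT_* value.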
2641 */ 2642 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) 2643 { 2644 int ret; 2645 2646 local_bh_disable(); 2647 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); 2648 local_bh_enable(); 2649 return ret; 2650 } 2651 2652 /** 2653 * is_ofld_imm - check whether a packet can be sent as immediate data 2654 * @skb: the packet 2655 * 2656 * Returns true if a packet can be sent as an offload WR with immediate 2657 * data. We currently use the same limit as for Ethernet packets. 2658 */ 2659 static inline int is_ofld_imm(const struct sk_buff *skb) 2660 { 2661 struct work_request_hdr *req = (struct work_request_hdr *)skb->data; 2662 unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); 2663 2664 if (opcode == FW_CRYPTO_LOOKASIDE_WR) 2665 return skb->len <= SGE_MAX_WR_LEN; 2666 else 2667 return skb->len <= MAX_IMM_TX_PKT_LEN; 2668 } 2669 2670 /** 2671 * calc_tx_flits_ofld - calculate # of flits for an offload packet 2672 * @skb: the packet 2673 * 2674 * Returns the number of flits needed for the given offload packet. 2675 * These packets are already fully constructed and no additional headers 2676 * will be added. 2677 */ 2678 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) 2679 { 2680 unsigned int flits, cnt; 2681 2682 if (is_ofld_imm(skb)) 2683 return DIV_ROUND_UP(skb->len, 8); 2684 2685 flits = skb_transport_offset(skb) / 8U; /* headers */ 2686 cnt = skb_shinfo(skb)->nr_frags; 2687 if (skb_tail_pointer(skb) != skb_transport_header(skb)) 2688 cnt++; 2689 return flits + sgl_len(cnt); 2690 } 2691 2692 /** 2693 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion 2694 * @adap: the adapter 2695 * @q: the queue to stop 2696 * 2697 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting 2698 * inability to map packets. A periodic timer attempts to restart 2699 * queues so marked. 2700 */ 2701 static void txq_stop_maperr(struct sge_uld_txq *q) 2702 { 2703 q->mapping_err++; 2704 q->q.stops++; 2705 set_bit(q->q.cntxt_id - q->adap->sge.egr_start, 2706 q->adap->sge.txq_maperr); 2707 } 2708 2709 /** 2710 * ofldtxq_stop - stop an offload Tx queue that has become full 2711 * @q: the queue to stop 2712 * @wr: the Work Request causing the queue to become full 2713 * 2714 * Stops an offload Tx queue that has become full and modifies the packet 2715 * being written to request a wakeup. 2716 */ 2717 static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr) 2718 { 2719 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); 2720 q->q.stops++; 2721 q->full = 1; 2722 } 2723 2724 /** 2725 * service_ofldq - service/restart a suspended offload queue 2726 * @q: the offload queue 2727 * 2728 * Services an offload Tx queue by moving packets from its Pending Send 2729 * Queue to the Hardware TX ring. The function starts and ends with the 2730 * Send Queue locked, but drops the lock while putting the skb at the 2731 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock 2732 * allows more skbs to be added to the Send Queue by other threads. 2733 * The packet being processed at the head of the Pending Send Queue is 2734 * left on the queue in case we experience DMA Mapping errors, etc. 2735 * and need to give up and restart later. 2736 * 2737 * service_ofldq() can be thought of as a task which opportunistically 2738 * uses other threads execution contexts. We use the Offload Queue 2739 * boolean "service_ofldq_running" to make sure that only one instance 2740 * is ever running at a time ... 
2741 */ 2742 static void service_ofldq(struct sge_uld_txq *q) 2743 __must_hold(&q->sendq.lock) 2744 { 2745 u64 *pos, *before, *end; 2746 int credits; 2747 struct sk_buff *skb; 2748 struct sge_txq *txq; 2749 unsigned int left; 2750 unsigned int written = 0; 2751 unsigned int flits, ndesc; 2752 2753 /* If another thread is currently in service_ofldq() processing the 2754 * Pending Send Queue then there's nothing to do. Otherwise, flag 2755 * that we're doing the work and continue. Examining/modifying 2756 * the Offload Queue boolean "service_ofldq_running" must be done 2757 * while holding the Pending Send Queue Lock. 2758 */ 2759 if (q->service_ofldq_running) 2760 return; 2761 q->service_ofldq_running = true; 2762 2763 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { 2764 /* We drop the lock while we're working with the skb at the 2765 * head of the Pending Send Queue. This allows more skbs to 2766 * be added to the Pending Send Queue while we're working on 2767 * this one. We don't need to lock to guard the TX Ring 2768 * updates because only one thread of execution is ever 2769 * allowed into service_ofldq() at a time. 2770 */ 2771 spin_unlock(&q->sendq.lock); 2772 2773 cxgb4_reclaim_completed_tx(q->adap, &q->q, false); 2774 2775 flits = skb->priority; /* previously saved */ 2776 ndesc = flits_to_desc(flits); 2777 credits = txq_avail(&q->q) - ndesc; 2778 BUG_ON(credits < 0); 2779 if (unlikely(credits < TXQ_STOP_THRES)) 2780 ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data); 2781 2782 pos = (u64 *)&q->q.desc[q->q.pidx]; 2783 if (is_ofld_imm(skb)) 2784 cxgb4_inline_tx_skb(skb, &q->q, pos); 2785 else if (cxgb4_map_skb(q->adap->pdev_dev, skb, 2786 (dma_addr_t *)skb->head)) { 2787 txq_stop_maperr(q); 2788 spin_lock(&q->sendq.lock); 2789 break; 2790 } else { 2791 int last_desc, hdr_len = skb_transport_offset(skb); 2792 2793 /* The WR headers may not fit within one descriptor. 2794 * So we need to deal with wrap-around here. 2795 */ 2796 before = (u64 *)pos; 2797 end = (u64 *)pos + flits; 2798 txq = &q->q; 2799 pos = (void *)inline_tx_skb_header(skb, &q->q, 2800 (void *)pos, 2801 hdr_len); 2802 if (before > (u64 *)pos) { 2803 left = (u8 *)end - (u8 *)txq->stat; 2804 end = (void *)txq->desc + left; 2805 } 2806 2807 /* If current position is already at the end of the 2808 * ofld queue, reset the current to point to 2809 * start of the queue and update the end ptr as well. 2810 */ 2811 if (pos == (u64 *)txq->stat) { 2812 left = (u8 *)end - (u8 *)txq->stat; 2813 end = (void *)txq->desc + left; 2814 pos = (void *)txq->desc; 2815 } 2816 2817 cxgb4_write_sgl(skb, &q->q, (void *)pos, 2818 end, hdr_len, 2819 (dma_addr_t *)skb->head); 2820 #ifdef CONFIG_NEED_DMA_MAP_STATE 2821 skb->dev = q->adap->port[0]; 2822 skb->destructor = deferred_unmap_destructor; 2823 #endif 2824 last_desc = q->q.pidx + ndesc - 1; 2825 if (last_desc >= q->q.size) 2826 last_desc -= q->q.size; 2827 q->q.sdesc[last_desc].skb = skb; 2828 } 2829 2830 txq_advance(&q->q, ndesc); 2831 written += ndesc; 2832 if (unlikely(written > 32)) { 2833 cxgb4_ring_tx_db(q->adap, &q->q, written); 2834 written = 0; 2835 } 2836 2837 /* Reacquire the Pending Send Queue Lock so we can unlink the 2838 * skb we've just successfully transferred to the TX Ring and 2839 * loop for the next skb which may be at the head of the 2840 * Pending Send Queue. 
2841 */ 2842 spin_lock(&q->sendq.lock); 2843 __skb_unlink(skb, &q->sendq); 2844 if (is_ofld_imm(skb)) 2845 kfree_skb(skb); 2846 } 2847 if (likely(written)) 2848 cxgb4_ring_tx_db(q->adap, &q->q, written); 2849 2850 /*Indicate that no thread is processing the Pending Send Queue 2851 * currently. 2852 */ 2853 q->service_ofldq_running = false; 2854 } 2855 2856 /** 2857 * ofld_xmit - send a packet through an offload queue 2858 * @q: the Tx offload queue 2859 * @skb: the packet 2860 * 2861 * Send an offload packet through an SGE offload queue. 2862 */ 2863 static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb) 2864 { 2865 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ 2866 spin_lock(&q->sendq.lock); 2867 2868 /* Queue the new skb onto the Offload Queue's Pending Send Queue. If 2869 * that results in this new skb being the only one on the queue, start 2870 * servicing it. If there are other skbs already on the list, then 2871 * either the queue is currently being processed or it's been stopped 2872 * for some reason and it'll be restarted at a later time. Restart 2873 * paths are triggered by events like experiencing a DMA Mapping Error 2874 * or filling the Hardware TX Ring. 2875 */ 2876 __skb_queue_tail(&q->sendq, skb); 2877 if (q->sendq.qlen == 1) 2878 service_ofldq(q); 2879 2880 spin_unlock(&q->sendq.lock); 2881 return NET_XMIT_SUCCESS; 2882 } 2883 2884 /** 2885 * restart_ofldq - restart a suspended offload queue 2886 * @data: the offload queue to restart 2887 * 2888 * Resumes transmission on a suspended Tx offload queue. 2889 */ 2890 static void restart_ofldq(unsigned long data) 2891 { 2892 struct sge_uld_txq *q = (struct sge_uld_txq *)data; 2893 2894 spin_lock(&q->sendq.lock); 2895 q->full = 0; /* the queue actually is completely empty now */ 2896 service_ofldq(q); 2897 spin_unlock(&q->sendq.lock); 2898 } 2899 2900 /** 2901 * skb_txq - return the Tx queue an offload packet should use 2902 * @skb: the packet 2903 * 2904 * Returns the Tx queue an offload packet should use as indicated by bits 2905 * 1-15 in the packet's queue_mapping. 2906 */ 2907 static inline unsigned int skb_txq(const struct sk_buff *skb) 2908 { 2909 return skb->queue_mapping >> 1; 2910 } 2911 2912 /** 2913 * is_ctrl_pkt - return whether an offload packet is a control packet 2914 * @skb: the packet 2915 * 2916 * Returns whether an offload packet should use an OFLD or a CTRL 2917 * Tx queue as indicated by bit 0 in the packet's queue_mapping. 2918 */ 2919 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) 2920 { 2921 return skb->queue_mapping & 1; 2922 } 2923 2924 static inline int uld_send(struct adapter *adap, struct sk_buff *skb, 2925 unsigned int tx_uld_type) 2926 { 2927 struct sge_uld_txq_info *txq_info; 2928 struct sge_uld_txq *txq; 2929 unsigned int idx = skb_txq(skb); 2930 2931 if (unlikely(is_ctrl_pkt(skb))) { 2932 /* Single ctrl queue is a requirement for LE workaround path */ 2933 if (adap->tids.nsftids) 2934 idx = 0; 2935 return ctrl_xmit(&adap->sge.ctrlq[idx], skb); 2936 } 2937 2938 txq_info = adap->sge.uld_txq_info[tx_uld_type]; 2939 if (unlikely(!txq_info)) { 2940 WARN_ON(true); 2941 return NET_XMIT_DROP; 2942 } 2943 2944 txq = &txq_info->uldtxq[idx]; 2945 return ofld_xmit(txq, skb); 2946 } 2947 2948 /** 2949 * t4_ofld_send - send an offload packet 2950 * @adap: the adapter 2951 * @skb: the packet 2952 * 2953 * Sends an offload packet. 
We use the packet queue_mapping to select the 2954 * appropriate Tx queue as follows: bit 0 indicates whether the packet 2955 * should be sent as regular or control, bits 1-15 select the queue. 2956 */ 2957 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) 2958 { 2959 int ret; 2960 2961 local_bh_disable(); 2962 ret = uld_send(adap, skb, CXGB4_TX_OFLD); 2963 local_bh_enable(); 2964 return ret; 2965 } 2966 2967 /** 2968 * cxgb4_ofld_send - send an offload packet 2969 * @dev: the net device 2970 * @skb: the packet 2971 * 2972 * Sends an offload packet. This is an exported version of @t4_ofld_send, 2973 * intended for ULDs. 2974 */ 2975 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) 2976 { 2977 return t4_ofld_send(netdev2adap(dev), skb); 2978 } 2979 EXPORT_SYMBOL(cxgb4_ofld_send); 2980 2981 static void *inline_tx_header(const void *src, 2982 const struct sge_txq *q, 2983 void *pos, int length) 2984 { 2985 int left = (void *)q->stat - pos; 2986 u64 *p; 2987 2988 if (likely(length <= left)) { 2989 memcpy(pos, src, length); 2990 pos += length; 2991 } else { 2992 memcpy(pos, src, left); 2993 memcpy(q->desc, src + left, length - left); 2994 pos = (void *)q->desc + (length - left); 2995 } 2996 /* 0-pad to multiple of 16 */ 2997 p = PTR_ALIGN(pos, 8); 2998 if ((uintptr_t)p & 8) { 2999 *p = 0; 3000 return p + 1; 3001 } 3002 return p; 3003 } 3004 3005 /** 3006 * ofld_xmit_direct - copy a WR into offload queue 3007 * @q: the Tx offload queue 3008 * @src: location of WR 3009 * @len: WR length 3010 * 3011 * Copy an immediate WR into an uncontended SGE offload queue. 3012 */ 3013 static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src, 3014 unsigned int len) 3015 { 3016 unsigned int ndesc; 3017 int credits; 3018 u64 *pos; 3019 3020 /* Use the lower limit as the cut-off */ 3021 if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) { 3022 WARN_ON(1); 3023 return NET_XMIT_DROP; 3024 } 3025 3026 /* Don't return NET_XMIT_CN here as the current 3027 * implementation doesn't queue the request 3028 * using an skb when the following conditions not met 3029 */ 3030 if (!spin_trylock(&q->sendq.lock)) 3031 return NET_XMIT_DROP; 3032 3033 if (q->full || !skb_queue_empty(&q->sendq) || 3034 q->service_ofldq_running) { 3035 spin_unlock(&q->sendq.lock); 3036 return NET_XMIT_DROP; 3037 } 3038 ndesc = flits_to_desc(DIV_ROUND_UP(len, 8)); 3039 credits = txq_avail(&q->q) - ndesc; 3040 pos = (u64 *)&q->q.desc[q->q.pidx]; 3041 3042 /* ofldtxq_stop modifies WR header in-situ */ 3043 inline_tx_header(src, &q->q, pos, len); 3044 if (unlikely(credits < TXQ_STOP_THRES)) 3045 ofldtxq_stop(q, (struct fw_wr_hdr *)pos); 3046 txq_advance(&q->q, ndesc); 3047 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); 3048 3049 spin_unlock(&q->sendq.lock); 3050 return NET_XMIT_SUCCESS; 3051 } 3052 3053 int cxgb4_immdata_send(struct net_device *dev, unsigned int idx, 3054 const void *src, unsigned int len) 3055 { 3056 struct sge_uld_txq_info *txq_info; 3057 struct sge_uld_txq *txq; 3058 struct adapter *adap; 3059 int ret; 3060 3061 adap = netdev2adap(dev); 3062 3063 local_bh_disable(); 3064 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; 3065 if (unlikely(!txq_info)) { 3066 WARN_ON(true); 3067 local_bh_enable(); 3068 return NET_XMIT_DROP; 3069 } 3070 txq = &txq_info->uldtxq[idx]; 3071 3072 ret = ofld_xmit_direct(txq, src, len); 3073 local_bh_enable(); 3074 return net_xmit_eval(ret); 3075 } 3076 EXPORT_SYMBOL(cxgb4_immdata_send); 3077 3078 /** 3079 * t4_crypto_send - send crypto packet 3080 * @adap: the adapter 3081 * @skb: the packet 
3082 * 3083 * Sends crypto packet. We use the packet queue_mapping to select the 3084 * appropriate Tx queue as follows: bit 0 indicates whether the packet 3085 * should be sent as regular or control, bits 1-15 select the queue. 3086 */ 3087 static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb) 3088 { 3089 int ret; 3090 3091 local_bh_disable(); 3092 ret = uld_send(adap, skb, CXGB4_TX_CRYPTO); 3093 local_bh_enable(); 3094 return ret; 3095 } 3096 3097 /** 3098 * cxgb4_crypto_send - send crypto packet 3099 * @dev: the net device 3100 * @skb: the packet 3101 * 3102 * Sends crypto packet. This is an exported version of @t4_crypto_send, 3103 * intended for ULDs. 3104 */ 3105 int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb) 3106 { 3107 return t4_crypto_send(netdev2adap(dev), skb); 3108 } 3109 EXPORT_SYMBOL(cxgb4_crypto_send); 3110 3111 static inline void copy_frags(struct sk_buff *skb, 3112 const struct pkt_gl *gl, unsigned int offset) 3113 { 3114 int i; 3115 3116 /* usually there's just one frag */ 3117 __skb_fill_page_desc(skb, 0, gl->frags[0].page, 3118 gl->frags[0].offset + offset, 3119 gl->frags[0].size - offset); 3120 skb_shinfo(skb)->nr_frags = gl->nfrags; 3121 for (i = 1; i < gl->nfrags; i++) 3122 __skb_fill_page_desc(skb, i, gl->frags[i].page, 3123 gl->frags[i].offset, 3124 gl->frags[i].size); 3125 3126 /* get a reference to the last page, we don't own it */ 3127 get_page(gl->frags[gl->nfrags - 1].page); 3128 } 3129 3130 /** 3131 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list 3132 * @gl: the gather list 3133 * @skb_len: size of sk_buff main body if it carries fragments 3134 * @pull_len: amount of data to move to the sk_buff's main body 3135 * 3136 * Builds an sk_buff from the given packet gather list. Returns the 3137 * sk_buff or %NULL if sk_buff allocation failed. 3138 */ 3139 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, 3140 unsigned int skb_len, unsigned int pull_len) 3141 { 3142 struct sk_buff *skb; 3143 3144 /* 3145 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer 3146 * size, which is expected since buffers are at least PAGE_SIZEd. 3147 * In this case packets up to RX_COPY_THRES have only one fragment. 3148 */ 3149 if (gl->tot_len <= RX_COPY_THRES) { 3150 skb = dev_alloc_skb(gl->tot_len); 3151 if (unlikely(!skb)) 3152 goto out; 3153 __skb_put(skb, gl->tot_len); 3154 skb_copy_to_linear_data(skb, gl->va, gl->tot_len); 3155 } else { 3156 skb = dev_alloc_skb(skb_len); 3157 if (unlikely(!skb)) 3158 goto out; 3159 __skb_put(skb, pull_len); 3160 skb_copy_to_linear_data(skb, gl->va, pull_len); 3161 3162 copy_frags(skb, gl, pull_len); 3163 skb->len = gl->tot_len; 3164 skb->data_len = skb->len - pull_len; 3165 skb->truesize += skb->data_len; 3166 } 3167 out: return skb; 3168 } 3169 EXPORT_SYMBOL(cxgb4_pktgl_to_skb); 3170 3171 /** 3172 * t4_pktgl_free - free a packet gather list 3173 * @gl: the gather list 3174 * 3175 * Releases the pages of a packet gather list. We do not own the last 3176 * page on the list and do not free it. 3177 */ 3178 static void t4_pktgl_free(const struct pkt_gl *gl) 3179 { 3180 int n; 3181 const struct page_frag *p; 3182 3183 for (p = gl->frags, n = gl->nfrags - 1; n--; p++) 3184 put_page(p->page); 3185 } 3186 3187 /* 3188 * Process an MPS trace packet. Give it an unused protocol number so it won't 3189 * be delivered to anyone and send it to the stack for capture. 
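 * (We use the unused protocol number 0xffff, set skb->dev to port 0 and hand the skb to netif_receive_skb() so capture tools can see it.)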
3190 */ 3191 static noinline int handle_trace_pkt(struct adapter *adap, 3192 const struct pkt_gl *gl) 3193 { 3194 struct sk_buff *skb; 3195 3196 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); 3197 if (unlikely(!skb)) { 3198 t4_pktgl_free(gl); 3199 return 0; 3200 } 3201 3202 if (is_t4(adap->params.chip)) 3203 __skb_pull(skb, sizeof(struct cpl_trace_pkt)); 3204 else 3205 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); 3206 3207 skb_reset_mac_header(skb); 3208 skb->protocol = htons(0xffff); 3209 skb->dev = adap->port[0]; 3210 netif_receive_skb(skb); 3211 return 0; 3212 } 3213 3214 /** 3215 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp 3216 * @adap: the adapter 3217 * @hwtstamps: time stamp structure to update 3218 * @sgetstamp: 60bit iqe timestamp 3219 * 3220 * Every ingress queue entry has the 60-bit timestamp, convert that timestamp 3221 * which is in Core Clock ticks into ktime_t and assign it 3222 **/ 3223 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap, 3224 struct skb_shared_hwtstamps *hwtstamps, 3225 u64 sgetstamp) 3226 { 3227 u64 ns; 3228 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2); 3229 3230 ns = div_u64(tmp, adap->params.vpd.cclk); 3231 3232 memset(hwtstamps, 0, sizeof(*hwtstamps)); 3233 hwtstamps->hwtstamp = ns_to_ktime(ns); 3234 } 3235 3236 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, 3237 const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len) 3238 { 3239 struct adapter *adapter = rxq->rspq.adap; 3240 struct sge *s = &adapter->sge; 3241 struct port_info *pi; 3242 int ret; 3243 struct sk_buff *skb; 3244 3245 skb = napi_get_frags(&rxq->rspq.napi); 3246 if (unlikely(!skb)) { 3247 t4_pktgl_free(gl); 3248 rxq->stats.rx_drops++; 3249 return; 3250 } 3251 3252 copy_frags(skb, gl, s->pktshift); 3253 if (tnl_hdr_len) 3254 skb->csum_level = 1; 3255 skb->len = gl->tot_len - s->pktshift; 3256 skb->data_len = skb->len; 3257 skb->truesize += skb->data_len; 3258 skb->ip_summed = CHECKSUM_UNNECESSARY; 3259 skb_record_rx_queue(skb, rxq->rspq.idx); 3260 pi = netdev_priv(skb->dev); 3261 if (pi->rxtstamp) 3262 cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb), 3263 gl->sgetstamp); 3264 if (rxq->rspq.netdev->features & NETIF_F_RXHASH) 3265 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, 3266 PKT_HASH_TYPE_L3); 3267 3268 if (unlikely(pkt->vlan_ex)) { 3269 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); 3270 rxq->stats.vlan_ex++; 3271 } 3272 ret = napi_gro_frags(&rxq->rspq.napi); 3273 if (ret == GRO_HELD) 3274 rxq->stats.lro_pkts++; 3275 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) 3276 rxq->stats.lro_merged++; 3277 rxq->stats.pkts++; 3278 rxq->stats.rx_cso++; 3279 } 3280 3281 enum { 3282 RX_NON_PTP_PKT = 0, 3283 RX_PTP_PKT_SUC = 1, 3284 RX_PTP_PKT_ERR = 2 3285 }; 3286 3287 /** 3288 * t4_systim_to_hwstamp - read hardware time stamp 3289 * @adap: the adapter 3290 * @skb: the packet 3291 * 3292 * Read Time Stamp from MPS packet and insert in skb which 3293 * is forwarded to PTP application 3294 */ 3295 static noinline int t4_systim_to_hwstamp(struct adapter *adapter, 3296 struct sk_buff *skb) 3297 { 3298 struct skb_shared_hwtstamps *hwtstamps; 3299 struct cpl_rx_mps_pkt *cpl = NULL; 3300 unsigned char *data; 3301 int offset; 3302 3303 cpl = (struct cpl_rx_mps_pkt *)skb->data; 3304 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) & 3305 X_CPL_RX_MPS_PKT_TYPE_PTP)) 3306 return RX_PTP_PKT_ERR; 3307 3308 data = skb->data + sizeof(*cpl); 3309 skb_pull(skb, 2 * sizeof(u64) + 
sizeof(struct cpl_rx_mps_pkt)); 3310 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN; 3311 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short)) 3312 return RX_PTP_PKT_ERR; 3313 3314 hwtstamps = skb_hwtstamps(skb); 3315 memset(hwtstamps, 0, sizeof(*hwtstamps)); 3316 hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data))); 3317 3318 return RX_PTP_PKT_SUC; 3319 } 3320 3321 /** 3322 * t4_rx_hststamp - Recv PTP Event Message 3323 * @adap: the adapter 3324 * @rsp: the response queue descriptor holding the RX_PKT message 3325 * @skb: the packet 3326 * 3327 * PTP enabled and MPS packet, read HW timestamp 3328 */ 3329 static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp, 3330 struct sge_eth_rxq *rxq, struct sk_buff *skb) 3331 { 3332 int ret; 3333 3334 if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) && 3335 !is_t4(adapter->params.chip))) { 3336 ret = t4_systim_to_hwstamp(adapter, skb); 3337 if (ret == RX_PTP_PKT_ERR) { 3338 kfree_skb(skb); 3339 rxq->stats.rx_drops++; 3340 } 3341 return ret; 3342 } 3343 return RX_NON_PTP_PKT; 3344 } 3345 3346 /** 3347 * t4_tx_hststamp - Loopback PTP Transmit Event Message 3348 * @adap: the adapter 3349 * @skb: the packet 3350 * @dev: the ingress net device 3351 * 3352 * Read hardware timestamp for the loopback PTP Tx event message 3353 */ 3354 static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb, 3355 struct net_device *dev) 3356 { 3357 struct port_info *pi = netdev_priv(dev); 3358 3359 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) { 3360 cxgb4_ptp_read_hwstamp(adapter, pi); 3361 kfree_skb(skb); 3362 return 0; 3363 } 3364 return 1; 3365 } 3366 3367 /** 3368 * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages 3369 * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue 3370 * @rsp: Response Entry pointer into Response Queue 3371 * @gl: Gather List pointer 3372 * 3373 * For adapters which support the SGE Doorbell Queue Timer facility, 3374 * we configure the Ethernet TX Queues to send CIDX Updates to the 3375 * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE 3376 * messages. This adds a small load to PCIe Link RX bandwidth and, 3377 * potentially, higher CPU Interrupt load, but allows us to respond 3378 * much more quickly to the CIDX Updates. This is important for 3379 * Upper Layer Software which isn't willing to have a large amount 3380 * of TX Data outstanding before receiving DMA Completions. 3381 */ 3382 static void t4_tx_completion_handler(struct sge_rspq *rspq, 3383 const __be64 *rsp, 3384 const struct pkt_gl *gl) 3385 { 3386 u8 opcode = ((const struct rss_header *)rsp)->opcode; 3387 struct port_info *pi = netdev_priv(rspq->netdev); 3388 struct adapter *adapter = rspq->adap; 3389 struct sge *s = &adapter->sge; 3390 struct sge_eth_txq *txq; 3391 3392 /* skip RSS header */ 3393 rsp++; 3394 3395 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. 
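 * If so, skip the CPL_FW4_MSG header and the inner RSS header to reach the encapsulated opcode before checking for CPL_SGE_EGR_UPDATE below.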
3396 */ 3397 if (unlikely(opcode == CPL_FW4_MSG && 3398 ((const struct cpl_fw4_msg *)rsp)->type == 3399 FW_TYPE_RSSCPL)) { 3400 rsp++; 3401 opcode = ((const struct rss_header *)rsp)->opcode; 3402 rsp++; 3403 } 3404 3405 if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) { 3406 pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n", 3407 __func__, opcode); 3408 return; 3409 } 3410 3411 txq = &s->ethtxq[pi->first_qset + rspq->idx]; 3412 t4_sge_eth_txq_egress_update(adapter, txq, -1); 3413 } 3414 3415 /** 3416 * t4_ethrx_handler - process an ingress ethernet packet 3417 * @q: the response queue that received the packet 3418 * @rsp: the response queue descriptor holding the RX_PKT message 3419 * @si: the gather list of packet fragments 3420 * 3421 * Process an ingress ethernet packet and deliver it to the stack. 3422 */ 3423 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, 3424 const struct pkt_gl *si) 3425 { 3426 bool csum_ok; 3427 struct sk_buff *skb; 3428 const struct cpl_rx_pkt *pkt; 3429 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 3430 struct adapter *adapter = q->adap; 3431 struct sge *s = &q->adap->sge; 3432 int cpl_trace_pkt = is_t4(q->adap->params.chip) ? 3433 CPL_TRACE_PKT : CPL_TRACE_PKT_T5; 3434 u16 err_vec, tnl_hdr_len = 0; 3435 struct port_info *pi; 3436 int ret = 0; 3437 3438 /* If we're looking at TX Queue CIDX Update, handle that separately 3439 * and return. 3440 */ 3441 if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) || 3442 (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) { 3443 t4_tx_completion_handler(q, rsp, si); 3444 return 0; 3445 } 3446 3447 if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) 3448 return handle_trace_pkt(q->adap, si); 3449 3450 pkt = (const struct cpl_rx_pkt *)rsp; 3451 /* Compressed error vector is enabled for T6 only */ 3452 if (q->adap->params.tp.rx_pkt_encap) { 3453 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); 3454 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); 3455 } else { 3456 err_vec = be16_to_cpu(pkt->err_vec); 3457 } 3458 3459 csum_ok = pkt->csum_calc && !err_vec && 3460 (q->netdev->features & NETIF_F_RXCSUM); 3461 3462 if (err_vec) 3463 rxq->stats.bad_rx_pkts++; 3464 3465 if (((pkt->l2info & htonl(RXF_TCP_F)) || 3466 tnl_hdr_len) && 3467 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { 3468 do_gro(rxq, si, pkt, tnl_hdr_len); 3469 return 0; 3470 } 3471 3472 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); 3473 if (unlikely(!skb)) { 3474 t4_pktgl_free(si); 3475 rxq->stats.rx_drops++; 3476 return 0; 3477 } 3478 pi = netdev_priv(q->netdev); 3479 3480 /* Handle PTP Event Rx packet */ 3481 if (unlikely(pi->ptp_enable)) { 3482 ret = t4_rx_hststamp(adapter, rsp, rxq, skb); 3483 if (ret == RX_PTP_PKT_ERR) 3484 return 0; 3485 } 3486 if (likely(!ret)) 3487 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */ 3488 3489 /* Handle the PTP Event Tx Loopback packet */ 3490 if (unlikely(pi->ptp_enable && !ret && 3491 (pkt->l2info & htonl(RXF_UDP_F)) && 3492 cxgb4_ptp_is_ptp_rx(skb))) { 3493 if (!t4_tx_hststamp(adapter, skb, q->netdev)) 3494 return 0; 3495 } 3496 3497 skb->protocol = eth_type_trans(skb, q->netdev); 3498 skb_record_rx_queue(skb, q->idx); 3499 if (skb->dev->features & NETIF_F_RXHASH) 3500 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, 3501 PKT_HASH_TYPE_L3); 3502 3503 rxq->stats.pkts++; 3504 3505 if (pi->rxtstamp) 3506 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb), 3507 si->sgetstamp); 3508 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { 3509 if 
(!pkt->ip_frag) { 3510 skb->ip_summed = CHECKSUM_UNNECESSARY; 3511 rxq->stats.rx_cso++; 3512 } else if (pkt->l2info & htonl(RXF_IP_F)) { 3513 __sum16 c = (__force __sum16)pkt->csum; 3514 skb->csum = csum_unfold(c); 3515 3516 if (tnl_hdr_len) { 3517 skb->ip_summed = CHECKSUM_UNNECESSARY; 3518 skb->csum_level = 1; 3519 } else { 3520 skb->ip_summed = CHECKSUM_COMPLETE; 3521 } 3522 rxq->stats.rx_cso++; 3523 } 3524 } else { 3525 skb_checksum_none_assert(skb); 3526 #ifdef CONFIG_CHELSIO_T4_FCOE 3527 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \ 3528 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F) 3529 3530 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { 3531 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && 3532 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { 3533 if (q->adap->params.tp.rx_pkt_encap) 3534 csum_ok = err_vec & 3535 T6_COMPR_RXERR_SUM_F; 3536 else 3537 csum_ok = err_vec & RXERR_CSUM_F; 3538 if (!csum_ok) 3539 skb->ip_summed = CHECKSUM_UNNECESSARY; 3540 } 3541 } 3542 3543 #undef CPL_RX_PKT_FLAGS 3544 #endif /* CONFIG_CHELSIO_T4_FCOE */ 3545 } 3546 3547 if (unlikely(pkt->vlan_ex)) { 3548 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); 3549 rxq->stats.vlan_ex++; 3550 } 3551 skb_mark_napi_id(skb, &q->napi); 3552 netif_receive_skb(skb); 3553 return 0; 3554 } 3555 3556 /** 3557 * restore_rx_bufs - put back a packet's Rx buffers 3558 * @si: the packet gather list 3559 * @q: the SGE free list 3560 * @frags: number of FL buffers to restore 3561 * 3562 * Puts back on an FL the Rx buffers associated with @si. The buffers 3563 * have already been unmapped and are left unmapped, we mark them so to 3564 * prevent further unmapping attempts. 3565 * 3566 * This function undoes a series of @unmap_rx_buf calls when we find out 3567 * that the current packet can't be processed right away afterall and we 3568 * need to come back to it later. This is a very rare event and there's 3569 * no effort to make this particularly efficient. 3570 */ 3571 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, 3572 int frags) 3573 { 3574 struct rx_sw_desc *d; 3575 3576 while (frags--) { 3577 if (q->cidx == 0) 3578 q->cidx = q->size - 1; 3579 else 3580 q->cidx--; 3581 d = &q->sdesc[q->cidx]; 3582 d->page = si->frags[frags].page; 3583 d->dma_addr |= RX_UNMAPPED_BUF; 3584 q->avail++; 3585 } 3586 } 3587 3588 /** 3589 * is_new_response - check if a response is newly written 3590 * @r: the response descriptor 3591 * @q: the response queue 3592 * 3593 * Returns true if a response descriptor contains a yet unprocessed 3594 * response. 3595 */ 3596 static inline bool is_new_response(const struct rsp_ctrl *r, 3597 const struct sge_rspq *q) 3598 { 3599 return (r->type_gen >> RSPD_GEN_S) == q->gen; 3600 } 3601 3602 /** 3603 * rspq_next - advance to the next entry in a response queue 3604 * @q: the queue 3605 * 3606 * Updates the state of a response queue to advance it to the next entry. 3607 */ 3608 static inline void rspq_next(struct sge_rspq *q) 3609 { 3610 q->cur_desc = (void *)q->cur_desc + q->iqe_len; 3611 if (unlikely(++q->cidx == q->size)) { 3612 q->cidx = 0; 3613 q->gen ^= 1; 3614 q->cur_desc = q->desc; 3615 } 3616 } 3617 3618 /** 3619 * process_responses - process responses from an SGE response queue 3620 * @q: the ingress queue to process 3621 * @budget: how many responses can be processed in this round 3622 * 3623 * Process responses from an SGE response queue up to the supplied budget. 
3624 * Responses include received packets as well as control messages from FW 3625 * or HW. 3626 * 3627 * Additionally choose the interrupt holdoff time for the next interrupt 3628 * on this queue. If the system is under memory shortage use a fairly 3629 * long delay to help recovery. 3630 */ 3631 static int process_responses(struct sge_rspq *q, int budget) 3632 { 3633 int ret, rsp_type; 3634 int budget_left = budget; 3635 const struct rsp_ctrl *rc; 3636 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 3637 struct adapter *adapter = q->adap; 3638 struct sge *s = &adapter->sge; 3639 3640 while (likely(budget_left)) { 3641 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 3642 if (!is_new_response(rc, q)) { 3643 if (q->flush_handler) 3644 q->flush_handler(q); 3645 break; 3646 } 3647 3648 dma_rmb(); 3649 rsp_type = RSPD_TYPE_G(rc->type_gen); 3650 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) { 3651 struct page_frag *fp; 3652 struct pkt_gl si; 3653 const struct rx_sw_desc *rsd; 3654 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; 3655 3656 if (len & RSPD_NEWBUF_F) { 3657 if (likely(q->offset > 0)) { 3658 free_rx_bufs(q->adap, &rxq->fl, 1); 3659 q->offset = 0; 3660 } 3661 len = RSPD_LEN_G(len); 3662 } 3663 si.tot_len = len; 3664 3665 /* gather packet fragments */ 3666 for (frags = 0, fp = si.frags; ; frags++, fp++) { 3667 rsd = &rxq->fl.sdesc[rxq->fl.cidx]; 3668 bufsz = get_buf_size(adapter, rsd); 3669 fp->page = rsd->page; 3670 fp->offset = q->offset; 3671 fp->size = min(bufsz, len); 3672 len -= fp->size; 3673 if (!len) 3674 break; 3675 unmap_rx_buf(q->adap, &rxq->fl); 3676 } 3677 3678 si.sgetstamp = SGE_TIMESTAMP_G( 3679 be64_to_cpu(rc->last_flit)); 3680 /* 3681 * Last buffer remains mapped so explicitly make it 3682 * coherent for CPU access. 3683 */ 3684 dma_sync_single_for_cpu(q->adap->pdev_dev, 3685 get_buf_addr(rsd), 3686 fp->size, DMA_FROM_DEVICE); 3687 3688 si.va = page_address(si.frags[0].page) + 3689 si.frags[0].offset; 3690 prefetch(si.va); 3691 3692 si.nfrags = frags + 1; 3693 ret = q->handler(q, q->cur_desc, &si); 3694 if (likely(ret == 0)) 3695 q->offset += ALIGN(fp->size, s->fl_align); 3696 else 3697 restore_rx_bufs(&si, &rxq->fl, frags); 3698 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) { 3699 ret = q->handler(q, q->cur_desc, NULL); 3700 } else { 3701 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); 3702 } 3703 3704 if (unlikely(ret)) { 3705 /* couldn't process descriptor, back off for recovery */ 3706 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX); 3707 break; 3708 } 3709 3710 rspq_next(q); 3711 budget_left--; 3712 } 3713 3714 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16) 3715 __refill_fl(q->adap, &rxq->fl); 3716 return budget - budget_left; 3717 } 3718 3719 /** 3720 * napi_rx_handler - the NAPI handler for Rx processing 3721 * @napi: the napi instance 3722 * @budget: how many packets we can process in this round 3723 * 3724 * Handler for new data events when using NAPI. This does not need any 3725 * locking or protection from interrupts as data interrupts are off at 3726 * this point and other adapter interrupts do not interfere (the latter 3727 * is not a concern at all with MSI-X as non-data interrupts then have 3728 * a separate handler).
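 * Returns the number of Rx responses processed during this invocation.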
3729 */ 3730 static int napi_rx_handler(struct napi_struct *napi, int budget) 3731 { 3732 unsigned int params; 3733 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); 3734 int work_done; 3735 u32 val; 3736 3737 work_done = process_responses(q, budget); 3738 if (likely(work_done < budget)) { 3739 int timer_index; 3740 3741 napi_complete_done(napi, work_done); 3742 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); 3743 3744 if (q->adaptive_rx) { 3745 if (work_done > max(timer_pkt_quota[timer_index], 3746 MIN_NAPI_WORK)) 3747 timer_index = (timer_index + 1); 3748 else 3749 timer_index = timer_index - 1; 3750 3751 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); 3752 q->next_intr_params = 3753 QINTR_TIMER_IDX_V(timer_index) | 3754 QINTR_CNT_EN_V(0); 3755 params = q->next_intr_params; 3756 } else { 3757 params = q->next_intr_params; 3758 q->next_intr_params = q->intr_params; 3759 } 3760 } else 3761 params = QINTR_TIMER_IDX_V(7); 3762 3763 val = CIDXINC_V(work_done) | SEINTARM_V(params); 3764 3765 /* If we don't have access to the new User GTS (T5+), use the old 3766 * doorbell mechanism; otherwise use the new BAR2 mechanism. 3767 */ 3768 if (unlikely(q->bar2_addr == NULL)) { 3769 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), 3770 val | INGRESSQID_V((u32)q->cntxt_id)); 3771 } else { 3772 writel(val | INGRESSQID_V(q->bar2_qid), 3773 q->bar2_addr + SGE_UDB_GTS); 3774 wmb(); 3775 } 3776 return work_done; 3777 } 3778 3779 void cxgb4_ethofld_restart(unsigned long data) 3780 { 3781 struct sge_eosw_txq *eosw_txq = (struct sge_eosw_txq *)data; 3782 int pktcount; 3783 3784 spin_lock(&eosw_txq->lock); 3785 pktcount = eosw_txq->cidx - eosw_txq->last_cidx; 3786 if (pktcount < 0) 3787 pktcount += eosw_txq->ndesc; 3788 3789 if (pktcount) { 3790 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev), 3791 eosw_txq, pktcount); 3792 eosw_txq->inuse -= pktcount; 3793 } 3794 3795 /* There may be some packets waiting for completions. So, 3796 * attempt to send these packets now. 3797 */ 3798 ethofld_xmit(eosw_txq->netdev, eosw_txq); 3799 spin_unlock(&eosw_txq->lock); 3800 } 3801 3802 /* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions 3803 * @q: the response queue that received the packet 3804 * @rsp: the response queue descriptor holding the CPL message 3805 * @si: the gather list of packet fragments 3806 * 3807 * Process a ETHOFLD Tx completion. Increment the cidx here, but 3808 * free up the descriptors in a tasklet later. 
3809 */ 3810 int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp, 3811 const struct pkt_gl *si) 3812 { 3813 u8 opcode = ((const struct rss_header *)rsp)->opcode; 3814 3815 /* skip RSS header */ 3816 rsp++; 3817 3818 if (opcode == CPL_FW4_ACK) { 3819 const struct cpl_fw4_ack *cpl; 3820 struct sge_eosw_txq *eosw_txq; 3821 struct eotid_entry *entry; 3822 struct sk_buff *skb; 3823 u32 hdr_len, eotid; 3824 u8 flits, wrlen16; 3825 int credits; 3826 3827 cpl = (const struct cpl_fw4_ack *)rsp; 3828 eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) - 3829 q->adap->tids.eotid_base; 3830 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid); 3831 if (!entry) 3832 goto out_done; 3833 3834 eosw_txq = (struct sge_eosw_txq *)entry->data; 3835 if (!eosw_txq) 3836 goto out_done; 3837 3838 spin_lock(&eosw_txq->lock); 3839 credits = cpl->credits; 3840 while (credits > 0) { 3841 skb = eosw_txq->desc[eosw_txq->cidx].skb; 3842 if (!skb) 3843 break; 3844 3845 if (unlikely((eosw_txq->state == 3846 CXGB4_EO_STATE_FLOWC_OPEN_REPLY || 3847 eosw_txq->state == 3848 CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) && 3849 eosw_txq->cidx == eosw_txq->flowc_idx)) { 3850 flits = DIV_ROUND_UP(skb->len, 8); 3851 if (eosw_txq->state == 3852 CXGB4_EO_STATE_FLOWC_OPEN_REPLY) 3853 eosw_txq->state = CXGB4_EO_STATE_ACTIVE; 3854 else 3855 eosw_txq->state = CXGB4_EO_STATE_CLOSED; 3856 complete(&eosw_txq->completion); 3857 } else { 3858 hdr_len = eth_get_headlen(eosw_txq->netdev, 3859 skb->data, 3860 skb_headlen(skb)); 3861 flits = ethofld_calc_tx_flits(q->adap, skb, 3862 hdr_len); 3863 } 3864 eosw_txq_advance_index(&eosw_txq->cidx, 1, 3865 eosw_txq->ndesc); 3866 wrlen16 = DIV_ROUND_UP(flits * 8, 16); 3867 credits -= wrlen16; 3868 } 3869 3870 eosw_txq->cred += cpl->credits; 3871 eosw_txq->ncompl--; 3872 3873 spin_unlock(&eosw_txq->lock); 3874 3875 /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx, 3876 * if there were packets waiting for completion. 3877 */ 3878 tasklet_schedule(&eosw_txq->qresume_tsk); 3879 } 3880 3881 out_done: 3882 return 0; 3883 } 3884 3885 /* 3886 * The MSI-X interrupt handler for an SGE response queue. 3887 */ 3888 irqreturn_t t4_sge_intr_msix(int irq, void *cookie) 3889 { 3890 struct sge_rspq *q = cookie; 3891 3892 napi_schedule(&q->napi); 3893 return IRQ_HANDLED; 3894 } 3895 3896 /* 3897 * Process the indirect interrupt entries in the interrupt queue and kick off 3898 * NAPI for each queue that has generated an entry. 3899 */ 3900 static unsigned int process_intrq(struct adapter *adap) 3901 { 3902 unsigned int credits; 3903 const struct rsp_ctrl *rc; 3904 struct sge_rspq *q = &adap->sge.intrq; 3905 u32 val; 3906 3907 spin_lock(&adap->sge.intrq_lock); 3908 for (credits = 0; ; credits++) { 3909 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 3910 if (!is_new_response(rc, q)) 3911 break; 3912 3913 dma_rmb(); 3914 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) { 3915 unsigned int qid = ntohl(rc->pldbuflen_qid); 3916 3917 qid -= adap->sge.ingr_start; 3918 napi_schedule(&adap->sge.ingr_map[qid]->napi); 3919 } 3920 3921 rspq_next(q); 3922 } 3923 3924 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); 3925 3926 /* If we don't have access to the new User GTS (T5+), use the old 3927 * doorbell mechanism; otherwise use the new BAR2 mechanism. 
3928 */ 3929 if (unlikely(q->bar2_addr == NULL)) { 3930 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), 3931 val | INGRESSQID_V(q->cntxt_id)); 3932 } else { 3933 writel(val | INGRESSQID_V(q->bar2_qid), 3934 q->bar2_addr + SGE_UDB_GTS); 3935 wmb(); 3936 } 3937 spin_unlock(&adap->sge.intrq_lock); 3938 return credits; 3939 } 3940 3941 /* 3942 * The MSI interrupt handler, which handles data events from SGE response queues 3943 * as well as error and other async events as they all use the same MSI vector. 3944 */ 3945 static irqreturn_t t4_intr_msi(int irq, void *cookie) 3946 { 3947 struct adapter *adap = cookie; 3948 3949 if (adap->flags & CXGB4_MASTER_PF) 3950 t4_slow_intr_handler(adap); 3951 process_intrq(adap); 3952 return IRQ_HANDLED; 3953 } 3954 3955 /* 3956 * Interrupt handler for legacy INTx interrupts. 3957 * Handles data events from SGE response queues as well as error and other 3958 * async events as they all use the same interrupt line. 3959 */ 3960 static irqreturn_t t4_intr_intx(int irq, void *cookie) 3961 { 3962 struct adapter *adap = cookie; 3963 3964 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); 3965 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) | 3966 process_intrq(adap)) 3967 return IRQ_HANDLED; 3968 return IRQ_NONE; /* probably shared interrupt */ 3969 } 3970 3971 /** 3972 * t4_intr_handler - select the top-level interrupt handler 3973 * @adap: the adapter 3974 * 3975 * Selects the top-level interrupt handler based on the type of interrupts 3976 * (MSI-X, MSI, or INTx). 3977 */ 3978 irq_handler_t t4_intr_handler(struct adapter *adap) 3979 { 3980 if (adap->flags & CXGB4_USING_MSIX) 3981 return t4_sge_intr_msix; 3982 if (adap->flags & CXGB4_USING_MSI) 3983 return t4_intr_msi; 3984 return t4_intr_intx; 3985 } 3986 3987 static void sge_rx_timer_cb(struct timer_list *t) 3988 { 3989 unsigned long m; 3990 unsigned int i; 3991 struct adapter *adap = from_timer(adap, t, sge.rx_timer); 3992 struct sge *s = &adap->sge; 3993 3994 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) 3995 for (m = s->starving_fl[i]; m; m &= m - 1) { 3996 struct sge_eth_rxq *rxq; 3997 unsigned int id = __ffs(m) + i * BITS_PER_LONG; 3998 struct sge_fl *fl = s->egr_map[id]; 3999 4000 clear_bit(id, s->starving_fl); 4001 smp_mb__after_atomic(); 4002 4003 if (fl_starving(adap, fl)) { 4004 rxq = container_of(fl, struct sge_eth_rxq, fl); 4005 if (napi_reschedule(&rxq->rspq.napi)) 4006 fl->starving++; 4007 else 4008 set_bit(id, s->starving_fl); 4009 } 4010 } 4011 /* The remainder of the SGE RX Timer Callback routine is dedicated to 4012 * global Master PF activities like checking for chip ingress stalls, 4013 * etc. 
4014 */ 4015 if (!(adap->flags & CXGB4_MASTER_PF)) 4016 goto done; 4017 4018 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); 4019 4020 done: 4021 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); 4022 } 4023 4024 static void sge_tx_timer_cb(struct timer_list *t) 4025 { 4026 struct adapter *adap = from_timer(adap, t, sge.tx_timer); 4027 struct sge *s = &adap->sge; 4028 unsigned long m, period; 4029 unsigned int i, budget; 4030 4031 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) 4032 for (m = s->txq_maperr[i]; m; m &= m - 1) { 4033 unsigned long id = __ffs(m) + i * BITS_PER_LONG; 4034 struct sge_uld_txq *txq = s->egr_map[id]; 4035 4036 clear_bit(id, s->txq_maperr); 4037 tasklet_schedule(&txq->qresume_tsk); 4038 } 4039 4040 if (!is_t4(adap->params.chip)) { 4041 struct sge_eth_txq *q = &s->ptptxq; 4042 int avail; 4043 4044 spin_lock(&adap->ptp_lock); 4045 avail = reclaimable(&q->q); 4046 4047 if (avail) { 4048 free_tx_desc(adap, &q->q, avail, false); 4049 q->q.in_use -= avail; 4050 } 4051 spin_unlock(&adap->ptp_lock); 4052 } 4053 4054 budget = MAX_TIMER_TX_RECLAIM; 4055 i = s->ethtxq_rover; 4056 do { 4057 budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i], 4058 budget); 4059 if (!budget) 4060 break; 4061 4062 if (++i >= s->ethqsets) 4063 i = 0; 4064 } while (i != s->ethtxq_rover); 4065 s->ethtxq_rover = i; 4066 4067 if (budget == 0) { 4068 /* If we found too many reclaimable packets schedule a timer 4069 * in the near future to continue where we left off. 4070 */ 4071 period = 2; 4072 } else { 4073 /* We reclaimed all reclaimable TX Descriptors, so reschedule 4074 * at the normal period. 4075 */ 4076 period = TX_QCHECK_PERIOD; 4077 } 4078 4079 mod_timer(&s->tx_timer, jiffies + period); 4080 } 4081 4082 /** 4083 * bar2_address - return the BAR2 address for an SGE Queue's Registers 4084 * @adapter: the adapter 4085 * @qid: the SGE Queue ID 4086 * @qtype: the SGE Queue Type (Egress or Ingress) 4087 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues 4088 * 4089 * Returns the BAR2 address for the SGE Queue Registers associated with 4090 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also 4091 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE 4092 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" 4093 * Registers are supported (e.g. the Write Combining Doorbell Buffer). 4094 */ 4095 static void __iomem *bar2_address(struct adapter *adapter, 4096 unsigned int qid, 4097 enum t4_bar2_qtype qtype, 4098 unsigned int *pbar2_qid) 4099 { 4100 u64 bar2_qoffset; 4101 int ret; 4102 4103 ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0, 4104 &bar2_qoffset, pbar2_qid); 4105 if (ret) 4106 return NULL; 4107 4108 return adapter->bar2 + bar2_qoffset; 4109 } 4110 4111 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0 4112 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map 4113 */ 4114 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, 4115 struct net_device *dev, int intr_idx, 4116 struct sge_fl *fl, rspq_handler_t hnd, 4117 rspq_flush_handler_t flush_hnd, int cong) 4118 { 4119 int ret, flsz = 0; 4120 struct fw_iq_cmd c; 4121 struct sge *s = &adap->sge; 4122 struct port_info *pi = netdev_priv(dev); 4123 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING); 4124 4125 /* Size needs to be multiple of 16, including status entry. 
*/ 4126 iq->size = roundup(iq->size, 16); 4127 4128 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, 4129 &iq->phys_addr, NULL, 0, 4130 dev_to_node(adap->pdev_dev)); 4131 if (!iq->desc) 4132 return -ENOMEM; 4133 4134 memset(&c, 0, sizeof(c)); 4135 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | 4136 FW_CMD_WRITE_F | FW_CMD_EXEC_F | 4137 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); 4138 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | 4139 FW_LEN16(c)); 4140 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | 4141 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | 4142 FW_IQ_CMD_IQANDST_V(intr_idx < 0) | 4143 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) | 4144 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx : 4145 -intr_idx - 1)); 4146 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | 4147 FW_IQ_CMD_IQGTSMODE_F | 4148 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | 4149 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); 4150 c.iqsize = htons(iq->size); 4151 c.iqaddr = cpu_to_be64(iq->phys_addr); 4152 if (cong >= 0) 4153 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F | 4154 FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC 4155 : FW_IQ_IQTYPE_OFLD)); 4156 4157 if (fl) { 4158 unsigned int chip_ver = 4159 CHELSIO_CHIP_VERSION(adap->params.chip); 4160 4161 /* Allocate the ring for the hardware free list (with space 4162 * for its status page) along with the associated software 4163 * descriptor ring. The free list size needs to be a multiple 4164 * of the Egress Queue Unit and at least 2 Egress Units larger 4165 * than the SGE's Egress Congestion Threshold 4166 * (fl_starve_thres - 1). 4167 */ 4168 if (fl->size < s->fl_starve_thres - 1 + 2 * 8) 4169 fl->size = s->fl_starve_thres - 1 + 2 * 8; 4170 fl->size = roundup(fl->size, 8); 4171 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), 4172 sizeof(struct rx_sw_desc), &fl->addr, 4173 &fl->sdesc, s->stat_len, 4174 dev_to_node(adap->pdev_dev)); 4175 if (!fl->desc) 4176 goto fl_nomem; 4177 4178 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); 4179 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | 4180 FW_IQ_CMD_FL0FETCHRO_V(relaxed) | 4181 FW_IQ_CMD_FL0DATARO_V(relaxed) | 4182 FW_IQ_CMD_FL0PADEN_F); 4183 if (cong >= 0) 4184 c.iqns_to_fl0congen |= 4185 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) | 4186 FW_IQ_CMD_FL0CONGCIF_F | 4187 FW_IQ_CMD_FL0CONGEN_F); 4188 /* In T6, for egress queue type FL there is internal overhead 4189 * of 16B for header going into FLM module. Hence the maximum 4190 * allowed burst size is 448 bytes. For T4/T5, the hardware 4191 * doesn't coalesce fetch requests if more than 64 bytes of 4192 * Free List pointers are provided, so we use a 128-byte Fetch 4193 * Burst Minimum there (T6 implements coalescing so we can use 4194 * the smaller 64-byte value there). 4195 */ 4196 c.fl0dcaen_to_fl0cidxfthresh = 4197 htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ? 4198 FETCHBURSTMIN_128B_X : 4199 FETCHBURSTMIN_64B_T6_X) | 4200 FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
4201 FETCHBURSTMAX_512B_X : 4202 FETCHBURSTMAX_256B_X)); 4203 c.fl0size = htons(flsz); 4204 c.fl0addr = cpu_to_be64(fl->addr); 4205 } 4206 4207 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 4208 if (ret) 4209 goto err; 4210 4211 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); 4212 iq->cur_desc = iq->desc; 4213 iq->cidx = 0; 4214 iq->gen = 1; 4215 iq->next_intr_params = iq->intr_params; 4216 iq->cntxt_id = ntohs(c.iqid); 4217 iq->abs_id = ntohs(c.physiqid); 4218 iq->bar2_addr = bar2_address(adap, 4219 iq->cntxt_id, 4220 T4_BAR2_QTYPE_INGRESS, 4221 &iq->bar2_qid); 4222 iq->size--; /* subtract status entry */ 4223 iq->netdev = dev; 4224 iq->handler = hnd; 4225 iq->flush_handler = flush_hnd; 4226 4227 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr)); 4228 skb_queue_head_init(&iq->lro_mgr.lroq); 4229 4230 /* set offset to -1 to distinguish ingress queues without FL */ 4231 iq->offset = fl ? 0 : -1; 4232 4233 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; 4234 4235 if (fl) { 4236 fl->cntxt_id = ntohs(c.fl0id); 4237 fl->avail = fl->pend_cred = 0; 4238 fl->pidx = fl->cidx = 0; 4239 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; 4240 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; 4241 4242 /* Note, we must initialize the BAR2 Free List User Doorbell 4243 * information before refilling the Free List! 4244 */ 4245 fl->bar2_addr = bar2_address(adap, 4246 fl->cntxt_id, 4247 T4_BAR2_QTYPE_EGRESS, 4248 &fl->bar2_qid); 4249 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); 4250 } 4251 4252 /* For T5 and later we attempt to set up the Congestion Manager values 4253 * of the new RX Ethernet Queue. This should really be handled by 4254 * firmware because it's more complex than any host driver wants to 4255 * get involved with and it's different per chip and this is almost 4256 * certainly wrong. Firmware would be wrong as well, but it would be 4257 * a lot easier to fix in one place ... For now we do something very 4258 * simple (and hopefully less wrong). 
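 *
 * As a worked example of the channel-map encoding below (hypothetical
 * values): with cong = 0x5 (channels 0 and 2 congested) and
 * cng_ch_bits_log = 2, the loop computes ch_map = (1 << 0) | (1 << 8) =
 * 0x101, i.e. channel i is mapped to bit (i << cng_ch_bits_log) of the
 * Congestion Channel Map.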
4259 */ 4260 if (!is_t4(adap->params.chip) && cong >= 0) { 4261 u32 param, val, ch_map = 0; 4262 int i; 4263 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; 4264 4265 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | 4266 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 4267 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id)); 4268 if (cong == 0) { 4269 val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X); 4270 } else { 4271 val = 4272 CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X); 4273 for (i = 0; i < 4; i++) { 4274 if (cong & (1 << i)) 4275 ch_map |= 1 << (i << cng_ch_bits_log); 4276 } 4277 val |= CONMCTXT_CNGCHMAP_V(ch_map); 4278 } 4279 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, 4280 ¶m, &val); 4281 if (ret) 4282 dev_warn(adap->pdev_dev, "Failed to set Congestion" 4283 " Manager Context for Ingress Queue %d: %d\n", 4284 iq->cntxt_id, -ret); 4285 } 4286 4287 return 0; 4288 4289 fl_nomem: 4290 ret = -ENOMEM; 4291 err: 4292 if (iq->desc) { 4293 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, 4294 iq->desc, iq->phys_addr); 4295 iq->desc = NULL; 4296 } 4297 if (fl && fl->desc) { 4298 kfree(fl->sdesc); 4299 fl->sdesc = NULL; 4300 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), 4301 fl->desc, fl->addr); 4302 fl->desc = NULL; 4303 } 4304 return ret; 4305 } 4306 4307 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) 4308 { 4309 q->cntxt_id = id; 4310 q->bar2_addr = bar2_address(adap, 4311 q->cntxt_id, 4312 T4_BAR2_QTYPE_EGRESS, 4313 &q->bar2_qid); 4314 q->in_use = 0; 4315 q->cidx = q->pidx = 0; 4316 q->stops = q->restarts = 0; 4317 q->stat = (void *)&q->desc[q->size]; 4318 spin_lock_init(&q->db_lock); 4319 adap->sge.egr_map[id - adap->sge.egr_start] = q; 4320 } 4321 4322 /** 4323 * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue 4324 * @adap: the adapter 4325 * @txq: the SGE Ethernet TX Queue to initialize 4326 * @dev: the Linux Network Device 4327 * @netdevq: the corresponding Linux TX Queue 4328 * @iqid: the Ingress Queue to which to deliver CIDX Update messages 4329 * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers 4330 */ 4331 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, 4332 struct net_device *dev, struct netdev_queue *netdevq, 4333 unsigned int iqid, u8 dbqt) 4334 { 4335 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); 4336 struct port_info *pi = netdev_priv(dev); 4337 struct sge *s = &adap->sge; 4338 struct fw_eq_eth_cmd c; 4339 int ret, nentries; 4340 4341 /* Add status entries */ 4342 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); 4343 4344 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, 4345 sizeof(struct tx_desc), sizeof(struct tx_sw_desc), 4346 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, 4347 netdev_queue_numa_node_read(netdevq)); 4348 if (!txq->q.desc) 4349 return -ENOMEM; 4350 4351 memset(&c, 0, sizeof(c)); 4352 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | 4353 FW_CMD_WRITE_F | FW_CMD_EXEC_F | 4354 FW_EQ_ETH_CMD_PFN_V(adap->pf) | 4355 FW_EQ_ETH_CMD_VFN_V(0)); 4356 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | 4357 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); 4358 4359 /* For TX Ethernet Queues using the SGE Doorbell Queue Timer 4360 * mechanism, we use Ingress Queue messages for Hardware Consumer 4361 * Index Updates on the TX Queue. Otherwise we have the Hardware 4362 * write the CIDX Updates into the Status Page at the end of the 4363 * TX Queue. 
4364 */ 4365 c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | 4366 FW_EQ_ETH_CMD_VIID_V(pi->viid)); 4367 4368 c.fetchszm_to_iqid = 4369 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | 4370 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | 4371 FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid)); 4372 4373 /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */ 4374 c.dcaen_to_eqsize = 4375 htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 4376 ? FETCHBURSTMIN_64B_X 4377 : FETCHBURSTMIN_64B_T6_X) | 4378 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 4379 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 4380 FW_EQ_ETH_CMD_EQSIZE_V(nentries)); 4381 4382 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 4383 4384 /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the 4385 * currently configured Timer Index. THis can be changed later via an 4386 * ethtool -C tx-usecs {Timer Val} command. Note that the SGE 4387 * Doorbell Queue mode is currently automatically enabled in the 4388 * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ... 4389 */ 4390 if (dbqt) 4391 c.timeren_timerix = 4392 cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F | 4393 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); 4394 4395 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 4396 if (ret) { 4397 kfree(txq->q.sdesc); 4398 txq->q.sdesc = NULL; 4399 dma_free_coherent(adap->pdev_dev, 4400 nentries * sizeof(struct tx_desc), 4401 txq->q.desc, txq->q.phys_addr); 4402 txq->q.desc = NULL; 4403 return ret; 4404 } 4405 4406 txq->q.q_type = CXGB4_TXQ_ETH; 4407 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); 4408 txq->txq = netdevq; 4409 txq->tso = 0; 4410 txq->uso = 0; 4411 txq->tx_cso = 0; 4412 txq->vlan_ins = 0; 4413 txq->mapping_err = 0; 4414 txq->dbqt = dbqt; 4415 4416 return 0; 4417 } 4418 4419 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, 4420 struct net_device *dev, unsigned int iqid, 4421 unsigned int cmplqid) 4422 { 4423 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); 4424 struct port_info *pi = netdev_priv(dev); 4425 struct sge *s = &adap->sge; 4426 struct fw_eq_ctrl_cmd c; 4427 int ret, nentries; 4428 4429 /* Add status entries */ 4430 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); 4431 4432 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, 4433 sizeof(struct tx_desc), 0, &txq->q.phys_addr, 4434 NULL, 0, dev_to_node(adap->pdev_dev)); 4435 if (!txq->q.desc) 4436 return -ENOMEM; 4437 4438 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | 4439 FW_CMD_WRITE_F | FW_CMD_EXEC_F | 4440 FW_EQ_CTRL_CMD_PFN_V(adap->pf) | 4441 FW_EQ_CTRL_CMD_VFN_V(0)); 4442 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | 4443 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); 4444 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); 4445 c.physeqid_pkd = htonl(0); 4446 c.fetchszm_to_iqid = 4447 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | 4448 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | 4449 FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid)); 4450 c.dcaen_to_eqsize = 4451 htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 4452 ? 
FETCHBURSTMIN_64B_X 4453 : FETCHBURSTMIN_64B_T6_X) | 4454 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 4455 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 4456 FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); 4457 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 4458 4459 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 4460 if (ret) { 4461 dma_free_coherent(adap->pdev_dev, 4462 nentries * sizeof(struct tx_desc), 4463 txq->q.desc, txq->q.phys_addr); 4464 txq->q.desc = NULL; 4465 return ret; 4466 } 4467 4468 txq->q.q_type = CXGB4_TXQ_CTRL; 4469 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); 4470 txq->adap = adap; 4471 skb_queue_head_init(&txq->sendq); 4472 tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); 4473 txq->full = 0; 4474 return 0; 4475 } 4476 4477 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid, 4478 unsigned int cmplqid) 4479 { 4480 u32 param, val; 4481 4482 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | 4483 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) | 4484 FW_PARAMS_PARAM_YZ_V(eqid)); 4485 val = cmplqid; 4486 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); 4487 } 4488 4489 static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q, 4490 struct net_device *dev, u32 cmd, u32 iqid) 4491 { 4492 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); 4493 struct port_info *pi = netdev_priv(dev); 4494 struct sge *s = &adap->sge; 4495 struct fw_eq_ofld_cmd c; 4496 u32 fb_min, nentries; 4497 int ret; 4498 4499 /* Add status entries */ 4500 nentries = q->size + s->stat_len / sizeof(struct tx_desc); 4501 q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc), 4502 sizeof(struct tx_sw_desc), &q->phys_addr, 4503 &q->sdesc, s->stat_len, NUMA_NO_NODE); 4504 if (!q->desc) 4505 return -ENOMEM; 4506 4507 if (chip_ver <= CHELSIO_T5) 4508 fb_min = FETCHBURSTMIN_64B_X; 4509 else 4510 fb_min = FETCHBURSTMIN_64B_T6_X; 4511 4512 memset(&c, 0, sizeof(c)); 4513 c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F | 4514 FW_CMD_WRITE_F | FW_CMD_EXEC_F | 4515 FW_EQ_OFLD_CMD_PFN_V(adap->pf) | 4516 FW_EQ_OFLD_CMD_VFN_V(0)); 4517 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | 4518 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); 4519 c.fetchszm_to_iqid = 4520 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | 4521 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | 4522 FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid)); 4523 c.dcaen_to_eqsize = 4524 htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) | 4525 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 4526 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 4527 FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); 4528 c.eqaddr = cpu_to_be64(q->phys_addr); 4529 4530 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 4531 if (ret) { 4532 kfree(q->sdesc); 4533 q->sdesc = NULL; 4534 dma_free_coherent(adap->pdev_dev, 4535 nentries * sizeof(struct tx_desc), 4536 q->desc, q->phys_addr); 4537 q->desc = NULL; 4538 return ret; 4539 } 4540 4541 init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); 4542 return 0; 4543 } 4544 4545 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, 4546 struct net_device *dev, unsigned int iqid, 4547 unsigned int uld_type) 4548 { 4549 u32 cmd = FW_EQ_OFLD_CMD; 4550 int ret; 4551 4552 if (unlikely(uld_type == CXGB4_TX_CRYPTO)) 4553 cmd = FW_EQ_CTRL_CMD; 4554 4555 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid); 4556 if (ret) 4557 return ret; 4558 4559 txq->q.q_type = CXGB4_TXQ_ULD; 4560 txq->adap =
adap; 4561 skb_queue_head_init(&txq->sendq); 4562 tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); 4563 txq->full = 0; 4564 txq->mapping_err = 0; 4565 return 0; 4566 } 4567 4568 int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq, 4569 struct net_device *dev, u32 iqid) 4570 { 4571 int ret; 4572 4573 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid); 4574 if (ret) 4575 return ret; 4576 4577 txq->q.q_type = CXGB4_TXQ_ULD; 4578 spin_lock_init(&txq->lock); 4579 txq->adap = adap; 4580 txq->tso = 0; 4581 txq->uso = 0; 4582 txq->tx_cso = 0; 4583 txq->vlan_ins = 0; 4584 txq->mapping_err = 0; 4585 return 0; 4586 } 4587 4588 void free_txq(struct adapter *adap, struct sge_txq *q) 4589 { 4590 struct sge *s = &adap->sge; 4591 4592 dma_free_coherent(adap->pdev_dev, 4593 q->size * sizeof(struct tx_desc) + s->stat_len, 4594 q->desc, q->phys_addr); 4595 q->cntxt_id = 0; 4596 q->sdesc = NULL; 4597 q->desc = NULL; 4598 } 4599 4600 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, 4601 struct sge_fl *fl) 4602 { 4603 struct sge *s = &adap->sge; 4604 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; 4605 4606 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; 4607 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 4608 rq->cntxt_id, fl_id, 0xffff); 4609 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, 4610 rq->desc, rq->phys_addr); 4611 netif_napi_del(&rq->napi); 4612 rq->netdev = NULL; 4613 rq->cntxt_id = rq->abs_id = 0; 4614 rq->desc = NULL; 4615 4616 if (fl) { 4617 free_rx_bufs(adap, fl, fl->avail); 4618 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, 4619 fl->desc, fl->addr); 4620 kfree(fl->sdesc); 4621 fl->sdesc = NULL; 4622 fl->cntxt_id = 0; 4623 fl->desc = NULL; 4624 } 4625 } 4626 4627 /** 4628 * t4_free_ofld_rxqs - free a block of consecutive Rx queues 4629 * @adap: the adapter 4630 * @n: number of queues 4631 * @q: pointer to first queue 4632 * 4633 * Release the resources of a consecutive block of offload Rx queues. 4634 */ 4635 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) 4636 { 4637 for ( ; n; n--, q++) 4638 if (q->rspq.desc) 4639 free_rspq_fl(adap, &q->rspq, 4640 q->fl.size ? &q->fl : NULL); 4641 } 4642 4643 void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq) 4644 { 4645 if (txq->q.desc) { 4646 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, 4647 txq->q.cntxt_id); 4648 free_tx_desc(adap, &txq->q, txq->q.in_use, false); 4649 kfree(txq->q.sdesc); 4650 free_txq(adap, &txq->q); 4651 } 4652 } 4653 4654 /** 4655 * t4_free_sge_resources - free SGE resources 4656 * @adap: the adapter 4657 * 4658 * Frees resources used by the SGE queue sets. 4659 */ 4660 void t4_free_sge_resources(struct adapter *adap) 4661 { 4662 int i; 4663 struct sge_eth_rxq *eq; 4664 struct sge_eth_txq *etq; 4665 4666 /* stop all Rx queues in order to start them draining */ 4667 for (i = 0; i < adap->sge.ethqsets; i++) { 4668 eq = &adap->sge.ethrxq[i]; 4669 if (eq->rspq.desc) 4670 t4_iq_stop(adap, adap->mbox, adap->pf, 0, 4671 FW_IQ_TYPE_FL_INT_CAP, 4672 eq->rspq.cntxt_id, 4673 eq->fl.size ? eq->fl.cntxt_id : 0xffff, 4674 0xffff); 4675 } 4676 4677 /* clean up Ethernet Tx/Rx queues */ 4678 for (i = 0; i < adap->sge.ethqsets; i++) { 4679 eq = &adap->sge.ethrxq[i]; 4680 if (eq->rspq.desc) 4681 free_rspq_fl(adap, &eq->rspq, 4682 eq->fl.size ? 
&eq->fl : NULL); 4683 if (eq->msix) { 4684 cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx); 4685 eq->msix = NULL; 4686 } 4687 4688 etq = &adap->sge.ethtxq[i]; 4689 if (etq->q.desc) { 4690 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 4691 etq->q.cntxt_id); 4692 __netif_tx_lock_bh(etq->txq); 4693 free_tx_desc(adap, &etq->q, etq->q.in_use, true); 4694 __netif_tx_unlock_bh(etq->txq); 4695 kfree(etq->q.sdesc); 4696 free_txq(adap, &etq->q); 4697 } 4698 } 4699 4700 /* clean up control Tx queues */ 4701 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { 4702 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; 4703 4704 if (cq->q.desc) { 4705 tasklet_kill(&cq->qresume_tsk); 4706 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, 4707 cq->q.cntxt_id); 4708 __skb_queue_purge(&cq->sendq); 4709 free_txq(adap, &cq->q); 4710 } 4711 } 4712 4713 if (adap->sge.fw_evtq.desc) { 4714 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); 4715 if (adap->sge.fwevtq_msix_idx >= 0) 4716 cxgb4_free_msix_idx_in_bmap(adap, 4717 adap->sge.fwevtq_msix_idx); 4718 } 4719 4720 if (adap->sge.nd_msix_idx >= 0) 4721 cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx); 4722 4723 if (adap->sge.intrq.desc) 4724 free_rspq_fl(adap, &adap->sge.intrq, NULL); 4725 4726 if (!is_t4(adap->params.chip)) { 4727 etq = &adap->sge.ptptxq; 4728 if (etq->q.desc) { 4729 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 4730 etq->q.cntxt_id); 4731 spin_lock_bh(&adap->ptp_lock); 4732 free_tx_desc(adap, &etq->q, etq->q.in_use, true); 4733 spin_unlock_bh(&adap->ptp_lock); 4734 kfree(etq->q.sdesc); 4735 free_txq(adap, &etq->q); 4736 } 4737 } 4738 4739 /* clear the reverse egress queue map */ 4740 memset(adap->sge.egr_map, 0, 4741 adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); 4742 } 4743 4744 void t4_sge_start(struct adapter *adap) 4745 { 4746 adap->sge.ethtxq_rover = 0; 4747 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); 4748 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); 4749 } 4750 4751 /** 4752 * t4_sge_stop - disable SGE operation 4753 * @adap: the adapter 4754 * 4755 * Stop tasklets and timers associated with the DMA engine. Note that 4756 * this is effective only if measures have been taken to disable any HW 4757 * events that may restart them. 
4758 */ 4759 void t4_sge_stop(struct adapter *adap) 4760 { 4761 int i; 4762 struct sge *s = &adap->sge; 4763 4764 if (in_interrupt()) /* actions below require waiting */ 4765 return; 4766 4767 if (s->rx_timer.function) 4768 del_timer_sync(&s->rx_timer); 4769 if (s->tx_timer.function) 4770 del_timer_sync(&s->tx_timer); 4771 4772 if (is_offload(adap)) { 4773 struct sge_uld_txq_info *txq_info; 4774 4775 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; 4776 if (txq_info) { 4777 struct sge_uld_txq *txq = txq_info->uldtxq; 4778 4779 for_each_ofldtxq(&adap->sge, i) { 4780 if (txq->q.desc) 4781 tasklet_kill(&txq->qresume_tsk); 4782 } 4783 } 4784 } 4785 4786 if (is_pci_uld(adap)) { 4787 struct sge_uld_txq_info *txq_info; 4788 4789 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; 4790 if (txq_info) { 4791 struct sge_uld_txq *txq = txq_info->uldtxq; 4792 4793 for_each_ofldtxq(&adap->sge, i) { 4794 if (txq->q.desc) 4795 tasklet_kill(&txq->qresume_tsk); 4796 } 4797 } 4798 } 4799 4800 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { 4801 struct sge_ctrl_txq *cq = &s->ctrlq[i]; 4802 4803 if (cq->q.desc) 4804 tasklet_kill(&cq->qresume_tsk); 4805 } 4806 } 4807 4808 /** 4809 * t4_sge_init_soft - grab core SGE values needed by SGE code 4810 * @adap: the adapter 4811 * 4812 * We need to grab the SGE operating parameters that we need to have 4813 * in order to do our job and make sure we can live with them. 4814 */ 4815 4816 static int t4_sge_init_soft(struct adapter *adap) 4817 { 4818 struct sge *s = &adap->sge; 4819 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu; 4820 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; 4821 u32 ingress_rx_threshold; 4822 4823 /* 4824 * Verify that CPL messages are going to the Ingress Queue for 4825 * process_responses() and that only packet data is going to the 4826 * Free Lists. 4827 */ 4828 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) != 4829 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) { 4830 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); 4831 return -EINVAL; 4832 } 4833 4834 /* 4835 * Validate the Host Buffer Register Array indices that we want to 4836 * use ... 4837 * 4838 * XXX Note that we should really read through the Host Buffer Size 4839 * XXX register array and find the indices of the Buffer Sizes which 4840 * XXX meet our needs! 4841 */ 4842 #define READ_FL_BUF(x) \ 4843 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32)) 4844 4845 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); 4846 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); 4847 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); 4848 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); 4849 4850 /* We only bother using the Large Page logic if the Large Page Buffer 4851 * is larger than our Page Size Buffer. 4852 */ 4853 if (fl_large_pg <= fl_small_pg) 4854 fl_large_pg = 0; 4855 4856 #undef READ_FL_BUF 4857 4858 /* The Page Size Buffer must be exactly equal to our Page Size and the 4859 * Large Page Size Buffer should be 0 (per above) or a power of 2. 
4860 */ 4861 if (fl_small_pg != PAGE_SIZE || 4862 (fl_large_pg & (fl_large_pg-1)) != 0) { 4863 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", 4864 fl_small_pg, fl_large_pg); 4865 return -EINVAL; 4866 } 4867 if (fl_large_pg) 4868 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; 4869 4870 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) || 4871 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { 4872 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", 4873 fl_small_mtu, fl_large_mtu); 4874 return -EINVAL; 4875 } 4876 4877 /* 4878 * Retrieve our RX interrupt holdoff timer values and counter 4879 * threshold values from the SGE parameters. 4880 */ 4881 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A); 4882 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A); 4883 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A); 4884 s->timer_val[0] = core_ticks_to_us(adap, 4885 TIMERVALUE0_G(timer_value_0_and_1)); 4886 s->timer_val[1] = core_ticks_to_us(adap, 4887 TIMERVALUE1_G(timer_value_0_and_1)); 4888 s->timer_val[2] = core_ticks_to_us(adap, 4889 TIMERVALUE2_G(timer_value_2_and_3)); 4890 s->timer_val[3] = core_ticks_to_us(adap, 4891 TIMERVALUE3_G(timer_value_2_and_3)); 4892 s->timer_val[4] = core_ticks_to_us(adap, 4893 TIMERVALUE4_G(timer_value_4_and_5)); 4894 s->timer_val[5] = core_ticks_to_us(adap, 4895 TIMERVALUE5_G(timer_value_4_and_5)); 4896 4897 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A); 4898 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); 4899 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); 4900 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); 4901 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); 4902 4903 return 0; 4904 } 4905 4906 /** 4907 * t4_sge_init - initialize SGE 4908 * @adap: the adapter 4909 * 4910 * Perform low-level SGE code initialization needed every time after a 4911 * chip reset. 4912 */ 4913 int t4_sge_init(struct adapter *adap) 4914 { 4915 struct sge *s = &adap->sge; 4916 u32 sge_control, sge_conm_ctrl; 4917 int ret, egress_threshold; 4918 4919 /* 4920 * Ingress Padding Boundary and Egress Status Page Size are set up by 4921 * t4_fixup_host_params(). 4922 */ 4923 sge_control = t4_read_reg(adap, SGE_CONTROL_A); 4924 s->pktshift = PKTSHIFT_G(sge_control); 4925 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; 4926 4927 s->fl_align = t4_fl_pkt_align(adap); 4928 ret = t4_sge_init_soft(adap); 4929 if (ret < 0) 4930 return ret; 4931 4932 /* 4933 * A FL with <= fl_starve_thres buffers is starving and a periodic 4934 * timer will attempt to refill it. This needs to be larger than the 4935 * SGE's Egress Congestion Threshold. If it isn't, then we can get 4936 * stuck waiting for new packets while the SGE is waiting for us to 4937 * give it more Free List entries. (Note that the SGE's Egress 4938 * Congestion Threshold is in units of 2 Free List pointers.) For T4, 4939 * there was only a single field to control this. For T5 there's the 4940 * original field which now only applies to Unpacked Mode Free List 4941 * buffers and a new field which only applies to Packed Mode Free List 4942 * buffers. 
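 *
 * As a worked example (hypothetical value): if the relevant threshold
 * field below reads 64 (i.e. 128 Free List pointers), fl_starve_thres is
 * set to 2 * 64 + 1 = 129, so the timer-driven refill kicks in while the
 * SGE still has a little headroom.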
4943 */ 4944 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A); 4945 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { 4946 case CHELSIO_T4: 4947 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl); 4948 break; 4949 case CHELSIO_T5: 4950 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl); 4951 break; 4952 case CHELSIO_T6: 4953 egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl); 4954 break; 4955 default: 4956 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n", 4957 CHELSIO_CHIP_VERSION(adap->params.chip)); 4958 return -EINVAL; 4959 } 4960 s->fl_starve_thres = 2*egress_threshold + 1; 4961 4962 t4_idma_monitor_init(adap, &s->idma_monitor); 4963 4964 /* Set up timers used for recuring callbacks to process RX and TX 4965 * administrative tasks. 4966 */ 4967 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); 4968 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); 4969 4970 spin_lock_init(&s->intrq_lock); 4971 4972 return 0; 4973 } 4974
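
/*
 * A minimal usage sketch (not part of the driver; the call order is an
 * assumption based on the functions above): low-level SGE init is done once
 * per chip reset, the service timers are armed afterwards, and everything is
 * torn down in roughly the reverse order on shutdown.
 *
 *	if (t4_sge_init(adap))			// read SGE params, set up rx/tx timers
 *		goto err;
 *	...					// allocate rx/tx queues
 *	t4_sge_start(adap);			// arm the periodic rx/tx timers
 *	...
 *	t4_sge_stop(adap);			// stop timers, kill resume tasklets
 *	t4_free_sge_resources(adap);		// free queues, rings and buffers
 */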