/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"
#include "cxgb4_tc_mqprio.h"
#include "sched.h"

/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some
 * room.
 */
#define RX_PKT_SKB_LEN   512

/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive, we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.  It should
 * also match the CIDX Flush Threshold.
 */
#define MAX_TX_RECLAIM 32

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U
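
/*
 * For a concrete sense of scale (illustrative, assuming typical page sizes):
 * with 4 KB pages (PAGE_SHIFT == 12) FL_PG_ORDER above works out to 4, so
 * "large" Free List buffers are PAGE_SIZE << 4 == 64 KB, while systems with
 * 64 KB or larger pages (PAGE_SHIFT >= 16) simply use order-0 pages for
 * everything.
 */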
/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

struct rx_sw_desc {                /* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)

/*
 * Bits 0..4 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * the low bits to specify the buffer size as an index into the SGE Free List
 * Buffer Size register array.  We also use bit 4, when the buffer has been
 * unmapped for DMA, but this is of course never sent to the hardware and is
 * only used to prevent double unmappings.  All of the above requires that
 * the Free List Buffers which we allocate have the bottom 5 bits free (0) --
 * i.e. are 32-byte aligned or a power of 2 greater in alignment.  Since the
 * SGE's minimal Free List Buffer alignment is 32 bytes, this works out for
 * us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* buffer large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};

static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK  1

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}

/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write new
 *	packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 *	fl_cap - return the capacity of a free-buffer list
 *	@fl: the FL
 *
 *	Returns the capacity of a free-buffer list.  The capacity is less than
 *	the size because one descriptor needs to be left unpopulated, otherwise
 *	HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}

/**
 *	fl_starving - return whether a Free List is starving.
 *	@adapter: pointer to the adapter
 *	@fl: the Free List
 *
 *	Tests specified Free List to see whether the number of buffers
 *	available to the hardware has fallen below our "starvation"
 *	threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
		  dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL(cxgb4_map_skb);

static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
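
/*
 * A note on the address array convention used throughout this file (this is
 * simply a summary of what cxgb4_map_skb() above produces): addr[0] holds
 * the DMA address of the skb's linear data and addr[1..nr_frags] hold the
 * addresses of the page fragments, so an skb with, say, two fragments
 * occupies three slots.  The SGL writers below (cxgb4_write_sgl() and
 * friends) rely on exactly this layout.
 */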
#ifdef CONFIG_NEED_DMA_MAP_STATE
/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
		  unsigned int n, bool unmap)
{
	unsigned int cidx = q->cidx;
	struct tx_sw_desc *d;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap && d->addr[0]) {
				unmap_skb(adap->pdev_dev, d->skb, d->addr);
				memset(d->addr, 0, sizeof(d->addr));
			}
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed TX Descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@maxreclaim: the maximum number of TX Descriptors to reclaim or -1
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx Descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  If @maxreclaim == -1,
 *	then we'll use a default maximum.  Called with the TX Queue locked.
 */
static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				       int maxreclaim, bool unmap)
{
	int reclaim = reclaimable(q);

	if (reclaim) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (maxreclaim < 0)
			maxreclaim = MAX_TX_RECLAIM;
		if (reclaim > maxreclaim)
			reclaim = maxreclaim;

		free_tx_desc(adap, q, reclaim, unmap);
		q->in_use -= reclaim;
	}

	return reclaim;
}
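
/*
 * A quick illustration of the arithmetic in reclaimable() above: the hardware
 * publishes its consumer index in the status page at the end of the ring
 * (q->stat), so with e.g. q->size == 1024, q->cidx == 1000 and a hardware
 * cidx of 8, the raw difference is -992 and wraps to 32 reclaimable
 * descriptors.
 */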
/**
 *	cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue locked.
 */
void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				bool unmap)
{
	(void)reclaim_completed_tx(adap, q, -1, unmap);
}
EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);

static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG();
	}

	return buf_size;
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE free-buffer Rx queue.  The
 *	buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(adap, d),
				       PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}

/**
 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list
 *
 *	Unmap the current buffer on an SGE free-buffer Rx queue.  The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= PIDX_V(q->pend_cred / 8);
		else
			val |= PIDX_T5_V(q->pend_cred / 8);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(q->bar2_addr == NULL)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}
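
/*
 * The arithmetic in ring_fl_db() above, by way of example: pend_cred counts
 * individual buffers but the PIDX doorbell is given in units of 8 buffers
 * (one Egress Queue Unit), so with 19 pending credits we ring the doorbell
 * for 2 units and carry the remaining 3 credits (pend_cred &= 7) forward to
 * the next refill.
 */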
static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}

/**
 *	refill_fl - refill an SGE Rx buffer ring
 *	@adap: the adapter
 *	@q: the ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low mark it as starving in the bitmap of starving FLs.
 *
 *	Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct sge *s = &adap->sge;
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	int node;

#ifdef CONFIG_DEBUG_FS
	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
		goto out;
#endif

	gfp |= __GFP_NOWARN;
	node = dev_to_node(adap->pdev_dev);

	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << s->fl_pg_order,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, s->fl_pg_order);
			q->mapping_err++;
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_PG_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		pg = alloc_pages_node(node, gfp, 0);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			q->mapping_err++;
			goto out;
		}
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		smp_wmb();
		q->low++;
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}
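
/*
 * __refill_fl() keeps atomic-context refills cheap.  For example, with a
 * Free List of 1024 descriptors fl_cap() is 1016 buffers; if 1010 are
 * already posted, only min(MAX_RX_REFILL, 6) == 6 new buffers are allocated
 * in one call, and larger deficits are topped up at most 16 buffers at a
 * time.
 */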
/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *	@node: preferred node for memory allocations
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the bus address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	return p;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries
	 * (3 flits for every pair of the remaining N-1); and finally the
	 * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is odd ...
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns whether an Ethernet packet is small enough to fit as
 *	immediate data.  Return value corresponds to headroom required.
 */
static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
	int hdrlen = 0;

	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
	    chip_ver > CHELSIO_T5) {
		hdrlen = sizeof(struct cpl_tx_tnl_lso);
		hdrlen += sizeof(struct cpl_tx_pkt_core);
	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		return 0;
	} else {
		hdrlen = skb_shinfo(skb)->gso_size ?
			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
		hdrlen += sizeof(struct cpl_tx_pkt);
	}
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
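
/*
 * Worked example for sgl_len() above: an SGL holding 5 entries (say the
 * linear data plus 4 page fragments) needs 2 flits for the ULPTX header,
 * Length0 and Address0, plus 3 flits for each of the 2 remaining
 * {Length, Length, Address, Address} pairs, i.e. sgl_len(5) == 8 flits
 * (64 bytes).
 */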
/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb, chip_ver);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */

	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size) {
		if (skb->encapsulation && chip_ver > CHELSIO_T5) {
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_tnl_lso);
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			u32 pkt_hdrlen;

			pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
						     skb_headlen(skb));
			hdrlen = sizeof(struct fw_eth_tx_eo_wr) +
				 round_up(pkt_hdrlen, 16);
		} else {
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_pkt_lso_core);
		}

		hdrlen += sizeof(struct cpl_tx_pkt_core);
		flits += (hdrlen / sizeof(__be64));
	} else {
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	}
	return flits;
}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	return flits_to_desc(calc_tx_flits(skb, chip_ver));
}
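
/*
 * Putting the sizing helpers together (figures are illustrative and assume
 * the usual 16-byte fw_eth_tx_pkt_wr and cpl_tx_pkt_core layouts): a non-GSO
 * packet with linear data plus 3 page fragments needs sgl_len(4) == 7 flits
 * for the SGL and 4 flits for the WR + CPL headers, 11 flits in total, which
 * flits_to_desc() rounds up to 2 Tx descriptors of 8 flits (64 bytes) each.
 */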
/**
 *	cxgb4_write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@q: the Tx queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of bus addresses for the SGL elements
 *
 *	Generates a gather list for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be
 *	written.  The SGL includes all of the packet's page fragments and the
 *	data in its main body except for the first @start bytes.  @sgl must be
 *	16-byte aligned and within a Tx descriptor with available space.
 *	@end points right after the end of the SGL but does not account for
 *	any potential wrap around, i.e., @end > @sgl.
 */
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		     const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*end = 0;
}
EXPORT_SYMBOL(cxgb4_write_sgl);

/* cxgb4_write_partial_sgl - populate SGL for partial packet
 * @skb: the packet
 * @q: the Tx queue we are writing into
 * @sgl: starting location for writing the SGL
 * @end: points right after the end of the SGL
 * @addr: the list of bus addresses for the SGL elements
 * @start: start offset in the SKB where partial data starts
 * @len: length of data from @start to send out
 *
 * This API will handle sending out partial data of a skb if required.
 * Unlike cxgb4_write_sgl, @start can be any offset into the skb data,
 * and @len will decide how much data after @start offset to send out.
 */
void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
			     struct ulptx_sgl *sgl, u64 *end,
			     const dma_addr_t *addr, u32 start, u32 len)
{
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to;
	u32 frag_size, skb_linear_data_len = skb_headlen(skb);
	struct skb_shared_info *si = skb_shinfo(skb);
	u8 i = 0, frag_idx = 0, nfrags = 0;
	skb_frag_t *frag;

	/* Fill the first SGL either from linear data or from partial
	 * frag based on @start.
	 */
	if (unlikely(start < skb_linear_data_len)) {
		frag_size = min(len, skb_linear_data_len - start);
		sgl->len0 = htonl(frag_size);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		len -= frag_size;
		nfrags++;
	} else {
		start -= skb_linear_data_len;
		frag = &si->frags[frag_idx];
		frag_size = skb_frag_size(frag);
		/* find the first frag */
		while (start >= frag_size) {
			start -= frag_size;
			frag_idx++;
			frag = &si->frags[frag_idx];
			frag_size = skb_frag_size(frag);
		}

		frag_size = min(len, skb_frag_size(frag) - start);
		sgl->len0 = cpu_to_be32(frag_size);
		sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start);
		len -= frag_size;
		nfrags++;
		frag_idx++;
	}

	/* If the entire partial data fit in one SGL, then send it out
	 * now.
	 */
	if (!len)
		goto done;

	/* Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	/* If the skb couldn't fit in first SGL completely, fill the
	 * rest of the frags in subsequent SGLs.  Note that each SGL
	 * pair can store 2 frags.
	 */
	while (len) {
		frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
		to->len[i & 1] = cpu_to_be32(frag_size);
		to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]);
		if (i && (i & 1))
			to++;
		nfrags++;
		frag_idx++;
		i++;
		len -= frag_size;
	}

	/* If we ended in an odd boundary, then set the second SGL's
	 * length in the pair to 0.
	 */
	if (i & 1)
		to->len[1] = cpu_to_be32(0);

	/* Copy from temporary buffer to Tx ring, in case we hit the
	 * end of the queue in the middle of writing the SGL.
	 */
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}

	/* 0-pad to multiple of 16 */
	if ((uintptr_t)end & 8)
		*end = 0;
done:
	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
}
EXPORT_SYMBOL(cxgb4_write_partial_sgl);

/* This function copies 64 byte coalesced work request to
 * memory mapped BAR2 space.  For coalesced WR SGE fetches
 * data from the FIFO instead of from Host.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}

/**
 *	cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a Tx queue.
 */
inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);
		unsigned long flags;

		/* For T4 we need to participate in the Doorbell Recovery
		 * mechanism.
		 */
		spin_lock_irqsave(&q->db_lock, flags);
		if (!q->db_disabled)
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     QID_V(q->cntxt_id) | val);
		else
			q->db_pidx_inc += n;
		q->db_pidx = q->pidx;
		spin_unlock_irqrestore(&q->db_lock, flags);
	} else {
		u32 val = PIDX_T5_V(n);

		/* T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & DBPRIO_F);

		/* If we're only writing a single TX Descriptor and we can use
		 * Inferred QID registers, we can use the Write Combining
		 * Gather Buffer; otherwise we use the simple doorbell.
		 */
		if (n == 1 && q->bar2_qid == 0) {
			int index = (q->pidx
				     ? (q->pidx - 1)
				     : (q->size - 1));
			u64 *wr = (u64 *)&q->desc[index];

			cxgb_pio_copy((u64 __iomem *)
				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
				      wr);
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);
		}

		/* This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		wmb();
	}
}
EXPORT_SYMBOL(cxgb4_ring_tx_db);
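
/*
 * To summarize the two BAR2 doorbell paths above: for a single-descriptor
 * Work Request on an Inferred-QID mapping, cxgb_pio_copy() pushes the entire
 * 64-byte descriptor (8 flits) through the Write Combining Gather Buffer so
 * the hardware need not DMA it back from host memory; every other case just
 * writes the PIDX increment to the kernel doorbell register and lets the SGE
 * fetch the descriptors itself.
 */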
/**
 *	cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
 *	@skb: the packet
 *	@q: the Tx queue where the packet will be inlined
 *	@pos: starting position in the Tx queue where to inline the packet
 *
 *	Inline a packet's contents directly into Tx descriptors, starting at
 *	the given position within the Tx DMA ring.
 *	Most of the complexity of this operation is dealing with wrap arounds
 *	in the middle of the packet we want to inline.
 */
void cxgb4_inline_tx_skb(const struct sk_buff *skb,
			 const struct sge_txq *q, void *pos)
{
	int left = (void *)q->stat - pos;
	u64 *p;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}

	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}
EXPORT_SYMBOL(cxgb4_inline_tx_skb);

static void *inline_tx_skb_header(const struct sk_buff *skb,
				  const struct sge_txq *q, void *pos,
				  int length)
{
	u64 *p;
	int left = (void *)q->stat - pos;

	if (likely(length <= left)) {
		memcpy(pos, skb->data, length);
		pos += length;
	} else {
		memcpy(pos, skb->data, left);
		memcpy(q->desc, skb->data + left, length - left);
		pos = (void *)q->desc + (length - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8) {
		*p = 0;
		return p + 1;
	}
	return p;
}

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
	int csum_type;
	bool inner_hdr_csum = false;
	u16 proto, ver;

	if (skb->encapsulation &&
	    (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
		inner_hdr_csum = true;

	if (inner_hdr_csum) {
		ver = inner_ip_hdr(skb)->version;
		proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
			inner_ipv6_hdr(skb)->nexthdr;
	} else {
		ver = ip_hdr(skb)->version;
		proto = (ver == 4) ? ip_hdr(skb)->protocol :
			ipv6_hdr(skb)->nexthdr;
	}

	if (ver == 4) {
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS_F;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		int eth_hdr_len, l4_len;
		u64 hdr_len;

		if (inner_hdr_csum) {
			/* This allows checksum offload for all encapsulated
			 * packets like GRE etc..
			 */
			l4_len = skb_inner_network_header_len(skb);
			eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
		} else {
			l4_len = skb_network_header_len(skb);
			eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
		}
		hdr_len = TXPKT_IPHDR_LEN_V(l4_len);

		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		else
			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
	} else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE_V(csum_type) |
			TXPKT_CSUM_START_V(start) |
			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
	}
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

#ifdef CONFIG_CHELSIO_T4_FCOE
static inline int
cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
		  const struct port_info *pi, u64 *cntrl)
{
	const struct cxgb_fcoe *fcoe = &pi->fcoe;

	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
		return 0;

	if (skb->protocol != htons(ETH_P_FCOE))
		return 0;

	skb_reset_mac_header(skb);
	skb->mac_len = sizeof(struct ethhdr);

	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));

	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
		return -EOPNOTSUPP;

	/* FC CRC offload */
	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
		 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
		 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
		 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
		 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
	return 0;
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
/* Returns tunnel type if hardware supports offloading of the same.
 * It is called only for T5 and onwards.
 */
enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
{
	u8 l4_hdr = 0;
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
	struct port_info *pi = netdev_priv(skb->dev);
	struct adapter *adapter = pi->adapter;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return tnl_type;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return tnl_type;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		if (adapter->vxlan_port == udp_hdr(skb)->dest)
			tnl_type = TX_TNL_TYPE_VXLAN;
		else if (adapter->geneve_port == udp_hdr(skb)->dest)
			tnl_type = TX_TNL_TYPE_GENEVE;
		break;
	default:
		return tnl_type;
	}

	return tnl_type;
}

static inline void t6_fill_tnl_lso(struct sk_buff *skb,
				   struct cpl_tx_tnl_lso *tnl_lso,
				   enum cpl_tx_tnl_lso_type tnl_type)
{
	u32 val;
	int in_eth_xtra_len;
	int l3hdr_len = skb_network_header_len(skb);
	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
	const struct skb_shared_info *ssi = skb_shinfo(skb);
	bool v6 = (ip_hdr(skb)->version == 6);

	val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
	      CPL_TX_TNL_LSO_FIRST_F |
	      CPL_TX_TNL_LSO_LAST_F |
	      (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
	      CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
	      CPL_TX_TNL_LSO_IPLENSETOUT_F |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
	tnl_lso->op_to_IpIdSplitOut = htonl(val);

	tnl_lso->IpIdOffsetOut = 0;

	/* Get the tunnel header length */
	val = skb_inner_mac_header(skb) - skb_mac_header(skb);
	in_eth_xtra_len = skb_inner_network_header(skb) -
			  skb_inner_mac_header(skb) - ETH_HLEN;

	switch (tnl_type) {
	case TX_TNL_TYPE_VXLAN:
	case TX_TNL_TYPE_GENEVE:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen =
			htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
			      CPL_TX_TNL_LSO_UDPLENSETOUT_F);
		break;
	default:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
		break;
	}

	tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
		htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
		      CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));

	tnl_lso->r1 = 0;

	val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
	      CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
	      CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
	tnl_lso->Flow_to_TcpHdrLen = htonl(val);

	tnl_lso->IpIdOffset = htons(0);

	tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
	tnl_lso->TCPSeqOffset = htonl(0);
	tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
}
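
/*
 * For orientation, the CPL built above describes both header stacks of an
 * encapsulated GSO packet.  Taking VXLAN as an illustrative example, the
 * "...OUT" fields cover the outer Ethernet/IP/UDP headers plus the VXLAN
 * header (their combined length goes into TNLHDRLEN), while the inner
 * ETHHDRLEN/IPHDRLEN/TCPHDRLEN fields describe the inner Ethernet/IP/TCP
 * headers that the hardware replicates for every emitted segment of
 * gso_size bytes.
 */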
static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
				 struct cpl_tx_pkt_lso_core *lso)
{
	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
	int l3hdr_len = skb_network_header_len(skb);
	const struct skb_shared_info *ssi;
	bool ipv6 = false;

	ssi = skb_shinfo(skb);
	if (ssi->gso_type & SKB_GSO_TCPV6)
		ipv6 = true;

	lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
			      LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
			      LSO_IPV6_V(ipv6) |
			      LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
			      LSO_IPHDR_LEN_V(l3hdr_len / 4) |
			      LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
	lso->ipid_ofst = htons(0);
	lso->mss = htons(ssi->gso_size);
	lso->seqno_offset = htonl(0);
	if (is_t4(adap->params.chip))
		lso->len = htonl(skb->len);
	else
		lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));

	return (void *)(lso + 1);
}

/**
 *	t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
 *	@adap: the adapter
 *	@eq: the Ethernet TX Queue
 *	@maxreclaim: the maximum number of TX Descriptors to reclaim or -1
 *
 *	We're typically called here to update the state of an Ethernet TX
 *	Queue with respect to the hardware's progress in consuming the TX
 *	Work Requests that we've put on that Egress Queue.  This happens
 *	when we get Egress Queue Update messages and also prophylactically
 *	in regular timer-based Ethernet TX Queue maintenance.
 */
int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
				 int maxreclaim)
{
	unsigned int reclaimed, hw_cidx;
	struct sge_txq *q = &eq->q;
	int hw_in_use;

	if (!q->in_use || !__netif_tx_trylock(eq->txq))
		return 0;

	/* Reclaim pending completed TX Descriptors. */
	reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);

	hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
	hw_in_use = q->pidx - hw_cidx;
	if (hw_in_use < 0)
		hw_in_use += q->size;

	/* If the TX Queue is currently stopped and there's now more than half
	 * the queue available, restart it.  Otherwise bail out since the rest
	 * of what we want to do here involves the possibility of shipping any
	 * currently buffered Coalesced TX Work Request.
	 */
	if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
		netif_tx_wake_queue(eq->txq);
		eq->q.restarts++;
	}

	__netif_tx_unlock(eq->txq);
	return reclaimed;
}
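
/*
 * The restart test above gives the queue some hysteresis: with, say, a
 * 1024-descriptor TX queue that was stopped because it filled up, we only
 * wake it once the hardware has drained enough Work Requests that fewer
 * than 512 descriptors remain in flight, rather than bouncing it awake the
 * moment a single descriptor frees up.
 */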
static inline int cxgb4_validate_skb(struct sk_buff *skb,
				     struct net_device *dev,
				     u32 min_pkt_len)
{
	u32 max_pkt_len;

	/* The chip min packet length is 10 octets but some firmware
	 * commands have a minimum packet length requirement.  So, play
	 * safe and reject anything shorter than @min_pkt_len.
	 */
	if (unlikely(skb->len < min_pkt_len))
		return -EINVAL;

	/* Discard the packet if the length is greater than mtu */
	max_pkt_len = ETH_HLEN + dev->mtu;

	if (skb_vlan_tagged(skb))
		max_pkt_len += VLAN_HLEN;

	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
		return -EINVAL;

	return 0;
}

static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
			     u32 hdr_len)
{
	wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
	wr->u.udpseg.ethlen = skb_network_offset(skb);
	wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
	wr->u.udpseg.udplen = sizeof(struct udphdr);
	wr->u.udpseg.rtplen = 0;
	wr->u.udpseg.r4 = 0;
	if (skb_shinfo(skb)->gso_size)
		wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
	else
		wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len);
	wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
	wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);

	return (void *)(wr + 1);
}

/**
 *	cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
	bool ptp_enabled = is_ptp_enabled(skb, dev);
	unsigned int last_desc, flits, ndesc;
	u32 wr_mid, ctrl0, op, sgl_off = 0;
	const struct skb_shared_info *ssi;
	int len, qidx, credits, ret, left;
	struct tx_sw_desc *sgl_sdesc;
	struct fw_eth_tx_eo_wr *eowr;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct port_info *pi;
	bool immediate = false;
	u64 cntrl, *end, *sgl;
	struct sge_eth_txq *q;
	unsigned int chip_ver;
	struct adapter *adap;

	ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
	if (ret)
		goto out_free;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	ssi = skb_shinfo(skb);
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
	if (xfrm_offload(skb) && !ssi->gso_size)
		return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	if (cxgb4_is_ktls_skb(skb) &&
	    (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
		return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */

	qidx = skb_get_queue_mapping(skb);
	if (ptp_enabled) {
		if (!(adap->ptp_tx_skb)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			adap->ptp_tx_skb = skb_get(skb);
		} else {
			goto out_free;
		}
		q = &adap->sge.ptptxq;
	} else {
		q = &adap->sge.ethtxq[qidx + pi->first_qset];
	}
	skb_tx_timestamp(skb);

	reclaim_completed_tx(adap, &q->q, -1, true);
	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;

#ifdef CONFIG_CHELSIO_T4_FCOE
	ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
	if (unlikely(ret == -EOPNOTSUPP))
		goto out_free;
#endif /* CONFIG_CHELSIO_T4_FCOE */

	chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	flits = calc_tx_flits(skb, chip_ver);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (is_eth_imm(skb, chip_ver))
		immediate = true;

	if (skb->encapsulation && chip_ver > CHELSIO_T5)
		tnl_type = cxgb_encap_offload_supported(skb);

	last_desc = q->q.pidx + ndesc - 1;
	if (last_desc >= q->q.size)
		last_desc -= q->q.size;
	sgl_sdesc = &q->q.sdesc[last_desc];

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		q->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message.  The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		eth_txq_stop(q);
		if (chip_ver > CHELSIO_T5)
			wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	wr = (void *)&q->q.desc[q->q.pidx];
	eowr = (void *)&q->q.desc[q->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = cpu_to_be64(0);
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		end = (u64 *)eowr + flits;
	else
		end = (u64 *)wr + flits;

	len = immediate ? skb->len : 0;
	len += sizeof(*cpl);
	if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);

		if (tnl_type)
			len += sizeof(*tnl_lso);
		else
			len += sizeof(*lso);

		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN_V(len));
		if (tnl_type) {
			struct iphdr *iph = ip_hdr(skb);

			t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
			cpl = (void *)(tnl_lso + 1);
			/* Driver is expected to compute partial checksum that
			 * does not include the IP Total Length.
			 */
			if (iph->version == 4) {
				iph->check = 0;
				iph->tot_len = 0;
				iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
			}
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				cntrl = hwcsum(adap->params.chip, skb);
		} else {
			cpl = write_tso_wr(adap, skb, lso);
			cntrl = hwcsum(adap->params.chip, skb);
		}
		sgl = (u64 *)(cpl + 1); /* sgl start here */
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else if (ssi->gso_size) {
		u64 *start;
		u32 hdrlen;

		hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
		len += hdrlen;
		wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
					     FW_ETH_TX_EO_WR_IMMDLEN_V(len));
		cpl = write_eo_udp_wr(skb, eowr, hdrlen);
		cntrl = hwcsum(adap->params.chip, skb);

		start = (u64 *)(cpl + 1);
		sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
						  hdrlen);
		if (unlikely(start > sgl)) {
			left = (u8 *)end - (u8 *)q->q.stat;
			end = (void *)q->q.desc + left;
		}
		sgl_off = hdrlen;
		q->uso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		if (ptp_enabled)
			op = FW_PTP_TX_PKT_WR;
		else
			op = FW_ETH_TX_PKT_WR;
		wr->op_immdlen = htonl(FW_WR_OP_V(op) |
				       FW_WR_IMMDLEN_V(len));
		cpl = (void *)(wr + 1);
		sgl = (u64 *)(cpl + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adap->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			q->tx_cso++;
		}
	}

	if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
		/* If current position is already at the end of the
		 * txq, reset the current to point to start of the queue
		 * and update the end ptr as well.
		 */
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		sgl = (void *)q->q.desc;
	}

	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
#ifdef CONFIG_CHELSIO_T4_FCOE
		if (skb->protocol == htons(ETH_P_FCOE))
			cntrl |= TXPKT_VLAN_V(
				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
#endif /* CONFIG_CHELSIO_T4_FCOE */
	}

	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (ptp_enabled)
		ctrl0 |= TXPKT_TSTAMP_F;
#ifdef CONFIG_CHELSIO_T4_DCB
	if (is_t4(adap->params.chip))
		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
	else
		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
#endif
	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
				sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}

	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
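
/*
 * A brief recap of the descriptor accounting in cxgb4_eth_xmit() above,
 * with illustrative numbers: a packet needing 11 flits consumes 2
 * descriptors, and if the queue then has fewer than ETHTXQ_STOP_THRES
 * descriptors left we stop it immediately and (on T6 and later) set
 * FW_WR_EQUEQ/FW_WR_EQUIQ in this very Work Request so the firmware will
 * send back an Egress Queue Update that eventually restarts the queue.
 */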
/* Constants ... */
enum {
	/* Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Context Units bytes.  Note that as far as the hardware is
	 * concerned, the free list is an Egress Queue (the host produces free
	 * buffers which the hardware consumes) and free list entries are
	 * 64-bit PCI DMA addresses.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			       sizeof(struct cpl_tx_pkt_lso_core) +
			       sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
};

/**
 *	t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit completely as
 *	immediate data.
 */
static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
{
	/* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
	 * which does not accommodate immediate data.  We could dike out all
	 * of the support code for immediate data but that would tie our hands
	 * too much if we ever want to enhance the firmware.  It would also
	 * create more differences between the PF and VF Drivers.
	 */
	return false;
}

/**
 *	t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a TX Work Request for the
 *	given Ethernet packet, including the needed WR and CPL headers.
 */
static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (t4vf_is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
				    sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}
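
/*
 * Worth noting before reading the VF transmit routine below: because
 * t4vf_is_eth_imm() always returns false, the VF path never inlines packet
 * payload into the Work Request; every packet is DMA-mapped and described by
 * an SGL, and the FW_ETH_TX_PKT_VM_WR additionally carries a copy of the
 * Ethernet header fields (dst/src MAC, ethertype, VLAN TCI) for the firmware.
 */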
1822 */ 1823 static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, 1824 struct net_device *dev) 1825 { 1826 unsigned int last_desc, flits, ndesc; 1827 const struct skb_shared_info *ssi; 1828 struct fw_eth_tx_pkt_vm_wr *wr; 1829 struct tx_sw_desc *sgl_sdesc; 1830 struct cpl_tx_pkt_core *cpl; 1831 const struct port_info *pi; 1832 struct sge_eth_txq *txq; 1833 struct adapter *adapter; 1834 int qidx, credits, ret; 1835 size_t fw_hdr_copy_len; 1836 unsigned int chip_ver; 1837 u64 cntrl, *end; 1838 u32 wr_mid; 1839 1840 /* The chip minimum packet length is 10 octets but the firmware 1841 * command that we are using requires that we copy the Ethernet header 1842 * (including the VLAN tag) into the header so we reject anything 1843 * smaller than that ... 1844 */ 1845 fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) + 1846 sizeof(wr->ethtype) + sizeof(wr->vlantci); 1847 ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len); 1848 if (ret) 1849 goto out_free; 1850 1851 /* Figure out which TX Queue we're going to use. */ 1852 pi = netdev_priv(dev); 1853 adapter = pi->adapter; 1854 qidx = skb_get_queue_mapping(skb); 1855 WARN_ON(qidx >= pi->nqsets); 1856 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; 1857 1858 /* Take this opportunity to reclaim any TX Descriptors whose DMA 1859 * transfers have completed. 1860 */ 1861 reclaim_completed_tx(adapter, &txq->q, -1, true); 1862 1863 /* Calculate the number of flits and TX Descriptors we're going to 1864 * need along with how many TX Descriptors will be left over after 1865 * we inject our Work Request. 1866 */ 1867 flits = t4vf_calc_tx_flits(skb); 1868 ndesc = flits_to_desc(flits); 1869 credits = txq_avail(&txq->q) - ndesc; 1870 1871 if (unlikely(credits < 0)) { 1872 /* Not enough room for this packet's Work Request. Stop the 1873 * TX Queue and return a "busy" condition. The queue will get 1874 * started later on when the firmware informs us that space 1875 * has opened up. 1876 */ 1877 eth_txq_stop(txq); 1878 dev_err(adapter->pdev_dev, 1879 "%s: TX ring %u full while queue awake!\n", 1880 dev->name, qidx); 1881 return NETDEV_TX_BUSY; 1882 } 1883 1884 last_desc = txq->q.pidx + ndesc - 1; 1885 if (last_desc >= txq->q.size) 1886 last_desc -= txq->q.size; 1887 sgl_sdesc = &txq->q.sdesc[last_desc]; 1888 1889 if (!t4vf_is_eth_imm(skb) && 1890 unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, 1891 sgl_sdesc->addr) < 0)) { 1892 /* We need to map the skb into PCI DMA space (because it can't 1893 * be in-lined directly into the Work Request) and the mapping 1894 * operation failed. Record the error and drop the packet. 1895 */ 1896 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); 1897 txq->mapping_err++; 1898 goto out_free; 1899 } 1900 1901 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); 1902 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); 1903 if (unlikely(credits < ETHTXQ_STOP_THRES)) { 1904 /* After we're done injecting the Work Request for this 1905 * packet, we'll be below our "stop threshold" so stop the TX 1906 * Queue now and schedule a request for an SGE Egress Queue 1907 * Update message. The queue will get started later on when 1908 * the firmware processes this Work Request and sends us an 1909 * Egress Queue Status Update message indicating that space 1910 * has opened up. 1911 */ 1912 eth_txq_stop(txq); 1913 if (chip_ver > CHELSIO_T5) 1914 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; 1915 } 1916 1917 /* Start filling in our Work Request. Note that we do _not_ handle 1918 * the WR Header wrapping around the TX Descriptor Ring. 
If our 1919 * maximum header size ever exceeds one TX Descriptor, we'll need to 1920 * do something else here. 1921 */ 1922 WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); 1923 wr = (void *)&txq->q.desc[txq->q.pidx]; 1924 wr->equiq_to_len16 = cpu_to_be32(wr_mid); 1925 wr->r3[0] = cpu_to_be32(0); 1926 wr->r3[1] = cpu_to_be32(0); 1927 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); 1928 end = (u64 *)wr + flits; 1929 1930 /* If this is a Large Send Offload packet we'll put in an LSO CPL 1931 * message with an encapsulated TX Packet CPL message. Otherwise we 1932 * just use a TX Packet CPL message. 1933 */ 1934 ssi = skb_shinfo(skb); 1935 if (ssi->gso_size) { 1936 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 1937 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; 1938 int l3hdr_len = skb_network_header_len(skb); 1939 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; 1940 1941 wr->op_immdlen = 1942 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | 1943 FW_WR_IMMDLEN_V(sizeof(*lso) + 1944 sizeof(*cpl))); 1945 /* Fill in the LSO CPL message. */ 1946 lso->lso_ctrl = 1947 cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) | 1948 LSO_FIRST_SLICE_F | 1949 LSO_LAST_SLICE_F | 1950 LSO_IPV6_V(v6) | 1951 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | 1952 LSO_IPHDR_LEN_V(l3hdr_len / 4) | 1953 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); 1954 lso->ipid_ofst = cpu_to_be16(0); 1955 lso->mss = cpu_to_be16(ssi->gso_size); 1956 lso->seqno_offset = cpu_to_be32(0); 1957 if (is_t4(adapter->params.chip)) 1958 lso->len = cpu_to_be32(skb->len); 1959 else 1960 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); 1961 1962 /* Set up TX Packet CPL pointer, control word and perform 1963 * accounting. 1964 */ 1965 cpl = (void *)(lso + 1); 1966 1967 if (chip_ver <= CHELSIO_T5) 1968 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); 1969 else 1970 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); 1971 1972 cntrl |= TXPKT_CSUM_TYPE_V(v6 ? 1973 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | 1974 TXPKT_IPHDR_LEN_V(l3hdr_len); 1975 txq->tso++; 1976 txq->tx_cso += ssi->gso_segs; 1977 } else { 1978 int len; 1979 1980 len = (t4vf_is_eth_imm(skb) 1981 ? skb->len + sizeof(*cpl) 1982 : sizeof(*cpl)); 1983 wr->op_immdlen = 1984 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | 1985 FW_WR_IMMDLEN_V(len)); 1986 1987 /* Set up TX Packet CPL pointer, control word and perform 1988 * accounting. 1989 */ 1990 cpl = (void *)(wr + 1); 1991 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1992 cntrl = hwcsum(adapter->params.chip, skb) | 1993 TXPKT_IPCSUM_DIS_F; 1994 txq->tx_cso++; 1995 } else { 1996 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; 1997 } 1998 } 1999 2000 /* If there's a VLAN tag present, add that to the list of things to 2001 * do in this Work Request. 2002 */ 2003 if (skb_vlan_tag_present(skb)) { 2004 txq->vlan_ins++; 2005 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); 2006 } 2007 2008 /* Fill in the TX Packet CPL message header. */ 2009 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | 2010 TXPKT_INTF_V(pi->port_id) | 2011 TXPKT_PF_V(0)); 2012 cpl->pack = cpu_to_be16(0); 2013 cpl->len = cpu_to_be16(skb->len); 2014 cpl->ctrl1 = cpu_to_be64(cntrl); 2015 2016 /* Fill in the body of the TX Packet CPL message with either in-lined 2017 * data or a Scatter/Gather List. 2018 */ 2019 if (t4vf_is_eth_imm(skb)) { 2020 /* In-line the packet's data and free the skb since we don't 2021 * need it any longer. 
2022 */ 2023 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); 2024 dev_consume_skb_any(skb); 2025 } else { 2026 /* Write the skb's Scatter/Gather list into the TX Packet CPL 2027 * message and retain a pointer to the skb so we can free it 2028 * later when its DMA completes. (We store the skb pointer 2029 * in the Software Descriptor corresponding to the last TX 2030 * Descriptor used by the Work Request.) 2031 * 2032 * The retained skb will be freed when the corresponding TX 2033 * Descriptors are reclaimed after their DMAs complete. 2034 * However, this could take quite a while since, in general, 2035 * the hardware is set up to be lazy about sending DMA 2036 * completion notifications to us and we mostly perform TX 2037 * reclaims in the transmit routine. 2038 * 2039 * This is good for performance but means that we rely on new 2040 * TX packets arriving to run the destructors of completed 2041 * packets, which open up space in their sockets' send queues. 2042 * Sometimes we do not get such new packets causing TX to 2043 * stall. A single UDP transmitter is a good example of this 2044 * situation. We have a clean up timer that periodically 2045 * reclaims completed packets but it doesn't run often enough 2046 * (nor do we want it to) to prevent lengthy stalls. A 2047 * solution to this problem is to run the destructor early, 2048 * after the packet is queued but before it's DMAd. A con is 2049 * that we lie to socket memory accounting, but the amount of 2050 * extra memory is reasonable (limited by the number of TX 2051 * descriptors), the packets do actually get freed quickly by 2052 * new packets almost always, and for protocols like TCP that 2053 * wait for acks to really free up the data the extra memory 2054 * is even less. On the positive side we run the destructors 2055 * on the sending CPU rather than on a potentially different 2056 * completing CPU, usually a good thing. 2057 * 2058 * Run the destructor before telling the DMA engine about the 2059 * packet to make sure it doesn't complete and get freed 2060 * prematurely. 2061 */ 2062 struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); 2063 struct sge_txq *tq = &txq->q; 2064 2065 /* If the Work Request header was an exact multiple of our TX 2066 * Descriptor length, then it's possible that the starting SGL 2067 * pointer lines up exactly with the end of our TX Descriptor 2068 * ring. If that's the case, wrap around to the beginning 2069 * here ... 2070 */ 2071 if (unlikely((void *)sgl == (void *)tq->stat)) { 2072 sgl = (void *)tq->desc; 2073 end = (void *)((void *)tq->desc + 2074 ((void *)end - (void *)tq->stat)); 2075 } 2076 2077 cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr); 2078 skb_orphan(skb); 2079 sgl_sdesc->skb = skb; 2080 } 2081 2082 /* Advance our internal TX Queue state, tell the hardware about 2083 * the new TX descriptors and return success. 2084 */ 2085 txq_advance(&txq->q, ndesc); 2086 2087 cxgb4_ring_tx_db(adapter, &txq->q, ndesc); 2088 return NETDEV_TX_OK; 2089 2090 out_free: 2091 /* An error of some sort happened. Free the TX skb and tell the 2092 * OS that we've "dealt" with the packet ... 2093 */ 2094 dev_kfree_skb_any(skb); 2095 return NETDEV_TX_OK; 2096 } 2097 2098 /** 2099 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs 2100 * @q: the SGE control Tx queue 2101 * 2102 * This is a variant of cxgb4_reclaim_completed_tx() that is used 2103 * for Tx queues that send only immediate data (presently just 2104 * the control queues) and thus do not have any sk_buffs to release.
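* The hardware consumer index is read from the queue's Status Page and the software cidx/in_use state is simply advanced to match; there are no DMA mappings to tear down.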
2105 */ 2106 static inline void reclaim_completed_tx_imm(struct sge_txq *q) 2107 { 2108 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); 2109 int reclaim = hw_cidx - q->cidx; 2110 2111 if (reclaim < 0) 2112 reclaim += q->size; 2113 2114 q->in_use -= reclaim; 2115 q->cidx = hw_cidx; 2116 } 2117 2118 static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max) 2119 { 2120 u32 val = *idx + n; 2121 2122 if (val >= max) 2123 val -= max; 2124 2125 *idx = val; 2126 } 2127 2128 void cxgb4_eosw_txq_free_desc(struct adapter *adap, 2129 struct sge_eosw_txq *eosw_txq, u32 ndesc) 2130 { 2131 struct tx_sw_desc *d; 2132 2133 d = &eosw_txq->desc[eosw_txq->last_cidx]; 2134 while (ndesc--) { 2135 if (d->skb) { 2136 if (d->addr[0]) { 2137 unmap_skb(adap->pdev_dev, d->skb, d->addr); 2138 memset(d->addr, 0, sizeof(d->addr)); 2139 } 2140 dev_consume_skb_any(d->skb); 2141 d->skb = NULL; 2142 } 2143 eosw_txq_advance_index(&eosw_txq->last_cidx, 1, 2144 eosw_txq->ndesc); 2145 d = &eosw_txq->desc[eosw_txq->last_cidx]; 2146 } 2147 } 2148 2149 static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n) 2150 { 2151 eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc); 2152 eosw_txq->inuse += n; 2153 } 2154 2155 static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq, 2156 struct sk_buff *skb) 2157 { 2158 if (eosw_txq->inuse == eosw_txq->ndesc) 2159 return -ENOMEM; 2160 2161 eosw_txq->desc[eosw_txq->pidx].skb = skb; 2162 return 0; 2163 } 2164 2165 static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq) 2166 { 2167 return eosw_txq->desc[eosw_txq->last_pidx].skb; 2168 } 2169 2170 static inline u8 ethofld_calc_tx_flits(struct adapter *adap, 2171 struct sk_buff *skb, u32 hdr_len) 2172 { 2173 u8 flits, nsgl = 0; 2174 u32 wrlen; 2175 2176 wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core); 2177 if (skb_shinfo(skb)->gso_size && 2178 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) 2179 wrlen += sizeof(struct cpl_tx_pkt_lso_core); 2180 2181 wrlen += roundup(hdr_len, 16); 2182 2183 /* Packet headers + WR + CPLs */ 2184 flits = DIV_ROUND_UP(wrlen, 8); 2185 2186 if (skb_shinfo(skb)->nr_frags > 0) { 2187 if (skb_headlen(skb) - hdr_len) 2188 nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1); 2189 else 2190 nsgl = sgl_len(skb_shinfo(skb)->nr_frags); 2191 } else if (skb->len - hdr_len) { 2192 nsgl = sgl_len(1); 2193 } 2194 2195 return flits + nsgl; 2196 } 2197 2198 static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq, 2199 struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr, 2200 u32 hdr_len, u32 wrlen) 2201 { 2202 const struct skb_shared_info *ssi = skb_shinfo(skb); 2203 struct cpl_tx_pkt_core *cpl; 2204 u32 immd_len, wrlen16; 2205 bool compl = false; 2206 u8 ver, proto; 2207 2208 ver = ip_hdr(skb)->version; 2209 proto = (ver == 6) ? 
ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol; 2210 2211 wrlen16 = DIV_ROUND_UP(wrlen, 16); 2212 immd_len = sizeof(struct cpl_tx_pkt_core); 2213 if (skb_shinfo(skb)->gso_size && 2214 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) 2215 immd_len += sizeof(struct cpl_tx_pkt_lso_core); 2216 immd_len += hdr_len; 2217 2218 if (!eosw_txq->ncompl || 2219 (eosw_txq->last_compl + wrlen16) >= 2220 (adap->params.ofldq_wr_cred / 2)) { 2221 compl = true; 2222 eosw_txq->ncompl++; 2223 eosw_txq->last_compl = 0; 2224 } 2225 2226 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | 2227 FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) | 2228 FW_WR_COMPL_V(compl)); 2229 wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) | 2230 FW_WR_FLOWID_V(eosw_txq->hwtid)); 2231 wr->r3 = 0; 2232 if (proto == IPPROTO_UDP) { 2233 cpl = write_eo_udp_wr(skb, wr, hdr_len); 2234 } else { 2235 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; 2236 wr->u.tcpseg.ethlen = skb_network_offset(skb); 2237 wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); 2238 wr->u.tcpseg.tcplen = tcp_hdrlen(skb); 2239 wr->u.tcpseg.tsclk_tsoff = 0; 2240 wr->u.tcpseg.r4 = 0; 2241 wr->u.tcpseg.r5 = 0; 2242 wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len); 2243 2244 if (ssi->gso_size) { 2245 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 2246 2247 wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size); 2248 cpl = write_tso_wr(adap, skb, lso); 2249 } else { 2250 wr->u.tcpseg.mss = cpu_to_be16(0xffff); 2251 cpl = (void *)(wr + 1); 2252 } 2253 } 2254 2255 eosw_txq->cred -= wrlen16; 2256 eosw_txq->last_compl += wrlen16; 2257 return cpl; 2258 } 2259 2260 static int ethofld_hard_xmit(struct net_device *dev, 2261 struct sge_eosw_txq *eosw_txq) 2262 { 2263 struct port_info *pi = netdev2pinfo(dev); 2264 struct adapter *adap = netdev2adap(dev); 2265 u32 wrlen, wrlen16, hdr_len, data_len; 2266 enum sge_eosw_state next_state; 2267 u64 cntrl, *start, *end, *sgl; 2268 struct sge_eohw_txq *eohw_txq; 2269 struct cpl_tx_pkt_core *cpl; 2270 struct fw_eth_tx_eo_wr *wr; 2271 bool skip_eotx_wr = false; 2272 struct tx_sw_desc *d; 2273 struct sk_buff *skb; 2274 int left, ret = 0; 2275 u8 flits, ndesc; 2276 2277 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; 2278 spin_lock(&eohw_txq->lock); 2279 reclaim_completed_tx_imm(&eohw_txq->q); 2280 2281 d = &eosw_txq->desc[eosw_txq->last_pidx]; 2282 skb = d->skb; 2283 skb_tx_timestamp(skb); 2284 2285 wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx]; 2286 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && 2287 eosw_txq->last_pidx == eosw_txq->flowc_idx)) { 2288 hdr_len = skb->len; 2289 data_len = 0; 2290 flits = DIV_ROUND_UP(hdr_len, 8); 2291 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND) 2292 next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY; 2293 else 2294 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY; 2295 skip_eotx_wr = true; 2296 } else { 2297 hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb)); 2298 data_len = skb->len - hdr_len; 2299 flits = ethofld_calc_tx_flits(adap, skb, hdr_len); 2300 } 2301 ndesc = flits_to_desc(flits); 2302 wrlen = flits * 8; 2303 wrlen16 = DIV_ROUND_UP(wrlen, 16); 2304 2305 left = txq_avail(&eohw_txq->q) - ndesc; 2306 2307 /* If there are no descriptors left in hardware queues or no 2308 * CPL credits left in software queues, then wait for them 2309 * to come back and retry again. Note that we always request 2310 * for credits update via interrupt for every half credits 2311 * consumed. 
So, the interrupt will eventually restore the 2312 * credits and invoke the Tx path again. 2313 */ 2314 if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) { 2315 ret = -ENOMEM; 2316 goto out_unlock; 2317 } 2318 2319 if (unlikely(skip_eotx_wr)) { 2320 start = (u64 *)wr; 2321 eosw_txq->state = next_state; 2322 eosw_txq->cred -= wrlen16; 2323 eosw_txq->ncompl++; 2324 eosw_txq->last_compl = 0; 2325 goto write_wr_headers; 2326 } 2327 2328 cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen); 2329 cntrl = hwcsum(adap->params.chip, skb); 2330 if (skb_vlan_tag_present(skb)) 2331 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); 2332 2333 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | 2334 TXPKT_INTF_V(pi->tx_chan) | 2335 TXPKT_PF_V(adap->pf)); 2336 cpl->pack = 0; 2337 cpl->len = cpu_to_be16(skb->len); 2338 cpl->ctrl1 = cpu_to_be64(cntrl); 2339 2340 start = (u64 *)(cpl + 1); 2341 2342 write_wr_headers: 2343 sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start, 2344 hdr_len); 2345 if (data_len) { 2346 ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr); 2347 if (unlikely(ret)) { 2348 memset(d->addr, 0, sizeof(d->addr)); 2349 eohw_txq->mapping_err++; 2350 goto out_unlock; 2351 } 2352 2353 end = (u64 *)wr + flits; 2354 if (unlikely(start > sgl)) { 2355 left = (u8 *)end - (u8 *)eohw_txq->q.stat; 2356 end = (void *)eohw_txq->q.desc + left; 2357 } 2358 2359 if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) { 2360 /* If current position is already at the end of the 2361 * txq, reset the current to point to start of the queue 2362 * and update the end ptr as well. 2363 */ 2364 left = (u8 *)end - (u8 *)eohw_txq->q.stat; 2365 2366 end = (void *)eohw_txq->q.desc + left; 2367 sgl = (void *)eohw_txq->q.desc; 2368 } 2369 2370 cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len, 2371 d->addr); 2372 } 2373 2374 if (skb_shinfo(skb)->gso_size) { 2375 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) 2376 eohw_txq->uso++; 2377 else 2378 eohw_txq->tso++; 2379 eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs; 2380 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2381 eohw_txq->tx_cso++; 2382 } 2383 2384 if (skb_vlan_tag_present(skb)) 2385 eohw_txq->vlan_ins++; 2386 2387 txq_advance(&eohw_txq->q, ndesc); 2388 cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc); 2389 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); 2390 2391 out_unlock: 2392 spin_unlock(&eohw_txq->lock); 2393 return ret; 2394 } 2395 2396 static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq) 2397 { 2398 struct sk_buff *skb; 2399 int pktcount, ret; 2400 2401 switch (eosw_txq->state) { 2402 case CXGB4_EO_STATE_ACTIVE: 2403 case CXGB4_EO_STATE_FLOWC_OPEN_SEND: 2404 case CXGB4_EO_STATE_FLOWC_CLOSE_SEND: 2405 pktcount = eosw_txq->pidx - eosw_txq->last_pidx; 2406 if (pktcount < 0) 2407 pktcount += eosw_txq->ndesc; 2408 break; 2409 case CXGB4_EO_STATE_FLOWC_OPEN_REPLY: 2410 case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY: 2411 case CXGB4_EO_STATE_CLOSED: 2412 default: 2413 return; 2414 } 2415 2416 while (pktcount--) { 2417 skb = eosw_txq_peek(eosw_txq); 2418 if (!skb) { 2419 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, 2420 eosw_txq->ndesc); 2421 continue; 2422 } 2423 2424 ret = ethofld_hard_xmit(dev, eosw_txq); 2425 if (ret) 2426 break; 2427 } 2428 } 2429 2430 static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb, 2431 struct net_device *dev) 2432 { 2433 struct cxgb4_tc_port_mqprio *tc_port_mqprio; 2434 struct port_info *pi = netdev2pinfo(dev); 2435 struct adapter *adap = 
netdev2adap(dev); 2436 struct sge_eosw_txq *eosw_txq; 2437 u32 qid; 2438 int ret; 2439 2440 ret = cxgb4_validate_skb(skb, dev, ETH_HLEN); 2441 if (ret) 2442 goto out_free; 2443 2444 tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; 2445 qid = skb_get_queue_mapping(skb) - pi->nqsets; 2446 eosw_txq = &tc_port_mqprio->eosw_txq[qid]; 2447 spin_lock_bh(&eosw_txq->lock); 2448 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) 2449 goto out_unlock; 2450 2451 ret = eosw_txq_enqueue(eosw_txq, skb); 2452 if (ret) 2453 goto out_unlock; 2454 2455 /* SKB is queued for processing until credits are available. 2456 * So, call the destructor now and we'll free the skb later 2457 * after it has been successfully transmitted. 2458 */ 2459 skb_orphan(skb); 2460 2461 eosw_txq_advance(eosw_txq, 1); 2462 ethofld_xmit(dev, eosw_txq); 2463 spin_unlock_bh(&eosw_txq->lock); 2464 return NETDEV_TX_OK; 2465 2466 out_unlock: 2467 spin_unlock_bh(&eosw_txq->lock); 2468 out_free: 2469 dev_kfree_skb_any(skb); 2470 return NETDEV_TX_OK; 2471 } 2472 2473 netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) 2474 { 2475 struct port_info *pi = netdev_priv(dev); 2476 u16 qid = skb_get_queue_mapping(skb); 2477 2478 if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) 2479 return cxgb4_vf_eth_xmit(skb, dev); 2480 2481 if (unlikely(qid >= pi->nqsets)) 2482 return cxgb4_ethofld_xmit(skb, dev); 2483 2484 if (is_ptp_enabled(skb, dev)) { 2485 struct adapter *adap = netdev2adap(dev); 2486 netdev_tx_t ret; 2487 2488 spin_lock(&adap->ptp_lock); 2489 ret = cxgb4_eth_xmit(skb, dev); 2490 spin_unlock(&adap->ptp_lock); 2491 return ret; 2492 } 2493 2494 return cxgb4_eth_xmit(skb, dev); 2495 } 2496 2497 static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq) 2498 { 2499 int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; 2500 int pidx = eosw_txq->pidx; 2501 struct sk_buff *skb; 2502 2503 if (!pktcount) 2504 return; 2505 2506 if (pktcount < 0) 2507 pktcount += eosw_txq->ndesc; 2508 2509 while (pktcount--) { 2510 pidx--; 2511 if (pidx < 0) 2512 pidx += eosw_txq->ndesc; 2513 2514 skb = eosw_txq->desc[pidx].skb; 2515 if (skb) { 2516 dev_consume_skb_any(skb); 2517 eosw_txq->desc[pidx].skb = NULL; 2518 eosw_txq->inuse--; 2519 } 2520 } 2521 2522 eosw_txq->pidx = eosw_txq->last_pidx + 1; 2523 } 2524 2525 /** 2526 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc. 2527 * @dev: netdevice 2528 * @eotid: ETHOFLD tid to bind/unbind 2529 * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid 2530 * 2531 * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class. 2532 * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from 2533 * a traffic class. 
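* The FLOWC request is built in a freshly allocated skb, queued on the EOSW Tx queue and pushed out through the normal ETHOFLD transmit path; on unbind, any skbs still pending on the queue are flushed first to make room for the termination FLOWC.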
2534 */ 2535 int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc) 2536 { 2537 struct port_info *pi = netdev2pinfo(dev); 2538 struct adapter *adap = netdev2adap(dev); 2539 enum sge_eosw_state next_state; 2540 struct sge_eosw_txq *eosw_txq; 2541 u32 len, len16, nparams = 6; 2542 struct fw_flowc_wr *flowc; 2543 struct eotid_entry *entry; 2544 struct sge_ofld_rxq *rxq; 2545 struct sk_buff *skb; 2546 int ret = 0; 2547 2548 len = struct_size(flowc, mnemval, nparams); 2549 len16 = DIV_ROUND_UP(len, 16); 2550 2551 entry = cxgb4_lookup_eotid(&adap->tids, eotid); 2552 if (!entry) 2553 return -ENOMEM; 2554 2555 eosw_txq = (struct sge_eosw_txq *)entry->data; 2556 if (!eosw_txq) 2557 return -ENOMEM; 2558 2559 skb = alloc_skb(len, GFP_KERNEL); 2560 if (!skb) 2561 return -ENOMEM; 2562 2563 spin_lock_bh(&eosw_txq->lock); 2564 if (tc != FW_SCHED_CLS_NONE) { 2565 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED) 2566 goto out_free_skb; 2567 2568 next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND; 2569 } else { 2570 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) 2571 goto out_free_skb; 2572 2573 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND; 2574 } 2575 2576 flowc = __skb_put(skb, len); 2577 memset(flowc, 0, len); 2578 2579 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid]; 2580 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) | 2581 FW_WR_FLOWID_V(eosw_txq->hwtid)); 2582 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | 2583 FW_FLOWC_WR_NPARAMS_V(nparams) | 2584 FW_WR_COMPL_V(1)); 2585 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 2586 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf)); 2587 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 2588 flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan); 2589 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 2590 flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan); 2591 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 2592 flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id); 2593 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 2594 flowc->mnemval[4].val = cpu_to_be32(tc); 2595 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE; 2596 flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ? 2597 FW_FLOWC_MNEM_EOSTATE_CLOSING : 2598 FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); 2599 2600 /* Free up any pending skbs to ensure there's room for 2601 * termination FLOWC. 2602 */ 2603 if (tc == FW_SCHED_CLS_NONE) 2604 eosw_txq_flush_pending_skbs(eosw_txq); 2605 2606 ret = eosw_txq_enqueue(eosw_txq, skb); 2607 if (ret) 2608 goto out_free_skb; 2609 2610 eosw_txq->state = next_state; 2611 eosw_txq->flowc_idx = eosw_txq->pidx; 2612 eosw_txq_advance(eosw_txq, 1); 2613 ethofld_xmit(dev, eosw_txq); 2614 2615 spin_unlock_bh(&eosw_txq->lock); 2616 return 0; 2617 2618 out_free_skb: 2619 dev_consume_skb_any(skb); 2620 spin_unlock_bh(&eosw_txq->lock); 2621 return ret; 2622 } 2623 2624 /** 2625 * is_imm - check whether a packet can be sent as immediate data 2626 * @skb: the packet 2627 * 2628 * Returns true if a packet can be sent as a WR with immediate data. 2629 */ 2630 static inline int is_imm(const struct sk_buff *skb) 2631 { 2632 return skb->len <= MAX_CTRL_WR_LEN; 2633 } 2634 2635 /** 2636 * ctrlq_check_stop - check if a control queue is full and should stop 2637 * @q: the queue 2638 * @wr: most recent WR written to the queue 2639 * 2640 * Check if a control queue has become full and should be stopped. 2641 * We clean up control queue descriptors very lazily, only when we are out. 
2642 * If the queue is still full after reclaiming any completed descriptors 2643 * we suspend it and have the last WR wake it up. 2644 */ 2645 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) 2646 { 2647 reclaim_completed_tx_imm(&q->q); 2648 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { 2649 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); 2650 q->q.stops++; 2651 q->full = 1; 2652 } 2653 } 2654 2655 #define CXGB4_SELFTEST_LB_STR "CHELSIO_SELFTEST" 2656 2657 int cxgb4_selftest_lb_pkt(struct net_device *netdev) 2658 { 2659 struct port_info *pi = netdev_priv(netdev); 2660 struct adapter *adap = pi->adapter; 2661 struct cxgb4_ethtool_lb_test *lb; 2662 int ret, i = 0, pkt_len, credits; 2663 struct fw_eth_tx_pkt_wr *wr; 2664 struct cpl_tx_pkt_core *cpl; 2665 u32 ctrl0, ndesc, flits; 2666 struct sge_eth_txq *q; 2667 u8 *sgl; 2668 2669 pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR); 2670 2671 flits = DIV_ROUND_UP(pkt_len + sizeof(*cpl) + sizeof(*wr), 2672 sizeof(__be64)); 2673 ndesc = flits_to_desc(flits); 2674 2675 lb = &pi->ethtool_lb; 2676 lb->loopback = 1; 2677 2678 q = &adap->sge.ethtxq[pi->first_qset]; 2679 __netif_tx_lock(q->txq, smp_processor_id()); 2680 2681 reclaim_completed_tx(adap, &q->q, -1, true); 2682 credits = txq_avail(&q->q) - ndesc; 2683 if (unlikely(credits < 0)) { 2684 __netif_tx_unlock(q->txq); 2685 return -ENOMEM; 2686 } 2687 2688 wr = (void *)&q->q.desc[q->q.pidx]; 2689 memset(wr, 0, sizeof(struct tx_desc)); 2690 2691 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | 2692 FW_WR_IMMDLEN_V(pkt_len + 2693 sizeof(*cpl))); 2694 wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2))); 2695 wr->r3 = cpu_to_be64(0); 2696 2697 cpl = (void *)(wr + 1); 2698 sgl = (u8 *)(cpl + 1); 2699 2700 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) | 2701 TXPKT_INTF_V(pi->tx_chan + 4); 2702 2703 cpl->ctrl0 = htonl(ctrl0); 2704 cpl->pack = htons(0); 2705 cpl->len = htons(pkt_len); 2706 cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F); 2707 2708 eth_broadcast_addr(sgl); 2709 i += ETH_ALEN; 2710 ether_addr_copy(&sgl[i], netdev->dev_addr); 2711 i += ETH_ALEN; 2712 2713 snprintf(&sgl[i], sizeof(CXGB4_SELFTEST_LB_STR), "%s", 2714 CXGB4_SELFTEST_LB_STR); 2715 2716 init_completion(&lb->completion); 2717 txq_advance(&q->q, ndesc); 2718 cxgb4_ring_tx_db(adap, &q->q, ndesc); 2719 __netif_tx_unlock(q->txq); 2720 2721 /* wait for the pkt to return */ 2722 ret = wait_for_completion_timeout(&lb->completion, 10 * HZ); 2723 if (!ret) 2724 ret = -ETIMEDOUT; 2725 else 2726 ret = lb->result; 2727 2728 lb->loopback = 0; 2729 2730 return ret; 2731 } 2732 2733 /** 2734 * ctrl_xmit - send a packet through an SGE control Tx queue 2735 * @q: the control queue 2736 * @skb: the packet 2737 * 2738 * Send a packet through an SGE control Tx queue. Packets sent through 2739 * a control queue must fit entirely as immediate data. 
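* If the queue is currently suspended (full), the packet is instead added to the queue's pending send list, with its descriptor count stashed in skb->priority, and is transmitted later when restart_ctrlq() resumes the queue.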
2740 */ 2741 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) 2742 { 2743 unsigned int ndesc; 2744 struct fw_wr_hdr *wr; 2745 2746 if (unlikely(!is_imm(skb))) { 2747 WARN_ON(1); 2748 dev_kfree_skb(skb); 2749 return NET_XMIT_DROP; 2750 } 2751 2752 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); 2753 spin_lock(&q->sendq.lock); 2754 2755 if (unlikely(q->full)) { 2756 skb->priority = ndesc; /* save for restart */ 2757 __skb_queue_tail(&q->sendq, skb); 2758 spin_unlock(&q->sendq.lock); 2759 return NET_XMIT_CN; 2760 } 2761 2762 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; 2763 cxgb4_inline_tx_skb(skb, &q->q, wr); 2764 2765 txq_advance(&q->q, ndesc); 2766 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) 2767 ctrlq_check_stop(q, wr); 2768 2769 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); 2770 spin_unlock(&q->sendq.lock); 2771 2772 kfree_skb(skb); 2773 return NET_XMIT_SUCCESS; 2774 } 2775 2776 /** 2777 * restart_ctrlq - restart a suspended control queue 2778 * @t: pointer to the tasklet associated with this handler 2779 * 2780 * Resumes transmission on a suspended Tx control queue. 2781 */ 2782 static void restart_ctrlq(struct tasklet_struct *t) 2783 { 2784 struct sk_buff *skb; 2785 unsigned int written = 0; 2786 struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk); 2787 2788 spin_lock(&q->sendq.lock); 2789 reclaim_completed_tx_imm(&q->q); 2790 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ 2791 2792 while ((skb = __skb_dequeue(&q->sendq)) != NULL) { 2793 struct fw_wr_hdr *wr; 2794 unsigned int ndesc = skb->priority; /* previously saved */ 2795 2796 written += ndesc; 2797 /* Write descriptors and free skbs outside the lock to limit 2798 * wait times. q->full is still set so new skbs will be queued. 2799 */ 2800 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; 2801 txq_advance(&q->q, ndesc); 2802 spin_unlock(&q->sendq.lock); 2803 2804 cxgb4_inline_tx_skb(skb, &q->q, wr); 2805 kfree_skb(skb); 2806 2807 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { 2808 unsigned long old = q->q.stops; 2809 2810 ctrlq_check_stop(q, wr); 2811 if (q->q.stops != old) { /* suspended anew */ 2812 spin_lock(&q->sendq.lock); 2813 goto ringdb; 2814 } 2815 } 2816 if (written > 16) { 2817 cxgb4_ring_tx_db(q->adap, &q->q, written); 2818 written = 0; 2819 } 2820 spin_lock(&q->sendq.lock); 2821 } 2822 q->full = 0; 2823 ringdb: 2824 if (written) 2825 cxgb4_ring_tx_db(q->adap, &q->q, written); 2826 spin_unlock(&q->sendq.lock); 2827 } 2828 2829 /** 2830 * t4_mgmt_tx - send a management message 2831 * @adap: the adapter 2832 * @skb: the packet containing the management message 2833 * 2834 * Send a management message through control queue 0. 2835 */ 2836 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) 2837 { 2838 int ret; 2839 2840 local_bh_disable(); 2841 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); 2842 local_bh_enable(); 2843 return ret; 2844 } 2845 2846 /** 2847 * is_ofld_imm - check whether a packet can be sent as immediate data 2848 * @skb: the packet 2849 * 2850 * Returns true if a packet can be sent as an offload WR with immediate 2851 * data. 2852 * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field. 2853 * However, FW_ULPTX_WR commands have a 256 byte immediate only 2854 * payload limit. 
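* The WR opcode in the packet's header is therefore examined below to pick the limit that applies.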
2855 */ 2856 static inline int is_ofld_imm(const struct sk_buff *skb) 2857 { 2858 struct work_request_hdr *req = (struct work_request_hdr *)skb->data; 2859 unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); 2860 2861 if (unlikely(opcode == FW_ULPTX_WR)) 2862 return skb->len <= MAX_IMM_ULPTX_WR_LEN; 2863 else if (opcode == FW_CRYPTO_LOOKASIDE_WR) 2864 return skb->len <= SGE_MAX_WR_LEN; 2865 else 2866 return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN; 2867 } 2868 2869 /** 2870 * calc_tx_flits_ofld - calculate # of flits for an offload packet 2871 * @skb: the packet 2872 * 2873 * Returns the number of flits needed for the given offload packet. 2874 * These packets are already fully constructed and no additional headers 2875 * will be added. 2876 */ 2877 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) 2878 { 2879 unsigned int flits, cnt; 2880 2881 if (is_ofld_imm(skb)) 2882 return DIV_ROUND_UP(skb->len, 8); 2883 2884 flits = skb_transport_offset(skb) / 8U; /* headers */ 2885 cnt = skb_shinfo(skb)->nr_frags; 2886 if (skb_tail_pointer(skb) != skb_transport_header(skb)) 2887 cnt++; 2888 return flits + sgl_len(cnt); 2889 } 2890 2891 /** 2892 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion 2893 * @q: the queue to stop 2894 * 2895 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting 2896 * inability to map packets. A periodic timer attempts to restart 2897 * queues so marked. 2898 */ 2899 static void txq_stop_maperr(struct sge_uld_txq *q) 2900 { 2901 q->mapping_err++; 2902 q->q.stops++; 2903 set_bit(q->q.cntxt_id - q->adap->sge.egr_start, 2904 q->adap->sge.txq_maperr); 2905 } 2906 2907 /** 2908 * ofldtxq_stop - stop an offload Tx queue that has become full 2909 * @q: the queue to stop 2910 * @wr: the Work Request causing the queue to become full 2911 * 2912 * Stops an offload Tx queue that has become full and modifies the packet 2913 * being written to request a wakeup. 2914 */ 2915 static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr) 2916 { 2917 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); 2918 q->q.stops++; 2919 q->full = 1; 2920 } 2921 2922 /** 2923 * service_ofldq - service/restart a suspended offload queue 2924 * @q: the offload queue 2925 * 2926 * Services an offload Tx queue by moving packets from its Pending Send 2927 * Queue to the Hardware TX ring. The function starts and ends with the 2928 * Send Queue locked, but drops the lock while putting the skb at the 2929 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock 2930 * allows more skbs to be added to the Send Queue by other threads. 2931 * The packet being processed at the head of the Pending Send Queue is 2932 * left on the queue in case we experience DMA Mapping errors, etc. 2933 * and need to give up and restart later. 2934 * 2935 * service_ofldq() can be thought of as a task which opportunistically 2936 * uses other threads execution contexts. We use the Offload Queue 2937 * boolean "service_ofldq_running" to make sure that only one instance 2938 * is ever running at a time ... 2939 */ 2940 static void service_ofldq(struct sge_uld_txq *q) 2941 __must_hold(&q->sendq.lock) 2942 { 2943 u64 *pos, *before, *end; 2944 int credits; 2945 struct sk_buff *skb; 2946 struct sge_txq *txq; 2947 unsigned int left; 2948 unsigned int written = 0; 2949 unsigned int flits, ndesc; 2950 2951 /* If another thread is currently in service_ofldq() processing the 2952 * Pending Send Queue then there's nothing to do. 
Otherwise, flag 2953 * that we're doing the work and continue. Examining/modifying 2954 * the Offload Queue boolean "service_ofldq_running" must be done 2955 * while holding the Pending Send Queue Lock. 2956 */ 2957 if (q->service_ofldq_running) 2958 return; 2959 q->service_ofldq_running = true; 2960 2961 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { 2962 /* We drop the lock while we're working with the skb at the 2963 * head of the Pending Send Queue. This allows more skbs to 2964 * be added to the Pending Send Queue while we're working on 2965 * this one. We don't need to lock to guard the TX Ring 2966 * updates because only one thread of execution is ever 2967 * allowed into service_ofldq() at a time. 2968 */ 2969 spin_unlock(&q->sendq.lock); 2970 2971 cxgb4_reclaim_completed_tx(q->adap, &q->q, false); 2972 2973 flits = skb->priority; /* previously saved */ 2974 ndesc = flits_to_desc(flits); 2975 credits = txq_avail(&q->q) - ndesc; 2976 BUG_ON(credits < 0); 2977 if (unlikely(credits < TXQ_STOP_THRES)) 2978 ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data); 2979 2980 pos = (u64 *)&q->q.desc[q->q.pidx]; 2981 if (is_ofld_imm(skb)) 2982 cxgb4_inline_tx_skb(skb, &q->q, pos); 2983 else if (cxgb4_map_skb(q->adap->pdev_dev, skb, 2984 (dma_addr_t *)skb->head)) { 2985 txq_stop_maperr(q); 2986 spin_lock(&q->sendq.lock); 2987 break; 2988 } else { 2989 int last_desc, hdr_len = skb_transport_offset(skb); 2990 2991 /* The WR headers may not fit within one descriptor. 2992 * So we need to deal with wrap-around here. 2993 */ 2994 before = (u64 *)pos; 2995 end = (u64 *)pos + flits; 2996 txq = &q->q; 2997 pos = (void *)inline_tx_skb_header(skb, &q->q, 2998 (void *)pos, 2999 hdr_len); 3000 if (before > (u64 *)pos) { 3001 left = (u8 *)end - (u8 *)txq->stat; 3002 end = (void *)txq->desc + left; 3003 } 3004 3005 /* If current position is already at the end of the 3006 * ofld queue, reset the current to point to 3007 * start of the queue and update the end ptr as well. 3008 */ 3009 if (pos == (u64 *)txq->stat) { 3010 left = (u8 *)end - (u8 *)txq->stat; 3011 end = (void *)txq->desc + left; 3012 pos = (void *)txq->desc; 3013 } 3014 3015 cxgb4_write_sgl(skb, &q->q, (void *)pos, 3016 end, hdr_len, 3017 (dma_addr_t *)skb->head); 3018 #ifdef CONFIG_NEED_DMA_MAP_STATE 3019 skb->dev = q->adap->port[0]; 3020 skb->destructor = deferred_unmap_destructor; 3021 #endif 3022 last_desc = q->q.pidx + ndesc - 1; 3023 if (last_desc >= q->q.size) 3024 last_desc -= q->q.size; 3025 q->q.sdesc[last_desc].skb = skb; 3026 } 3027 3028 txq_advance(&q->q, ndesc); 3029 written += ndesc; 3030 if (unlikely(written > 32)) { 3031 cxgb4_ring_tx_db(q->adap, &q->q, written); 3032 written = 0; 3033 } 3034 3035 /* Reacquire the Pending Send Queue Lock so we can unlink the 3036 * skb we've just successfully transferred to the TX Ring and 3037 * loop for the next skb which may be at the head of the 3038 * Pending Send Queue. 3039 */ 3040 spin_lock(&q->sendq.lock); 3041 __skb_unlink(skb, &q->sendq); 3042 if (is_ofld_imm(skb)) 3043 kfree_skb(skb); 3044 } 3045 if (likely(written)) 3046 cxgb4_ring_tx_db(q->adap, &q->q, written); 3047 3048 /*Indicate that no thread is processing the Pending Send Queue 3049 * currently. 3050 */ 3051 q->service_ofldq_running = false; 3052 } 3053 3054 /** 3055 * ofld_xmit - send a packet through an offload queue 3056 * @q: the Tx offload queue 3057 * @skb: the packet 3058 * 3059 * Send an offload packet through an SGE offload queue. 
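* The packet is appended to the queue's Pending Send Queue; if it ends up being the only entry there, service_ofldq() is called right away to push it onto the hardware ring.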
3060 */ 3061 static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb) 3062 { 3063 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ 3064 spin_lock(&q->sendq.lock); 3065 3066 /* Queue the new skb onto the Offload Queue's Pending Send Queue. If 3067 * that results in this new skb being the only one on the queue, start 3068 * servicing it. If there are other skbs already on the list, then 3069 * either the queue is currently being processed or it's been stopped 3070 * for some reason and it'll be restarted at a later time. Restart 3071 * paths are triggered by events like experiencing a DMA Mapping Error 3072 * or filling the Hardware TX Ring. 3073 */ 3074 __skb_queue_tail(&q->sendq, skb); 3075 if (q->sendq.qlen == 1) 3076 service_ofldq(q); 3077 3078 spin_unlock(&q->sendq.lock); 3079 return NET_XMIT_SUCCESS; 3080 } 3081 3082 /** 3083 * restart_ofldq - restart a suspended offload queue 3084 * @t: pointer to the tasklet associated with this handler 3085 * 3086 * Resumes transmission on a suspended Tx offload queue. 3087 */ 3088 static void restart_ofldq(struct tasklet_struct *t) 3089 { 3090 struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk); 3091 3092 spin_lock(&q->sendq.lock); 3093 q->full = 0; /* the queue actually is completely empty now */ 3094 service_ofldq(q); 3095 spin_unlock(&q->sendq.lock); 3096 } 3097 3098 /** 3099 * skb_txq - return the Tx queue an offload packet should use 3100 * @skb: the packet 3101 * 3102 * Returns the Tx queue an offload packet should use as indicated by bits 3103 * 1-15 in the packet's queue_mapping. 3104 */ 3105 static inline unsigned int skb_txq(const struct sk_buff *skb) 3106 { 3107 return skb->queue_mapping >> 1; 3108 } 3109 3110 /** 3111 * is_ctrl_pkt - return whether an offload packet is a control packet 3112 * @skb: the packet 3113 * 3114 * Returns whether an offload packet should use an OFLD or a CTRL 3115 * Tx queue as indicated by bit 0 in the packet's queue_mapping. 3116 */ 3117 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) 3118 { 3119 return skb->queue_mapping & 1; 3120 } 3121 3122 static inline int uld_send(struct adapter *adap, struct sk_buff *skb, 3123 unsigned int tx_uld_type) 3124 { 3125 struct sge_uld_txq_info *txq_info; 3126 struct sge_uld_txq *txq; 3127 unsigned int idx = skb_txq(skb); 3128 3129 if (unlikely(is_ctrl_pkt(skb))) { 3130 /* Single ctrl queue is a requirement for LE workaround path */ 3131 if (adap->tids.nsftids) 3132 idx = 0; 3133 return ctrl_xmit(&adap->sge.ctrlq[idx], skb); 3134 } 3135 3136 txq_info = adap->sge.uld_txq_info[tx_uld_type]; 3137 if (unlikely(!txq_info)) { 3138 WARN_ON(true); 3139 kfree_skb(skb); 3140 return NET_XMIT_DROP; 3141 } 3142 3143 txq = &txq_info->uldtxq[idx]; 3144 return ofld_xmit(txq, skb); 3145 } 3146 3147 /** 3148 * t4_ofld_send - send an offload packet 3149 * @adap: the adapter 3150 * @skb: the packet 3151 * 3152 * Sends an offload packet. We use the packet queue_mapping to select the 3153 * appropriate Tx queue as follows: bit 0 indicates whether the packet 3154 * should be sent as regular or control, bits 1-15 select the queue. 3155 */ 3156 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) 3157 { 3158 int ret; 3159 3160 local_bh_disable(); 3161 ret = uld_send(adap, skb, CXGB4_TX_OFLD); 3162 local_bh_enable(); 3163 return ret; 3164 } 3165 3166 /** 3167 * cxgb4_ofld_send - send an offload packet 3168 * @dev: the net device 3169 * @skb: the packet 3170 * 3171 * Sends an offload packet. 
This is an exported version of @t4_ofld_send, 3172 * intended for ULDs. 3173 */ 3174 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) 3175 { 3176 return t4_ofld_send(netdev2adap(dev), skb); 3177 } 3178 EXPORT_SYMBOL(cxgb4_ofld_send); 3179 3180 static void *inline_tx_header(const void *src, 3181 const struct sge_txq *q, 3182 void *pos, int length) 3183 { 3184 int left = (void *)q->stat - pos; 3185 u64 *p; 3186 3187 if (likely(length <= left)) { 3188 memcpy(pos, src, length); 3189 pos += length; 3190 } else { 3191 memcpy(pos, src, left); 3192 memcpy(q->desc, src + left, length - left); 3193 pos = (void *)q->desc + (length - left); 3194 } 3195 /* 0-pad to multiple of 16 */ 3196 p = PTR_ALIGN(pos, 8); 3197 if ((uintptr_t)p & 8) { 3198 *p = 0; 3199 return p + 1; 3200 } 3201 return p; 3202 } 3203 3204 /** 3205 * ofld_xmit_direct - copy a WR into offload queue 3206 * @q: the Tx offload queue 3207 * @src: location of WR 3208 * @len: WR length 3209 * 3210 * Copy an immediate WR into an uncontended SGE offload queue. 3211 */ 3212 static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src, 3213 unsigned int len) 3214 { 3215 unsigned int ndesc; 3216 int credits; 3217 u64 *pos; 3218 3219 /* Use the lower limit as the cut-off */ 3220 if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) { 3221 WARN_ON(1); 3222 return NET_XMIT_DROP; 3223 } 3224 3225 /* Don't return NET_XMIT_CN here as the current 3226 * implementation doesn't queue the request 3227 * using an skb when the following conditions not met 3228 */ 3229 if (!spin_trylock(&q->sendq.lock)) 3230 return NET_XMIT_DROP; 3231 3232 if (q->full || !skb_queue_empty(&q->sendq) || 3233 q->service_ofldq_running) { 3234 spin_unlock(&q->sendq.lock); 3235 return NET_XMIT_DROP; 3236 } 3237 ndesc = flits_to_desc(DIV_ROUND_UP(len, 8)); 3238 credits = txq_avail(&q->q) - ndesc; 3239 pos = (u64 *)&q->q.desc[q->q.pidx]; 3240 3241 /* ofldtxq_stop modifies WR header in-situ */ 3242 inline_tx_header(src, &q->q, pos, len); 3243 if (unlikely(credits < TXQ_STOP_THRES)) 3244 ofldtxq_stop(q, (struct fw_wr_hdr *)pos); 3245 txq_advance(&q->q, ndesc); 3246 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); 3247 3248 spin_unlock(&q->sendq.lock); 3249 return NET_XMIT_SUCCESS; 3250 } 3251 3252 int cxgb4_immdata_send(struct net_device *dev, unsigned int idx, 3253 const void *src, unsigned int len) 3254 { 3255 struct sge_uld_txq_info *txq_info; 3256 struct sge_uld_txq *txq; 3257 struct adapter *adap; 3258 int ret; 3259 3260 adap = netdev2adap(dev); 3261 3262 local_bh_disable(); 3263 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; 3264 if (unlikely(!txq_info)) { 3265 WARN_ON(true); 3266 local_bh_enable(); 3267 return NET_XMIT_DROP; 3268 } 3269 txq = &txq_info->uldtxq[idx]; 3270 3271 ret = ofld_xmit_direct(txq, src, len); 3272 local_bh_enable(); 3273 return net_xmit_eval(ret); 3274 } 3275 EXPORT_SYMBOL(cxgb4_immdata_send); 3276 3277 /** 3278 * t4_crypto_send - send crypto packet 3279 * @adap: the adapter 3280 * @skb: the packet 3281 * 3282 * Sends crypto packet. We use the packet queue_mapping to select the 3283 * appropriate Tx queue as follows: bit 0 indicates whether the packet 3284 * should be sent as regular or control, bits 1-15 select the queue. 
3285 */ 3286 static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb) 3287 { 3288 int ret; 3289 3290 local_bh_disable(); 3291 ret = uld_send(adap, skb, CXGB4_TX_CRYPTO); 3292 local_bh_enable(); 3293 return ret; 3294 } 3295 3296 /** 3297 * cxgb4_crypto_send - send crypto packet 3298 * @dev: the net device 3299 * @skb: the packet 3300 * 3301 * Sends crypto packet. This is an exported version of @t4_crypto_send, 3302 * intended for ULDs. 3303 */ 3304 int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb) 3305 { 3306 return t4_crypto_send(netdev2adap(dev), skb); 3307 } 3308 EXPORT_SYMBOL(cxgb4_crypto_send); 3309 3310 static inline void copy_frags(struct sk_buff *skb, 3311 const struct pkt_gl *gl, unsigned int offset) 3312 { 3313 int i; 3314 3315 /* usually there's just one frag */ 3316 __skb_fill_page_desc(skb, 0, gl->frags[0].page, 3317 gl->frags[0].offset + offset, 3318 gl->frags[0].size - offset); 3319 skb_shinfo(skb)->nr_frags = gl->nfrags; 3320 for (i = 1; i < gl->nfrags; i++) 3321 __skb_fill_page_desc(skb, i, gl->frags[i].page, 3322 gl->frags[i].offset, 3323 gl->frags[i].size); 3324 3325 /* get a reference to the last page, we don't own it */ 3326 get_page(gl->frags[gl->nfrags - 1].page); 3327 } 3328 3329 /** 3330 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list 3331 * @gl: the gather list 3332 * @skb_len: size of sk_buff main body if it carries fragments 3333 * @pull_len: amount of data to move to the sk_buff's main body 3334 * 3335 * Builds an sk_buff from the given packet gather list. Returns the 3336 * sk_buff or %NULL if sk_buff allocation failed. 3337 */ 3338 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, 3339 unsigned int skb_len, unsigned int pull_len) 3340 { 3341 struct sk_buff *skb; 3342 3343 /* 3344 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer 3345 * size, which is expected since buffers are at least PAGE_SIZEd. 3346 * In this case packets up to RX_COPY_THRES have only one fragment. 3347 */ 3348 if (gl->tot_len <= RX_COPY_THRES) { 3349 skb = dev_alloc_skb(gl->tot_len); 3350 if (unlikely(!skb)) 3351 goto out; 3352 __skb_put(skb, gl->tot_len); 3353 skb_copy_to_linear_data(skb, gl->va, gl->tot_len); 3354 } else { 3355 skb = dev_alloc_skb(skb_len); 3356 if (unlikely(!skb)) 3357 goto out; 3358 __skb_put(skb, pull_len); 3359 skb_copy_to_linear_data(skb, gl->va, pull_len); 3360 3361 copy_frags(skb, gl, pull_len); 3362 skb->len = gl->tot_len; 3363 skb->data_len = skb->len - pull_len; 3364 skb->truesize += skb->data_len; 3365 } 3366 out: return skb; 3367 } 3368 EXPORT_SYMBOL(cxgb4_pktgl_to_skb); 3369 3370 /** 3371 * t4_pktgl_free - free a packet gather list 3372 * @gl: the gather list 3373 * 3374 * Releases the pages of a packet gather list. We do not own the last 3375 * page on the list and do not free it. 3376 */ 3377 static void t4_pktgl_free(const struct pkt_gl *gl) 3378 { 3379 int n; 3380 const struct page_frag *p; 3381 3382 for (p = gl->frags, n = gl->nfrags - 1; n--; p++) 3383 put_page(p->page); 3384 } 3385 3386 /* 3387 * Process an MPS trace packet. Give it an unused protocol number so it won't 3388 * be delivered to anyone and send it to the stack for capture. 
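* The leading CPL trace header (cpl_trace_pkt on T4, cpl_t5_trace_pkt on later chips) is stripped before the skb is handed to netif_receive_skb().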
3389 */ 3390 static noinline int handle_trace_pkt(struct adapter *adap, 3391 const struct pkt_gl *gl) 3392 { 3393 struct sk_buff *skb; 3394 3395 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); 3396 if (unlikely(!skb)) { 3397 t4_pktgl_free(gl); 3398 return 0; 3399 } 3400 3401 if (is_t4(adap->params.chip)) 3402 __skb_pull(skb, sizeof(struct cpl_trace_pkt)); 3403 else 3404 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); 3405 3406 skb_reset_mac_header(skb); 3407 skb->protocol = htons(0xffff); 3408 skb->dev = adap->port[0]; 3409 netif_receive_skb(skb); 3410 return 0; 3411 } 3412 3413 /** 3414 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp 3415 * @adap: the adapter 3416 * @hwtstamps: time stamp structure to update 3417 * @sgetstamp: 60bit iqe timestamp 3418 * 3419 * Every ingress queue entry has the 60-bit timestamp, convert that timestamp 3420 * which is in Core Clock ticks into ktime_t and assign it 3421 **/ 3422 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap, 3423 struct skb_shared_hwtstamps *hwtstamps, 3424 u64 sgetstamp) 3425 { 3426 u64 ns; 3427 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2); 3428 3429 ns = div_u64(tmp, adap->params.vpd.cclk); 3430 3431 memset(hwtstamps, 0, sizeof(*hwtstamps)); 3432 hwtstamps->hwtstamp = ns_to_ktime(ns); 3433 } 3434 3435 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, 3436 const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len) 3437 { 3438 struct adapter *adapter = rxq->rspq.adap; 3439 struct sge *s = &adapter->sge; 3440 struct port_info *pi; 3441 int ret; 3442 struct sk_buff *skb; 3443 3444 skb = napi_get_frags(&rxq->rspq.napi); 3445 if (unlikely(!skb)) { 3446 t4_pktgl_free(gl); 3447 rxq->stats.rx_drops++; 3448 return; 3449 } 3450 3451 copy_frags(skb, gl, s->pktshift); 3452 if (tnl_hdr_len) 3453 skb->csum_level = 1; 3454 skb->len = gl->tot_len - s->pktshift; 3455 skb->data_len = skb->len; 3456 skb->truesize += skb->data_len; 3457 skb->ip_summed = CHECKSUM_UNNECESSARY; 3458 skb_record_rx_queue(skb, rxq->rspq.idx); 3459 pi = netdev_priv(skb->dev); 3460 if (pi->rxtstamp) 3461 cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb), 3462 gl->sgetstamp); 3463 if (rxq->rspq.netdev->features & NETIF_F_RXHASH) 3464 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, 3465 PKT_HASH_TYPE_L3); 3466 3467 if (unlikely(pkt->vlan_ex)) { 3468 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); 3469 rxq->stats.vlan_ex++; 3470 } 3471 ret = napi_gro_frags(&rxq->rspq.napi); 3472 if (ret == GRO_HELD) 3473 rxq->stats.lro_pkts++; 3474 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) 3475 rxq->stats.lro_merged++; 3476 rxq->stats.pkts++; 3477 rxq->stats.rx_cso++; 3478 } 3479 3480 enum { 3481 RX_NON_PTP_PKT = 0, 3482 RX_PTP_PKT_SUC = 1, 3483 RX_PTP_PKT_ERR = 2 3484 }; 3485 3486 /** 3487 * t4_systim_to_hwstamp - read hardware time stamp 3488 * @adapter: the adapter 3489 * @skb: the packet 3490 * 3491 * Read Time Stamp from MPS packet and insert in skb which 3492 * is forwarded to PTP application 3493 */ 3494 static noinline int t4_systim_to_hwstamp(struct adapter *adapter, 3495 struct sk_buff *skb) 3496 { 3497 struct skb_shared_hwtstamps *hwtstamps; 3498 struct cpl_rx_mps_pkt *cpl = NULL; 3499 unsigned char *data; 3500 int offset; 3501 3502 cpl = (struct cpl_rx_mps_pkt *)skb->data; 3503 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) & 3504 X_CPL_RX_MPS_PKT_TYPE_PTP)) 3505 return RX_PTP_PKT_ERR; 3506 3507 data = skb->data + sizeof(*cpl); 3508 skb_pull(skb, 2 * sizeof(u64) + 
sizeof(struct cpl_rx_mps_pkt)); 3509 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN; 3510 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short)) 3511 return RX_PTP_PKT_ERR; 3512 3513 hwtstamps = skb_hwtstamps(skb); 3514 memset(hwtstamps, 0, sizeof(*hwtstamps)); 3515 hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data)); 3516 3517 return RX_PTP_PKT_SUC; 3518 } 3519 3520 /** 3521 * t4_rx_hststamp - Recv PTP Event Message 3522 * @adapter: the adapter 3523 * @rsp: the response queue descriptor holding the RX_PKT message 3524 * @rxq: the response queue holding the RX_PKT message 3525 * @skb: the packet 3526 * 3527 * PTP enabled and MPS packet, read HW timestamp 3528 */ 3529 static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp, 3530 struct sge_eth_rxq *rxq, struct sk_buff *skb) 3531 { 3532 int ret; 3533 3534 if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) && 3535 !is_t4(adapter->params.chip))) { 3536 ret = t4_systim_to_hwstamp(adapter, skb); 3537 if (ret == RX_PTP_PKT_ERR) { 3538 kfree_skb(skb); 3539 rxq->stats.rx_drops++; 3540 } 3541 return ret; 3542 } 3543 return RX_NON_PTP_PKT; 3544 } 3545 3546 /** 3547 * t4_tx_hststamp - Loopback PTP Transmit Event Message 3548 * @adapter: the adapter 3549 * @skb: the packet 3550 * @dev: the ingress net device 3551 * 3552 * Read hardware timestamp for the loopback PTP Tx event message 3553 */ 3554 static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb, 3555 struct net_device *dev) 3556 { 3557 struct port_info *pi = netdev_priv(dev); 3558 3559 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) { 3560 cxgb4_ptp_read_hwstamp(adapter, pi); 3561 kfree_skb(skb); 3562 return 0; 3563 } 3564 return 1; 3565 } 3566 3567 /** 3568 * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages 3569 * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue 3570 * @rsp: Response Entry pointer into Response Queue 3571 * @gl: Gather List pointer 3572 * 3573 * For adapters which support the SGE Doorbell Queue Timer facility, 3574 * we configure the Ethernet TX Queues to send CIDX Updates to the 3575 * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE 3576 * messages. This adds a small load to PCIe Link RX bandwidth and, 3577 * potentially, higher CPU Interrupt load, but allows us to respond 3578 * much more quickly to the CIDX Updates. This is important for 3579 * Upper Layer Software which isn't willing to have a large amount 3580 * of TX Data outstanding before receiving DMA Completions. 3581 */ 3582 static void t4_tx_completion_handler(struct sge_rspq *rspq, 3583 const __be64 *rsp, 3584 const struct pkt_gl *gl) 3585 { 3586 u8 opcode = ((const struct rss_header *)rsp)->opcode; 3587 struct port_info *pi = netdev_priv(rspq->netdev); 3588 struct adapter *adapter = rspq->adap; 3589 struct sge *s = &adapter->sge; 3590 struct sge_eth_txq *txq; 3591 3592 /* skip RSS header */ 3593 rsp++; 3594 3595 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. 3596 */ 3597 if (unlikely(opcode == CPL_FW4_MSG && 3598 ((const struct cpl_fw4_msg *)rsp)->type == 3599 FW_TYPE_RSSCPL)) { 3600 rsp++; 3601 opcode = ((const struct rss_header *)rsp)->opcode; 3602 rsp++; 3603 } 3604 3605 if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) { 3606 pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n", 3607 __func__, opcode); 3608 return; 3609 } 3610 3611 txq = &s->ethtxq[pi->first_qset + rspq->idx]; 3612 3613 /* We've got the Hardware Consumer Index Update in the Egress Update 3614 * message. 
These Egress Update messages will be the only CIDX Updates 3615 * we get since we don't want to chew up PCIe bandwidth for both Ingress 3616 * Messages and Status Page writes. However, the code which manages 3617 * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value 3618 * stored in the Status Page at the end of the TX Queue. It's easiest 3619 * to simply copy the CIDX Update value from the Egress Update message 3620 * to the Status Page. Also note that no Endian issues need to be 3621 * considered here since both are Big Endian and we're just copying 3622 * bytes consistently ... 3623 */ 3624 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) { 3625 struct cpl_sge_egr_update *egr; 3626 3627 egr = (struct cpl_sge_egr_update *)rsp; 3628 WRITE_ONCE(txq->q.stat->cidx, egr->cidx); 3629 } 3630 3631 t4_sge_eth_txq_egress_update(adapter, txq, -1); 3632 } 3633 3634 static int cxgb4_validate_lb_pkt(struct port_info *pi, const struct pkt_gl *si) 3635 { 3636 struct adapter *adap = pi->adapter; 3637 struct cxgb4_ethtool_lb_test *lb; 3638 struct sge *s = &adap->sge; 3639 struct net_device *netdev; 3640 u8 *data; 3641 int i; 3642 3643 netdev = adap->port[pi->port_id]; 3644 lb = &pi->ethtool_lb; 3645 data = si->va + s->pktshift; 3646 3647 i = ETH_ALEN; 3648 if (!ether_addr_equal(data + i, netdev->dev_addr)) 3649 return -1; 3650 3651 i += ETH_ALEN; 3652 if (strcmp(&data[i], CXGB4_SELFTEST_LB_STR)) 3653 lb->result = -EIO; 3654 3655 complete(&lb->completion); 3656 return 0; 3657 } 3658 3659 /** 3660 * t4_ethrx_handler - process an ingress ethernet packet 3661 * @q: the response queue that received the packet 3662 * @rsp: the response queue descriptor holding the RX_PKT message 3663 * @si: the gather list of packet fragments 3664 * 3665 * Process an ingress ethernet packet and deliver it to the stack. 3666 */ 3667 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, 3668 const struct pkt_gl *si) 3669 { 3670 bool csum_ok; 3671 struct sk_buff *skb; 3672 const struct cpl_rx_pkt *pkt; 3673 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 3674 struct adapter *adapter = q->adap; 3675 struct sge *s = &q->adap->sge; 3676 int cpl_trace_pkt = is_t4(q->adap->params.chip) ? 3677 CPL_TRACE_PKT : CPL_TRACE_PKT_T5; 3678 u16 err_vec, tnl_hdr_len = 0; 3679 struct port_info *pi; 3680 int ret = 0; 3681 3682 pi = netdev_priv(q->netdev); 3683 /* If we're looking at TX Queue CIDX Update, handle that separately 3684 * and return.
3685 */ 3686 if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) || 3687 (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) { 3688 t4_tx_completion_handler(q, rsp, si); 3689 return 0; 3690 } 3691 3692 if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) 3693 return handle_trace_pkt(q->adap, si); 3694 3695 pkt = (const struct cpl_rx_pkt *)rsp; 3696 /* Compressed error vector is enabled for T6 only */ 3697 if (q->adap->params.tp.rx_pkt_encap) { 3698 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); 3699 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); 3700 } else { 3701 err_vec = be16_to_cpu(pkt->err_vec); 3702 } 3703 3704 csum_ok = pkt->csum_calc && !err_vec && 3705 (q->netdev->features & NETIF_F_RXCSUM); 3706 3707 if (err_vec) 3708 rxq->stats.bad_rx_pkts++; 3709 3710 if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) { 3711 ret = cxgb4_validate_lb_pkt(pi, si); 3712 if (!ret) 3713 return 0; 3714 } 3715 3716 if (((pkt->l2info & htonl(RXF_TCP_F)) || 3717 tnl_hdr_len) && 3718 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { 3719 do_gro(rxq, si, pkt, tnl_hdr_len); 3720 return 0; 3721 } 3722 3723 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); 3724 if (unlikely(!skb)) { 3725 t4_pktgl_free(si); 3726 rxq->stats.rx_drops++; 3727 return 0; 3728 } 3729 3730 /* Handle PTP Event Rx packet */ 3731 if (unlikely(pi->ptp_enable)) { 3732 ret = t4_rx_hststamp(adapter, rsp, rxq, skb); 3733 if (ret == RX_PTP_PKT_ERR) 3734 return 0; 3735 } 3736 if (likely(!ret)) 3737 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */ 3738 3739 /* Handle the PTP Event Tx Loopback packet */ 3740 if (unlikely(pi->ptp_enable && !ret && 3741 (pkt->l2info & htonl(RXF_UDP_F)) && 3742 cxgb4_ptp_is_ptp_rx(skb))) { 3743 if (!t4_tx_hststamp(adapter, skb, q->netdev)) 3744 return 0; 3745 } 3746 3747 skb->protocol = eth_type_trans(skb, q->netdev); 3748 skb_record_rx_queue(skb, q->idx); 3749 if (skb->dev->features & NETIF_F_RXHASH) 3750 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, 3751 PKT_HASH_TYPE_L3); 3752 3753 rxq->stats.pkts++; 3754 3755 if (pi->rxtstamp) 3756 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb), 3757 si->sgetstamp); 3758 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { 3759 if (!pkt->ip_frag) { 3760 skb->ip_summed = CHECKSUM_UNNECESSARY; 3761 rxq->stats.rx_cso++; 3762 } else if (pkt->l2info & htonl(RXF_IP_F)) { 3763 __sum16 c = (__force __sum16)pkt->csum; 3764 skb->csum = csum_unfold(c); 3765 3766 if (tnl_hdr_len) { 3767 skb->ip_summed = CHECKSUM_UNNECESSARY; 3768 skb->csum_level = 1; 3769 } else { 3770 skb->ip_summed = CHECKSUM_COMPLETE; 3771 } 3772 rxq->stats.rx_cso++; 3773 } 3774 } else { 3775 skb_checksum_none_assert(skb); 3776 #ifdef CONFIG_CHELSIO_T4_FCOE 3777 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \ 3778 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F) 3779 3780 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { 3781 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && 3782 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { 3783 if (q->adap->params.tp.rx_pkt_encap) 3784 csum_ok = err_vec & 3785 T6_COMPR_RXERR_SUM_F; 3786 else 3787 csum_ok = err_vec & RXERR_CSUM_F; 3788 if (!csum_ok) 3789 skb->ip_summed = CHECKSUM_UNNECESSARY; 3790 } 3791 } 3792 3793 #undef CPL_RX_PKT_FLAGS 3794 #endif /* CONFIG_CHELSIO_T4_FCOE */ 3795 } 3796 3797 if (unlikely(pkt->vlan_ex)) { 3798 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); 3799 rxq->stats.vlan_ex++; 3800 } 3801 skb_mark_napi_id(skb, &q->napi); 3802 netif_receive_skb(skb); 3803 return 0; 3804 } 3805 
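/* Added illustration -- not part of the original driver.  The checksum
 * handling at the end of t4_ethrx_handler() above reduces to the small
 * decision tree below.  The helper and its enum are hypothetical names
 * used only to restate that logic in isolation (the FCoE special case
 * is omitted).
 */
enum demo_rx_csum_result {
	DEMO_CSUM_NONE,		/* no offload result reported to the stack */
	DEMO_CSUM_UNNECESSARY,	/* hardware verified the checksum */
	DEMO_CSUM_COMPLETE,	/* hardware supplied a raw checksum value */
};

static inline enum demo_rx_csum_result
demo_rx_csum_policy(bool csum_ok, bool tcp_or_udp, bool ip_frag,
		    bool ipv4, bool tunnelled)
{
	if (!csum_ok || !tcp_or_udp)
		return DEMO_CSUM_NONE;
	if (!ip_frag)
		return DEMO_CSUM_UNNECESSARY;	/* fully verified by hardware */
	if (!ipv4)
		return DEMO_CSUM_NONE;
	/* IPv4 fragment: pass the raw sum up to the stack, or mark the
	 * inner packet verified when a tunnel header was recognised.
	 */
	return tunnelled ? DEMO_CSUM_UNNECESSARY : DEMO_CSUM_COMPLETE;
}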
3806 /** 3807 * restore_rx_bufs - put back a packet's Rx buffers 3808 * @si: the packet gather list 3809 * @q: the SGE free list 3810 * @frags: number of FL buffers to restore 3811 * 3812 * Puts back on an FL the Rx buffers associated with @si. The buffers 3813 * have already been unmapped and are left unmapped, we mark them so to 3814 * prevent further unmapping attempts. 3815 * 3816 * This function undoes a series of @unmap_rx_buf calls when we find out 3817 * that the current packet can't be processed right away afterall and we 3818 * need to come back to it later. This is a very rare event and there's 3819 * no effort to make this particularly efficient. 3820 */ 3821 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, 3822 int frags) 3823 { 3824 struct rx_sw_desc *d; 3825 3826 while (frags--) { 3827 if (q->cidx == 0) 3828 q->cidx = q->size - 1; 3829 else 3830 q->cidx--; 3831 d = &q->sdesc[q->cidx]; 3832 d->page = si->frags[frags].page; 3833 d->dma_addr |= RX_UNMAPPED_BUF; 3834 q->avail++; 3835 } 3836 } 3837 3838 /** 3839 * is_new_response - check if a response is newly written 3840 * @r: the response descriptor 3841 * @q: the response queue 3842 * 3843 * Returns true if a response descriptor contains a yet unprocessed 3844 * response. 3845 */ 3846 static inline bool is_new_response(const struct rsp_ctrl *r, 3847 const struct sge_rspq *q) 3848 { 3849 return (r->type_gen >> RSPD_GEN_S) == q->gen; 3850 } 3851 3852 /** 3853 * rspq_next - advance to the next entry in a response queue 3854 * @q: the queue 3855 * 3856 * Updates the state of a response queue to advance it to the next entry. 3857 */ 3858 static inline void rspq_next(struct sge_rspq *q) 3859 { 3860 q->cur_desc = (void *)q->cur_desc + q->iqe_len; 3861 if (unlikely(++q->cidx == q->size)) { 3862 q->cidx = 0; 3863 q->gen ^= 1; 3864 q->cur_desc = q->desc; 3865 } 3866 } 3867 3868 /** 3869 * process_responses - process responses from an SGE response queue 3870 * @q: the ingress queue to process 3871 * @budget: how many responses can be processed in this round 3872 * 3873 * Process responses from an SGE response queue up to the supplied budget. 3874 * Responses include received packets as well as control messages from FW 3875 * or HW. 3876 * 3877 * Additionally choose the interrupt holdoff time for the next interrupt 3878 * on this queue. If the system is under memory shortage use a fairly 3879 * long delay to help recovery. 
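 *
 * [Added note -- not in the original source]  The "is this entry new"
 * test in is_new_response() above relies on a generation bit: the
 * hardware stamps every response it writes with the current
 * generation, while the driver keeps its own copy in q->gen and flips
 * it in rspq_next() each time the ring wraps.  A freshly initialised
 * ring starts with q->gen = 1 and untouched descriptors do not carry
 * that generation, so nothing looks new until the hardware writes it;
 * after the first wrap both sides use generation 0, so the stale
 * generation-1 entries left over from the previous pass are again seen
 * as "not new".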
3880 */ 3881 static int process_responses(struct sge_rspq *q, int budget) 3882 { 3883 int ret, rsp_type; 3884 int budget_left = budget; 3885 const struct rsp_ctrl *rc; 3886 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 3887 struct adapter *adapter = q->adap; 3888 struct sge *s = &adapter->sge; 3889 3890 while (likely(budget_left)) { 3891 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 3892 if (!is_new_response(rc, q)) { 3893 if (q->flush_handler) 3894 q->flush_handler(q); 3895 break; 3896 } 3897 3898 dma_rmb(); 3899 rsp_type = RSPD_TYPE_G(rc->type_gen); 3900 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) { 3901 struct page_frag *fp; 3902 struct pkt_gl si; 3903 const struct rx_sw_desc *rsd; 3904 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; 3905 3906 if (len & RSPD_NEWBUF_F) { 3907 if (likely(q->offset > 0)) { 3908 free_rx_bufs(q->adap, &rxq->fl, 1); 3909 q->offset = 0; 3910 } 3911 len = RSPD_LEN_G(len); 3912 } 3913 si.tot_len = len; 3914 3915 /* gather packet fragments */ 3916 for (frags = 0, fp = si.frags; ; frags++, fp++) { 3917 rsd = &rxq->fl.sdesc[rxq->fl.cidx]; 3918 bufsz = get_buf_size(adapter, rsd); 3919 fp->page = rsd->page; 3920 fp->offset = q->offset; 3921 fp->size = min(bufsz, len); 3922 len -= fp->size; 3923 if (!len) 3924 break; 3925 unmap_rx_buf(q->adap, &rxq->fl); 3926 } 3927 3928 si.sgetstamp = SGE_TIMESTAMP_G( 3929 be64_to_cpu(rc->last_flit)); 3930 /* 3931 * Last buffer remains mapped so explicitly make it 3932 * coherent for CPU access. 3933 */ 3934 dma_sync_single_for_cpu(q->adap->pdev_dev, 3935 get_buf_addr(rsd), 3936 fp->size, DMA_FROM_DEVICE); 3937 3938 si.va = page_address(si.frags[0].page) + 3939 si.frags[0].offset; 3940 prefetch(si.va); 3941 3942 si.nfrags = frags + 1; 3943 ret = q->handler(q, q->cur_desc, &si); 3944 if (likely(ret == 0)) 3945 q->offset += ALIGN(fp->size, s->fl_align); 3946 else 3947 restore_rx_bufs(&si, &rxq->fl, frags); 3948 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) { 3949 ret = q->handler(q, q->cur_desc, NULL); 3950 } else { 3951 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); 3952 } 3953 3954 if (unlikely(ret)) { 3955 /* couldn't process descriptor, back off for recovery */ 3956 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX); 3957 break; 3958 } 3959 3960 rspq_next(q); 3961 budget_left--; 3962 } 3963 3964 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16) 3965 __refill_fl(q->adap, &rxq->fl); 3966 return budget - budget_left; 3967 } 3968 3969 /** 3970 * napi_rx_handler - the NAPI handler for Rx processing 3971 * @napi: the napi instance 3972 * @budget: how many packets we can process in this round 3973 * 3974 * Handler for new data events when using NAPI. This does not need any 3975 * locking or protection from interrupts as data interrupts are off at 3976 * this point and other adapter interrupts do not interfere (the latter 3977 * in not a concern at all with MSI-X as non-data interrupts then have 3978 * a separate handler). 
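 *
 * [Added note -- not in the original source]  One detail of
 * process_responses() above that is easy to miss: several packets can
 * share a single Free List buffer.  q->offset tracks how far into the
 * current buffer the hardware has written; after a packet is handed to
 * the handler the offset advances by ALIGN(fp->size, fl_align), e.g. a
 * 1514-byte frame with 64-byte alignment advances the offset by 1536
 * bytes.  The buffer itself is only released once a response arrives
 * with the RSPD_NEWBUF_F flag set, indicating the hardware has moved
 * on to a fresh buffer.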
3979 */ 3980 static int napi_rx_handler(struct napi_struct *napi, int budget) 3981 { 3982 unsigned int params; 3983 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); 3984 int work_done; 3985 u32 val; 3986 3987 work_done = process_responses(q, budget); 3988 if (likely(work_done < budget)) { 3989 int timer_index; 3990 3991 napi_complete_done(napi, work_done); 3992 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); 3993 3994 if (q->adaptive_rx) { 3995 if (work_done > max(timer_pkt_quota[timer_index], 3996 MIN_NAPI_WORK)) 3997 timer_index = (timer_index + 1); 3998 else 3999 timer_index = timer_index - 1; 4000 4001 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); 4002 q->next_intr_params = 4003 QINTR_TIMER_IDX_V(timer_index) | 4004 QINTR_CNT_EN_V(0); 4005 params = q->next_intr_params; 4006 } else { 4007 params = q->next_intr_params; 4008 q->next_intr_params = q->intr_params; 4009 } 4010 } else 4011 params = QINTR_TIMER_IDX_V(7); 4012 4013 val = CIDXINC_V(work_done) | SEINTARM_V(params); 4014 4015 /* If we don't have access to the new User GTS (T5+), use the old 4016 * doorbell mechanism; otherwise use the new BAR2 mechanism. 4017 */ 4018 if (unlikely(q->bar2_addr == NULL)) { 4019 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), 4020 val | INGRESSQID_V((u32)q->cntxt_id)); 4021 } else { 4022 writel(val | INGRESSQID_V(q->bar2_qid), 4023 q->bar2_addr + SGE_UDB_GTS); 4024 wmb(); 4025 } 4026 return work_done; 4027 } 4028 4029 void cxgb4_ethofld_restart(struct tasklet_struct *t) 4030 { 4031 struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t, 4032 qresume_tsk); 4033 int pktcount; 4034 4035 spin_lock(&eosw_txq->lock); 4036 pktcount = eosw_txq->cidx - eosw_txq->last_cidx; 4037 if (pktcount < 0) 4038 pktcount += eosw_txq->ndesc; 4039 4040 if (pktcount) { 4041 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev), 4042 eosw_txq, pktcount); 4043 eosw_txq->inuse -= pktcount; 4044 } 4045 4046 /* There may be some packets waiting for completions. So, 4047 * attempt to send these packets now. 4048 */ 4049 ethofld_xmit(eosw_txq->netdev, eosw_txq); 4050 spin_unlock(&eosw_txq->lock); 4051 } 4052 4053 /* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions 4054 * @q: the response queue that received the packet 4055 * @rsp: the response queue descriptor holding the CPL message 4056 * @si: the gather list of packet fragments 4057 * 4058 * Process a ETHOFLD Tx completion. Increment the cidx here, but 4059 * free up the descriptors in a tasklet later. 
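 *
 * [Added worked example -- not in the original source]  The
 * pending-packet count computed in cxgb4_ethofld_restart() above is a
 * standard ring subtraction: pktcount = cidx - last_cidx, corrected by
 * ndesc when the consumer index has wrapped.  For example, with
 * ndesc = 128, last_cidx = 120 and cidx = 8, the raw difference is
 * -112, so 128 is added back to give 16 completed descriptors.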
4060 */ 4061 int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp, 4062 const struct pkt_gl *si) 4063 { 4064 u8 opcode = ((const struct rss_header *)rsp)->opcode; 4065 4066 /* skip RSS header */ 4067 rsp++; 4068 4069 if (opcode == CPL_FW4_ACK) { 4070 const struct cpl_fw4_ack *cpl; 4071 struct sge_eosw_txq *eosw_txq; 4072 struct eotid_entry *entry; 4073 struct sk_buff *skb; 4074 u32 hdr_len, eotid; 4075 u8 flits, wrlen16; 4076 int credits; 4077 4078 cpl = (const struct cpl_fw4_ack *)rsp; 4079 eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) - 4080 q->adap->tids.eotid_base; 4081 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid); 4082 if (!entry) 4083 goto out_done; 4084 4085 eosw_txq = (struct sge_eosw_txq *)entry->data; 4086 if (!eosw_txq) 4087 goto out_done; 4088 4089 spin_lock(&eosw_txq->lock); 4090 credits = cpl->credits; 4091 while (credits > 0) { 4092 skb = eosw_txq->desc[eosw_txq->cidx].skb; 4093 if (!skb) 4094 break; 4095 4096 if (unlikely((eosw_txq->state == 4097 CXGB4_EO_STATE_FLOWC_OPEN_REPLY || 4098 eosw_txq->state == 4099 CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) && 4100 eosw_txq->cidx == eosw_txq->flowc_idx)) { 4101 flits = DIV_ROUND_UP(skb->len, 8); 4102 if (eosw_txq->state == 4103 CXGB4_EO_STATE_FLOWC_OPEN_REPLY) 4104 eosw_txq->state = CXGB4_EO_STATE_ACTIVE; 4105 else 4106 eosw_txq->state = CXGB4_EO_STATE_CLOSED; 4107 complete(&eosw_txq->completion); 4108 } else { 4109 hdr_len = eth_get_headlen(eosw_txq->netdev, 4110 skb->data, 4111 skb_headlen(skb)); 4112 flits = ethofld_calc_tx_flits(q->adap, skb, 4113 hdr_len); 4114 } 4115 eosw_txq_advance_index(&eosw_txq->cidx, 1, 4116 eosw_txq->ndesc); 4117 wrlen16 = DIV_ROUND_UP(flits * 8, 16); 4118 credits -= wrlen16; 4119 } 4120 4121 eosw_txq->cred += cpl->credits; 4122 eosw_txq->ncompl--; 4123 4124 spin_unlock(&eosw_txq->lock); 4125 4126 /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx, 4127 * if there were packets waiting for completion. 4128 */ 4129 tasklet_schedule(&eosw_txq->qresume_tsk); 4130 } 4131 4132 out_done: 4133 return 0; 4134 } 4135 4136 /* 4137 * The MSI-X interrupt handler for an SGE response queue. 4138 */ 4139 irqreturn_t t4_sge_intr_msix(int irq, void *cookie) 4140 { 4141 struct sge_rspq *q = cookie; 4142 4143 napi_schedule(&q->napi); 4144 return IRQ_HANDLED; 4145 } 4146 4147 /* 4148 * Process the indirect interrupt entries in the interrupt queue and kick off 4149 * NAPI for each queue that has generated an entry. 4150 */ 4151 static unsigned int process_intrq(struct adapter *adap) 4152 { 4153 unsigned int credits; 4154 const struct rsp_ctrl *rc; 4155 struct sge_rspq *q = &adap->sge.intrq; 4156 u32 val; 4157 4158 spin_lock(&adap->sge.intrq_lock); 4159 for (credits = 0; ; credits++) { 4160 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 4161 if (!is_new_response(rc, q)) 4162 break; 4163 4164 dma_rmb(); 4165 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) { 4166 unsigned int qid = ntohl(rc->pldbuflen_qid); 4167 4168 qid -= adap->sge.ingr_start; 4169 napi_schedule(&adap->sge.ingr_map[qid]->napi); 4170 } 4171 4172 rspq_next(q); 4173 } 4174 4175 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); 4176 4177 /* If we don't have access to the new User GTS (T5+), use the old 4178 * doorbell mechanism; otherwise use the new BAR2 mechanism. 
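 *
 * [Added worked example -- not in the original source]  In
 * cxgb4_ethofld_rx_handler() above the FW4_ACK credits are consumed in
 * units of 16 bytes of Work Request: each descriptor's length is first
 * converted to flits (8-byte words) and then to wrlen16.  For the
 * FLOWC-reply case a 100-byte skb gives
 * flits = DIV_ROUND_UP(100, 8) = 13 and
 * wrlen16 = DIV_ROUND_UP(13 * 8, 16) = 7, so seven credits are
 * subtracted for that descriptor.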
4179 */ 4180 if (unlikely(q->bar2_addr == NULL)) { 4181 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), 4182 val | INGRESSQID_V(q->cntxt_id)); 4183 } else { 4184 writel(val | INGRESSQID_V(q->bar2_qid), 4185 q->bar2_addr + SGE_UDB_GTS); 4186 wmb(); 4187 } 4188 spin_unlock(&adap->sge.intrq_lock); 4189 return credits; 4190 } 4191 4192 /* 4193 * The MSI interrupt handler, which handles data events from SGE response queues 4194 * as well as error and other async events as they all use the same MSI vector. 4195 */ 4196 static irqreturn_t t4_intr_msi(int irq, void *cookie) 4197 { 4198 struct adapter *adap = cookie; 4199 4200 if (adap->flags & CXGB4_MASTER_PF) 4201 t4_slow_intr_handler(adap); 4202 process_intrq(adap); 4203 return IRQ_HANDLED; 4204 } 4205 4206 /* 4207 * Interrupt handler for legacy INTx interrupts. 4208 * Handles data events from SGE response queues as well as error and other 4209 * async events as they all use the same interrupt line. 4210 */ 4211 static irqreturn_t t4_intr_intx(int irq, void *cookie) 4212 { 4213 struct adapter *adap = cookie; 4214 4215 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); 4216 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) | 4217 process_intrq(adap)) 4218 return IRQ_HANDLED; 4219 return IRQ_NONE; /* probably shared interrupt */ 4220 } 4221 4222 /** 4223 * t4_intr_handler - select the top-level interrupt handler 4224 * @adap: the adapter 4225 * 4226 * Selects the top-level interrupt handler based on the type of interrupts 4227 * (MSI-X, MSI, or INTx). 4228 */ 4229 irq_handler_t t4_intr_handler(struct adapter *adap) 4230 { 4231 if (adap->flags & CXGB4_USING_MSIX) 4232 return t4_sge_intr_msix; 4233 if (adap->flags & CXGB4_USING_MSI) 4234 return t4_intr_msi; 4235 return t4_intr_intx; 4236 } 4237 4238 static void sge_rx_timer_cb(struct timer_list *t) 4239 { 4240 unsigned long m; 4241 unsigned int i; 4242 struct adapter *adap = from_timer(adap, t, sge.rx_timer); 4243 struct sge *s = &adap->sge; 4244 4245 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) 4246 for (m = s->starving_fl[i]; m; m &= m - 1) { 4247 struct sge_eth_rxq *rxq; 4248 unsigned int id = __ffs(m) + i * BITS_PER_LONG; 4249 struct sge_fl *fl = s->egr_map[id]; 4250 4251 clear_bit(id, s->starving_fl); 4252 smp_mb__after_atomic(); 4253 4254 if (fl_starving(adap, fl)) { 4255 rxq = container_of(fl, struct sge_eth_rxq, fl); 4256 if (napi_reschedule(&rxq->rspq.napi)) 4257 fl->starving++; 4258 else 4259 set_bit(id, s->starving_fl); 4260 } 4261 } 4262 /* The remainder of the SGE RX Timer Callback routine is dedicated to 4263 * global Master PF activities like checking for chip ingress stalls, 4264 * etc. 
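 *
 * [Added note -- not in the original source]  The starving free-list
 * scan above walks only the set bits of each bitmap word using the
 * classic "m &= m - 1" trick, which clears the lowest set bit on every
 * iteration while __ffs(m) yields its position.  For example m = 0x14
 * visits bits 2 and 4 and then terminates, so the cost is proportional
 * to the number of starving lists rather than to the total number of
 * egress queues.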
4265 */ 4266 if (!(adap->flags & CXGB4_MASTER_PF)) 4267 goto done; 4268 4269 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); 4270 4271 done: 4272 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); 4273 } 4274 4275 static void sge_tx_timer_cb(struct timer_list *t) 4276 { 4277 struct adapter *adap = from_timer(adap, t, sge.tx_timer); 4278 struct sge *s = &adap->sge; 4279 unsigned long m, period; 4280 unsigned int i, budget; 4281 4282 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) 4283 for (m = s->txq_maperr[i]; m; m &= m - 1) { 4284 unsigned long id = __ffs(m) + i * BITS_PER_LONG; 4285 struct sge_uld_txq *txq = s->egr_map[id]; 4286 4287 clear_bit(id, s->txq_maperr); 4288 tasklet_schedule(&txq->qresume_tsk); 4289 } 4290 4291 if (!is_t4(adap->params.chip)) { 4292 struct sge_eth_txq *q = &s->ptptxq; 4293 int avail; 4294 4295 spin_lock(&adap->ptp_lock); 4296 avail = reclaimable(&q->q); 4297 4298 if (avail) { 4299 free_tx_desc(adap, &q->q, avail, false); 4300 q->q.in_use -= avail; 4301 } 4302 spin_unlock(&adap->ptp_lock); 4303 } 4304 4305 budget = MAX_TIMER_TX_RECLAIM; 4306 i = s->ethtxq_rover; 4307 do { 4308 budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i], 4309 budget); 4310 if (!budget) 4311 break; 4312 4313 if (++i >= s->ethqsets) 4314 i = 0; 4315 } while (i != s->ethtxq_rover); 4316 s->ethtxq_rover = i; 4317 4318 if (budget == 0) { 4319 /* If we found too many reclaimable packets schedule a timer 4320 * in the near future to continue where we left off. 4321 */ 4322 period = 2; 4323 } else { 4324 /* We reclaimed all reclaimable TX Descriptors, so reschedule 4325 * at the normal period. 4326 */ 4327 period = TX_QCHECK_PERIOD; 4328 } 4329 4330 mod_timer(&s->tx_timer, jiffies + period); 4331 } 4332 4333 /** 4334 * bar2_address - return the BAR2 address for an SGE Queue's Registers 4335 * @adapter: the adapter 4336 * @qid: the SGE Queue ID 4337 * @qtype: the SGE Queue Type (Egress or Ingress) 4338 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues 4339 * 4340 * Returns the BAR2 address for the SGE Queue Registers associated with 4341 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also 4342 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE 4343 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" 4344 * Registers are supported (e.g. the Write Combining Doorbell Buffer). 4345 */ 4346 static void __iomem *bar2_address(struct adapter *adapter, 4347 unsigned int qid, 4348 enum t4_bar2_qtype qtype, 4349 unsigned int *pbar2_qid) 4350 { 4351 u64 bar2_qoffset; 4352 int ret; 4353 4354 ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0, 4355 &bar2_qoffset, pbar2_qid); 4356 if (ret) 4357 return NULL; 4358 4359 return adapter->bar2 + bar2_qoffset; 4360 } 4361 4362 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0 4363 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map 4364 */ 4365 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, 4366 struct net_device *dev, int intr_idx, 4367 struct sge_fl *fl, rspq_handler_t hnd, 4368 rspq_flush_handler_t flush_hnd, int cong) 4369 { 4370 int ret, flsz = 0; 4371 struct fw_iq_cmd c; 4372 struct sge *s = &adap->sge; 4373 struct port_info *pi = netdev_priv(dev); 4374 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING); 4375 4376 /* Size needs to be multiple of 16, including status entry. 
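 *
 * [Added note -- not in the original source]  roundup(iq->size, 16)
 * below only ever grows the ring, e.g. a requested 1000-entry queue
 * becomes 1008 entries; the status entry is carved back out later in
 * the setup path ("iq->size--").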
*/ 4377 iq->size = roundup(iq->size, 16); 4378 4379 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, 4380 &iq->phys_addr, NULL, 0, 4381 dev_to_node(adap->pdev_dev)); 4382 if (!iq->desc) 4383 return -ENOMEM; 4384 4385 memset(&c, 0, sizeof(c)); 4386 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | 4387 FW_CMD_WRITE_F | FW_CMD_EXEC_F | 4388 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); 4389 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | 4390 FW_LEN16(c)); 4391 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | 4392 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | 4393 FW_IQ_CMD_IQANDST_V(intr_idx < 0) | 4394 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) | 4395 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx : 4396 -intr_idx - 1)); 4397 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | 4398 FW_IQ_CMD_IQGTSMODE_F | 4399 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | 4400 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); 4401 c.iqsize = htons(iq->size); 4402 c.iqaddr = cpu_to_be64(iq->phys_addr); 4403 if (cong >= 0) 4404 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F | 4405 FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC 4406 : FW_IQ_IQTYPE_OFLD)); 4407 4408 if (fl) { 4409 unsigned int chip_ver = 4410 CHELSIO_CHIP_VERSION(adap->params.chip); 4411 4412 /* Allocate the ring for the hardware free list (with space 4413 * for its status page) along with the associated software 4414 * descriptor ring. The free list size needs to be a multiple 4415 * of the Egress Queue Unit and at least 2 Egress Units larger 4416 * than the SGE's Egress Congrestion Threshold 4417 * (fl_starve_thres - 1). 4418 */ 4419 if (fl->size < s->fl_starve_thres - 1 + 2 * 8) 4420 fl->size = s->fl_starve_thres - 1 + 2 * 8; 4421 fl->size = roundup(fl->size, 8); 4422 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), 4423 sizeof(struct rx_sw_desc), &fl->addr, 4424 &fl->sdesc, s->stat_len, 4425 dev_to_node(adap->pdev_dev)); 4426 if (!fl->desc) 4427 goto fl_nomem; 4428 4429 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); 4430 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | 4431 FW_IQ_CMD_FL0FETCHRO_V(relaxed) | 4432 FW_IQ_CMD_FL0DATARO_V(relaxed) | 4433 FW_IQ_CMD_FL0PADEN_F); 4434 if (cong >= 0) 4435 c.iqns_to_fl0congen |= 4436 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) | 4437 FW_IQ_CMD_FL0CONGCIF_F | 4438 FW_IQ_CMD_FL0CONGEN_F); 4439 /* In T6, for egress queue type FL there is internal overhead 4440 * of 16B for header going into FLM module. Hence the maximum 4441 * allowed burst size is 448 bytes. For T4/T5, the hardware 4442 * doesn't coalesce fetch requests if more than 64 bytes of 4443 * Free List pointers are provided, so we use a 128-byte Fetch 4444 * Burst Minimum there (T6 implements coalescing so we can use 4445 * the smaller 64-byte value there). 4446 */ 4447 c.fl0dcaen_to_fl0cidxfthresh = 4448 htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ? 4449 FETCHBURSTMIN_128B_X : 4450 FETCHBURSTMIN_64B_T6_X) | 4451 FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ? 
4452 FETCHBURSTMAX_512B_X : 4453 FETCHBURSTMAX_256B_X)); 4454 c.fl0size = htons(flsz); 4455 c.fl0addr = cpu_to_be64(fl->addr); 4456 } 4457 4458 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 4459 if (ret) 4460 goto err; 4461 4462 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); 4463 iq->cur_desc = iq->desc; 4464 iq->cidx = 0; 4465 iq->gen = 1; 4466 iq->next_intr_params = iq->intr_params; 4467 iq->cntxt_id = ntohs(c.iqid); 4468 iq->abs_id = ntohs(c.physiqid); 4469 iq->bar2_addr = bar2_address(adap, 4470 iq->cntxt_id, 4471 T4_BAR2_QTYPE_INGRESS, 4472 &iq->bar2_qid); 4473 iq->size--; /* subtract status entry */ 4474 iq->netdev = dev; 4475 iq->handler = hnd; 4476 iq->flush_handler = flush_hnd; 4477 4478 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr)); 4479 skb_queue_head_init(&iq->lro_mgr.lroq); 4480 4481 /* set offset to -1 to distinguish ingress queues without FL */ 4482 iq->offset = fl ? 0 : -1; 4483 4484 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; 4485 4486 if (fl) { 4487 fl->cntxt_id = ntohs(c.fl0id); 4488 fl->avail = fl->pend_cred = 0; 4489 fl->pidx = fl->cidx = 0; 4490 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; 4491 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; 4492 4493 /* Note, we must initialize the BAR2 Free List User Doorbell 4494 * information before refilling the Free List! 4495 */ 4496 fl->bar2_addr = bar2_address(adap, 4497 fl->cntxt_id, 4498 T4_BAR2_QTYPE_EGRESS, 4499 &fl->bar2_qid); 4500 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); 4501 } 4502 4503 /* For T5 and later we attempt to set up the Congestion Manager values 4504 * of the new RX Ethernet Queue. This should really be handled by 4505 * firmware because it's more complex than any host driver wants to 4506 * get involved with and it's different per chip and this is almost 4507 * certainly wrong. Firmware would be wrong as well, but it would be 4508 * a lot easier to fix in one place ... For now we do something very 4509 * simple (and hopefully less wrong). 
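 *
 * [Added worked example -- not in the original source]  For the Free
 * List sizing above: with, say, fl_starve_thres = 129 the minimum ring
 * is 129 - 1 + 2 * 8 = 144 descriptors, which is already a multiple of
 * 8, and (assuming a 128-byte status page and 64-byte descriptors)
 * flsz = 144 / 8 + 128 / 64 = 20 Egress Queue Units are reported to
 * the firmware.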
4510 */ 4511 if (!is_t4(adap->params.chip) && cong >= 0) { 4512 u32 param, val, ch_map = 0; 4513 int i; 4514 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; 4515 4516 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | 4517 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 4518 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id)); 4519 if (cong == 0) { 4520 val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X); 4521 } else { 4522 val = 4523 CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X); 4524 for (i = 0; i < 4; i++) { 4525 if (cong & (1 << i)) 4526 ch_map |= 1 << (i << cng_ch_bits_log); 4527 } 4528 val |= CONMCTXT_CNGCHMAP_V(ch_map); 4529 } 4530 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, 4531 ¶m, &val); 4532 if (ret) 4533 dev_warn(adap->pdev_dev, "Failed to set Congestion" 4534 " Manager Context for Ingress Queue %d: %d\n", 4535 iq->cntxt_id, -ret); 4536 } 4537 4538 return 0; 4539 4540 fl_nomem: 4541 ret = -ENOMEM; 4542 err: 4543 if (iq->desc) { 4544 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, 4545 iq->desc, iq->phys_addr); 4546 iq->desc = NULL; 4547 } 4548 if (fl && fl->desc) { 4549 kfree(fl->sdesc); 4550 fl->sdesc = NULL; 4551 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), 4552 fl->desc, fl->addr); 4553 fl->desc = NULL; 4554 } 4555 return ret; 4556 } 4557 4558 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) 4559 { 4560 q->cntxt_id = id; 4561 q->bar2_addr = bar2_address(adap, 4562 q->cntxt_id, 4563 T4_BAR2_QTYPE_EGRESS, 4564 &q->bar2_qid); 4565 q->in_use = 0; 4566 q->cidx = q->pidx = 0; 4567 q->stops = q->restarts = 0; 4568 q->stat = (void *)&q->desc[q->size]; 4569 spin_lock_init(&q->db_lock); 4570 adap->sge.egr_map[id - adap->sge.egr_start] = q; 4571 } 4572 4573 /** 4574 * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue 4575 * @adap: the adapter 4576 * @txq: the SGE Ethernet TX Queue to initialize 4577 * @dev: the Linux Network Device 4578 * @netdevq: the corresponding Linux TX Queue 4579 * @iqid: the Ingress Queue to which to deliver CIDX Update messages 4580 * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers 4581 */ 4582 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, 4583 struct net_device *dev, struct netdev_queue *netdevq, 4584 unsigned int iqid, u8 dbqt) 4585 { 4586 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); 4587 struct port_info *pi = netdev_priv(dev); 4588 struct sge *s = &adap->sge; 4589 struct fw_eq_eth_cmd c; 4590 int ret, nentries; 4591 4592 /* Add status entries */ 4593 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); 4594 4595 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, 4596 sizeof(struct tx_desc), sizeof(struct tx_sw_desc), 4597 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, 4598 netdev_queue_numa_node_read(netdevq)); 4599 if (!txq->q.desc) 4600 return -ENOMEM; 4601 4602 memset(&c, 0, sizeof(c)); 4603 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | 4604 FW_CMD_WRITE_F | FW_CMD_EXEC_F | 4605 FW_EQ_ETH_CMD_PFN_V(adap->pf) | 4606 FW_EQ_ETH_CMD_VFN_V(0)); 4607 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | 4608 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); 4609 4610 /* For TX Ethernet Queues using the SGE Doorbell Queue Timer 4611 * mechanism, we use Ingress Queue messages for Hardware Consumer 4612 * Index Updates on the TX Queue. Otherwise we have the Hardware 4613 * write the CIDX Updates into the Status Page at the end of the 4614 * TX Queue. 
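 *
 * [Added worked example -- not in the original source]  For the
 * Congestion Manager channel map built in t4_sge_alloc_rxq() above:
 * each set bit i of @cong is spread out by i << cng_ch_bits_log, so
 * with cong = 0x5 (channels 0 and 2) and, say, cng_ch_bits_log = 2 the
 * resulting ch_map is (1 << 0) | (1 << 8) = 0x101.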
4615 */ 4616 c.autoequiqe_to_viid = htonl(((chip_ver <= CHELSIO_T5) ? 4617 FW_EQ_ETH_CMD_AUTOEQUIQE_F : 4618 FW_EQ_ETH_CMD_AUTOEQUEQE_F) | 4619 FW_EQ_ETH_CMD_VIID_V(pi->viid)); 4620 4621 c.fetchszm_to_iqid = 4622 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V((chip_ver <= CHELSIO_T5) ? 4623 HOSTFCMODE_INGRESS_QUEUE_X : 4624 HOSTFCMODE_STATUS_PAGE_X) | 4625 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | 4626 FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid)); 4627 4628 /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */ 4629 c.dcaen_to_eqsize = 4630 htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 4631 ? FETCHBURSTMIN_64B_X 4632 : FETCHBURSTMIN_64B_T6_X) | 4633 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 4634 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 4635 FW_EQ_ETH_CMD_CIDXFTHRESHO_V(chip_ver == CHELSIO_T5) | 4636 FW_EQ_ETH_CMD_EQSIZE_V(nentries)); 4637 4638 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 4639 4640 /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the 4641 * currently configured Timer Index. THis can be changed later via an 4642 * ethtool -C tx-usecs {Timer Val} command. Note that the SGE 4643 * Doorbell Queue mode is currently automatically enabled in the 4644 * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ... 4645 */ 4646 if (dbqt) 4647 c.timeren_timerix = 4648 cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F | 4649 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); 4650 4651 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 4652 if (ret) { 4653 kfree(txq->q.sdesc); 4654 txq->q.sdesc = NULL; 4655 dma_free_coherent(adap->pdev_dev, 4656 nentries * sizeof(struct tx_desc), 4657 txq->q.desc, txq->q.phys_addr); 4658 txq->q.desc = NULL; 4659 return ret; 4660 } 4661 4662 txq->q.q_type = CXGB4_TXQ_ETH; 4663 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); 4664 txq->txq = netdevq; 4665 txq->tso = 0; 4666 txq->uso = 0; 4667 txq->tx_cso = 0; 4668 txq->vlan_ins = 0; 4669 txq->mapping_err = 0; 4670 txq->dbqt = dbqt; 4671 4672 return 0; 4673 } 4674 4675 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, 4676 struct net_device *dev, unsigned int iqid, 4677 unsigned int cmplqid) 4678 { 4679 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); 4680 struct port_info *pi = netdev_priv(dev); 4681 struct sge *s = &adap->sge; 4682 struct fw_eq_ctrl_cmd c; 4683 int ret, nentries; 4684 4685 /* Add status entries */ 4686 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); 4687 4688 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, 4689 sizeof(struct tx_desc), 0, &txq->q.phys_addr, 4690 NULL, 0, dev_to_node(adap->pdev_dev)); 4691 if (!txq->q.desc) 4692 return -ENOMEM; 4693 4694 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | 4695 FW_CMD_WRITE_F | FW_CMD_EXEC_F | 4696 FW_EQ_CTRL_CMD_PFN_V(adap->pf) | 4697 FW_EQ_CTRL_CMD_VFN_V(0)); 4698 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | 4699 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); 4700 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); 4701 c.physeqid_pkd = htonl(0); 4702 c.fetchszm_to_iqid = 4703 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | 4704 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | 4705 FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid)); 4706 c.dcaen_to_eqsize = 4707 htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 4708 ? 
FETCHBURSTMIN_64B_X 4709 : FETCHBURSTMIN_64B_T6_X) | 4710 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 4711 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 4712 FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); 4713 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 4714 4715 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 4716 if (ret) { 4717 dma_free_coherent(adap->pdev_dev, 4718 nentries * sizeof(struct tx_desc), 4719 txq->q.desc, txq->q.phys_addr); 4720 txq->q.desc = NULL; 4721 return ret; 4722 } 4723 4724 txq->q.q_type = CXGB4_TXQ_CTRL; 4725 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); 4726 txq->adap = adap; 4727 skb_queue_head_init(&txq->sendq); 4728 tasklet_setup(&txq->qresume_tsk, restart_ctrlq); 4729 txq->full = 0; 4730 return 0; 4731 } 4732 4733 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid, 4734 unsigned int cmplqid) 4735 { 4736 u32 param, val; 4737 4738 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | 4739 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) | 4740 FW_PARAMS_PARAM_YZ_V(eqid)); 4741 val = cmplqid; 4742 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); 4743 } 4744 4745 static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q, 4746 struct net_device *dev, u32 cmd, u32 iqid) 4747 { 4748 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); 4749 struct port_info *pi = netdev_priv(dev); 4750 struct sge *s = &adap->sge; 4751 struct fw_eq_ofld_cmd c; 4752 u32 fb_min, nentries; 4753 int ret; 4754 4755 /* Add status entries */ 4756 nentries = q->size + s->stat_len / sizeof(struct tx_desc); 4757 q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc), 4758 sizeof(struct tx_sw_desc), &q->phys_addr, 4759 &q->sdesc, s->stat_len, NUMA_NO_NODE); 4760 if (!q->desc) 4761 return -ENOMEM; 4762 4763 if (chip_ver <= CHELSIO_T5) 4764 fb_min = FETCHBURSTMIN_64B_X; 4765 else 4766 fb_min = FETCHBURSTMIN_64B_T6_X; 4767 4768 memset(&c, 0, sizeof(c)); 4769 c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F | 4770 FW_CMD_WRITE_F | FW_CMD_EXEC_F | 4771 FW_EQ_OFLD_CMD_PFN_V(adap->pf) | 4772 FW_EQ_OFLD_CMD_VFN_V(0)); 4773 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | 4774 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); 4775 c.fetchszm_to_iqid = 4776 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | 4777 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | 4778 FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid)); 4779 c.dcaen_to_eqsize = 4780 htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) | 4781 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 4782 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 4783 FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); 4784 c.eqaddr = cpu_to_be64(q->phys_addr); 4785 4786 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 4787 if (ret) { 4788 kfree(q->sdesc); 4789 q->sdesc = NULL; 4790 dma_free_coherent(adap->pdev_dev, 4791 nentries * sizeof(struct tx_desc), 4792 q->desc, q->phys_addr); 4793 q->desc = NULL; 4794 return ret; 4795 } 4796 4797 init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); 4798 return 0; 4799 } 4800 4801 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, 4802 struct net_device *dev, unsigned int iqid, 4803 unsigned int uld_type) 4804 { 4805 u32 cmd = FW_EQ_OFLD_CMD; 4806 int ret; 4807 4808 if (unlikely(uld_type == CXGB4_TX_CRYPTO)) 4809 cmd = FW_EQ_CTRL_CMD; 4810 4811 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid); 4812 if (ret) 4813 return ret; 4814 4815 txq->q.q_type = CXGB4_TXQ_ULD; 4816 txq->adap = adap; 4817 
skb_queue_head_init(&txq->sendq); 4818 tasklet_setup(&txq->qresume_tsk, restart_ofldq); 4819 txq->full = 0; 4820 txq->mapping_err = 0; 4821 return 0; 4822 } 4823 4824 int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq, 4825 struct net_device *dev, u32 iqid) 4826 { 4827 int ret; 4828 4829 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid); 4830 if (ret) 4831 return ret; 4832 4833 txq->q.q_type = CXGB4_TXQ_ULD; 4834 spin_lock_init(&txq->lock); 4835 txq->adap = adap; 4836 txq->tso = 0; 4837 txq->uso = 0; 4838 txq->tx_cso = 0; 4839 txq->vlan_ins = 0; 4840 txq->mapping_err = 0; 4841 return 0; 4842 } 4843 4844 void free_txq(struct adapter *adap, struct sge_txq *q) 4845 { 4846 struct sge *s = &adap->sge; 4847 4848 dma_free_coherent(adap->pdev_dev, 4849 q->size * sizeof(struct tx_desc) + s->stat_len, 4850 q->desc, q->phys_addr); 4851 q->cntxt_id = 0; 4852 q->sdesc = NULL; 4853 q->desc = NULL; 4854 } 4855 4856 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, 4857 struct sge_fl *fl) 4858 { 4859 struct sge *s = &adap->sge; 4860 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; 4861 4862 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; 4863 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 4864 rq->cntxt_id, fl_id, 0xffff); 4865 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, 4866 rq->desc, rq->phys_addr); 4867 netif_napi_del(&rq->napi); 4868 rq->netdev = NULL; 4869 rq->cntxt_id = rq->abs_id = 0; 4870 rq->desc = NULL; 4871 4872 if (fl) { 4873 free_rx_bufs(adap, fl, fl->avail); 4874 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, 4875 fl->desc, fl->addr); 4876 kfree(fl->sdesc); 4877 fl->sdesc = NULL; 4878 fl->cntxt_id = 0; 4879 fl->desc = NULL; 4880 } 4881 } 4882 4883 /** 4884 * t4_free_ofld_rxqs - free a block of consecutive Rx queues 4885 * @adap: the adapter 4886 * @n: number of queues 4887 * @q: pointer to first queue 4888 * 4889 * Release the resources of a consecutive block of offload Rx queues. 4890 */ 4891 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) 4892 { 4893 for ( ; n; n--, q++) 4894 if (q->rspq.desc) 4895 free_rspq_fl(adap, &q->rspq, 4896 q->fl.size ? &q->fl : NULL); 4897 } 4898 4899 void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq) 4900 { 4901 if (txq->q.desc) { 4902 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, 4903 txq->q.cntxt_id); 4904 free_tx_desc(adap, &txq->q, txq->q.in_use, false); 4905 kfree(txq->q.sdesc); 4906 free_txq(adap, &txq->q); 4907 } 4908 } 4909 4910 /** 4911 * t4_free_sge_resources - free SGE resources 4912 * @adap: the adapter 4913 * 4914 * Frees resources used by the SGE queue sets. 4915 */ 4916 void t4_free_sge_resources(struct adapter *adap) 4917 { 4918 int i; 4919 struct sge_eth_rxq *eq; 4920 struct sge_eth_txq *etq; 4921 4922 /* stop all Rx queues in order to start them draining */ 4923 for (i = 0; i < adap->sge.ethqsets; i++) { 4924 eq = &adap->sge.ethrxq[i]; 4925 if (eq->rspq.desc) 4926 t4_iq_stop(adap, adap->mbox, adap->pf, 0, 4927 FW_IQ_TYPE_FL_INT_CAP, 4928 eq->rspq.cntxt_id, 4929 eq->fl.size ? eq->fl.cntxt_id : 0xffff, 4930 0xffff); 4931 } 4932 4933 /* clean up Ethernet Tx/Rx queues */ 4934 for (i = 0; i < adap->sge.ethqsets; i++) { 4935 eq = &adap->sge.ethrxq[i]; 4936 if (eq->rspq.desc) 4937 free_rspq_fl(adap, &eq->rspq, 4938 eq->fl.size ? 
&eq->fl : NULL); 4939 if (eq->msix) { 4940 cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx); 4941 eq->msix = NULL; 4942 } 4943 4944 etq = &adap->sge.ethtxq[i]; 4945 if (etq->q.desc) { 4946 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 4947 etq->q.cntxt_id); 4948 __netif_tx_lock_bh(etq->txq); 4949 free_tx_desc(adap, &etq->q, etq->q.in_use, true); 4950 __netif_tx_unlock_bh(etq->txq); 4951 kfree(etq->q.sdesc); 4952 free_txq(adap, &etq->q); 4953 } 4954 } 4955 4956 /* clean up control Tx queues */ 4957 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { 4958 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; 4959 4960 if (cq->q.desc) { 4961 tasklet_kill(&cq->qresume_tsk); 4962 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, 4963 cq->q.cntxt_id); 4964 __skb_queue_purge(&cq->sendq); 4965 free_txq(adap, &cq->q); 4966 } 4967 } 4968 4969 if (adap->sge.fw_evtq.desc) { 4970 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); 4971 if (adap->sge.fwevtq_msix_idx >= 0) 4972 cxgb4_free_msix_idx_in_bmap(adap, 4973 adap->sge.fwevtq_msix_idx); 4974 } 4975 4976 if (adap->sge.nd_msix_idx >= 0) 4977 cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx); 4978 4979 if (adap->sge.intrq.desc) 4980 free_rspq_fl(adap, &adap->sge.intrq, NULL); 4981 4982 if (!is_t4(adap->params.chip)) { 4983 etq = &adap->sge.ptptxq; 4984 if (etq->q.desc) { 4985 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 4986 etq->q.cntxt_id); 4987 spin_lock_bh(&adap->ptp_lock); 4988 free_tx_desc(adap, &etq->q, etq->q.in_use, true); 4989 spin_unlock_bh(&adap->ptp_lock); 4990 kfree(etq->q.sdesc); 4991 free_txq(adap, &etq->q); 4992 } 4993 } 4994 4995 /* clear the reverse egress queue map */ 4996 memset(adap->sge.egr_map, 0, 4997 adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); 4998 } 4999 5000 void t4_sge_start(struct adapter *adap) 5001 { 5002 adap->sge.ethtxq_rover = 0; 5003 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); 5004 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); 5005 } 5006 5007 /** 5008 * t4_sge_stop - disable SGE operation 5009 * @adap: the adapter 5010 * 5011 * Stop tasklets and timers associated with the DMA engine. Note that 5012 * this is effective only if measures have been taken to disable any HW 5013 * events that may restart them. 
5014 */ 5015 void t4_sge_stop(struct adapter *adap) 5016 { 5017 int i; 5018 struct sge *s = &adap->sge; 5019 5020 if (s->rx_timer.function) 5021 del_timer_sync(&s->rx_timer); 5022 if (s->tx_timer.function) 5023 del_timer_sync(&s->tx_timer); 5024 5025 if (is_offload(adap)) { 5026 struct sge_uld_txq_info *txq_info; 5027 5028 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; 5029 if (txq_info) { 5030 struct sge_uld_txq *txq = txq_info->uldtxq; 5031 5032 for_each_ofldtxq(&adap->sge, i) { 5033 if (txq->q.desc) 5034 tasklet_kill(&txq->qresume_tsk); 5035 } 5036 } 5037 } 5038 5039 if (is_pci_uld(adap)) { 5040 struct sge_uld_txq_info *txq_info; 5041 5042 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; 5043 if (txq_info) { 5044 struct sge_uld_txq *txq = txq_info->uldtxq; 5045 5046 for_each_ofldtxq(&adap->sge, i) { 5047 if (txq->q.desc) 5048 tasklet_kill(&txq->qresume_tsk); 5049 } 5050 } 5051 } 5052 5053 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { 5054 struct sge_ctrl_txq *cq = &s->ctrlq[i]; 5055 5056 if (cq->q.desc) 5057 tasklet_kill(&cq->qresume_tsk); 5058 } 5059 } 5060 5061 /** 5062 * t4_sge_init_soft - grab core SGE values needed by SGE code 5063 * @adap: the adapter 5064 * 5065 * We need to grab the SGE operating parameters that we need to have 5066 * in order to do our job and make sure we can live with them. 5067 */ 5068 5069 static int t4_sge_init_soft(struct adapter *adap) 5070 { 5071 struct sge *s = &adap->sge; 5072 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu; 5073 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; 5074 u32 ingress_rx_threshold; 5075 5076 /* 5077 * Verify that CPL messages are going to the Ingress Queue for 5078 * process_responses() and that only packet data is going to the 5079 * Free Lists. 5080 */ 5081 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) != 5082 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) { 5083 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); 5084 return -EINVAL; 5085 } 5086 5087 /* 5088 * Validate the Host Buffer Register Array indices that we want to 5089 * use ... 5090 * 5091 * XXX Note that we should really read through the Host Buffer Size 5092 * XXX register array and find the indices of the Buffer Sizes which 5093 * XXX meet our needs! 5094 */ 5095 #define READ_FL_BUF(x) \ 5096 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32)) 5097 5098 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); 5099 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); 5100 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); 5101 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); 5102 5103 /* We only bother using the Large Page logic if the Large Page Buffer 5104 * is larger than our Page Size Buffer. 5105 */ 5106 if (fl_large_pg <= fl_small_pg) 5107 fl_large_pg = 0; 5108 5109 #undef READ_FL_BUF 5110 5111 /* The Page Size Buffer must be exactly equal to our Page Size and the 5112 * Large Page Size Buffer should be 0 (per above) or a power of 2. 
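 *
 * [Added worked example -- not in the original source]  On a typical
 * 4KB-page host this means fl_small_pg must read back as 4096, and a
 * 64KB fl_large_pg passes the power-of-two test and yields
 * fl_pg_order = ilog2(65536) - 12 = 4 in the code below.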
5113 */ 5114 if (fl_small_pg != PAGE_SIZE || 5115 (fl_large_pg & (fl_large_pg-1)) != 0) { 5116 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", 5117 fl_small_pg, fl_large_pg); 5118 return -EINVAL; 5119 } 5120 if (fl_large_pg) 5121 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; 5122 5123 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) || 5124 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { 5125 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", 5126 fl_small_mtu, fl_large_mtu); 5127 return -EINVAL; 5128 } 5129 5130 /* 5131 * Retrieve our RX interrupt holdoff timer values and counter 5132 * threshold values from the SGE parameters. 5133 */ 5134 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A); 5135 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A); 5136 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A); 5137 s->timer_val[0] = core_ticks_to_us(adap, 5138 TIMERVALUE0_G(timer_value_0_and_1)); 5139 s->timer_val[1] = core_ticks_to_us(adap, 5140 TIMERVALUE1_G(timer_value_0_and_1)); 5141 s->timer_val[2] = core_ticks_to_us(adap, 5142 TIMERVALUE2_G(timer_value_2_and_3)); 5143 s->timer_val[3] = core_ticks_to_us(adap, 5144 TIMERVALUE3_G(timer_value_2_and_3)); 5145 s->timer_val[4] = core_ticks_to_us(adap, 5146 TIMERVALUE4_G(timer_value_4_and_5)); 5147 s->timer_val[5] = core_ticks_to_us(adap, 5148 TIMERVALUE5_G(timer_value_4_and_5)); 5149 5150 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A); 5151 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); 5152 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); 5153 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); 5154 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); 5155 5156 return 0; 5157 } 5158 5159 /** 5160 * t4_sge_init - initialize SGE 5161 * @adap: the adapter 5162 * 5163 * Perform low-level SGE code initialization needed every time after a 5164 * chip reset. 5165 */ 5166 int t4_sge_init(struct adapter *adap) 5167 { 5168 struct sge *s = &adap->sge; 5169 u32 sge_control, sge_conm_ctrl; 5170 int ret, egress_threshold; 5171 5172 /* 5173 * Ingress Padding Boundary and Egress Status Page Size are set up by 5174 * t4_fixup_host_params(). 5175 */ 5176 sge_control = t4_read_reg(adap, SGE_CONTROL_A); 5177 s->pktshift = PKTSHIFT_G(sge_control); 5178 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; 5179 5180 s->fl_align = t4_fl_pkt_align(adap); 5181 ret = t4_sge_init_soft(adap); 5182 if (ret < 0) 5183 return ret; 5184 5185 /* 5186 * A FL with <= fl_starve_thres buffers is starving and a periodic 5187 * timer will attempt to refill it. This needs to be larger than the 5188 * SGE's Egress Congestion Threshold. If it isn't, then we can get 5189 * stuck waiting for new packets while the SGE is waiting for us to 5190 * give it more Free List entries. (Note that the SGE's Egress 5191 * Congestion Threshold is in units of 2 Free List pointers.) For T4, 5192 * there was only a single field to control this. For T5 there's the 5193 * original field which now only applies to Unpacked Mode Free List 5194 * buffers and a new field which only applies to Packed Mode Free List 5195 * buffers. 
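 *
 * [Added worked example -- not in the original source]  Since the
 * hardware threshold is expressed in units of 2 Free List pointers, an
 * EGRTHRESHOLD reading of 64 corresponds to 128 buffers, and the code
 * below therefore sets fl_starve_thres = 2 * 64 + 1 = 129 so that the
 * driver's notion of "starving" sits strictly above the SGE's
 * congestion point.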
 */
	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T4:
		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
		break;
	case CHELSIO_T5:
		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
		break;
	case CHELSIO_T6:
		egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
		break;
	default:
		dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
			CHELSIO_CHIP_VERSION(adap->params.chip));
		return -EINVAL;
	}
	s->fl_starve_thres = 2 * egress_threshold + 1;

	t4_idma_monitor_init(adap, &s->idma_monitor);

	/* Set up the timers used for recurring callbacks to process RX and
	 * TX administrative tasks.
	 */
	timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
	timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);

	spin_lock_init(&s->intrq_lock);

	return 0;
}
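
/* Added illustration -- not part of the original driver.  The RX/TX
 * housekeeping timers initialised above (and armed in t4_sge_start())
 * follow the usual self-rearming pattern sketched below with
 * hypothetical names: the callback performs a bounded amount of work
 * and then reschedules itself for the next period.
 */
struct demo_periodic {
	struct timer_list timer;
	unsigned long period;		/* rearm interval, in jiffies */
};

static void demo_periodic_cb(struct timer_list *t)
{
	struct demo_periodic *p = from_timer(p, t, timer);

	/* ... a bounded amount of housekeeping work goes here ... */

	mod_timer(&p->timer, jiffies + p->period);
}

static inline void demo_periodic_start(struct demo_periodic *p,
				       unsigned long period)
{
	p->period = period;
	timer_setup(&p->timer, demo_periodic_cb, 0);
	mod_timer(&p->timer, jiffies + p->period);
}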