1f7917c00SJeff Kirsher /* 2f7917c00SJeff Kirsher * This file is part of the Chelsio T4 Ethernet driver for Linux. 3f7917c00SJeff Kirsher * 4ce100b8bSAnish Bhatt * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. 5f7917c00SJeff Kirsher * 6f7917c00SJeff Kirsher * This software is available to you under a choice of one of two 7f7917c00SJeff Kirsher * licenses. You may choose to be licensed under the terms of the GNU 8f7917c00SJeff Kirsher * General Public License (GPL) Version 2, available from the file 9f7917c00SJeff Kirsher * COPYING in the main directory of this source tree, or the 10f7917c00SJeff Kirsher * OpenIB.org BSD license below: 11f7917c00SJeff Kirsher * 12f7917c00SJeff Kirsher * Redistribution and use in source and binary forms, with or 13f7917c00SJeff Kirsher * without modification, are permitted provided that the following 14f7917c00SJeff Kirsher * conditions are met: 15f7917c00SJeff Kirsher * 16f7917c00SJeff Kirsher * - Redistributions of source code must retain the above 17f7917c00SJeff Kirsher * copyright notice, this list of conditions and the following 18f7917c00SJeff Kirsher * disclaimer. 19f7917c00SJeff Kirsher * 20f7917c00SJeff Kirsher * - Redistributions in binary form must reproduce the above 21f7917c00SJeff Kirsher * copyright notice, this list of conditions and the following 22f7917c00SJeff Kirsher * disclaimer in the documentation and/or other materials 23f7917c00SJeff Kirsher * provided with the distribution. 24f7917c00SJeff Kirsher * 25f7917c00SJeff Kirsher * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26f7917c00SJeff Kirsher * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27f7917c00SJeff Kirsher * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28f7917c00SJeff Kirsher * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29f7917c00SJeff Kirsher * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30f7917c00SJeff Kirsher * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31f7917c00SJeff Kirsher * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32f7917c00SJeff Kirsher * SOFTWARE. 
33f7917c00SJeff Kirsher */ 34f7917c00SJeff Kirsher 35f7917c00SJeff Kirsher #include <linux/skbuff.h> 36f7917c00SJeff Kirsher #include <linux/netdevice.h> 37f7917c00SJeff Kirsher #include <linux/etherdevice.h> 38f7917c00SJeff Kirsher #include <linux/if_vlan.h> 39f7917c00SJeff Kirsher #include <linux/ip.h> 40f7917c00SJeff Kirsher #include <linux/dma-mapping.h> 41f7917c00SJeff Kirsher #include <linux/jiffies.h> 42f7917c00SJeff Kirsher #include <linux/prefetch.h> 43ee40fa06SPaul Gortmaker #include <linux/export.h> 44a6ec572bSAtul Gupta #include <net/xfrm.h> 45f7917c00SJeff Kirsher #include <net/ipv6.h> 46f7917c00SJeff Kirsher #include <net/tcp.h> 473a336cb1SHariprasad Shenai #include <net/busy_poll.h> 4884a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE 4984a200b3SVarun Prakash #include <scsi/fc/fc_fcoe.h> 5084a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */ 51f7917c00SJeff Kirsher #include "cxgb4.h" 52f7917c00SJeff Kirsher #include "t4_regs.h" 53f612b815SHariprasad Shenai #include "t4_values.h" 54f7917c00SJeff Kirsher #include "t4_msg.h" 55f7917c00SJeff Kirsher #include "t4fw_api.h" 56a4569504SAtul Gupta #include "cxgb4_ptp.h" 57a6ec572bSAtul Gupta #include "cxgb4_uld.h" 584846d533SRahul Lakkireddy #include "cxgb4_tc_mqprio.h" 590e395b3cSRahul Lakkireddy #include "sched.h" 60f7917c00SJeff Kirsher 61f7917c00SJeff Kirsher /* 62f7917c00SJeff Kirsher * Rx buffer size. We use largish buffers if possible but settle for single 63f7917c00SJeff Kirsher * pages under memory shortage. 64f7917c00SJeff Kirsher */ 65f7917c00SJeff Kirsher #if PAGE_SHIFT >= 16 66f7917c00SJeff Kirsher # define FL_PG_ORDER 0 67f7917c00SJeff Kirsher #else 68f7917c00SJeff Kirsher # define FL_PG_ORDER (16 - PAGE_SHIFT) 69f7917c00SJeff Kirsher #endif 70f7917c00SJeff Kirsher 71f7917c00SJeff Kirsher /* RX_PULL_LEN should be <= RX_COPY_THRES */ 72f7917c00SJeff Kirsher #define RX_COPY_THRES 256 73f7917c00SJeff Kirsher #define RX_PULL_LEN 128 74f7917c00SJeff Kirsher 75f7917c00SJeff Kirsher /* 76f7917c00SJeff Kirsher * Main body length for sk_buffs used for Rx Ethernet packets with fragments. 77f7917c00SJeff Kirsher * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room. 78f7917c00SJeff Kirsher */ 79f7917c00SJeff Kirsher #define RX_PKT_SKB_LEN 512 80f7917c00SJeff Kirsher 81f7917c00SJeff Kirsher /* 82f7917c00SJeff Kirsher * Max number of Tx descriptors we clean up at a time. Should be modest as 83f7917c00SJeff Kirsher * freeing skbs isn't cheap and it happens while holding locks. We just need 84f7917c00SJeff Kirsher * to free packets faster than they arrive, we eventually catch up and keep 85d429005fSVishal Kulkarni * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. It should 86d429005fSVishal Kulkarni * also match the CIDX Flush Threshold. 87f7917c00SJeff Kirsher */ 88d429005fSVishal Kulkarni #define MAX_TX_RECLAIM 32 89f7917c00SJeff Kirsher 90f7917c00SJeff Kirsher /* 91f7917c00SJeff Kirsher * Max number of Rx buffers we replenish at a time. Again keep this modest, 92f7917c00SJeff Kirsher * allocating buffers isn't cheap either. 93f7917c00SJeff Kirsher */ 94f7917c00SJeff Kirsher #define MAX_RX_REFILL 16U 95f7917c00SJeff Kirsher 96f7917c00SJeff Kirsher /* 97f7917c00SJeff Kirsher * Period of the Rx queue check timer. This timer is infrequent as it has 98f7917c00SJeff Kirsher * something to do only when the system experiences severe memory shortage. 
99f7917c00SJeff Kirsher */ 100f7917c00SJeff Kirsher #define RX_QCHECK_PERIOD (HZ / 2) 101f7917c00SJeff Kirsher 102f7917c00SJeff Kirsher /* 103f7917c00SJeff Kirsher * Period of the Tx queue check timer. 104f7917c00SJeff Kirsher */ 105f7917c00SJeff Kirsher #define TX_QCHECK_PERIOD (HZ / 2) 106f7917c00SJeff Kirsher 107f7917c00SJeff Kirsher /* 108f7917c00SJeff Kirsher * Max number of Tx descriptors to be reclaimed by the Tx timer. 109f7917c00SJeff Kirsher */ 110f7917c00SJeff Kirsher #define MAX_TIMER_TX_RECLAIM 100 111f7917c00SJeff Kirsher 112f7917c00SJeff Kirsher /* 113f7917c00SJeff Kirsher * Timer index used when backing off due to memory shortage. 114f7917c00SJeff Kirsher */ 115f7917c00SJeff Kirsher #define NOMEM_TMR_IDX (SGE_NTIMERS - 1) 116f7917c00SJeff Kirsher 117f7917c00SJeff Kirsher /* 118f7917c00SJeff Kirsher * Suspension threshold for non-Ethernet Tx queues. We require enough room 119f7917c00SJeff Kirsher * for a full sized WR. 120f7917c00SJeff Kirsher */ 121f7917c00SJeff Kirsher #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc)) 122f7917c00SJeff Kirsher 123f7917c00SJeff Kirsher /* 124f7917c00SJeff Kirsher * Max Tx descriptor space we allow for an Ethernet packet to be inlined 125f7917c00SJeff Kirsher * into a WR. 126f7917c00SJeff Kirsher */ 12721dcfad6SHariprasad Shenai #define MAX_IMM_TX_PKT_LEN 256 128f7917c00SJeff Kirsher 129f7917c00SJeff Kirsher /* 130f7917c00SJeff Kirsher * Max size of a WR sent through a control Tx queue. 131f7917c00SJeff Kirsher */ 132f7917c00SJeff Kirsher #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN 133f7917c00SJeff Kirsher 134f7917c00SJeff Kirsher struct rx_sw_desc { /* SW state per Rx descriptor */ 135f7917c00SJeff Kirsher struct page *page; 136f7917c00SJeff Kirsher dma_addr_t dma_addr; 137f7917c00SJeff Kirsher }; 138f7917c00SJeff Kirsher 139f7917c00SJeff Kirsher /* 14052367a76SVipul Pandya * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb 14152367a76SVipul Pandya * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs. 14252367a76SVipul Pandya * We could easily support more but there doesn't seem to be much need for 14352367a76SVipul Pandya * that ... 14452367a76SVipul Pandya */ 14552367a76SVipul Pandya #define FL_MTU_SMALL 1500 14652367a76SVipul Pandya #define FL_MTU_LARGE 9000 14752367a76SVipul Pandya 14852367a76SVipul Pandya static inline unsigned int fl_mtu_bufsize(struct adapter *adapter, 14952367a76SVipul Pandya unsigned int mtu) 15052367a76SVipul Pandya { 15152367a76SVipul Pandya struct sge *s = &adapter->sge; 15252367a76SVipul Pandya 15352367a76SVipul Pandya return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align); 15452367a76SVipul Pandya } 15552367a76SVipul Pandya 15652367a76SVipul Pandya #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL) 15752367a76SVipul Pandya #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE) 15852367a76SVipul Pandya 15952367a76SVipul Pandya /* 16052367a76SVipul Pandya * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses 16152367a76SVipul Pandya * these to specify the buffer size as an index into the SGE Free List Buffer 16252367a76SVipul Pandya * Size register array. We also use bit 4, when the buffer has been unmapped 16352367a76SVipul Pandya * for DMA, but this is of course never sent to the hardware and is only used 16452367a76SVipul Pandya * to prevent double unmappings.
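 * As a purely illustrative example (editor's note, not from the original
 * source): a large-page buffer mapped at bus address 0x12340040 would be
 * stored in rx_sw_desc.dma_addr as (0x12340040 | RX_LARGE_PG_BUF), and
 * get_buf_addr() below recovers the real bus address by masking off
 * RX_BUF_FLAGS, while is_buf_mapped() checks the RX_UNMAPPED_BUF bit.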
All of the above requires that the Free List 16552367a76SVipul Pandya * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are 16652367a76SVipul Pandya * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal 16752367a76SVipul Pandya * Free List Buffer alignment is 32 bytes, this works out for us ... 168f7917c00SJeff Kirsher */ 169f7917c00SJeff Kirsher enum { 17052367a76SVipul Pandya RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */ 17152367a76SVipul Pandya RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */ 17252367a76SVipul Pandya RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */ 17352367a76SVipul Pandya 17452367a76SVipul Pandya /* 17552367a76SVipul Pandya * XXX We shouldn't depend on being able to use these indices. 17652367a76SVipul Pandya * XXX Especially when some other Master PF has initialized the 17752367a76SVipul Pandya * XXX adapter or we use the Firmware Configuration File. We 17852367a76SVipul Pandya * XXX should really search through the Host Buffer Size register 17952367a76SVipul Pandya * XXX array for the appropriately sized buffer indices. 18052367a76SVipul Pandya */ 18152367a76SVipul Pandya RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */ 18252367a76SVipul Pandya RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */ 18352367a76SVipul Pandya 18452367a76SVipul Pandya RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */ 18552367a76SVipul Pandya RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */ 186f7917c00SJeff Kirsher }; 187f7917c00SJeff Kirsher 188e553ec3fSHariprasad Shenai static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5}; 189e553ec3fSHariprasad Shenai #define MIN_NAPI_WORK 1 190e553ec3fSHariprasad Shenai 191f7917c00SJeff Kirsher static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) 192f7917c00SJeff Kirsher { 19352367a76SVipul Pandya return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS; 194f7917c00SJeff Kirsher } 195f7917c00SJeff Kirsher 196f7917c00SJeff Kirsher static inline bool is_buf_mapped(const struct rx_sw_desc *d) 197f7917c00SJeff Kirsher { 198f7917c00SJeff Kirsher return !(d->dma_addr & RX_UNMAPPED_BUF); 199f7917c00SJeff Kirsher } 200f7917c00SJeff Kirsher 201f7917c00SJeff Kirsher /** 202f7917c00SJeff Kirsher * txq_avail - return the number of available slots in a Tx queue 203f7917c00SJeff Kirsher * @q: the Tx queue 204f7917c00SJeff Kirsher * 205f7917c00SJeff Kirsher * Returns the number of descriptors in a Tx queue available to write new 206f7917c00SJeff Kirsher * packets. 207f7917c00SJeff Kirsher */ 208f7917c00SJeff Kirsher static inline unsigned int txq_avail(const struct sge_txq *q) 209f7917c00SJeff Kirsher { 210f7917c00SJeff Kirsher return q->size - 1 - q->in_use; 211f7917c00SJeff Kirsher } 212f7917c00SJeff Kirsher 213f7917c00SJeff Kirsher /** 214f7917c00SJeff Kirsher * fl_cap - return the capacity of a free-buffer list 215f7917c00SJeff Kirsher * @fl: the FL 216f7917c00SJeff Kirsher * 217f7917c00SJeff Kirsher * Returns the capacity of a free-buffer list. The capacity is less than 218f7917c00SJeff Kirsher * the size because one descriptor needs to be left unpopulated, otherwise 219f7917c00SJeff Kirsher * HW will think the FL is empty.
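 *
 * For example (editor's illustration, numbers invented): a Free List
 * created with fl->size == 1024 buffer slots has a usable capacity of
 * 1016 buffers, since the final descriptor's worth of 8 buffers must be
 * left unpopulated.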
220f7917c00SJeff Kirsher */ 221f7917c00SJeff Kirsher static inline unsigned int fl_cap(const struct sge_fl *fl) 222f7917c00SJeff Kirsher { 223f7917c00SJeff Kirsher return fl->size - 8; /* 1 descriptor = 8 buffers */ 224f7917c00SJeff Kirsher } 225f7917c00SJeff Kirsher 226c098b026SHariprasad Shenai /** 227c098b026SHariprasad Shenai * fl_starving - return whether a Free List is starving. 228c098b026SHariprasad Shenai * @adapter: pointer to the adapter 229c098b026SHariprasad Shenai * @fl: the Free List 230c098b026SHariprasad Shenai * 231c098b026SHariprasad Shenai * Tests specified Free List to see whether the number of buffers 232c098b026SHariprasad Shenai * available to the hardware has fallen below our "starvation" 233c098b026SHariprasad Shenai * threshold. 234c098b026SHariprasad Shenai */ 235c098b026SHariprasad Shenai static inline bool fl_starving(const struct adapter *adapter, 236c098b026SHariprasad Shenai const struct sge_fl *fl) 237f7917c00SJeff Kirsher { 238c098b026SHariprasad Shenai const struct sge *s = &adapter->sge; 239c098b026SHariprasad Shenai 240c098b026SHariprasad Shenai return fl->avail - fl->pend_cred <= s->fl_starve_thres; 241f7917c00SJeff Kirsher } 242f7917c00SJeff Kirsher 243a6ec572bSAtul Gupta int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb, 244f7917c00SJeff Kirsher dma_addr_t *addr) 245f7917c00SJeff Kirsher { 246f7917c00SJeff Kirsher const skb_frag_t *fp, *end; 247f7917c00SJeff Kirsher const struct skb_shared_info *si; 248f7917c00SJeff Kirsher 249f7917c00SJeff Kirsher *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); 250f7917c00SJeff Kirsher if (dma_mapping_error(dev, *addr)) 251f7917c00SJeff Kirsher goto out_err; 252f7917c00SJeff Kirsher 253f7917c00SJeff Kirsher si = skb_shinfo(skb); 254f7917c00SJeff Kirsher end = &si->frags[si->nr_frags]; 255f7917c00SJeff Kirsher 256f7917c00SJeff Kirsher for (fp = si->frags; fp < end; fp++) { 257e91b0f24SIan Campbell *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp), 258e91b0f24SIan Campbell DMA_TO_DEVICE); 259f7917c00SJeff Kirsher if (dma_mapping_error(dev, *addr)) 260f7917c00SJeff Kirsher goto unwind; 261f7917c00SJeff Kirsher } 262f7917c00SJeff Kirsher return 0; 263f7917c00SJeff Kirsher 264f7917c00SJeff Kirsher unwind: 265f7917c00SJeff Kirsher while (fp-- > si->frags) 2669e903e08SEric Dumazet dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); 267f7917c00SJeff Kirsher 268f7917c00SJeff Kirsher dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); 269f7917c00SJeff Kirsher out_err: 270f7917c00SJeff Kirsher return -ENOMEM; 271f7917c00SJeff Kirsher } 272a6ec572bSAtul Gupta EXPORT_SYMBOL(cxgb4_map_skb); 273f7917c00SJeff Kirsher 274f7917c00SJeff Kirsher static void unmap_skb(struct device *dev, const struct sk_buff *skb, 275f7917c00SJeff Kirsher const dma_addr_t *addr) 276f7917c00SJeff Kirsher { 277f7917c00SJeff Kirsher const skb_frag_t *fp, *end; 278f7917c00SJeff Kirsher const struct skb_shared_info *si; 279f7917c00SJeff Kirsher 280f7917c00SJeff Kirsher dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE); 281f7917c00SJeff Kirsher 282f7917c00SJeff Kirsher si = skb_shinfo(skb); 283f7917c00SJeff Kirsher end = &si->frags[si->nr_frags]; 284f7917c00SJeff Kirsher for (fp = si->frags; fp < end; fp++) 2859e903e08SEric Dumazet dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE); 286f7917c00SJeff Kirsher } 287f7917c00SJeff Kirsher 288b1396c2bSRahul Lakkireddy #ifdef CONFIG_NEED_DMA_MAP_STATE 289f7917c00SJeff Kirsher /** 290f7917c00SJeff Kirsher *
deferred_unmap_destructor - unmap a packet when it is freed 291f7917c00SJeff Kirsher * @skb: the packet 292f7917c00SJeff Kirsher * 293f7917c00SJeff Kirsher * This is the packet destructor used for Tx packets that need to remain 294f7917c00SJeff Kirsher * mapped until they are freed rather than until their Tx descriptors are 295f7917c00SJeff Kirsher * freed. 296f7917c00SJeff Kirsher */ 297f7917c00SJeff Kirsher static void deferred_unmap_destructor(struct sk_buff *skb) 298f7917c00SJeff Kirsher { 299f7917c00SJeff Kirsher unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); 300f7917c00SJeff Kirsher } 301f7917c00SJeff Kirsher #endif 302f7917c00SJeff Kirsher 303f7917c00SJeff Kirsher /** 304f7917c00SJeff Kirsher * free_tx_desc - reclaims Tx descriptors and their buffers 30529bbf5d7SRahul Lakkireddy * @adap: the adapter 306f7917c00SJeff Kirsher * @q: the Tx queue to reclaim descriptors from 307f7917c00SJeff Kirsher * @n: the number of descriptors to reclaim 308f7917c00SJeff Kirsher * @unmap: whether the buffers should be unmapped for DMA 309f7917c00SJeff Kirsher * 310f7917c00SJeff Kirsher * Reclaims Tx descriptors from an SGE Tx queue and frees the associated 311f7917c00SJeff Kirsher * Tx buffers. Called with the Tx queue lock held. 312f7917c00SJeff Kirsher */ 313ab677ff4SHariprasad Shenai void free_tx_desc(struct adapter *adap, struct sge_txq *q, 314f7917c00SJeff Kirsher unsigned int n, bool unmap) 315f7917c00SJeff Kirsher { 316f7917c00SJeff Kirsher unsigned int cidx = q->cidx; 3170ed96b46SRahul Lakkireddy struct tx_sw_desc *d; 318f7917c00SJeff Kirsher 319f7917c00SJeff Kirsher d = &q->sdesc[cidx]; 320f7917c00SJeff Kirsher while (n--) { 321f7917c00SJeff Kirsher if (d->skb) { /* an SGL is present */ 3220ed96b46SRahul Lakkireddy if (unmap && d->addr[0]) { 3230ed96b46SRahul Lakkireddy unmap_skb(adap->pdev_dev, d->skb, d->addr); 3240ed96b46SRahul Lakkireddy memset(d->addr, 0, sizeof(d->addr)); 3250ed96b46SRahul Lakkireddy } 326a7525198SEric W. Biederman dev_consume_skb_any(d->skb); 327f7917c00SJeff Kirsher d->skb = NULL; 328f7917c00SJeff Kirsher } 329f7917c00SJeff Kirsher ++d; 330f7917c00SJeff Kirsher if (++cidx == q->size) { 331f7917c00SJeff Kirsher cidx = 0; 332f7917c00SJeff Kirsher d = q->sdesc; 333f7917c00SJeff Kirsher } 334f7917c00SJeff Kirsher } 335f7917c00SJeff Kirsher q->cidx = cidx; 336f7917c00SJeff Kirsher } 337f7917c00SJeff Kirsher 338f7917c00SJeff Kirsher /* 339f7917c00SJeff Kirsher * Return the number of reclaimable descriptors in a Tx queue. 340f7917c00SJeff Kirsher */ 341f7917c00SJeff Kirsher static inline int reclaimable(const struct sge_txq *q) 342f7917c00SJeff Kirsher { 3436aa7de05SMark Rutland int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); 344f7917c00SJeff Kirsher hw_cidx -= q->cidx; 345f7917c00SJeff Kirsher return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; 346f7917c00SJeff Kirsher } 347f7917c00SJeff Kirsher 348f7917c00SJeff Kirsher /** 349d429005fSVishal Kulkarni * reclaim_completed_tx - reclaims completed TX Descriptors 350d429005fSVishal Kulkarni * @adap: the adapter 351d429005fSVishal Kulkarni * @q: the Tx queue to reclaim completed descriptors from 352d429005fSVishal Kulkarni * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1 353d429005fSVishal Kulkarni * @unmap: whether the buffers should be unmapped for DMA 354d429005fSVishal Kulkarni * 355d429005fSVishal Kulkarni * Reclaims Tx Descriptors that the SGE has indicated it has processed, 356d429005fSVishal Kulkarni * and frees the associated buffers if possible. 
If @maxreclaim == -1, then 357d429005fSVishal Kulkarni * we'll use a default maximum. Called with the TX Queue locked. 358d429005fSVishal Kulkarni */ 359d429005fSVishal Kulkarni static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, 360d429005fSVishal Kulkarni int maxreclaim, bool unmap) 361d429005fSVishal Kulkarni { 362d429005fSVishal Kulkarni int reclaim = reclaimable(q); 363d429005fSVishal Kulkarni 364d429005fSVishal Kulkarni if (reclaim) { 365d429005fSVishal Kulkarni /* 366d429005fSVishal Kulkarni * Limit the amount of clean up work we do at a time to keep 367d429005fSVishal Kulkarni * the Tx lock hold time O(1). 368d429005fSVishal Kulkarni */ 369d429005fSVishal Kulkarni if (maxreclaim < 0) 370d429005fSVishal Kulkarni maxreclaim = MAX_TX_RECLAIM; 371d429005fSVishal Kulkarni if (reclaim > maxreclaim) 372d429005fSVishal Kulkarni reclaim = maxreclaim; 373d429005fSVishal Kulkarni 374d429005fSVishal Kulkarni free_tx_desc(adap, q, reclaim, unmap); 375d429005fSVishal Kulkarni q->in_use -= reclaim; 376d429005fSVishal Kulkarni } 377d429005fSVishal Kulkarni 378d429005fSVishal Kulkarni return reclaim; 379d429005fSVishal Kulkarni } 380d429005fSVishal Kulkarni 381d429005fSVishal Kulkarni /** 382a6ec572bSAtul Gupta * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors 383f7917c00SJeff Kirsher * @adap: the adapter 384f7917c00SJeff Kirsher * @q: the Tx queue to reclaim completed descriptors from 385f7917c00SJeff Kirsher * @unmap: whether the buffers should be unmapped for DMA 386f7917c00SJeff Kirsher * 387f7917c00SJeff Kirsher * Reclaims Tx descriptors that the SGE has indicated it has processed, 388f7917c00SJeff Kirsher * and frees the associated buffers if possible. Called with the Tx 389f7917c00SJeff Kirsher * queue locked. 390f7917c00SJeff Kirsher */ 391d429005fSVishal Kulkarni void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, 392f7917c00SJeff Kirsher bool unmap) 393f7917c00SJeff Kirsher { 394d429005fSVishal Kulkarni (void)reclaim_completed_tx(adap, q, -1, unmap); 395f7917c00SJeff Kirsher } 396a6ec572bSAtul Gupta EXPORT_SYMBOL(cxgb4_reclaim_completed_tx); 397f7917c00SJeff Kirsher 39852367a76SVipul Pandya static inline int get_buf_size(struct adapter *adapter, 39952367a76SVipul Pandya const struct rx_sw_desc *d) 400f7917c00SJeff Kirsher { 40152367a76SVipul Pandya struct sge *s = &adapter->sge; 40252367a76SVipul Pandya unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; 40352367a76SVipul Pandya int buf_size; 40452367a76SVipul Pandya 40552367a76SVipul Pandya switch (rx_buf_size_idx) { 40652367a76SVipul Pandya case RX_SMALL_PG_BUF: 40752367a76SVipul Pandya buf_size = PAGE_SIZE; 40852367a76SVipul Pandya break; 40952367a76SVipul Pandya 41052367a76SVipul Pandya case RX_LARGE_PG_BUF: 41152367a76SVipul Pandya buf_size = PAGE_SIZE << s->fl_pg_order; 41252367a76SVipul Pandya break; 41352367a76SVipul Pandya 41452367a76SVipul Pandya case RX_SMALL_MTU_BUF: 41552367a76SVipul Pandya buf_size = FL_MTU_SMALL_BUFSIZE(adapter); 41652367a76SVipul Pandya break; 41752367a76SVipul Pandya 41852367a76SVipul Pandya case RX_LARGE_MTU_BUF: 41952367a76SVipul Pandya buf_size = FL_MTU_LARGE_BUFSIZE(adapter); 42052367a76SVipul Pandya break; 42152367a76SVipul Pandya 42252367a76SVipul Pandya default: 423047a013fSArnd Bergmann BUG(); 42452367a76SVipul Pandya } 42552367a76SVipul Pandya 42652367a76SVipul Pandya return buf_size; 427f7917c00SJeff Kirsher } 428f7917c00SJeff Kirsher 429f7917c00SJeff Kirsher /** 430f7917c00SJeff Kirsher * free_rx_bufs - free the Rx buffers on an
SGE free list 431f7917c00SJeff Kirsher * @adap: the adapter 432f7917c00SJeff Kirsher * @q: the SGE free list to free buffers from 433f7917c00SJeff Kirsher * @n: how many buffers to free 434f7917c00SJeff Kirsher * 435f7917c00SJeff Kirsher * Release the next @n buffers on an SGE free-buffer Rx queue. The 436f7917c00SJeff Kirsher * buffers must be made inaccessible to HW before calling this function. 437f7917c00SJeff Kirsher */ 438f7917c00SJeff Kirsher static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) 439f7917c00SJeff Kirsher { 440f7917c00SJeff Kirsher while (n--) { 441f7917c00SJeff Kirsher struct rx_sw_desc *d = &q->sdesc[q->cidx]; 442f7917c00SJeff Kirsher 443f7917c00SJeff Kirsher if (is_buf_mapped(d)) 444f7917c00SJeff Kirsher dma_unmap_page(adap->pdev_dev, get_buf_addr(d), 44552367a76SVipul Pandya get_buf_size(adap, d), 446*4489d8f5SChristophe JAILLET DMA_FROM_DEVICE); 447f7917c00SJeff Kirsher put_page(d->page); 448f7917c00SJeff Kirsher d->page = NULL; 449f7917c00SJeff Kirsher if (++q->cidx == q->size) 450f7917c00SJeff Kirsher q->cidx = 0; 451f7917c00SJeff Kirsher q->avail--; 452f7917c00SJeff Kirsher } 453f7917c00SJeff Kirsher } 454f7917c00SJeff Kirsher 455f7917c00SJeff Kirsher /** 456f7917c00SJeff Kirsher * unmap_rx_buf - unmap the current Rx buffer on an SGE free list 457f7917c00SJeff Kirsher * @adap: the adapter 458f7917c00SJeff Kirsher * @q: the SGE free list 459f7917c00SJeff Kirsher * 460f7917c00SJeff Kirsher * Unmap the current buffer on an SGE free-buffer Rx queue. The 461f7917c00SJeff Kirsher * buffer must be made inaccessible to HW before calling this function. 462f7917c00SJeff Kirsher * 463f7917c00SJeff Kirsher * This is similar to @free_rx_bufs above but does not free the buffer. 464f7917c00SJeff Kirsher * Do note that the FL still loses any further access to the buffer. 465f7917c00SJeff Kirsher */ 466f7917c00SJeff Kirsher static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) 467f7917c00SJeff Kirsher { 468f7917c00SJeff Kirsher struct rx_sw_desc *d = &q->sdesc[q->cidx]; 469f7917c00SJeff Kirsher 470f7917c00SJeff Kirsher if (is_buf_mapped(d)) 471f7917c00SJeff Kirsher dma_unmap_page(adap->pdev_dev, get_buf_addr(d), 472*4489d8f5SChristophe JAILLET get_buf_size(adap, d), DMA_FROM_DEVICE); 473f7917c00SJeff Kirsher d->page = NULL; 474f7917c00SJeff Kirsher if (++q->cidx == q->size) 475f7917c00SJeff Kirsher q->cidx = 0; 476f7917c00SJeff Kirsher q->avail--; 477f7917c00SJeff Kirsher } 478f7917c00SJeff Kirsher 479f7917c00SJeff Kirsher static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) 480f7917c00SJeff Kirsher { 481f7917c00SJeff Kirsher if (q->pend_cred >= 8) { 4823ccc6cf7SHariprasad Shenai u32 val = adap->params.arch.sge_fl_db; 4833ccc6cf7SHariprasad Shenai 484f612b815SHariprasad Shenai if (is_t4(adap->params.chip)) 4853ccc6cf7SHariprasad Shenai val |= PIDX_V(q->pend_cred / 8); 486f612b815SHariprasad Shenai else 4873ccc6cf7SHariprasad Shenai val |= PIDX_T5_V(q->pend_cred / 8); 4881ecc7b7aSHariprasad Shenai 4891ecc7b7aSHariprasad Shenai /* Make sure all memory writes to the Free List queue are 4901ecc7b7aSHariprasad Shenai * committed before we tell the hardware about them. 4911ecc7b7aSHariprasad Shenai */ 492f7917c00SJeff Kirsher wmb(); 493d63a6dcfSHariprasad Shenai 494df64e4d3SHariprasad Shenai /* If we don't have access to the new User Doorbell (T5+), use 495df64e4d3SHariprasad Shenai * the old doorbell mechanism; otherwise use the new BAR2 496df64e4d3SHariprasad Shenai * mechanism. 
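 * (Editor's note, restating the code below: when q->bar2_addr is NULL,
 * typically on T4, the credit update goes through the
 * MYPF_REG(SGE_PF_KDOORBELL_A) register; otherwise it is written directly
 * to the BAR2 user doorbell at q->bar2_addr + SGE_UDB_KDOORBELL.)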
497d63a6dcfSHariprasad Shenai */ 498df64e4d3SHariprasad Shenai if (unlikely(q->bar2_addr == NULL)) { 499f612b815SHariprasad Shenai t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), 500f612b815SHariprasad Shenai val | QID_V(q->cntxt_id)); 501d63a6dcfSHariprasad Shenai } else { 502f612b815SHariprasad Shenai writel(val | QID_V(q->bar2_qid), 503df64e4d3SHariprasad Shenai q->bar2_addr + SGE_UDB_KDOORBELL); 504d63a6dcfSHariprasad Shenai 505d63a6dcfSHariprasad Shenai /* This Write memory Barrier will force the write to 506d63a6dcfSHariprasad Shenai * the User Doorbell area to be flushed. 507d63a6dcfSHariprasad Shenai */ 508d63a6dcfSHariprasad Shenai wmb(); 509d63a6dcfSHariprasad Shenai } 510f7917c00SJeff Kirsher q->pend_cred &= 7; 511f7917c00SJeff Kirsher } 512f7917c00SJeff Kirsher } 513f7917c00SJeff Kirsher 514f7917c00SJeff Kirsher static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, 515f7917c00SJeff Kirsher dma_addr_t mapping) 516f7917c00SJeff Kirsher { 517f7917c00SJeff Kirsher sd->page = pg; 518f7917c00SJeff Kirsher sd->dma_addr = mapping; /* includes size low bits */ 519f7917c00SJeff Kirsher } 520f7917c00SJeff Kirsher 521f7917c00SJeff Kirsher /** 522f7917c00SJeff Kirsher * refill_fl - refill an SGE Rx buffer ring 523f7917c00SJeff Kirsher * @adap: the adapter 524f7917c00SJeff Kirsher * @q: the ring to refill 525f7917c00SJeff Kirsher * @n: the number of new buffers to allocate 526f7917c00SJeff Kirsher * @gfp: the gfp flags for the allocations 527f7917c00SJeff Kirsher * 528f7917c00SJeff Kirsher * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, 529f7917c00SJeff Kirsher * allocated with the supplied gfp flags. The caller must assure that 530f7917c00SJeff Kirsher * @n does not exceed the queue's capacity. If afterwards the queue is 531f7917c00SJeff Kirsher * found critically low mark it as starving in the bitmap of starving FLs. 532f7917c00SJeff Kirsher * 533f7917c00SJeff Kirsher * Returns the number of buffers allocated. 
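 *
 * (Editor's note: the Rx fast path refills through __refill_fl() below,
 * which calls this function with GFP_ATOMIC and at most
 * min(MAX_RX_REFILL, fl_cap(fl) - fl->avail) new buffers.)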
534f7917c00SJeff Kirsher */ 535f7917c00SJeff Kirsher static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, 536f7917c00SJeff Kirsher gfp_t gfp) 537f7917c00SJeff Kirsher { 53852367a76SVipul Pandya struct sge *s = &adap->sge; 539f7917c00SJeff Kirsher struct page *pg; 540f7917c00SJeff Kirsher dma_addr_t mapping; 541f7917c00SJeff Kirsher unsigned int cred = q->avail; 542f7917c00SJeff Kirsher __be64 *d = &q->desc[q->pidx]; 543f7917c00SJeff Kirsher struct rx_sw_desc *sd = &q->sdesc[q->pidx]; 544d52ce920SHariprasad Shenai int node; 545f7917c00SJeff Kirsher 5465b377d11SHariprasad Shenai #ifdef CONFIG_DEBUG_FS 5475b377d11SHariprasad Shenai if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) 5485b377d11SHariprasad Shenai goto out; 5495b377d11SHariprasad Shenai #endif 5505b377d11SHariprasad Shenai 551aa9cd31cSAlexander Duyck gfp |= __GFP_NOWARN; 552d52ce920SHariprasad Shenai node = dev_to_node(adap->pdev_dev); 553f7917c00SJeff Kirsher 55452367a76SVipul Pandya if (s->fl_pg_order == 0) 55552367a76SVipul Pandya goto alloc_small_pages; 55652367a76SVipul Pandya 557f7917c00SJeff Kirsher /* 558f7917c00SJeff Kirsher * Prefer large buffers 559f7917c00SJeff Kirsher */ 560f7917c00SJeff Kirsher while (n) { 561d52ce920SHariprasad Shenai pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order); 562f7917c00SJeff Kirsher if (unlikely(!pg)) { 563f7917c00SJeff Kirsher q->large_alloc_failed++; 564f7917c00SJeff Kirsher break; /* fall back to single pages */ 565f7917c00SJeff Kirsher } 566f7917c00SJeff Kirsher 567f7917c00SJeff Kirsher mapping = dma_map_page(adap->pdev_dev, pg, 0, 56852367a76SVipul Pandya PAGE_SIZE << s->fl_pg_order, 569*4489d8f5SChristophe JAILLET DMA_FROM_DEVICE); 570f7917c00SJeff Kirsher if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { 57152367a76SVipul Pandya __free_pages(pg, s->fl_pg_order); 57270055dd0SHariprasad Shenai q->mapping_err++; 573f7917c00SJeff Kirsher goto out; /* do not try small pages for this error */ 574f7917c00SJeff Kirsher } 57552367a76SVipul Pandya mapping |= RX_LARGE_PG_BUF; 576f7917c00SJeff Kirsher *d++ = cpu_to_be64(mapping); 577f7917c00SJeff Kirsher 578f7917c00SJeff Kirsher set_rx_sw_desc(sd, pg, mapping); 579f7917c00SJeff Kirsher sd++; 580f7917c00SJeff Kirsher 581f7917c00SJeff Kirsher q->avail++; 582f7917c00SJeff Kirsher if (++q->pidx == q->size) { 583f7917c00SJeff Kirsher q->pidx = 0; 584f7917c00SJeff Kirsher sd = q->sdesc; 585f7917c00SJeff Kirsher d = q->desc; 586f7917c00SJeff Kirsher } 587f7917c00SJeff Kirsher n--; 588f7917c00SJeff Kirsher } 589f7917c00SJeff Kirsher 59052367a76SVipul Pandya alloc_small_pages: 591f7917c00SJeff Kirsher while (n--) { 592d52ce920SHariprasad Shenai pg = alloc_pages_node(node, gfp, 0); 593f7917c00SJeff Kirsher if (unlikely(!pg)) { 594f7917c00SJeff Kirsher q->alloc_failed++; 595f7917c00SJeff Kirsher break; 596f7917c00SJeff Kirsher } 597f7917c00SJeff Kirsher 598f7917c00SJeff Kirsher mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, 599*4489d8f5SChristophe JAILLET DMA_FROM_DEVICE); 600f7917c00SJeff Kirsher if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { 6011f2149c1SEric Dumazet put_page(pg); 60270055dd0SHariprasad Shenai q->mapping_err++; 603f7917c00SJeff Kirsher goto out; 604f7917c00SJeff Kirsher } 605f7917c00SJeff Kirsher *d++ = cpu_to_be64(mapping); 606f7917c00SJeff Kirsher 607f7917c00SJeff Kirsher set_rx_sw_desc(sd, pg, mapping); 608f7917c00SJeff Kirsher sd++; 609f7917c00SJeff Kirsher 610f7917c00SJeff Kirsher q->avail++; 611f7917c00SJeff Kirsher if (++q->pidx == q->size) 
{ 612f7917c00SJeff Kirsher q->pidx = 0; 613f7917c00SJeff Kirsher sd = q->sdesc; 614f7917c00SJeff Kirsher d = q->desc; 615f7917c00SJeff Kirsher } 616f7917c00SJeff Kirsher } 617f7917c00SJeff Kirsher 618f7917c00SJeff Kirsher out: cred = q->avail - cred; 619f7917c00SJeff Kirsher q->pend_cred += cred; 620f7917c00SJeff Kirsher ring_fl_db(adap, q); 621f7917c00SJeff Kirsher 622c098b026SHariprasad Shenai if (unlikely(fl_starving(adap, q))) { 623f7917c00SJeff Kirsher smp_wmb(); 62470055dd0SHariprasad Shenai q->low++; 625f7917c00SJeff Kirsher set_bit(q->cntxt_id - adap->sge.egr_start, 626f7917c00SJeff Kirsher adap->sge.starving_fl); 627f7917c00SJeff Kirsher } 628f7917c00SJeff Kirsher 629f7917c00SJeff Kirsher return cred; 630f7917c00SJeff Kirsher } 631f7917c00SJeff Kirsher 632f7917c00SJeff Kirsher static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) 633f7917c00SJeff Kirsher { 634f7917c00SJeff Kirsher refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), 635f7917c00SJeff Kirsher GFP_ATOMIC); 636f7917c00SJeff Kirsher } 637f7917c00SJeff Kirsher 638f7917c00SJeff Kirsher /** 639f7917c00SJeff Kirsher * alloc_ring - allocate resources for an SGE descriptor ring 640f7917c00SJeff Kirsher * @dev: the PCI device's core device 641f7917c00SJeff Kirsher * @nelem: the number of descriptors 642f7917c00SJeff Kirsher * @elem_size: the size of each descriptor 643f7917c00SJeff Kirsher * @sw_size: the size of the SW state associated with each ring element 644f7917c00SJeff Kirsher * @phys: the physical address of the allocated ring 645f7917c00SJeff Kirsher * @metadata: address of the array holding the SW state for the ring 646f7917c00SJeff Kirsher * @stat_size: extra space in HW ring for status information 647f7917c00SJeff Kirsher * @node: preferred node for memory allocations 648f7917c00SJeff Kirsher * 649f7917c00SJeff Kirsher * Allocates resources for an SGE descriptor ring, such as Tx queues, 650f7917c00SJeff Kirsher * free buffer lists, or response queues. Each SGE ring requires 651f7917c00SJeff Kirsher * space for its HW descriptors plus, optionally, space for the SW state 652f7917c00SJeff Kirsher * associated with each HW entry (the metadata). The function returns 653f7917c00SJeff Kirsher * three values: the virtual address for the HW ring (the return value 654f7917c00SJeff Kirsher * of the function), the bus address of the HW ring, and the address 655f7917c00SJeff Kirsher * of the SW ring. 
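 *
 * Illustrative sizing only (editor's note, numbers invented): a ring of
 * 1024 descriptors of 64 bytes each with a 64-byte status page results in
 * a single dma_alloc_coherent() of 1024 * 64 + 64 bytes, plus a
 * kcalloc_node()'d software ring of @sw_size * 1024 bytes when @sw_size
 * is non-zero.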
656f7917c00SJeff Kirsher */ 657f7917c00SJeff Kirsher static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, 658f7917c00SJeff Kirsher size_t sw_size, dma_addr_t *phys, void *metadata, 659f7917c00SJeff Kirsher size_t stat_size, int node) 660f7917c00SJeff Kirsher { 661f7917c00SJeff Kirsher size_t len = nelem * elem_size + stat_size; 662f7917c00SJeff Kirsher void *s = NULL; 663750afb08SLuis Chamberlain void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); 664f7917c00SJeff Kirsher 665f7917c00SJeff Kirsher if (!p) 666f7917c00SJeff Kirsher return NULL; 667f7917c00SJeff Kirsher if (sw_size) { 668590b5b7dSKees Cook s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node); 669f7917c00SJeff Kirsher 670f7917c00SJeff Kirsher if (!s) { 671f7917c00SJeff Kirsher dma_free_coherent(dev, len, p, *phys); 672f7917c00SJeff Kirsher return NULL; 673f7917c00SJeff Kirsher } 674f7917c00SJeff Kirsher } 675f7917c00SJeff Kirsher if (metadata) 676f7917c00SJeff Kirsher *(void **)metadata = s; 677f7917c00SJeff Kirsher return p; 678f7917c00SJeff Kirsher } 679f7917c00SJeff Kirsher 680f7917c00SJeff Kirsher /** 681f7917c00SJeff Kirsher * sgl_len - calculates the size of an SGL of the given capacity 682f7917c00SJeff Kirsher * @n: the number of SGL entries 683f7917c00SJeff Kirsher * 684f7917c00SJeff Kirsher * Calculates the number of flits needed for a scatter/gather list that 685f7917c00SJeff Kirsher * can hold the given number of entries. 686f7917c00SJeff Kirsher */ 687f7917c00SJeff Kirsher static inline unsigned int sgl_len(unsigned int n) 688f7917c00SJeff Kirsher { 6890aac3f56SHariprasad Shenai /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA 6900aac3f56SHariprasad Shenai * addresses. The DSGL Work Request starts off with a 32-bit DSGL 6910aac3f56SHariprasad Shenai * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, 6920aac3f56SHariprasad Shenai * repeated sequences of { Length[i], Length[i+1], Address[i], 6930aac3f56SHariprasad Shenai * Address[i+1] } (this ensures that all addresses are on 64-bit 6940aac3f56SHariprasad Shenai * boundaries). If N is even, then Length[N+1] should be set to 0 and 6950aac3f56SHariprasad Shenai * Address[N+1] is omitted. 6960aac3f56SHariprasad Shenai * 6970aac3f56SHariprasad Shenai * The following calculation incorporates all of the above. It's 6980aac3f56SHariprasad Shenai * somewhat hard to follow but, briefly: the "+2" accounts for the 6990aac3f56SHariprasad Shenai * first two flits which include the DSGL header, Length0 and 7000aac3f56SHariprasad Shenai * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 7010aac3f56SHariprasad Shenai * flits for every pair of the remaining N) +1 if (n-1) is odd; and 7020aac3f56SHariprasad Shenai * finally the "+((n-1)&1)" adds the one remaining flit needed if 7030aac3f56SHariprasad Shenai * (n-1) is odd ... 7040aac3f56SHariprasad Shenai */ 705f7917c00SJeff Kirsher n--; 706f7917c00SJeff Kirsher return (3 * n) / 2 + (n & 1) + 2; 707f7917c00SJeff Kirsher } 708f7917c00SJeff Kirsher 709f7917c00SJeff Kirsher /** 710f7917c00SJeff Kirsher * flits_to_desc - returns the num of Tx descriptors for the given flits 711f7917c00SJeff Kirsher * @n: the number of flits 712f7917c00SJeff Kirsher * 713f7917c00SJeff Kirsher * Returns the number of Tx descriptors needed for the supplied number 714f7917c00SJeff Kirsher * of flits. 
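 *
 * Worked example (editor's illustration): sgl_len(4) above yields 7 flits
 * (n becomes 3 after the decrement, so (3 * 3) / 2 + (3 & 1) + 2), and
 * flits_to_desc(7) = DIV_ROUND_UP(7, 8) = 1 Tx descriptor, since each
 * descriptor holds 8 flits (the divisor used below).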
715f7917c00SJeff Kirsher */ 716f7917c00SJeff Kirsher static inline unsigned int flits_to_desc(unsigned int n) 717f7917c00SJeff Kirsher { 718f7917c00SJeff Kirsher BUG_ON(n > SGE_MAX_WR_LEN / 8); 719f7917c00SJeff Kirsher return DIV_ROUND_UP(n, 8); 720f7917c00SJeff Kirsher } 721f7917c00SJeff Kirsher 722f7917c00SJeff Kirsher /** 723f7917c00SJeff Kirsher * is_eth_imm - can an Ethernet packet be sent as immediate data? 724f7917c00SJeff Kirsher * @skb: the packet 72529bbf5d7SRahul Lakkireddy * @chip_ver: chip version 726f7917c00SJeff Kirsher * 727f7917c00SJeff Kirsher * Returns whether an Ethernet packet is small enough to fit as 7280034b298SKumar Sanghvi * immediate data. Return value corresponds to headroom required. 729f7917c00SJeff Kirsher */ 730d0a1299cSGanesh Goudar static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver) 731f7917c00SJeff Kirsher { 732d0a1299cSGanesh Goudar int hdrlen = 0; 7330034b298SKumar Sanghvi 734d0a1299cSGanesh Goudar if (skb->encapsulation && skb_shinfo(skb)->gso_size && 735d0a1299cSGanesh Goudar chip_ver > CHELSIO_T5) { 736d0a1299cSGanesh Goudar hdrlen = sizeof(struct cpl_tx_tnl_lso); 737d0a1299cSGanesh Goudar hdrlen += sizeof(struct cpl_tx_pkt_core); 7381a2a14fbSRahul Lakkireddy } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 7391a2a14fbSRahul Lakkireddy return 0; 740d0a1299cSGanesh Goudar } else { 741d0a1299cSGanesh Goudar hdrlen = skb_shinfo(skb)->gso_size ? 742d0a1299cSGanesh Goudar sizeof(struct cpl_tx_pkt_lso_core) : 0; 7430034b298SKumar Sanghvi hdrlen += sizeof(struct cpl_tx_pkt); 744d0a1299cSGanesh Goudar } 7450034b298SKumar Sanghvi if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) 7460034b298SKumar Sanghvi return hdrlen; 7470034b298SKumar Sanghvi return 0; 748f7917c00SJeff Kirsher } 749f7917c00SJeff Kirsher 750f7917c00SJeff Kirsher /** 751f7917c00SJeff Kirsher * calc_tx_flits - calculate the number of flits for a packet Tx WR 752f7917c00SJeff Kirsher * @skb: the packet 75329bbf5d7SRahul Lakkireddy * @chip_ver: chip version 754f7917c00SJeff Kirsher * 755f7917c00SJeff Kirsher * Returns the number of flits needed for a Tx WR for the given Ethernet 756f7917c00SJeff Kirsher * packet, including the needed WR and CPL headers. 757f7917c00SJeff Kirsher */ 758d0a1299cSGanesh Goudar static inline unsigned int calc_tx_flits(const struct sk_buff *skb, 759d0a1299cSGanesh Goudar unsigned int chip_ver) 760f7917c00SJeff Kirsher { 761f7917c00SJeff Kirsher unsigned int flits; 762d0a1299cSGanesh Goudar int hdrlen = is_eth_imm(skb, chip_ver); 763f7917c00SJeff Kirsher 7640aac3f56SHariprasad Shenai /* If the skb is small enough, we can pump it out as a work request 7650aac3f56SHariprasad Shenai * with only immediate data. In that case we just have to have the 7660aac3f56SHariprasad Shenai * TX Packet header plus the skb data in the Work Request. 7670aac3f56SHariprasad Shenai */ 7680aac3f56SHariprasad Shenai 7690034b298SKumar Sanghvi if (hdrlen) 7700034b298SKumar Sanghvi return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); 771f7917c00SJeff Kirsher 7720aac3f56SHariprasad Shenai /* Otherwise, we're going to have to construct a Scatter gather list 7730aac3f56SHariprasad Shenai * of the skb body and fragments. We also include the flits necessary 7740aac3f56SHariprasad Shenai * for the TX Packet Work Request and CPL. 
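 * (Editor's illustration: a non-GSO packet with two page fragments needs
 * sgl_len(3) = 5 flits of SGL plus 4 flits of WR/CPL headers, assuming
 * the usual 16-byte fw_eth_tx_pkt_wr and cpl_tx_pkt_core, i.e. 9 flits
 * in total.)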
We always have a firmware 7750aac3f56SHariprasad Shenai * Write Header (incorporated as part of the cpl_tx_pkt_lso and 7760aac3f56SHariprasad Shenai * cpl_tx_pkt structures), followed by either a TX Packet Write CPL 7770aac3f56SHariprasad Shenai * message or, if we're doing a Large Send Offload, an LSO CPL message 7780aac3f56SHariprasad Shenai * with an embedded TX Packet Write CPL message. 7790aac3f56SHariprasad Shenai */ 780fd1754fbSHariprasad Shenai flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); 781d0a1299cSGanesh Goudar if (skb_shinfo(skb)->gso_size) { 7821a2a14fbSRahul Lakkireddy if (skb->encapsulation && chip_ver > CHELSIO_T5) { 783d0a1299cSGanesh Goudar hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + 784d0a1299cSGanesh Goudar sizeof(struct cpl_tx_tnl_lso); 7851a2a14fbSRahul Lakkireddy } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 7861a2a14fbSRahul Lakkireddy u32 pkt_hdrlen; 7871a2a14fbSRahul Lakkireddy 7881a2a14fbSRahul Lakkireddy pkt_hdrlen = eth_get_headlen(skb->dev, skb->data, 7891a2a14fbSRahul Lakkireddy skb_headlen(skb)); 7901a2a14fbSRahul Lakkireddy hdrlen = sizeof(struct fw_eth_tx_eo_wr) + 7911a2a14fbSRahul Lakkireddy round_up(pkt_hdrlen, 16); 7921a2a14fbSRahul Lakkireddy } else { 793d0a1299cSGanesh Goudar hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + 794d0a1299cSGanesh Goudar sizeof(struct cpl_tx_pkt_lso_core); 7951a2a14fbSRahul Lakkireddy } 796d0a1299cSGanesh Goudar 797d0a1299cSGanesh Goudar hdrlen += sizeof(struct cpl_tx_pkt_core); 798d0a1299cSGanesh Goudar flits += (hdrlen / sizeof(__be64)); 799d0a1299cSGanesh Goudar } else { 8000aac3f56SHariprasad Shenai flits += (sizeof(struct fw_eth_tx_pkt_wr) + 8010aac3f56SHariprasad Shenai sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); 802d0a1299cSGanesh Goudar } 803f7917c00SJeff Kirsher return flits; 804f7917c00SJeff Kirsher } 805f7917c00SJeff Kirsher 806f7917c00SJeff Kirsher /** 807f7917c00SJeff Kirsher * calc_tx_descs - calculate the number of Tx descriptors for a packet 808f7917c00SJeff Kirsher * @skb: the packet 80929bbf5d7SRahul Lakkireddy * @chip_ver: chip version 810f7917c00SJeff Kirsher * 811f7917c00SJeff Kirsher * Returns the number of Tx descriptors needed for the given Ethernet 812f7917c00SJeff Kirsher * packet, including the needed WR and CPL headers. 813f7917c00SJeff Kirsher */ 814d0a1299cSGanesh Goudar static inline unsigned int calc_tx_descs(const struct sk_buff *skb, 815d0a1299cSGanesh Goudar unsigned int chip_ver) 816f7917c00SJeff Kirsher { 817d0a1299cSGanesh Goudar return flits_to_desc(calc_tx_flits(skb, chip_ver)); 818f7917c00SJeff Kirsher } 819f7917c00SJeff Kirsher 820f7917c00SJeff Kirsher /** 821a6ec572bSAtul Gupta * cxgb4_write_sgl - populate a scatter/gather list for a packet 822f7917c00SJeff Kirsher * @skb: the packet 823f7917c00SJeff Kirsher * @q: the Tx queue we are writing into 824f7917c00SJeff Kirsher * @sgl: starting location for writing the SGL 825f7917c00SJeff Kirsher * @end: points right after the end of the SGL 826f7917c00SJeff Kirsher * @start: start offset into skb main-body data to include in the SGL 827f7917c00SJeff Kirsher * @addr: the list of bus addresses for the SGL elements 828f7917c00SJeff Kirsher * 829f7917c00SJeff Kirsher * Generates a gather list for the buffers that make up a packet. 830f7917c00SJeff Kirsher * The caller must provide adequate space for the SGL that will be written. 831f7917c00SJeff Kirsher * The SGL includes all of the packet's page fragments and the data in its 832f7917c00SJeff Kirsher * main body except for the first @start bytes. 
@sgl must be 16-byte 833f7917c00SJeff Kirsher * aligned and within a Tx descriptor with available space. @end points 834f7917c00SJeff Kirsher * right after the end of the SGL but does not account for any potential 835f7917c00SJeff Kirsher * wrap around, i.e., @end > @sgl. 836f7917c00SJeff Kirsher */ 837a6ec572bSAtul Gupta void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q, 838f7917c00SJeff Kirsher struct ulptx_sgl *sgl, u64 *end, unsigned int start, 839f7917c00SJeff Kirsher const dma_addr_t *addr) 840f7917c00SJeff Kirsher { 841f7917c00SJeff Kirsher unsigned int i, len; 842f7917c00SJeff Kirsher struct ulptx_sge_pair *to; 843f7917c00SJeff Kirsher const struct skb_shared_info *si = skb_shinfo(skb); 844f7917c00SJeff Kirsher unsigned int nfrags = si->nr_frags; 845f7917c00SJeff Kirsher struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; 846f7917c00SJeff Kirsher 847f7917c00SJeff Kirsher len = skb_headlen(skb) - start; 848f7917c00SJeff Kirsher if (likely(len)) { 849f7917c00SJeff Kirsher sgl->len0 = htonl(len); 850f7917c00SJeff Kirsher sgl->addr0 = cpu_to_be64(addr[0] + start); 851f7917c00SJeff Kirsher nfrags++; 852f7917c00SJeff Kirsher } else { 8539e903e08SEric Dumazet sgl->len0 = htonl(skb_frag_size(&si->frags[0])); 854f7917c00SJeff Kirsher sgl->addr0 = cpu_to_be64(addr[1]); 855f7917c00SJeff Kirsher } 856f7917c00SJeff Kirsher 857bdc590b9SHariprasad Shenai sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | 858bdc590b9SHariprasad Shenai ULPTX_NSGE_V(nfrags)); 859f7917c00SJeff Kirsher if (likely(--nfrags == 0)) 860f7917c00SJeff Kirsher return; 861f7917c00SJeff Kirsher /* 862f7917c00SJeff Kirsher * Most of the complexity below deals with the possibility we hit the 863f7917c00SJeff Kirsher * end of the queue in the middle of writing the SGL. For this case 864f7917c00SJeff Kirsher * only we create the SGL in a temporary buffer and then copy it. 865f7917c00SJeff Kirsher */ 866f7917c00SJeff Kirsher to = (u8 *)end > (u8 *)q->stat ? 
buf : sgl->sge; 867f7917c00SJeff Kirsher 868f7917c00SJeff Kirsher for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { 8699e903e08SEric Dumazet to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); 8709e903e08SEric Dumazet to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); 871f7917c00SJeff Kirsher to->addr[0] = cpu_to_be64(addr[i]); 872f7917c00SJeff Kirsher to->addr[1] = cpu_to_be64(addr[++i]); 873f7917c00SJeff Kirsher } 874f7917c00SJeff Kirsher if (nfrags) { 8759e903e08SEric Dumazet to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); 876f7917c00SJeff Kirsher to->len[1] = cpu_to_be32(0); 877f7917c00SJeff Kirsher to->addr[0] = cpu_to_be64(addr[i + 1]); 878f7917c00SJeff Kirsher } 879f7917c00SJeff Kirsher if (unlikely((u8 *)end > (u8 *)q->stat)) { 880f7917c00SJeff Kirsher unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; 881f7917c00SJeff Kirsher 882f7917c00SJeff Kirsher if (likely(part0)) 883f7917c00SJeff Kirsher memcpy(sgl->sge, buf, part0); 884f7917c00SJeff Kirsher part1 = (u8 *)end - (u8 *)q->stat; 885f7917c00SJeff Kirsher memcpy(q->desc, (u8 *)buf + part0, part1); 886f7917c00SJeff Kirsher end = (void *)q->desc + part1; 887f7917c00SJeff Kirsher } 888f7917c00SJeff Kirsher if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ 88964699336SJoe Perches *end = 0; 890f7917c00SJeff Kirsher } 891a6ec572bSAtul Gupta EXPORT_SYMBOL(cxgb4_write_sgl); 892f7917c00SJeff Kirsher 893687823d2SRohit Maheshwari /* cxgb4_write_partial_sgl - populate SGL for partial packet 894687823d2SRohit Maheshwari * @skb: the packet 895687823d2SRohit Maheshwari * @q: the Tx queue we are writing into 896687823d2SRohit Maheshwari * @sgl: starting location for writing the SGL 897687823d2SRohit Maheshwari * @end: points right after the end of the SGL 898687823d2SRohit Maheshwari * @addr: the list of bus addresses for the SGL elements 899687823d2SRohit Maheshwari * @start: start offset in the SKB where partial data starts 900687823d2SRohit Maheshwari * @len: length of data from @start to send out 901687823d2SRohit Maheshwari * 902687823d2SRohit Maheshwari * This API will handle sending out partial data of a skb if required. 903687823d2SRohit Maheshwari * Unlike cxgb4_write_sgl, @start can be any offset into the skb data, 904687823d2SRohit Maheshwari * and @len will decide how much data after @start offset to send out. 905687823d2SRohit Maheshwari */ 906687823d2SRohit Maheshwari void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q, 907687823d2SRohit Maheshwari struct ulptx_sgl *sgl, u64 *end, 908687823d2SRohit Maheshwari const dma_addr_t *addr, u32 start, u32 len) 909687823d2SRohit Maheshwari { 910687823d2SRohit Maheshwari struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to; 911687823d2SRohit Maheshwari u32 frag_size, skb_linear_data_len = skb_headlen(skb); 912687823d2SRohit Maheshwari struct skb_shared_info *si = skb_shinfo(skb); 913687823d2SRohit Maheshwari u8 i = 0, frag_idx = 0, nfrags = 0; 914687823d2SRohit Maheshwari skb_frag_t *frag; 915687823d2SRohit Maheshwari 916687823d2SRohit Maheshwari /* Fill the first SGL either from linear data or from partial 917687823d2SRohit Maheshwari * frag based on @start. 
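 * (Editor's illustration: with skb_headlen(skb) == 100 and @start == 40,
 * the first entry covers min(@len, 60) bytes of linear data starting at
 * addr[0] + 40; if @start lies beyond the linear data, the loop below
 * first walks the frags to find where the requested region begins.)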
918687823d2SRohit Maheshwari */ 919687823d2SRohit Maheshwari if (unlikely(start < skb_linear_data_len)) { 920687823d2SRohit Maheshwari frag_size = min(len, skb_linear_data_len - start); 921687823d2SRohit Maheshwari sgl->len0 = htonl(frag_size); 922687823d2SRohit Maheshwari sgl->addr0 = cpu_to_be64(addr[0] + start); 923687823d2SRohit Maheshwari len -= frag_size; 924687823d2SRohit Maheshwari nfrags++; 925687823d2SRohit Maheshwari } else { 926687823d2SRohit Maheshwari start -= skb_linear_data_len; 927687823d2SRohit Maheshwari frag = &si->frags[frag_idx]; 928687823d2SRohit Maheshwari frag_size = skb_frag_size(frag); 929687823d2SRohit Maheshwari /* find the first frag */ 930687823d2SRohit Maheshwari while (start >= frag_size) { 931687823d2SRohit Maheshwari start -= frag_size; 932687823d2SRohit Maheshwari frag_idx++; 933687823d2SRohit Maheshwari frag = &si->frags[frag_idx]; 934687823d2SRohit Maheshwari frag_size = skb_frag_size(frag); 935687823d2SRohit Maheshwari } 936687823d2SRohit Maheshwari 937687823d2SRohit Maheshwari frag_size = min(len, skb_frag_size(frag) - start); 938687823d2SRohit Maheshwari sgl->len0 = cpu_to_be32(frag_size); 939687823d2SRohit Maheshwari sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start); 940687823d2SRohit Maheshwari len -= frag_size; 941687823d2SRohit Maheshwari nfrags++; 942687823d2SRohit Maheshwari frag_idx++; 943687823d2SRohit Maheshwari } 944687823d2SRohit Maheshwari 945687823d2SRohit Maheshwari /* If the entire partial data fit in one SGL, then send it out 946687823d2SRohit Maheshwari * now. 947687823d2SRohit Maheshwari */ 948687823d2SRohit Maheshwari if (!len) 949687823d2SRohit Maheshwari goto done; 950687823d2SRohit Maheshwari 951687823d2SRohit Maheshwari /* Most of the complexity below deals with the possibility we hit the 952687823d2SRohit Maheshwari * end of the queue in the middle of writing the SGL. For this case 953687823d2SRohit Maheshwari * only we create the SGL in a temporary buffer and then copy it. 954687823d2SRohit Maheshwari */ 955687823d2SRohit Maheshwari to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; 956687823d2SRohit Maheshwari 957687823d2SRohit Maheshwari /* If the skb couldn't fit in first SGL completely, fill the 958687823d2SRohit Maheshwari * rest of the frags in subsequent SGLs. Note that each SGL 959687823d2SRohit Maheshwari * pair can store 2 frags. 960687823d2SRohit Maheshwari */ 961687823d2SRohit Maheshwari while (len) { 962687823d2SRohit Maheshwari frag_size = min(len, skb_frag_size(&si->frags[frag_idx])); 963687823d2SRohit Maheshwari to->len[i & 1] = cpu_to_be32(frag_size); 964687823d2SRohit Maheshwari to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]); 965687823d2SRohit Maheshwari if (i && (i & 1)) 966687823d2SRohit Maheshwari to++; 967687823d2SRohit Maheshwari nfrags++; 968687823d2SRohit Maheshwari frag_idx++; 969687823d2SRohit Maheshwari i++; 970687823d2SRohit Maheshwari len -= frag_size; 971687823d2SRohit Maheshwari } 972687823d2SRohit Maheshwari 973687823d2SRohit Maheshwari /* If we ended in an odd boundary, then set the second SGL's 974687823d2SRohit Maheshwari * length in the pair to 0. 975687823d2SRohit Maheshwari */ 976687823d2SRohit Maheshwari if (i & 1) 977687823d2SRohit Maheshwari to->len[1] = cpu_to_be32(0); 978687823d2SRohit Maheshwari 979687823d2SRohit Maheshwari /* Copy from temporary buffer to Tx ring, in case we hit the 980687823d2SRohit Maheshwari * end of the queue in the middle of writing the SGL. 
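 * (Editor's note: part0 below is however many bytes still fit before the
 * status page at q->stat, and the remaining part1 bytes wrap around to
 * the start of the descriptor ring at q->desc.)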
981687823d2SRohit Maheshwari */ 982687823d2SRohit Maheshwari if (unlikely((u8 *)end > (u8 *)q->stat)) { 983687823d2SRohit Maheshwari u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; 984687823d2SRohit Maheshwari 985687823d2SRohit Maheshwari if (likely(part0)) 986687823d2SRohit Maheshwari memcpy(sgl->sge, buf, part0); 987687823d2SRohit Maheshwari part1 = (u8 *)end - (u8 *)q->stat; 988687823d2SRohit Maheshwari memcpy(q->desc, (u8 *)buf + part0, part1); 989687823d2SRohit Maheshwari end = (void *)q->desc + part1; 990687823d2SRohit Maheshwari } 991687823d2SRohit Maheshwari 992687823d2SRohit Maheshwari /* 0-pad to multiple of 16 */ 993687823d2SRohit Maheshwari if ((uintptr_t)end & 8) 994687823d2SRohit Maheshwari *end = 0; 995687823d2SRohit Maheshwari done: 996687823d2SRohit Maheshwari sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | 997687823d2SRohit Maheshwari ULPTX_NSGE_V(nfrags)); 998687823d2SRohit Maheshwari } 999687823d2SRohit Maheshwari EXPORT_SYMBOL(cxgb4_write_partial_sgl); 1000687823d2SRohit Maheshwari 1001df64e4d3SHariprasad Shenai /* This function copies a 64 byte coalesced work request to 1002df64e4d3SHariprasad Shenai * memory mapped BAR2 space. For coalesced WR SGE fetches 1003df64e4d3SHariprasad Shenai * data from the FIFO instead of from Host. 100422adfe0aSSantosh Rastapur */ 1005df64e4d3SHariprasad Shenai static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) 100622adfe0aSSantosh Rastapur { 1007df64e4d3SHariprasad Shenai int count = 8; 100822adfe0aSSantosh Rastapur 100922adfe0aSSantosh Rastapur while (count) { 101022adfe0aSSantosh Rastapur writeq(*src, dst); 101122adfe0aSSantosh Rastapur src++; 101222adfe0aSSantosh Rastapur dst++; 101322adfe0aSSantosh Rastapur count--; 101422adfe0aSSantosh Rastapur } 101522adfe0aSSantosh Rastapur } 101622adfe0aSSantosh Rastapur 1017f7917c00SJeff Kirsher /** 1018a6ec572bSAtul Gupta * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell 1019f7917c00SJeff Kirsher * @adap: the adapter 1020f7917c00SJeff Kirsher * @q: the Tx queue 1021f7917c00SJeff Kirsher * @n: number of new descriptors to give to HW 1022f7917c00SJeff Kirsher * 1023f7917c00SJeff Kirsher * Ring the doorbell for a Tx queue. 1024f7917c00SJeff Kirsher */ 1025a6ec572bSAtul Gupta inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) 1026f7917c00SJeff Kirsher { 10271ecc7b7aSHariprasad Shenai /* Make sure that all writes to the TX Descriptors are committed 10281ecc7b7aSHariprasad Shenai * before we tell the hardware about them. 10291ecc7b7aSHariprasad Shenai */ 10301ecc7b7aSHariprasad Shenai wmb(); 1031d63a6dcfSHariprasad Shenai 1032df64e4d3SHariprasad Shenai /* If we don't have access to the new User Doorbell (T5+), use the old 1033df64e4d3SHariprasad Shenai * doorbell mechanism; otherwise use the new BAR2 mechanism. 1034df64e4d3SHariprasad Shenai */ 1035df64e4d3SHariprasad Shenai if (unlikely(q->bar2_addr == NULL)) { 1036f612b815SHariprasad Shenai u32 val = PIDX_V(n); 103705eb2389SSteve Wise unsigned long flags; 103822adfe0aSSantosh Rastapur 1039d63a6dcfSHariprasad Shenai /* For T4 we need to participate in the Doorbell Recovery 1040d63a6dcfSHariprasad Shenai * mechanism.
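 * (Editor's note: if doorbells are currently disabled, the PIDX update is
 * accumulated in q->db_pidx_inc below instead of being written to
 * hardware, so the recovery logic can replay it once doorbells are
 * re-enabled.)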
1041d63a6dcfSHariprasad Shenai */ 104205eb2389SSteve Wise spin_lock_irqsave(&q->db_lock, flags); 1043d63a6dcfSHariprasad Shenai if (!q->db_disabled) 1044f612b815SHariprasad Shenai t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), 1045f612b815SHariprasad Shenai QID_V(q->cntxt_id) | val); 1046d63a6dcfSHariprasad Shenai else 104705eb2389SSteve Wise q->db_pidx_inc += n; 10483069ee9bSVipul Pandya q->db_pidx = q->pidx; 104905eb2389SSteve Wise spin_unlock_irqrestore(&q->db_lock, flags); 1050d63a6dcfSHariprasad Shenai } else { 1051f612b815SHariprasad Shenai u32 val = PIDX_T5_V(n); 1052d63a6dcfSHariprasad Shenai 1053d63a6dcfSHariprasad Shenai /* T4 and later chips share the same PIDX field offset within 1054d63a6dcfSHariprasad Shenai * the doorbell, but T5 and later shrank the field in order to 1055d63a6dcfSHariprasad Shenai * gain a bit for Doorbell Priority. The field was absurdly 1056d63a6dcfSHariprasad Shenai * large in the first place (14 bits) so we just use the T5 1057d63a6dcfSHariprasad Shenai * and later limits and warn if a Queue ID is too large. 1058d63a6dcfSHariprasad Shenai */ 1059f612b815SHariprasad Shenai WARN_ON(val & DBPRIO_F); 1060d63a6dcfSHariprasad Shenai 1061df64e4d3SHariprasad Shenai /* If we're only writing a single TX Descriptor and we can use 1062df64e4d3SHariprasad Shenai * Inferred QID registers, we can use the Write Combining 1063df64e4d3SHariprasad Shenai * Gather Buffer; otherwise we use the simple doorbell. 1064d63a6dcfSHariprasad Shenai */ 1065df64e4d3SHariprasad Shenai if (n == 1 && q->bar2_qid == 0) { 1066d63a6dcfSHariprasad Shenai int index = (q->pidx 1067d63a6dcfSHariprasad Shenai ? (q->pidx - 1) 1068d63a6dcfSHariprasad Shenai : (q->size - 1)); 1069df64e4d3SHariprasad Shenai u64 *wr = (u64 *)&q->desc[index]; 1070d63a6dcfSHariprasad Shenai 1071df64e4d3SHariprasad Shenai cxgb_pio_copy((u64 __iomem *) 1072df64e4d3SHariprasad Shenai (q->bar2_addr + SGE_UDB_WCDOORBELL), 1073df64e4d3SHariprasad Shenai wr); 1074d63a6dcfSHariprasad Shenai } else { 1075f612b815SHariprasad Shenai writel(val | QID_V(q->bar2_qid), 1076df64e4d3SHariprasad Shenai q->bar2_addr + SGE_UDB_KDOORBELL); 1077d63a6dcfSHariprasad Shenai } 1078d63a6dcfSHariprasad Shenai 1079d63a6dcfSHariprasad Shenai /* This Write Memory Barrier will force the write to the User 1080d63a6dcfSHariprasad Shenai * Doorbell area to be flushed. This is needed to prevent 1081d63a6dcfSHariprasad Shenai * writes on different CPUs for the same queue from hitting 1082d63a6dcfSHariprasad Shenai * the adapter out of order. This is required when some Work 1083d63a6dcfSHariprasad Shenai * Requests take the Write Combine Gather Buffer path (user 1084d63a6dcfSHariprasad Shenai * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some 1085d63a6dcfSHariprasad Shenai * take the traditional path where we simply increment the 1086d63a6dcfSHariprasad Shenai * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the 1087d63a6dcfSHariprasad Shenai * hardware DMA read the actual Work Request. 
1088d63a6dcfSHariprasad Shenai */ 1089d63a6dcfSHariprasad Shenai wmb(); 1090d63a6dcfSHariprasad Shenai } 1091f7917c00SJeff Kirsher } 1092a6ec572bSAtul Gupta EXPORT_SYMBOL(cxgb4_ring_tx_db); 1093f7917c00SJeff Kirsher 1094f7917c00SJeff Kirsher /** 1095a6ec572bSAtul Gupta * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors 1096f7917c00SJeff Kirsher * @skb: the packet 1097f7917c00SJeff Kirsher * @q: the Tx queue where the packet will be inlined 1098f7917c00SJeff Kirsher * @pos: starting position in the Tx queue where to inline the packet 1099f7917c00SJeff Kirsher * 1100f7917c00SJeff Kirsher * Inline a packet's contents directly into Tx descriptors, starting at 1101f7917c00SJeff Kirsher * the given position within the Tx DMA ring. 1102f7917c00SJeff Kirsher * Most of the complexity of this operation is dealing with wrap arounds 1103f7917c00SJeff Kirsher * in the middle of the packet we want to inline. 1104f7917c00SJeff Kirsher */ 1105a6ec572bSAtul Gupta void cxgb4_inline_tx_skb(const struct sk_buff *skb, 1106a6ec572bSAtul Gupta const struct sge_txq *q, void *pos) 1107f7917c00SJeff Kirsher { 1108f7917c00SJeff Kirsher int left = (void *)q->stat - pos; 1109e383f248SAtul Gupta u64 *p; 1110f7917c00SJeff Kirsher 1111f7917c00SJeff Kirsher if (likely(skb->len <= left)) { 1112f7917c00SJeff Kirsher if (likely(!skb->data_len)) 1113f7917c00SJeff Kirsher skb_copy_from_linear_data(skb, pos, skb->len); 1114f7917c00SJeff Kirsher else 1115f7917c00SJeff Kirsher skb_copy_bits(skb, 0, pos, skb->len); 1116f7917c00SJeff Kirsher pos += skb->len; 1117f7917c00SJeff Kirsher } else { 1118f7917c00SJeff Kirsher skb_copy_bits(skb, 0, pos, left); 1119f7917c00SJeff Kirsher skb_copy_bits(skb, left, q->desc, skb->len - left); 1120f7917c00SJeff Kirsher pos = (void *)q->desc + (skb->len - left); 1121f7917c00SJeff Kirsher } 1122f7917c00SJeff Kirsher 1123f7917c00SJeff Kirsher /* 0-pad to multiple of 16 */ 1124f7917c00SJeff Kirsher p = PTR_ALIGN(pos, 8); 1125f7917c00SJeff Kirsher if ((uintptr_t)p & 8) 1126f7917c00SJeff Kirsher *p = 0; 1127f7917c00SJeff Kirsher } 1128a6ec572bSAtul Gupta EXPORT_SYMBOL(cxgb4_inline_tx_skb); 1129f7917c00SJeff Kirsher 11308d0557d2SHariprasad Shenai static void *inline_tx_skb_header(const struct sk_buff *skb, 11318d0557d2SHariprasad Shenai const struct sge_txq *q, void *pos, 11328d0557d2SHariprasad Shenai int length) 11338d0557d2SHariprasad Shenai { 11348d0557d2SHariprasad Shenai u64 *p; 11358d0557d2SHariprasad Shenai int left = (void *)q->stat - pos; 11368d0557d2SHariprasad Shenai 11378d0557d2SHariprasad Shenai if (likely(length <= left)) { 11388d0557d2SHariprasad Shenai memcpy(pos, skb->data, length); 11398d0557d2SHariprasad Shenai pos += length; 11408d0557d2SHariprasad Shenai } else { 11418d0557d2SHariprasad Shenai memcpy(pos, skb->data, left); 11428d0557d2SHariprasad Shenai memcpy(q->desc, skb->data + left, length - left); 11438d0557d2SHariprasad Shenai pos = (void *)q->desc + (length - left); 11448d0557d2SHariprasad Shenai } 11458d0557d2SHariprasad Shenai /* 0-pad to multiple of 16 */ 11468d0557d2SHariprasad Shenai p = PTR_ALIGN(pos, 8); 11478d0557d2SHariprasad Shenai if ((uintptr_t)p & 8) { 11488d0557d2SHariprasad Shenai *p = 0; 11498d0557d2SHariprasad Shenai return p + 1; 11508d0557d2SHariprasad Shenai } 11518d0557d2SHariprasad Shenai return p; 11528d0557d2SHariprasad Shenai } 11538d0557d2SHariprasad Shenai 1154f7917c00SJeff Kirsher /* 1155f7917c00SJeff Kirsher * Figure out what HW csum a packet wants and return the appropriate control 1156f7917c00SJeff Kirsher * bits. 
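 *
 * As an illustrative example (assuming a plain, untagged IPv4/TCP packet
 * with a 20-byte IP header and no encapsulation): csum_type becomes
 * TX_CSUM_TCPIP, the extra Ethernet header length beyond the standard
 * 14 bytes is 0, and the IP header length is 20, so the returned value is
 * TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN_V(20) together with
 * TXPKT_ETHHDR_LEN_V(0) or T6_TXPKT_ETHHDR_LEN_V(0), depending on chip.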
1157f7917c00SJeff Kirsher */ 11583ccc6cf7SHariprasad Shenai static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb) 1159f7917c00SJeff Kirsher { 1160f7917c00SJeff Kirsher int csum_type; 1161c50ae55eSGanesh Goudar bool inner_hdr_csum = false; 1162c50ae55eSGanesh Goudar u16 proto, ver; 1163f7917c00SJeff Kirsher 1164c50ae55eSGanesh Goudar if (skb->encapsulation && 1165c50ae55eSGanesh Goudar (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)) 1166c50ae55eSGanesh Goudar inner_hdr_csum = true; 1167c50ae55eSGanesh Goudar 1168c50ae55eSGanesh Goudar if (inner_hdr_csum) { 1169c50ae55eSGanesh Goudar ver = inner_ip_hdr(skb)->version; 1170c50ae55eSGanesh Goudar proto = (ver == 4) ? inner_ip_hdr(skb)->protocol : 1171c50ae55eSGanesh Goudar inner_ipv6_hdr(skb)->nexthdr; 1172c50ae55eSGanesh Goudar } else { 1173c50ae55eSGanesh Goudar ver = ip_hdr(skb)->version; 1174c50ae55eSGanesh Goudar proto = (ver == 4) ? ip_hdr(skb)->protocol : 1175c50ae55eSGanesh Goudar ipv6_hdr(skb)->nexthdr; 1176c50ae55eSGanesh Goudar } 1177c50ae55eSGanesh Goudar 1178c50ae55eSGanesh Goudar if (ver == 4) { 1179c50ae55eSGanesh Goudar if (proto == IPPROTO_TCP) 1180f7917c00SJeff Kirsher csum_type = TX_CSUM_TCPIP; 1181c50ae55eSGanesh Goudar else if (proto == IPPROTO_UDP) 1182f7917c00SJeff Kirsher csum_type = TX_CSUM_UDPIP; 1183f7917c00SJeff Kirsher else { 1184f7917c00SJeff Kirsher nocsum: /* 1185f7917c00SJeff Kirsher * unknown protocol, disable HW csum 1186f7917c00SJeff Kirsher * and hope a bad packet is detected 1187f7917c00SJeff Kirsher */ 11881ecc7b7aSHariprasad Shenai return TXPKT_L4CSUM_DIS_F; 1189f7917c00SJeff Kirsher } 1190f7917c00SJeff Kirsher } else { 1191f7917c00SJeff Kirsher /* 1192f7917c00SJeff Kirsher * this doesn't work with extension headers 1193f7917c00SJeff Kirsher */ 1194c50ae55eSGanesh Goudar if (proto == IPPROTO_TCP) 1195f7917c00SJeff Kirsher csum_type = TX_CSUM_TCPIP6; 1196c50ae55eSGanesh Goudar else if (proto == IPPROTO_UDP) 1197f7917c00SJeff Kirsher csum_type = TX_CSUM_UDPIP6; 1198f7917c00SJeff Kirsher else 1199f7917c00SJeff Kirsher goto nocsum; 1200f7917c00SJeff Kirsher } 1201f7917c00SJeff Kirsher 12023ccc6cf7SHariprasad Shenai if (likely(csum_type >= TX_CSUM_TCPIP)) { 1203c50ae55eSGanesh Goudar int eth_hdr_len, l4_len; 1204c50ae55eSGanesh Goudar u64 hdr_len; 1205c50ae55eSGanesh Goudar 1206c50ae55eSGanesh Goudar if (inner_hdr_csum) { 1207c50ae55eSGanesh Goudar /* This allows checksum offload for all encapsulated 1208c50ae55eSGanesh Goudar * packets like GRE etc.. 
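 * For example, for a VXLAN- or GRE-encapsulated TCP/IPv4 packet the
 * lengths below are taken from the inner headers (inner network offset
 * and inner network header length), so the offload applies to the inner
 * L3/L4 headers rather than the outer ones.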
1209c50ae55eSGanesh Goudar */ 1210c50ae55eSGanesh Goudar l4_len = skb_inner_network_header_len(skb); 1211c50ae55eSGanesh Goudar eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN; 1212c50ae55eSGanesh Goudar } else { 1213c50ae55eSGanesh Goudar l4_len = skb_network_header_len(skb); 1214c50ae55eSGanesh Goudar eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; 1215c50ae55eSGanesh Goudar } 1216c50ae55eSGanesh Goudar hdr_len = TXPKT_IPHDR_LEN_V(l4_len); 12173ccc6cf7SHariprasad Shenai 12183ccc6cf7SHariprasad Shenai if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) 12193ccc6cf7SHariprasad Shenai hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len); 12203ccc6cf7SHariprasad Shenai else 12213ccc6cf7SHariprasad Shenai hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len); 12223ccc6cf7SHariprasad Shenai return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len; 12233ccc6cf7SHariprasad Shenai } else { 1224f7917c00SJeff Kirsher int start = skb_transport_offset(skb); 1225f7917c00SJeff Kirsher 12261ecc7b7aSHariprasad Shenai return TXPKT_CSUM_TYPE_V(csum_type) | 12271ecc7b7aSHariprasad Shenai TXPKT_CSUM_START_V(start) | 12281ecc7b7aSHariprasad Shenai TXPKT_CSUM_LOC_V(start + skb->csum_offset); 1229f7917c00SJeff Kirsher } 1230f7917c00SJeff Kirsher } 1231f7917c00SJeff Kirsher 1232f7917c00SJeff Kirsher static void eth_txq_stop(struct sge_eth_txq *q) 1233f7917c00SJeff Kirsher { 1234f7917c00SJeff Kirsher netif_tx_stop_queue(q->txq); 1235f7917c00SJeff Kirsher q->q.stops++; 1236f7917c00SJeff Kirsher } 1237f7917c00SJeff Kirsher 1238f7917c00SJeff Kirsher static inline void txq_advance(struct sge_txq *q, unsigned int n) 1239f7917c00SJeff Kirsher { 1240f7917c00SJeff Kirsher q->in_use += n; 1241f7917c00SJeff Kirsher q->pidx += n; 1242f7917c00SJeff Kirsher if (q->pidx >= q->size) 1243f7917c00SJeff Kirsher q->pidx -= q->size; 1244f7917c00SJeff Kirsher } 1245f7917c00SJeff Kirsher 124684a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE 124784a200b3SVarun Prakash static inline int 124884a200b3SVarun Prakash cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, 124984a200b3SVarun Prakash const struct port_info *pi, u64 *cntrl) 125084a200b3SVarun Prakash { 125184a200b3SVarun Prakash const struct cxgb_fcoe *fcoe = &pi->fcoe; 125284a200b3SVarun Prakash 125384a200b3SVarun Prakash if (!(fcoe->flags & CXGB_FCOE_ENABLED)) 125484a200b3SVarun Prakash return 0; 125584a200b3SVarun Prakash 125684a200b3SVarun Prakash if (skb->protocol != htons(ETH_P_FCOE)) 125784a200b3SVarun Prakash return 0; 125884a200b3SVarun Prakash 125984a200b3SVarun Prakash skb_reset_mac_header(skb); 126084a200b3SVarun Prakash skb->mac_len = sizeof(struct ethhdr); 126184a200b3SVarun Prakash 126284a200b3SVarun Prakash skb_set_network_header(skb, skb->mac_len); 126384a200b3SVarun Prakash skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); 126484a200b3SVarun Prakash 126584a200b3SVarun Prakash if (!cxgb_fcoe_sof_eof_supported(adap, skb)) 126684a200b3SVarun Prakash return -ENOTSUPP; 126784a200b3SVarun Prakash 126884a200b3SVarun Prakash /* FC CRC offload */ 12691ecc7b7aSHariprasad Shenai *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) | 12701ecc7b7aSHariprasad Shenai TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F | 12711ecc7b7aSHariprasad Shenai TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) | 12721ecc7b7aSHariprasad Shenai TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) | 12731ecc7b7aSHariprasad Shenai TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END); 127484a200b3SVarun Prakash return 0; 127584a200b3SVarun Prakash } 127684a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */ 127784a200b3SVarun 
Prakash 1278d0a1299cSGanesh Goudar /* Returns tunnel type if hardware supports offloading of the same. 1279d0a1299cSGanesh Goudar * It is called only for T5 and onwards. 1280d0a1299cSGanesh Goudar */ 1281d0a1299cSGanesh Goudar enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb) 1282d0a1299cSGanesh Goudar { 1283d0a1299cSGanesh Goudar u8 l4_hdr = 0; 1284d0a1299cSGanesh Goudar enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; 1285d0a1299cSGanesh Goudar struct port_info *pi = netdev_priv(skb->dev); 1286d0a1299cSGanesh Goudar struct adapter *adapter = pi->adapter; 1287d0a1299cSGanesh Goudar 1288d0a1299cSGanesh Goudar if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || 1289d0a1299cSGanesh Goudar skb->inner_protocol != htons(ETH_P_TEB)) 1290d0a1299cSGanesh Goudar return tnl_type; 1291d0a1299cSGanesh Goudar 1292d0a1299cSGanesh Goudar switch (vlan_get_protocol(skb)) { 1293d0a1299cSGanesh Goudar case htons(ETH_P_IP): 1294d0a1299cSGanesh Goudar l4_hdr = ip_hdr(skb)->protocol; 1295d0a1299cSGanesh Goudar break; 1296d0a1299cSGanesh Goudar case htons(ETH_P_IPV6): 1297d0a1299cSGanesh Goudar l4_hdr = ipv6_hdr(skb)->nexthdr; 1298d0a1299cSGanesh Goudar break; 1299d0a1299cSGanesh Goudar default: 1300d0a1299cSGanesh Goudar return tnl_type; 1301d0a1299cSGanesh Goudar } 1302d0a1299cSGanesh Goudar 1303d0a1299cSGanesh Goudar switch (l4_hdr) { 1304d0a1299cSGanesh Goudar case IPPROTO_UDP: 1305d0a1299cSGanesh Goudar if (adapter->vxlan_port == udp_hdr(skb)->dest) 1306d0a1299cSGanesh Goudar tnl_type = TX_TNL_TYPE_VXLAN; 1307c746fc0eSGanesh Goudar else if (adapter->geneve_port == udp_hdr(skb)->dest) 1308c746fc0eSGanesh Goudar tnl_type = TX_TNL_TYPE_GENEVE; 1309d0a1299cSGanesh Goudar break; 1310d0a1299cSGanesh Goudar default: 1311d0a1299cSGanesh Goudar return tnl_type; 1312d0a1299cSGanesh Goudar } 1313d0a1299cSGanesh Goudar 1314d0a1299cSGanesh Goudar return tnl_type; 1315d0a1299cSGanesh Goudar } 1316d0a1299cSGanesh Goudar 1317d0a1299cSGanesh Goudar static inline void t6_fill_tnl_lso(struct sk_buff *skb, 1318d0a1299cSGanesh Goudar struct cpl_tx_tnl_lso *tnl_lso, 1319d0a1299cSGanesh Goudar enum cpl_tx_tnl_lso_type tnl_type) 1320d0a1299cSGanesh Goudar { 1321d0a1299cSGanesh Goudar u32 val; 1322d0a1299cSGanesh Goudar int in_eth_xtra_len; 1323d0a1299cSGanesh Goudar int l3hdr_len = skb_network_header_len(skb); 1324d0a1299cSGanesh Goudar int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; 1325d0a1299cSGanesh Goudar const struct skb_shared_info *ssi = skb_shinfo(skb); 1326d0a1299cSGanesh Goudar bool v6 = (ip_hdr(skb)->version == 6); 1327d0a1299cSGanesh Goudar 1328d0a1299cSGanesh Goudar val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) | 1329d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_FIRST_F | 1330d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_LAST_F | 1331d0a1299cSGanesh Goudar (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) | 1332d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) | 1333d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) | 1334d0a1299cSGanesh Goudar (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) | 1335d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_IPLENSETOUT_F | 1336d0a1299cSGanesh Goudar (v6 ? 
0 : CPL_TX_TNL_LSO_IPIDINCOUT_F); 1337d0a1299cSGanesh Goudar tnl_lso->op_to_IpIdSplitOut = htonl(val); 1338d0a1299cSGanesh Goudar 1339d0a1299cSGanesh Goudar tnl_lso->IpIdOffsetOut = 0; 1340d0a1299cSGanesh Goudar 1341d0a1299cSGanesh Goudar /* Get the tunnel header length */ 1342d0a1299cSGanesh Goudar val = skb_inner_mac_header(skb) - skb_mac_header(skb); 1343d0a1299cSGanesh Goudar in_eth_xtra_len = skb_inner_network_header(skb) - 1344d0a1299cSGanesh Goudar skb_inner_mac_header(skb) - ETH_HLEN; 1345d0a1299cSGanesh Goudar 1346d0a1299cSGanesh Goudar switch (tnl_type) { 1347d0a1299cSGanesh Goudar case TX_TNL_TYPE_VXLAN: 1348c746fc0eSGanesh Goudar case TX_TNL_TYPE_GENEVE: 1349d0a1299cSGanesh Goudar tnl_lso->UdpLenSetOut_to_TnlHdrLen = 1350d0a1299cSGanesh Goudar htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F | 1351d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_UDPLENSETOUT_F); 1352d0a1299cSGanesh Goudar break; 1353d0a1299cSGanesh Goudar default: 1354d0a1299cSGanesh Goudar tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0; 1355d0a1299cSGanesh Goudar break; 1356d0a1299cSGanesh Goudar } 1357d0a1299cSGanesh Goudar 1358d0a1299cSGanesh Goudar tnl_lso->UdpLenSetOut_to_TnlHdrLen |= 1359d0a1299cSGanesh Goudar htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) | 1360d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type)); 1361d0a1299cSGanesh Goudar 1362d0a1299cSGanesh Goudar tnl_lso->r1 = 0; 1363d0a1299cSGanesh Goudar 1364d0a1299cSGanesh Goudar val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) | 1365d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) | 1366d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) | 1367d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4); 1368d0a1299cSGanesh Goudar tnl_lso->Flow_to_TcpHdrLen = htonl(val); 1369d0a1299cSGanesh Goudar 1370d0a1299cSGanesh Goudar tnl_lso->IpIdOffset = htons(0); 1371d0a1299cSGanesh Goudar 1372d0a1299cSGanesh Goudar tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size)); 1373d0a1299cSGanesh Goudar tnl_lso->TCPSeqOffset = htonl(0); 1374d0a1299cSGanesh Goudar tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len)); 1375d0a1299cSGanesh Goudar } 1376d0a1299cSGanesh Goudar 13774846d533SRahul Lakkireddy static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb, 13784846d533SRahul Lakkireddy struct cpl_tx_pkt_lso_core *lso) 13794846d533SRahul Lakkireddy { 13804846d533SRahul Lakkireddy int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; 13814846d533SRahul Lakkireddy int l3hdr_len = skb_network_header_len(skb); 13824846d533SRahul Lakkireddy const struct skb_shared_info *ssi; 13834846d533SRahul Lakkireddy bool ipv6 = false; 13844846d533SRahul Lakkireddy 13854846d533SRahul Lakkireddy ssi = skb_shinfo(skb); 13864846d533SRahul Lakkireddy if (ssi->gso_type & SKB_GSO_TCPV6) 13874846d533SRahul Lakkireddy ipv6 = true; 13884846d533SRahul Lakkireddy 13894846d533SRahul Lakkireddy lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | 13904846d533SRahul Lakkireddy LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | 13914846d533SRahul Lakkireddy LSO_IPV6_V(ipv6) | 13924846d533SRahul Lakkireddy LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | 13934846d533SRahul Lakkireddy LSO_IPHDR_LEN_V(l3hdr_len / 4) | 13944846d533SRahul Lakkireddy LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); 13954846d533SRahul Lakkireddy lso->ipid_ofst = htons(0); 13964846d533SRahul Lakkireddy lso->mss = htons(ssi->gso_size); 13974846d533SRahul Lakkireddy lso->seqno_offset = htonl(0); 13984846d533SRahul Lakkireddy if 
(is_t4(adap->params.chip)) 13994846d533SRahul Lakkireddy lso->len = htonl(skb->len); 14004846d533SRahul Lakkireddy else 14014846d533SRahul Lakkireddy lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); 14024846d533SRahul Lakkireddy 14034846d533SRahul Lakkireddy return (void *)(lso + 1); 14044846d533SRahul Lakkireddy } 14054846d533SRahul Lakkireddy 1406f7917c00SJeff Kirsher /** 1407d429005fSVishal Kulkarni * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update 1408d429005fSVishal Kulkarni * @adap: the adapter 1409d429005fSVishal Kulkarni * @eq: the Ethernet TX Queue 1410d429005fSVishal Kulkarni * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1 1411d429005fSVishal Kulkarni * 1412d429005fSVishal Kulkarni * We're typically called here to update the state of an Ethernet TX 1413d429005fSVishal Kulkarni * Queue with respect to the hardware's progress in consuming the TX 1414d429005fSVishal Kulkarni * Work Requests that we've put on that Egress Queue. This happens 1415d429005fSVishal Kulkarni * when we get Egress Queue Update messages and also prophylactically 1416d429005fSVishal Kulkarni * in regular timer-based Ethernet TX Queue maintenance. 1417d429005fSVishal Kulkarni */ 1418d429005fSVishal Kulkarni int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, 1419d429005fSVishal Kulkarni int maxreclaim) 1420d429005fSVishal Kulkarni { 1421f1f20a86SRahul Lakkireddy unsigned int reclaimed, hw_cidx; 1422d429005fSVishal Kulkarni struct sge_txq *q = &eq->q; 1423f1f20a86SRahul Lakkireddy int hw_in_use; 1424d429005fSVishal Kulkarni 1425d429005fSVishal Kulkarni if (!q->in_use || !__netif_tx_trylock(eq->txq)) 1426d429005fSVishal Kulkarni return 0; 1427d429005fSVishal Kulkarni 1428d429005fSVishal Kulkarni /* Reclaim pending completed TX Descriptors. */ 1429d429005fSVishal Kulkarni reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true); 1430d429005fSVishal Kulkarni 1431f1f20a86SRahul Lakkireddy hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); 1432f1f20a86SRahul Lakkireddy hw_in_use = q->pidx - hw_cidx; 1433f1f20a86SRahul Lakkireddy if (hw_in_use < 0) 1434f1f20a86SRahul Lakkireddy hw_in_use += q->size; 1435f1f20a86SRahul Lakkireddy 1436d429005fSVishal Kulkarni /* If the TX Queue is currently stopped and there's now more than half 1437d429005fSVishal Kulkarni * the queue available, restart it. Otherwise bail out since the rest 1438d429005fSVishal Kulkarni * of what we want do here is with the possibility of shipping any 1439d429005fSVishal Kulkarni * currently buffered Coalesced TX Work Request. 1440d429005fSVishal Kulkarni */ 1441f1f20a86SRahul Lakkireddy if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { 1442d429005fSVishal Kulkarni netif_tx_wake_queue(eq->txq); 1443d429005fSVishal Kulkarni eq->q.restarts++; 1444d429005fSVishal Kulkarni } 1445d429005fSVishal Kulkarni 1446d429005fSVishal Kulkarni __netif_tx_unlock(eq->txq); 1447d429005fSVishal Kulkarni return reclaimed; 1448d429005fSVishal Kulkarni } 1449d429005fSVishal Kulkarni 1450b1396c2bSRahul Lakkireddy static inline int cxgb4_validate_skb(struct sk_buff *skb, 1451b1396c2bSRahul Lakkireddy struct net_device *dev, 1452b1396c2bSRahul Lakkireddy u32 min_pkt_len) 1453b1396c2bSRahul Lakkireddy { 1454b1396c2bSRahul Lakkireddy u32 max_pkt_len; 1455b1396c2bSRahul Lakkireddy 1456b1396c2bSRahul Lakkireddy /* The chip min packet length is 10 octets but some firmware 1457b1396c2bSRahul Lakkireddy * commands have a minimum packet length requirement. 
So, play 1458b1396c2bSRahul Lakkireddy * safe and reject anything shorter than @min_pkt_len. 1459b1396c2bSRahul Lakkireddy */ 1460b1396c2bSRahul Lakkireddy if (unlikely(skb->len < min_pkt_len)) 1461b1396c2bSRahul Lakkireddy return -EINVAL; 1462b1396c2bSRahul Lakkireddy 1463b1396c2bSRahul Lakkireddy /* Discard the packet if the length is greater than mtu */ 1464b1396c2bSRahul Lakkireddy max_pkt_len = ETH_HLEN + dev->mtu; 1465b1396c2bSRahul Lakkireddy 1466b1396c2bSRahul Lakkireddy if (skb_vlan_tagged(skb)) 1467b1396c2bSRahul Lakkireddy max_pkt_len += VLAN_HLEN; 1468b1396c2bSRahul Lakkireddy 1469b1396c2bSRahul Lakkireddy if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) 1470b1396c2bSRahul Lakkireddy return -EINVAL; 1471b1396c2bSRahul Lakkireddy 1472b1396c2bSRahul Lakkireddy return 0; 1473b1396c2bSRahul Lakkireddy } 1474b1396c2bSRahul Lakkireddy 14751a2a14fbSRahul Lakkireddy static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr, 14761a2a14fbSRahul Lakkireddy u32 hdr_len) 14771a2a14fbSRahul Lakkireddy { 14781a2a14fbSRahul Lakkireddy wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; 14791a2a14fbSRahul Lakkireddy wr->u.udpseg.ethlen = skb_network_offset(skb); 14801a2a14fbSRahul Lakkireddy wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); 14811a2a14fbSRahul Lakkireddy wr->u.udpseg.udplen = sizeof(struct udphdr); 14821a2a14fbSRahul Lakkireddy wr->u.udpseg.rtplen = 0; 14831a2a14fbSRahul Lakkireddy wr->u.udpseg.r4 = 0; 14841a2a14fbSRahul Lakkireddy if (skb_shinfo(skb)->gso_size) 14851a2a14fbSRahul Lakkireddy wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); 14861a2a14fbSRahul Lakkireddy else 14871a2a14fbSRahul Lakkireddy wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len); 14881a2a14fbSRahul Lakkireddy wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; 14891a2a14fbSRahul Lakkireddy wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len); 14901a2a14fbSRahul Lakkireddy 14911a2a14fbSRahul Lakkireddy return (void *)(wr + 1); 14921a2a14fbSRahul Lakkireddy } 14931a2a14fbSRahul Lakkireddy 1494d429005fSVishal Kulkarni /** 1495d5fbda61SArjun Vynipadath * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue 1496f7917c00SJeff Kirsher * @skb: the packet 1497f7917c00SJeff Kirsher * @dev: the egress net device 1498f7917c00SJeff Kirsher * 1499f7917c00SJeff Kirsher * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. 
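 *
 * Roughly, as implemented below: validate the skb, pick the TX queue,
 * reclaim completed descriptors, compute the flits/descriptors needed,
 * stop the queue if credits run low, build the Work Request (plain,
 * TSO, UDP segmentation offload or tunnel LSO variants), then either
 * inline the payload or attach a scatter/gather list, and finally ring
 * the doorbell. Inline IPsec and kTLS packets are handed off to their
 * ULD tx_handler early instead.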
1500f7917c00SJeff Kirsher */ 1501d5fbda61SArjun Vynipadath static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) 1502f7917c00SJeff Kirsher { 1503b1396c2bSRahul Lakkireddy enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; 1504b1396c2bSRahul Lakkireddy bool ptp_enabled = is_ptp_enabled(skb, dev); 15050ed96b46SRahul Lakkireddy unsigned int last_desc, flits, ndesc; 15061a2a14fbSRahul Lakkireddy u32 wr_mid, ctrl0, op, sgl_off = 0; 1507b1396c2bSRahul Lakkireddy const struct skb_shared_info *ssi; 15081a2a14fbSRahul Lakkireddy int len, qidx, credits, ret, left; 15090ed96b46SRahul Lakkireddy struct tx_sw_desc *sgl_sdesc; 15101a2a14fbSRahul Lakkireddy struct fw_eth_tx_eo_wr *eowr; 1511f7917c00SJeff Kirsher struct fw_eth_tx_pkt_wr *wr; 1512f7917c00SJeff Kirsher struct cpl_tx_pkt_core *cpl; 1513b1396c2bSRahul Lakkireddy const struct port_info *pi; 15140034b298SKumar Sanghvi bool immediate = false; 1515b1396c2bSRahul Lakkireddy u64 cntrl, *end, *sgl; 1516b1396c2bSRahul Lakkireddy struct sge_eth_txq *q; 1517d0a1299cSGanesh Goudar unsigned int chip_ver; 1518b1396c2bSRahul Lakkireddy struct adapter *adap; 1519d0a1299cSGanesh Goudar 1520b1396c2bSRahul Lakkireddy ret = cxgb4_validate_skb(skb, dev, ETH_HLEN); 1521b1396c2bSRahul Lakkireddy if (ret) 1522637d3e99SHariprasad Shenai goto out_free; 1523637d3e99SHariprasad Shenai 1524f7917c00SJeff Kirsher pi = netdev_priv(dev); 1525f7917c00SJeff Kirsher adap = pi->adapter; 1526a6ec572bSAtul Gupta ssi = skb_shinfo(skb); 15271b77be46SVinay Kumar Yadav #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) 1528a6ec572bSAtul Gupta if (xfrm_offload(skb) && !ssi->gso_size) 15291b77be46SVinay Kumar Yadav return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev); 1530a6ec572bSAtul Gupta #endif /* CHELSIO_IPSEC_INLINE */ 1531a6ec572bSAtul Gupta 1532a8c16e8eSRohit Maheshwari #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) 15339d2e5e9eSRohit Maheshwari if (cxgb4_is_ktls_skb(skb) && 15349d2e5e9eSRohit Maheshwari (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)))) 1535a8c16e8eSRohit Maheshwari return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev); 15365a4b9fe7SRohit Maheshwari #endif /* CHELSIO_TLS_DEVICE */ 15375a4b9fe7SRohit Maheshwari 1538f7917c00SJeff Kirsher qidx = skb_get_queue_mapping(skb); 1539a4569504SAtul Gupta if (ptp_enabled) { 1540a4569504SAtul Gupta if (!(adap->ptp_tx_skb)) { 1541a4569504SAtul Gupta skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1542a4569504SAtul Gupta adap->ptp_tx_skb = skb_get(skb); 1543a4569504SAtul Gupta } else { 1544a4569504SAtul Gupta goto out_free; 1545a4569504SAtul Gupta } 1546a4569504SAtul Gupta q = &adap->sge.ptptxq; 1547a4569504SAtul Gupta } else { 1548f7917c00SJeff Kirsher q = &adap->sge.ethtxq[qidx + pi->first_qset]; 1549a4569504SAtul Gupta } 1550a4569504SAtul Gupta skb_tx_timestamp(skb); 1551f7917c00SJeff Kirsher 1552d429005fSVishal Kulkarni reclaim_completed_tx(adap, &q->q, -1, true); 15531ecc7b7aSHariprasad Shenai cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; 155484a200b3SVarun Prakash 155584a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE 1556b1396c2bSRahul Lakkireddy ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl); 1557030c9882SRahul Lakkireddy if (unlikely(ret == -EOPNOTSUPP)) 155884a200b3SVarun Prakash goto out_free; 155984a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */ 1560f7917c00SJeff Kirsher 1561d0a1299cSGanesh Goudar chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); 1562d0a1299cSGanesh Goudar flits = calc_tx_flits(skb, chip_ver); 1563f7917c00SJeff Kirsher ndesc = flits_to_desc(flits); 
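	/* A flit is 8 bytes and each TX descriptor covers one 64-byte Egress
	 * Queue unit, i.e. 8 flits; so, for example, a Work Request needing
	 * 10 flits occupies 2 descriptors. The credit check below verifies
	 * that this many descriptors are still free.
	 */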
1564f7917c00SJeff Kirsher credits = txq_avail(&q->q) - ndesc; 1565f7917c00SJeff Kirsher 1566f7917c00SJeff Kirsher if (unlikely(credits < 0)) { 1567f7917c00SJeff Kirsher eth_txq_stop(q); 1568f7917c00SJeff Kirsher dev_err(adap->pdev_dev, 1569f7917c00SJeff Kirsher "%s: Tx ring %u full while queue awake!\n", 1570f7917c00SJeff Kirsher dev->name, qidx); 1571f7917c00SJeff Kirsher return NETDEV_TX_BUSY; 1572f7917c00SJeff Kirsher } 1573f7917c00SJeff Kirsher 1574d0a1299cSGanesh Goudar if (is_eth_imm(skb, chip_ver)) 15750034b298SKumar Sanghvi immediate = true; 15760034b298SKumar Sanghvi 1577d0a1299cSGanesh Goudar if (skb->encapsulation && chip_ver > CHELSIO_T5) 1578d0a1299cSGanesh Goudar tnl_type = cxgb_encap_offload_supported(skb); 1579d0a1299cSGanesh Goudar 15800ed96b46SRahul Lakkireddy last_desc = q->q.pidx + ndesc - 1; 15810ed96b46SRahul Lakkireddy if (last_desc >= q->q.size) 15820ed96b46SRahul Lakkireddy last_desc -= q->q.size; 15830ed96b46SRahul Lakkireddy sgl_sdesc = &q->q.sdesc[last_desc]; 15840ed96b46SRahul Lakkireddy 15850034b298SKumar Sanghvi if (!immediate && 15860ed96b46SRahul Lakkireddy unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) { 15870ed96b46SRahul Lakkireddy memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); 1588f7917c00SJeff Kirsher q->mapping_err++; 1589f7917c00SJeff Kirsher goto out_free; 1590f7917c00SJeff Kirsher } 1591f7917c00SJeff Kirsher 1592e2ac9628SHariprasad Shenai wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); 1593f7917c00SJeff Kirsher if (unlikely(credits < ETHTXQ_STOP_THRES)) { 1594d429005fSVishal Kulkarni /* After we're done injecting the Work Request for this 1595d429005fSVishal Kulkarni * packet, we'll be below our "stop threshold" so stop the TX 1596d429005fSVishal Kulkarni * Queue now and schedule a request for an SGE Egress Queue 1597d429005fSVishal Kulkarni * Update message. The queue will get started later on when 1598d429005fSVishal Kulkarni * the firmware processes this Work Request and sends us an 1599d429005fSVishal Kulkarni * Egress Queue Status Update message indicating that space 1600d429005fSVishal Kulkarni * has opened up. 1601d429005fSVishal Kulkarni */ 1602f7917c00SJeff Kirsher eth_txq_stop(q); 1603b660bccbSRaju Rangoju if (chip_ver > CHELSIO_T5) 1604e2ac9628SHariprasad Shenai wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; 1605f7917c00SJeff Kirsher } 1606f7917c00SJeff Kirsher 1607f7917c00SJeff Kirsher wr = (void *)&q->q.desc[q->q.pidx]; 16081a2a14fbSRahul Lakkireddy eowr = (void *)&q->q.desc[q->q.pidx]; 1609f7917c00SJeff Kirsher wr->equiq_to_len16 = htonl(wr_mid); 1610f7917c00SJeff Kirsher wr->r3 = cpu_to_be64(0); 16111a2a14fbSRahul Lakkireddy if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) 16121a2a14fbSRahul Lakkireddy end = (u64 *)eowr + flits; 16131a2a14fbSRahul Lakkireddy else 1614f7917c00SJeff Kirsher end = (u64 *)wr + flits; 1615f7917c00SJeff Kirsher 16160034b298SKumar Sanghvi len = immediate ? 
skb->len : 0; 1617a6076fcdSGanesh Goudar len += sizeof(*cpl); 16181a2a14fbSRahul Lakkireddy if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) { 1619a6076fcdSGanesh Goudar struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 1620d0a1299cSGanesh Goudar struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1); 1621f7917c00SJeff Kirsher 1622d0a1299cSGanesh Goudar if (tnl_type) 1623d0a1299cSGanesh Goudar len += sizeof(*tnl_lso); 1624d0a1299cSGanesh Goudar else 16250034b298SKumar Sanghvi len += sizeof(*lso); 1626d0a1299cSGanesh Goudar 1627e2ac9628SHariprasad Shenai wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | 1628e2ac9628SHariprasad Shenai FW_WR_IMMDLEN_V(len)); 1629d0a1299cSGanesh Goudar if (tnl_type) { 1630d0a1299cSGanesh Goudar struct iphdr *iph = ip_hdr(skb); 1631d0a1299cSGanesh Goudar 1632d0a1299cSGanesh Goudar t6_fill_tnl_lso(skb, tnl_lso, tnl_type); 1633d0a1299cSGanesh Goudar cpl = (void *)(tnl_lso + 1); 1634d0a1299cSGanesh Goudar /* Driver is expected to compute partial checksum that 1635d0a1299cSGanesh Goudar * does not include the IP Total Length. 1636d0a1299cSGanesh Goudar */ 1637d0a1299cSGanesh Goudar if (iph->version == 4) { 1638d0a1299cSGanesh Goudar iph->check = 0; 1639d0a1299cSGanesh Goudar iph->tot_len = 0; 16402f667016SRahul Lakkireddy iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl); 1641d0a1299cSGanesh Goudar } 1642d0a1299cSGanesh Goudar if (skb->ip_summed == CHECKSUM_PARTIAL) 1643d0a1299cSGanesh Goudar cntrl = hwcsum(adap->params.chip, skb); 1644d0a1299cSGanesh Goudar } else { 16454846d533SRahul Lakkireddy cpl = write_tso_wr(adap, skb, lso); 16464846d533SRahul Lakkireddy cntrl = hwcsum(adap->params.chip, skb); 1647d0a1299cSGanesh Goudar } 1648c50ae55eSGanesh Goudar sgl = (u64 *)(cpl + 1); /* sgl start here */ 1649f7917c00SJeff Kirsher q->tso++; 1650f7917c00SJeff Kirsher q->tx_cso += ssi->gso_segs; 16511a2a14fbSRahul Lakkireddy } else if (ssi->gso_size) { 16521a2a14fbSRahul Lakkireddy u64 *start; 16531a2a14fbSRahul Lakkireddy u32 hdrlen; 16541a2a14fbSRahul Lakkireddy 16551a2a14fbSRahul Lakkireddy hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb)); 16561a2a14fbSRahul Lakkireddy len += hdrlen; 16571a2a14fbSRahul Lakkireddy wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | 16581a2a14fbSRahul Lakkireddy FW_ETH_TX_EO_WR_IMMDLEN_V(len)); 16591a2a14fbSRahul Lakkireddy cpl = write_eo_udp_wr(skb, eowr, hdrlen); 16601a2a14fbSRahul Lakkireddy cntrl = hwcsum(adap->params.chip, skb); 16611a2a14fbSRahul Lakkireddy 16621a2a14fbSRahul Lakkireddy start = (u64 *)(cpl + 1); 16631a2a14fbSRahul Lakkireddy sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start, 16641a2a14fbSRahul Lakkireddy hdrlen); 16651a2a14fbSRahul Lakkireddy if (unlikely(start > sgl)) { 16661a2a14fbSRahul Lakkireddy left = (u8 *)end - (u8 *)q->q.stat; 16671a2a14fbSRahul Lakkireddy end = (void *)q->q.desc + left; 16681a2a14fbSRahul Lakkireddy } 16691a2a14fbSRahul Lakkireddy sgl_off = hdrlen; 16701a2a14fbSRahul Lakkireddy q->uso++; 16711a2a14fbSRahul Lakkireddy q->tx_cso += ssi->gso_segs; 1672f7917c00SJeff Kirsher } else { 1673a4569504SAtul Gupta if (ptp_enabled) 1674a4569504SAtul Gupta op = FW_PTP_TX_PKT_WR; 1675a4569504SAtul Gupta else 1676a4569504SAtul Gupta op = FW_ETH_TX_PKT_WR; 1677a4569504SAtul Gupta wr->op_immdlen = htonl(FW_WR_OP_V(op) | 1678e2ac9628SHariprasad Shenai FW_WR_IMMDLEN_V(len)); 1679f7917c00SJeff Kirsher cpl = (void *)(wr + 1); 1680c50ae55eSGanesh Goudar sgl = (u64 *)(cpl + 1); 1681f7917c00SJeff Kirsher if (skb->ip_summed == CHECKSUM_PARTIAL) { 16823ccc6cf7SHariprasad Shenai cntrl 
= hwcsum(adap->params.chip, skb) | 16833ccc6cf7SHariprasad Shenai TXPKT_IPCSUM_DIS_F; 1684f7917c00SJeff Kirsher q->tx_cso++; 168584a200b3SVarun Prakash } 1686f7917c00SJeff Kirsher } 1687f7917c00SJeff Kirsher 16881a2a14fbSRahul Lakkireddy if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { 16891a2a14fbSRahul Lakkireddy /* If current position is already at the end of the 16901a2a14fbSRahul Lakkireddy * txq, reset the current to point to start of the queue 16911a2a14fbSRahul Lakkireddy * and update the end ptr as well. 16921a2a14fbSRahul Lakkireddy */ 16931a2a14fbSRahul Lakkireddy left = (u8 *)end - (u8 *)q->q.stat; 16941a2a14fbSRahul Lakkireddy end = (void *)q->q.desc + left; 16951a2a14fbSRahul Lakkireddy sgl = (void *)q->q.desc; 16961a2a14fbSRahul Lakkireddy } 16971a2a14fbSRahul Lakkireddy 1698df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) { 1699f7917c00SJeff Kirsher q->vlan_ins++; 17001ecc7b7aSHariprasad Shenai cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); 170184a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE 170284a200b3SVarun Prakash if (skb->protocol == htons(ETH_P_FCOE)) 17031ecc7b7aSHariprasad Shenai cntrl |= TXPKT_VLAN_V( 170484a200b3SVarun Prakash ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); 170584a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */ 1706f7917c00SJeff Kirsher } 1707f7917c00SJeff Kirsher 1708397665daSAnish Bhatt ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | 1709397665daSAnish Bhatt TXPKT_PF_V(adap->pf); 1710a4569504SAtul Gupta if (ptp_enabled) 1711a4569504SAtul Gupta ctrl0 |= TXPKT_TSTAMP_F; 1712397665daSAnish Bhatt #ifdef CONFIG_CHELSIO_T4_DCB 1713397665daSAnish Bhatt if (is_t4(adap->params.chip)) 1714397665daSAnish Bhatt ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio); 1715397665daSAnish Bhatt else 1716397665daSAnish Bhatt ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio); 1717397665daSAnish Bhatt #endif 1718397665daSAnish Bhatt cpl->ctrl0 = htonl(ctrl0); 1719f7917c00SJeff Kirsher cpl->pack = htons(0); 1720f7917c00SJeff Kirsher cpl->len = htons(skb->len); 1721f7917c00SJeff Kirsher cpl->ctrl1 = cpu_to_be64(cntrl); 1722f7917c00SJeff Kirsher 17230034b298SKumar Sanghvi if (immediate) { 1724c50ae55eSGanesh Goudar cxgb4_inline_tx_skb(skb, &q->q, sgl); 1725a7525198SEric W. Biederman dev_consume_skb_any(skb); 1726f7917c00SJeff Kirsher } else { 17271a2a14fbSRahul Lakkireddy cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off, 17280ed96b46SRahul Lakkireddy sgl_sdesc->addr); 1729f7917c00SJeff Kirsher skb_orphan(skb); 17300ed96b46SRahul Lakkireddy sgl_sdesc->skb = skb; 1731f7917c00SJeff Kirsher } 1732f7917c00SJeff Kirsher 1733f7917c00SJeff Kirsher txq_advance(&q->q, ndesc); 1734f7917c00SJeff Kirsher 1735a6ec572bSAtul Gupta cxgb4_ring_tx_db(adap, &q->q, ndesc); 1736f7917c00SJeff Kirsher return NETDEV_TX_OK; 1737b1396c2bSRahul Lakkireddy 1738b1396c2bSRahul Lakkireddy out_free: 1739b1396c2bSRahul Lakkireddy dev_kfree_skb_any(skb); 1740b1396c2bSRahul Lakkireddy return NETDEV_TX_OK; 1741f7917c00SJeff Kirsher } 1742f7917c00SJeff Kirsher 1743d5fbda61SArjun Vynipadath /* Constants ... */ 1744d5fbda61SArjun Vynipadath enum { 1745d5fbda61SArjun Vynipadath /* Egress Queue sizes, producer and consumer indices are all in units 1746d5fbda61SArjun Vynipadath * of Egress Context Units bytes. 
Note that as far as the hardware is 1747d5fbda61SArjun Vynipadath * concerned, the free list is an Egress Queue (the host produces free 1748d5fbda61SArjun Vynipadath * buffers which the hardware consumes) and free list entries are 1749d5fbda61SArjun Vynipadath * 64-bit PCI DMA addresses. 1750d5fbda61SArjun Vynipadath */ 1751d5fbda61SArjun Vynipadath EQ_UNIT = SGE_EQ_IDXSIZE, 1752d5fbda61SArjun Vynipadath FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), 1753d5fbda61SArjun Vynipadath TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), 1754d5fbda61SArjun Vynipadath 1755d5fbda61SArjun Vynipadath T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + 1756d5fbda61SArjun Vynipadath sizeof(struct cpl_tx_pkt_lso_core) + 1757d5fbda61SArjun Vynipadath sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), 1758d5fbda61SArjun Vynipadath }; 1759d5fbda61SArjun Vynipadath 1760d5fbda61SArjun Vynipadath /** 1761d5fbda61SArjun Vynipadath * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data? 1762d5fbda61SArjun Vynipadath * @skb: the packet 1763d5fbda61SArjun Vynipadath * 1764d5fbda61SArjun Vynipadath * Returns whether an Ethernet packet is small enough to fit completely as 1765d5fbda61SArjun Vynipadath * immediate data. 1766d5fbda61SArjun Vynipadath */ 1767d5fbda61SArjun Vynipadath static inline int t4vf_is_eth_imm(const struct sk_buff *skb) 1768d5fbda61SArjun Vynipadath { 1769d5fbda61SArjun Vynipadath /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request 1770d5fbda61SArjun Vynipadath * which does not accommodate immediate data. We could dike out all 1771d5fbda61SArjun Vynipadath * of the support code for immediate data but that would tie our hands 1772d5fbda61SArjun Vynipadath * too much if we ever want to enhance the firmware. It would also 1773d5fbda61SArjun Vynipadath * create more differences between the PF and VF Drivers. 1774d5fbda61SArjun Vynipadath */ 1775d5fbda61SArjun Vynipadath return false; 1776d5fbda61SArjun Vynipadath } 1777d5fbda61SArjun Vynipadath 1778d5fbda61SArjun Vynipadath /** 1779d5fbda61SArjun Vynipadath * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR 1780d5fbda61SArjun Vynipadath * @skb: the packet 1781d5fbda61SArjun Vynipadath * 1782d5fbda61SArjun Vynipadath * Returns the number of flits needed for a TX Work Request for the 1783d5fbda61SArjun Vynipadath * given Ethernet packet, including the needed WR and CPL headers. 1784d5fbda61SArjun Vynipadath */ 1785d5fbda61SArjun Vynipadath static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb) 1786d5fbda61SArjun Vynipadath { 1787d5fbda61SArjun Vynipadath unsigned int flits; 1788d5fbda61SArjun Vynipadath 1789d5fbda61SArjun Vynipadath /* If the skb is small enough, we can pump it out as a work request 1790d5fbda61SArjun Vynipadath * with only immediate data. In that case we just have to have the 1791d5fbda61SArjun Vynipadath * TX Packet header plus the skb data in the Work Request. 1792d5fbda61SArjun Vynipadath */ 1793d5fbda61SArjun Vynipadath if (t4vf_is_eth_imm(skb)) 1794d5fbda61SArjun Vynipadath return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 1795d5fbda61SArjun Vynipadath sizeof(__be64)); 1796d5fbda61SArjun Vynipadath 1797d5fbda61SArjun Vynipadath /* Otherwise, we're going to have to construct a Scatter gather list 1798d5fbda61SArjun Vynipadath * of the skb body and fragments. We also include the flits necessary 1799d5fbda61SArjun Vynipadath * for the TX Packet Work Request and CPL.
We always have a firmware 1800d5fbda61SArjun Vynipadath * Write Header (incorporated as part of the cpl_tx_pkt_lso and 1801d5fbda61SArjun Vynipadath * cpl_tx_pkt structures), followed by either a TX Packet Write CPL 1802d5fbda61SArjun Vynipadath * message or, if we're doing a Large Send Offload, an LSO CPL message 1803d5fbda61SArjun Vynipadath * with an embedded TX Packet Write CPL message. 1804d5fbda61SArjun Vynipadath */ 1805d5fbda61SArjun Vynipadath flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); 1806d5fbda61SArjun Vynipadath if (skb_shinfo(skb)->gso_size) 1807d5fbda61SArjun Vynipadath flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + 1808d5fbda61SArjun Vynipadath sizeof(struct cpl_tx_pkt_lso_core) + 1809d5fbda61SArjun Vynipadath sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); 1810d5fbda61SArjun Vynipadath else 1811d5fbda61SArjun Vynipadath flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + 1812d5fbda61SArjun Vynipadath sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); 1813d5fbda61SArjun Vynipadath return flits; 1814d5fbda61SArjun Vynipadath } 1815d5fbda61SArjun Vynipadath 1816d5fbda61SArjun Vynipadath /** 1817d5fbda61SArjun Vynipadath * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue 1818d5fbda61SArjun Vynipadath * @skb: the packet 1819d5fbda61SArjun Vynipadath * @dev: the egress net device 1820d5fbda61SArjun Vynipadath * 1821d5fbda61SArjun Vynipadath * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. 1822d5fbda61SArjun Vynipadath */ 1823d5fbda61SArjun Vynipadath static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, 1824d5fbda61SArjun Vynipadath struct net_device *dev) 1825d5fbda61SArjun Vynipadath { 18260ed96b46SRahul Lakkireddy unsigned int last_desc, flits, ndesc; 1827d5fbda61SArjun Vynipadath const struct skb_shared_info *ssi; 1828d5fbda61SArjun Vynipadath struct fw_eth_tx_pkt_vm_wr *wr; 18290ed96b46SRahul Lakkireddy struct tx_sw_desc *sgl_sdesc; 1830d5fbda61SArjun Vynipadath struct cpl_tx_pkt_core *cpl; 1831d5fbda61SArjun Vynipadath const struct port_info *pi; 1832d5fbda61SArjun Vynipadath struct sge_eth_txq *txq; 1833d5fbda61SArjun Vynipadath struct adapter *adapter; 1834b1396c2bSRahul Lakkireddy int qidx, credits, ret; 1835b1396c2bSRahul Lakkireddy size_t fw_hdr_copy_len; 1836b660bccbSRaju Rangoju unsigned int chip_ver; 1837d5fbda61SArjun Vynipadath u64 cntrl, *end; 1838d5fbda61SArjun Vynipadath u32 wr_mid; 1839d5fbda61SArjun Vynipadath 1840d5fbda61SArjun Vynipadath /* The chip minimum packet length is 10 octets but the firmware 1841d5fbda61SArjun Vynipadath * command that we are using requires that we copy the Ethernet header 1842d5fbda61SArjun Vynipadath * (including the VLAN tag) into the header so we reject anything 1843d5fbda61SArjun Vynipadath * smaller than that ... 1844d5fbda61SArjun Vynipadath */ 1845b1396c2bSRahul Lakkireddy fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) + 1846b1396c2bSRahul Lakkireddy sizeof(wr->ethtype) + sizeof(wr->vlantci); 1847b1396c2bSRahul Lakkireddy ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len); 1848b1396c2bSRahul Lakkireddy if (ret) 1849d5fbda61SArjun Vynipadath goto out_free; 1850d5fbda61SArjun Vynipadath 1851d5fbda61SArjun Vynipadath /* Figure out which TX Queue we're going to use. 
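 * The stack selected the queue index via skb_get_queue_mapping(); it is
 * expected to be below pi->nqsets and indexes into this port's slice of
 * the adapter's Ethernet TX queues.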
*/ 1852d5fbda61SArjun Vynipadath pi = netdev_priv(dev); 1853d5fbda61SArjun Vynipadath adapter = pi->adapter; 1854d5fbda61SArjun Vynipadath qidx = skb_get_queue_mapping(skb); 1855d5fbda61SArjun Vynipadath WARN_ON(qidx >= pi->nqsets); 1856d5fbda61SArjun Vynipadath txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; 1857d5fbda61SArjun Vynipadath 1858d5fbda61SArjun Vynipadath /* Take this opportunity to reclaim any TX Descriptors whose DMA 1859d5fbda61SArjun Vynipadath * transfers have completed. 1860d5fbda61SArjun Vynipadath */ 1861d429005fSVishal Kulkarni reclaim_completed_tx(adapter, &txq->q, -1, true); 1862d5fbda61SArjun Vynipadath 1863d5fbda61SArjun Vynipadath /* Calculate the number of flits and TX Descriptors we're going to 1864d5fbda61SArjun Vynipadath * need along with how many TX Descriptors will be left over after 1865d5fbda61SArjun Vynipadath * we inject our Work Request. 1866d5fbda61SArjun Vynipadath */ 1867d5fbda61SArjun Vynipadath flits = t4vf_calc_tx_flits(skb); 1868d5fbda61SArjun Vynipadath ndesc = flits_to_desc(flits); 1869d5fbda61SArjun Vynipadath credits = txq_avail(&txq->q) - ndesc; 1870d5fbda61SArjun Vynipadath 1871d5fbda61SArjun Vynipadath if (unlikely(credits < 0)) { 1872d5fbda61SArjun Vynipadath /* Not enough room for this packet's Work Request. Stop the 1873d5fbda61SArjun Vynipadath * TX Queue and return a "busy" condition. The queue will get 1874d5fbda61SArjun Vynipadath * started later on when the firmware informs us that space 1875d5fbda61SArjun Vynipadath * has opened up. 1876d5fbda61SArjun Vynipadath */ 1877d5fbda61SArjun Vynipadath eth_txq_stop(txq); 1878d5fbda61SArjun Vynipadath dev_err(adapter->pdev_dev, 1879d5fbda61SArjun Vynipadath "%s: TX ring %u full while queue awake!\n", 1880d5fbda61SArjun Vynipadath dev->name, qidx); 1881d5fbda61SArjun Vynipadath return NETDEV_TX_BUSY; 1882d5fbda61SArjun Vynipadath } 1883d5fbda61SArjun Vynipadath 18840ed96b46SRahul Lakkireddy last_desc = txq->q.pidx + ndesc - 1; 18850ed96b46SRahul Lakkireddy if (last_desc >= txq->q.size) 18860ed96b46SRahul Lakkireddy last_desc -= txq->q.size; 18870ed96b46SRahul Lakkireddy sgl_sdesc = &txq->q.sdesc[last_desc]; 18880ed96b46SRahul Lakkireddy 1889d5fbda61SArjun Vynipadath if (!t4vf_is_eth_imm(skb) && 18900ed96b46SRahul Lakkireddy unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, 18910ed96b46SRahul Lakkireddy sgl_sdesc->addr) < 0)) { 1892d5fbda61SArjun Vynipadath /* We need to map the skb into PCI DMA space (because it can't 1893d5fbda61SArjun Vynipadath * be in-lined directly into the Work Request) and the mapping 1894d5fbda61SArjun Vynipadath * operation failed. Record the error and drop the packet. 1895d5fbda61SArjun Vynipadath */ 18960ed96b46SRahul Lakkireddy memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); 1897d5fbda61SArjun Vynipadath txq->mapping_err++; 1898d5fbda61SArjun Vynipadath goto out_free; 1899d5fbda61SArjun Vynipadath } 1900d5fbda61SArjun Vynipadath 1901b660bccbSRaju Rangoju chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); 1902d5fbda61SArjun Vynipadath wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); 1903d5fbda61SArjun Vynipadath if (unlikely(credits < ETHTXQ_STOP_THRES)) { 1904d5fbda61SArjun Vynipadath /* After we're done injecting the Work Request for this 1905d5fbda61SArjun Vynipadath * packet, we'll be below our "stop threshold" so stop the TX 1906d5fbda61SArjun Vynipadath * Queue now and schedule a request for an SGE Egress Queue 1907d5fbda61SArjun Vynipadath * Update message. 
The queue will get started later on when 1908d5fbda61SArjun Vynipadath * the firmware processes this Work Request and sends us an 1909d5fbda61SArjun Vynipadath * Egress Queue Status Update message indicating that space 1910d5fbda61SArjun Vynipadath * has opened up. 1911d5fbda61SArjun Vynipadath */ 1912d5fbda61SArjun Vynipadath eth_txq_stop(txq); 1913b660bccbSRaju Rangoju if (chip_ver > CHELSIO_T5) 1914d5fbda61SArjun Vynipadath wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; 1915d5fbda61SArjun Vynipadath } 1916d5fbda61SArjun Vynipadath 1917d5fbda61SArjun Vynipadath /* Start filling in our Work Request. Note that we do _not_ handle 1918d5fbda61SArjun Vynipadath * the WR Header wrapping around the TX Descriptor Ring. If our 1919d5fbda61SArjun Vynipadath * maximum header size ever exceeds one TX Descriptor, we'll need to 1920d5fbda61SArjun Vynipadath * do something else here. 1921d5fbda61SArjun Vynipadath */ 1922d5fbda61SArjun Vynipadath WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); 1923d5fbda61SArjun Vynipadath wr = (void *)&txq->q.desc[txq->q.pidx]; 1924d5fbda61SArjun Vynipadath wr->equiq_to_len16 = cpu_to_be32(wr_mid); 1925d5fbda61SArjun Vynipadath wr->r3[0] = cpu_to_be32(0); 1926d5fbda61SArjun Vynipadath wr->r3[1] = cpu_to_be32(0); 1927d5fbda61SArjun Vynipadath skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); 1928d5fbda61SArjun Vynipadath end = (u64 *)wr + flits; 1929d5fbda61SArjun Vynipadath 1930d5fbda61SArjun Vynipadath /* If this is a Large Send Offload packet we'll put in an LSO CPL 1931d5fbda61SArjun Vynipadath * message with an encapsulated TX Packet CPL message. Otherwise we 1932d5fbda61SArjun Vynipadath * just use a TX Packet CPL message. 1933d5fbda61SArjun Vynipadath */ 1934d5fbda61SArjun Vynipadath ssi = skb_shinfo(skb); 1935d5fbda61SArjun Vynipadath if (ssi->gso_size) { 1936d5fbda61SArjun Vynipadath struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 1937d5fbda61SArjun Vynipadath bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; 1938d5fbda61SArjun Vynipadath int l3hdr_len = skb_network_header_len(skb); 1939d5fbda61SArjun Vynipadath int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; 1940d5fbda61SArjun Vynipadath 1941d5fbda61SArjun Vynipadath wr->op_immdlen = 1942d5fbda61SArjun Vynipadath cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | 1943d5fbda61SArjun Vynipadath FW_WR_IMMDLEN_V(sizeof(*lso) + 1944d5fbda61SArjun Vynipadath sizeof(*cpl))); 1945d5fbda61SArjun Vynipadath /* Fill in the LSO CPL message. 
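 * As an illustrative example, an untagged IPv4/TCP GSO packet with a
 * 20-byte IP header and no TCP options would be encoded with
 * LSO_ETHHDR_LEN 0, LSO_IPHDR_LEN 5 (20 / 4) and LSO_TCPHDR_LEN 5
 * (the TCP doff value).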
*/ 1946d5fbda61SArjun Vynipadath lso->lso_ctrl = 1947d5fbda61SArjun Vynipadath cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) | 1948d5fbda61SArjun Vynipadath LSO_FIRST_SLICE_F | 1949d5fbda61SArjun Vynipadath LSO_LAST_SLICE_F | 1950d5fbda61SArjun Vynipadath LSO_IPV6_V(v6) | 1951d5fbda61SArjun Vynipadath LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | 1952d5fbda61SArjun Vynipadath LSO_IPHDR_LEN_V(l3hdr_len / 4) | 1953d5fbda61SArjun Vynipadath LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); 1954d5fbda61SArjun Vynipadath lso->ipid_ofst = cpu_to_be16(0); 1955d5fbda61SArjun Vynipadath lso->mss = cpu_to_be16(ssi->gso_size); 1956d5fbda61SArjun Vynipadath lso->seqno_offset = cpu_to_be32(0); 1957d5fbda61SArjun Vynipadath if (is_t4(adapter->params.chip)) 1958d5fbda61SArjun Vynipadath lso->len = cpu_to_be32(skb->len); 1959d5fbda61SArjun Vynipadath else 1960d5fbda61SArjun Vynipadath lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); 1961d5fbda61SArjun Vynipadath 1962d5fbda61SArjun Vynipadath /* Set up TX Packet CPL pointer, control word and perform 1963d5fbda61SArjun Vynipadath * accounting. 1964d5fbda61SArjun Vynipadath */ 1965d5fbda61SArjun Vynipadath cpl = (void *)(lso + 1); 1966d5fbda61SArjun Vynipadath 1967b660bccbSRaju Rangoju if (chip_ver <= CHELSIO_T5) 1968d5fbda61SArjun Vynipadath cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); 1969d5fbda61SArjun Vynipadath else 1970d5fbda61SArjun Vynipadath cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); 1971d5fbda61SArjun Vynipadath 1972d5fbda61SArjun Vynipadath cntrl |= TXPKT_CSUM_TYPE_V(v6 ? 1973d5fbda61SArjun Vynipadath TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | 1974d5fbda61SArjun Vynipadath TXPKT_IPHDR_LEN_V(l3hdr_len); 1975d5fbda61SArjun Vynipadath txq->tso++; 1976d5fbda61SArjun Vynipadath txq->tx_cso += ssi->gso_segs; 1977d5fbda61SArjun Vynipadath } else { 1978d5fbda61SArjun Vynipadath int len; 1979d5fbda61SArjun Vynipadath 1980d5fbda61SArjun Vynipadath len = (t4vf_is_eth_imm(skb) 1981d5fbda61SArjun Vynipadath ? skb->len + sizeof(*cpl) 1982d5fbda61SArjun Vynipadath : sizeof(*cpl)); 1983d5fbda61SArjun Vynipadath wr->op_immdlen = 1984d5fbda61SArjun Vynipadath cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | 1985d5fbda61SArjun Vynipadath FW_WR_IMMDLEN_V(len)); 1986d5fbda61SArjun Vynipadath 1987d5fbda61SArjun Vynipadath /* Set up TX Packet CPL pointer, control word and perform 1988d5fbda61SArjun Vynipadath * accounting. 1989d5fbda61SArjun Vynipadath */ 1990d5fbda61SArjun Vynipadath cpl = (void *)(wr + 1); 1991d5fbda61SArjun Vynipadath if (skb->ip_summed == CHECKSUM_PARTIAL) { 1992d5fbda61SArjun Vynipadath cntrl = hwcsum(adapter->params.chip, skb) | 1993d5fbda61SArjun Vynipadath TXPKT_IPCSUM_DIS_F; 1994d5fbda61SArjun Vynipadath txq->tx_cso++; 1995d5fbda61SArjun Vynipadath } else { 1996d5fbda61SArjun Vynipadath cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; 1997d5fbda61SArjun Vynipadath } 1998d5fbda61SArjun Vynipadath } 1999d5fbda61SArjun Vynipadath 2000d5fbda61SArjun Vynipadath /* If there's a VLAN tag present, add that to the list of things to 2001d5fbda61SArjun Vynipadath * do in this Work Request. 2002d5fbda61SArjun Vynipadath */ 2003d5fbda61SArjun Vynipadath if (skb_vlan_tag_present(skb)) { 2004d5fbda61SArjun Vynipadath txq->vlan_ins++; 2005d5fbda61SArjun Vynipadath cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); 2006d5fbda61SArjun Vynipadath } 2007d5fbda61SArjun Vynipadath 2008d5fbda61SArjun Vynipadath /* Fill in the TX Packet CPL message header. 
*/ 2009d5fbda61SArjun Vynipadath cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | 2010d5fbda61SArjun Vynipadath TXPKT_INTF_V(pi->port_id) | 2011d5fbda61SArjun Vynipadath TXPKT_PF_V(0)); 2012d5fbda61SArjun Vynipadath cpl->pack = cpu_to_be16(0); 2013d5fbda61SArjun Vynipadath cpl->len = cpu_to_be16(skb->len); 2014d5fbda61SArjun Vynipadath cpl->ctrl1 = cpu_to_be64(cntrl); 2015d5fbda61SArjun Vynipadath 2016d5fbda61SArjun Vynipadath /* Fill in the body of the TX Packet CPL message with either in-lined 2017d5fbda61SArjun Vynipadath * data or a Scatter/Gather List. 2018d5fbda61SArjun Vynipadath */ 2019d5fbda61SArjun Vynipadath if (t4vf_is_eth_imm(skb)) { 2020d5fbda61SArjun Vynipadath /* In-line the packet's data and free the skb since we don't 2021d5fbda61SArjun Vynipadath * need it any longer. 2022d5fbda61SArjun Vynipadath */ 2023d5fbda61SArjun Vynipadath cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); 2024d5fbda61SArjun Vynipadath dev_consume_skb_any(skb); 2025d5fbda61SArjun Vynipadath } else { 2026d5fbda61SArjun Vynipadath /* Write the skb's Scatter/Gather list into the TX Packet CPL 2027d5fbda61SArjun Vynipadath * message and retain a pointer to the skb so we can free it 2028d5fbda61SArjun Vynipadath * later when its DMA completes. (We store the skb pointer 2029d5fbda61SArjun Vynipadath * in the Software Descriptor corresponding to the last TX 2030d5fbda61SArjun Vynipadath * Descriptor used by the Work Request.) 2031d5fbda61SArjun Vynipadath * 2032d5fbda61SArjun Vynipadath * The retained skb will be freed when the corresponding TX 2033d5fbda61SArjun Vynipadath * Descriptors are reclaimed after their DMAs complete. 2034d5fbda61SArjun Vynipadath * However, this could take quite a while since, in general, 2035d5fbda61SArjun Vynipadath * the hardware is set up to be lazy about sending DMA 2036d5fbda61SArjun Vynipadath * completion notifications to us and we mostly perform TX 2037d5fbda61SArjun Vynipadath * reclaims in the transmit routine. 2038d5fbda61SArjun Vynipadath * 2039d5fbda61SArjun Vynipadath * This is good for performance but means that we rely on new 2040d5fbda61SArjun Vynipadath * TX packets arriving to run the destructors of completed 2041d5fbda61SArjun Vynipadath * packets, which open up space in their sockets' send queues. 2042d5fbda61SArjun Vynipadath * Sometimes we do not get such new packets causing TX to 2043d5fbda61SArjun Vynipadath * stall. A single UDP transmitter is a good example of this 2044d5fbda61SArjun Vynipadath * situation. We have a clean up timer that periodically 2045d5fbda61SArjun Vynipadath * reclaims completed packets but it doesn't run often enough 2046d5fbda61SArjun Vynipadath * (nor do we want it to) to prevent lengthy stalls. A 2047d5fbda61SArjun Vynipadath * solution to this problem is to run the destructor early, 2048d5fbda61SArjun Vynipadath * after the packet is queued but before it's DMAd. A con is 2049d5fbda61SArjun Vynipadath * that we lie to socket memory accounting, but the amount of 2050d5fbda61SArjun Vynipadath * extra memory is reasonable (limited by the number of TX 2051d5fbda61SArjun Vynipadath * descriptors), the packets do actually get freed quickly by 2052d5fbda61SArjun Vynipadath * new packets almost always, and for protocols like TCP that 2053d5fbda61SArjun Vynipadath * wait for acks to really free up the data the extra memory 2054d5fbda61SArjun Vynipadath * is even less.
On the positive side we run the destructors 2055d5fbda61SArjun Vynipadath * on the sending CPU rather than on a potentially different 2056d5fbda61SArjun Vynipadath * completing CPU, usually a good thing. 2057d5fbda61SArjun Vynipadath * 2058d5fbda61SArjun Vynipadath * Run the destructor before telling the DMA engine about the 2059d5fbda61SArjun Vynipadath * packet to make sure it doesn't complete and get freed 2060d5fbda61SArjun Vynipadath * prematurely. 2061d5fbda61SArjun Vynipadath */ 2062d5fbda61SArjun Vynipadath struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); 2063d5fbda61SArjun Vynipadath struct sge_txq *tq = &txq->q; 2064d5fbda61SArjun Vynipadath 2065d5fbda61SArjun Vynipadath /* If the Work Request header was an exact multiple of our TX 2066d5fbda61SArjun Vynipadath * Descriptor length, then it's possible that the starting SGL 2067d5fbda61SArjun Vynipadath * pointer lines up exactly with the end of our TX Descriptor 2068d5fbda61SArjun Vynipadath * ring. If that's the case, wrap around to the beginning 2069d5fbda61SArjun Vynipadath * here ... 2070d5fbda61SArjun Vynipadath */ 2071d5fbda61SArjun Vynipadath if (unlikely((void *)sgl == (void *)tq->stat)) { 2072d5fbda61SArjun Vynipadath sgl = (void *)tq->desc; 2073d5fbda61SArjun Vynipadath end = (void *)((void *)tq->desc + 2074d5fbda61SArjun Vynipadath ((void *)end - (void *)tq->stat)); 2075d5fbda61SArjun Vynipadath } 2076d5fbda61SArjun Vynipadath 20770ed96b46SRahul Lakkireddy cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr); 2078d5fbda61SArjun Vynipadath skb_orphan(skb); 20790ed96b46SRahul Lakkireddy sgl_sdesc->skb = skb; 2080d5fbda61SArjun Vynipadath } 2081d5fbda61SArjun Vynipadath 2082d5fbda61SArjun Vynipadath /* Advance our internal TX Queue state, tell the hardware about 2083d5fbda61SArjun Vynipadath * the new TX descriptors and return success. 2084d5fbda61SArjun Vynipadath */ 2085d5fbda61SArjun Vynipadath txq_advance(&txq->q, ndesc); 2086d5fbda61SArjun Vynipadath 2087d5fbda61SArjun Vynipadath cxgb4_ring_tx_db(adapter, &txq->q, ndesc); 2088d5fbda61SArjun Vynipadath return NETDEV_TX_OK; 2089d5fbda61SArjun Vynipadath 2090d5fbda61SArjun Vynipadath out_free: 2091d5fbda61SArjun Vynipadath /* An error of some sort happened. Free the TX skb and tell the 2092d5fbda61SArjun Vynipadath * OS that we've "dealt" with the packet ... 2093d5fbda61SArjun Vynipadath */ 2094d5fbda61SArjun Vynipadath dev_kfree_skb_any(skb); 2095d5fbda61SArjun Vynipadath return NETDEV_TX_OK; 2096d5fbda61SArjun Vynipadath } 2097d5fbda61SArjun Vynipadath 20984846d533SRahul Lakkireddy /** 20994846d533SRahul Lakkireddy * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs 21004846d533SRahul Lakkireddy * @q: the SGE control Tx queue 21014846d533SRahul Lakkireddy * 21024846d533SRahul Lakkireddy * This is a variant of cxgb4_reclaim_completed_tx() that is used 21034846d533SRahul Lakkireddy * for Tx queues that send only immediate data (presently just 21044846d533SRahul Lakkireddy * the control queues) and thus do not have any sk_buffs to release. 
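 *
 * The hardware's consumer index is read from the queue's status entry
 * (q->stat) and may have wrapped past our cached cidx; for example, with
 * q->size = 1024, q->cidx = 1020 and a hardware cidx of 4, the function
 * reclaims (4 - 1020) + 1024 = 8 descriptors.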
21054846d533SRahul Lakkireddy */ 21064846d533SRahul Lakkireddy static inline void reclaim_completed_tx_imm(struct sge_txq *q) 21074846d533SRahul Lakkireddy { 21084846d533SRahul Lakkireddy int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); 21094846d533SRahul Lakkireddy int reclaim = hw_cidx - q->cidx; 21104846d533SRahul Lakkireddy 21114846d533SRahul Lakkireddy if (reclaim < 0) 21124846d533SRahul Lakkireddy reclaim += q->size; 21134846d533SRahul Lakkireddy 21144846d533SRahul Lakkireddy q->in_use -= reclaim; 21154846d533SRahul Lakkireddy q->cidx = hw_cidx; 21164846d533SRahul Lakkireddy } 21174846d533SRahul Lakkireddy 2118b1396c2bSRahul Lakkireddy static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max) 2119b1396c2bSRahul Lakkireddy { 2120b1396c2bSRahul Lakkireddy u32 val = *idx + n; 2121b1396c2bSRahul Lakkireddy 2122b1396c2bSRahul Lakkireddy if (val >= max) 2123b1396c2bSRahul Lakkireddy val -= max; 2124b1396c2bSRahul Lakkireddy 2125b1396c2bSRahul Lakkireddy *idx = val; 2126b1396c2bSRahul Lakkireddy } 2127b1396c2bSRahul Lakkireddy 2128b1396c2bSRahul Lakkireddy void cxgb4_eosw_txq_free_desc(struct adapter *adap, 2129b1396c2bSRahul Lakkireddy struct sge_eosw_txq *eosw_txq, u32 ndesc) 2130b1396c2bSRahul Lakkireddy { 21310ed96b46SRahul Lakkireddy struct tx_sw_desc *d; 2132b1396c2bSRahul Lakkireddy 2133b1396c2bSRahul Lakkireddy d = &eosw_txq->desc[eosw_txq->last_cidx]; 2134b1396c2bSRahul Lakkireddy while (ndesc--) { 2135b1396c2bSRahul Lakkireddy if (d->skb) { 2136b1396c2bSRahul Lakkireddy if (d->addr[0]) { 2137b1396c2bSRahul Lakkireddy unmap_skb(adap->pdev_dev, d->skb, d->addr); 2138b1396c2bSRahul Lakkireddy memset(d->addr, 0, sizeof(d->addr)); 2139b1396c2bSRahul Lakkireddy } 2140b1396c2bSRahul Lakkireddy dev_consume_skb_any(d->skb); 2141b1396c2bSRahul Lakkireddy d->skb = NULL; 2142b1396c2bSRahul Lakkireddy } 2143b1396c2bSRahul Lakkireddy eosw_txq_advance_index(&eosw_txq->last_cidx, 1, 2144b1396c2bSRahul Lakkireddy eosw_txq->ndesc); 2145b1396c2bSRahul Lakkireddy d = &eosw_txq->desc[eosw_txq->last_cidx]; 2146b1396c2bSRahul Lakkireddy } 2147b1396c2bSRahul Lakkireddy } 2148b1396c2bSRahul Lakkireddy 21494846d533SRahul Lakkireddy static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n) 21504846d533SRahul Lakkireddy { 21514846d533SRahul Lakkireddy eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc); 21524846d533SRahul Lakkireddy eosw_txq->inuse += n; 21534846d533SRahul Lakkireddy } 21544846d533SRahul Lakkireddy 21554846d533SRahul Lakkireddy static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq, 21564846d533SRahul Lakkireddy struct sk_buff *skb) 21574846d533SRahul Lakkireddy { 21584846d533SRahul Lakkireddy if (eosw_txq->inuse == eosw_txq->ndesc) 21594846d533SRahul Lakkireddy return -ENOMEM; 21604846d533SRahul Lakkireddy 21614846d533SRahul Lakkireddy eosw_txq->desc[eosw_txq->pidx].skb = skb; 21624846d533SRahul Lakkireddy return 0; 21634846d533SRahul Lakkireddy } 21644846d533SRahul Lakkireddy 21654846d533SRahul Lakkireddy static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq) 21664846d533SRahul Lakkireddy { 21674846d533SRahul Lakkireddy return eosw_txq->desc[eosw_txq->last_pidx].skb; 21684846d533SRahul Lakkireddy } 21694846d533SRahul Lakkireddy 21704846d533SRahul Lakkireddy static inline u8 ethofld_calc_tx_flits(struct adapter *adap, 21714846d533SRahul Lakkireddy struct sk_buff *skb, u32 hdr_len) 21724846d533SRahul Lakkireddy { 21734846d533SRahul Lakkireddy u8 flits, nsgl = 0; 21744846d533SRahul Lakkireddy u32 wrlen; 21754846d533SRahul 
Lakkireddy 21764846d533SRahul Lakkireddy wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core); 21771a2a14fbSRahul Lakkireddy if (skb_shinfo(skb)->gso_size && 21781a2a14fbSRahul Lakkireddy !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) 21794846d533SRahul Lakkireddy wrlen += sizeof(struct cpl_tx_pkt_lso_core); 21804846d533SRahul Lakkireddy 21814846d533SRahul Lakkireddy wrlen += roundup(hdr_len, 16); 21824846d533SRahul Lakkireddy 21834846d533SRahul Lakkireddy /* Packet headers + WR + CPLs */ 21844846d533SRahul Lakkireddy flits = DIV_ROUND_UP(wrlen, 8); 21854846d533SRahul Lakkireddy 21861a2a14fbSRahul Lakkireddy if (skb_shinfo(skb)->nr_frags > 0) { 21871a2a14fbSRahul Lakkireddy if (skb_headlen(skb) - hdr_len) 21881a2a14fbSRahul Lakkireddy nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1); 21891a2a14fbSRahul Lakkireddy else 21904846d533SRahul Lakkireddy nsgl = sgl_len(skb_shinfo(skb)->nr_frags); 21911a2a14fbSRahul Lakkireddy } else if (skb->len - hdr_len) { 21924846d533SRahul Lakkireddy nsgl = sgl_len(1); 21931a2a14fbSRahul Lakkireddy } 21944846d533SRahul Lakkireddy 21954846d533SRahul Lakkireddy return flits + nsgl; 21964846d533SRahul Lakkireddy } 21974846d533SRahul Lakkireddy 21984f1d9726SRahul Lakkireddy static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq, 21994846d533SRahul Lakkireddy struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr, 22004846d533SRahul Lakkireddy u32 hdr_len, u32 wrlen) 22014846d533SRahul Lakkireddy { 22024846d533SRahul Lakkireddy const struct skb_shared_info *ssi = skb_shinfo(skb); 22034846d533SRahul Lakkireddy struct cpl_tx_pkt_core *cpl; 22044846d533SRahul Lakkireddy u32 immd_len, wrlen16; 22054846d533SRahul Lakkireddy bool compl = false; 22061a2a14fbSRahul Lakkireddy u8 ver, proto; 22071a2a14fbSRahul Lakkireddy 22081a2a14fbSRahul Lakkireddy ver = ip_hdr(skb)->version; 22091a2a14fbSRahul Lakkireddy proto = (ver == 6) ? 
ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol; 22104846d533SRahul Lakkireddy 22114846d533SRahul Lakkireddy wrlen16 = DIV_ROUND_UP(wrlen, 16); 22124846d533SRahul Lakkireddy immd_len = sizeof(struct cpl_tx_pkt_core); 22131a2a14fbSRahul Lakkireddy if (skb_shinfo(skb)->gso_size && 22141a2a14fbSRahul Lakkireddy !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) 22154846d533SRahul Lakkireddy immd_len += sizeof(struct cpl_tx_pkt_lso_core); 22164846d533SRahul Lakkireddy immd_len += hdr_len; 22174846d533SRahul Lakkireddy 22184846d533SRahul Lakkireddy if (!eosw_txq->ncompl || 22194f1d9726SRahul Lakkireddy (eosw_txq->last_compl + wrlen16) >= 22204f1d9726SRahul Lakkireddy (adap->params.ofldq_wr_cred / 2)) { 22214846d533SRahul Lakkireddy compl = true; 22224846d533SRahul Lakkireddy eosw_txq->ncompl++; 22234846d533SRahul Lakkireddy eosw_txq->last_compl = 0; 22244846d533SRahul Lakkireddy } 22254846d533SRahul Lakkireddy 22264846d533SRahul Lakkireddy wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | 22274846d533SRahul Lakkireddy FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) | 22284846d533SRahul Lakkireddy FW_WR_COMPL_V(compl)); 22294846d533SRahul Lakkireddy wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) | 22304846d533SRahul Lakkireddy FW_WR_FLOWID_V(eosw_txq->hwtid)); 22314846d533SRahul Lakkireddy wr->r3 = 0; 22321a2a14fbSRahul Lakkireddy if (proto == IPPROTO_UDP) { 22331a2a14fbSRahul Lakkireddy cpl = write_eo_udp_wr(skb, wr, hdr_len); 22341a2a14fbSRahul Lakkireddy } else { 22354846d533SRahul Lakkireddy wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; 22364846d533SRahul Lakkireddy wr->u.tcpseg.ethlen = skb_network_offset(skb); 22374846d533SRahul Lakkireddy wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); 22384846d533SRahul Lakkireddy wr->u.tcpseg.tcplen = tcp_hdrlen(skb); 22394846d533SRahul Lakkireddy wr->u.tcpseg.tsclk_tsoff = 0; 22404846d533SRahul Lakkireddy wr->u.tcpseg.r4 = 0; 22414846d533SRahul Lakkireddy wr->u.tcpseg.r5 = 0; 22424846d533SRahul Lakkireddy wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len); 22434846d533SRahul Lakkireddy 22444846d533SRahul Lakkireddy if (ssi->gso_size) { 22454846d533SRahul Lakkireddy struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 22464846d533SRahul Lakkireddy 22474846d533SRahul Lakkireddy wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size); 22484846d533SRahul Lakkireddy cpl = write_tso_wr(adap, skb, lso); 22494846d533SRahul Lakkireddy } else { 22504846d533SRahul Lakkireddy wr->u.tcpseg.mss = cpu_to_be16(0xffff); 22514846d533SRahul Lakkireddy cpl = (void *)(wr + 1); 22524846d533SRahul Lakkireddy } 22531a2a14fbSRahul Lakkireddy } 22544846d533SRahul Lakkireddy 22554846d533SRahul Lakkireddy eosw_txq->cred -= wrlen16; 22564846d533SRahul Lakkireddy eosw_txq->last_compl += wrlen16; 22574846d533SRahul Lakkireddy return cpl; 22584846d533SRahul Lakkireddy } 22594846d533SRahul Lakkireddy 22604f1d9726SRahul Lakkireddy static int ethofld_hard_xmit(struct net_device *dev, 22614846d533SRahul Lakkireddy struct sge_eosw_txq *eosw_txq) 22624846d533SRahul Lakkireddy { 22634846d533SRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev); 22644846d533SRahul Lakkireddy struct adapter *adap = netdev2adap(dev); 22654846d533SRahul Lakkireddy u32 wrlen, wrlen16, hdr_len, data_len; 22660e395b3cSRahul Lakkireddy enum sge_eosw_state next_state; 22674846d533SRahul Lakkireddy u64 cntrl, *start, *end, *sgl; 22684846d533SRahul Lakkireddy struct sge_eohw_txq *eohw_txq; 22694846d533SRahul Lakkireddy struct cpl_tx_pkt_core *cpl; 22704846d533SRahul Lakkireddy struct fw_eth_tx_eo_wr *wr; 
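/* Editorial sketch, not part of the driver: the sizing code above keeps
 * converting between three units -- bytes, 8-byte flits and 16-byte
 * firmware credit units ("len16") -- with one 64-byte Tx descriptor
 * holding eight flits. The helpers below use invented names and only
 * restate those conversions; DIV_ROUND_UP() is the usual kernel macro.
 */
#define SKETCH_FLIT_BYTES	8	/* one flit is 8 bytes */
#define SKETCH_DESC_FLITS	8	/* one Tx descriptor holds 8 flits */
#define SKETCH_LEN16_BYTES	16	/* firmware credits count 16-byte units */

static inline unsigned int sketch_bytes_to_flits(unsigned int bytes)
{
	return DIV_ROUND_UP(bytes, SKETCH_FLIT_BYTES);
}

static inline unsigned int sketch_flits_to_desc(unsigned int flits)
{
	return DIV_ROUND_UP(flits, SKETCH_DESC_FLITS);
}

static inline unsigned int sketch_wrlen_to_len16(unsigned int wrlen)
{
	return DIV_ROUND_UP(wrlen, SKETCH_LEN16_BYTES);
}

/* Worked example: a 176-byte Work Request is 22 flits, fits in 3 Tx
 * descriptors and consumes 11 len16 credits.
 */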
22710e395b3cSRahul Lakkireddy bool skip_eotx_wr = false; 22720ed96b46SRahul Lakkireddy struct tx_sw_desc *d; 22734846d533SRahul Lakkireddy struct sk_buff *skb; 22744f1d9726SRahul Lakkireddy int left, ret = 0; 22754846d533SRahul Lakkireddy u8 flits, ndesc; 22764846d533SRahul Lakkireddy 22774846d533SRahul Lakkireddy eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; 22784846d533SRahul Lakkireddy spin_lock(&eohw_txq->lock); 22794846d533SRahul Lakkireddy reclaim_completed_tx_imm(&eohw_txq->q); 22804846d533SRahul Lakkireddy 22814846d533SRahul Lakkireddy d = &eosw_txq->desc[eosw_txq->last_pidx]; 22824846d533SRahul Lakkireddy skb = d->skb; 22834846d533SRahul Lakkireddy skb_tx_timestamp(skb); 22844846d533SRahul Lakkireddy 22854846d533SRahul Lakkireddy wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx]; 22860e395b3cSRahul Lakkireddy if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && 22870e395b3cSRahul Lakkireddy eosw_txq->last_pidx == eosw_txq->flowc_idx)) { 22880e395b3cSRahul Lakkireddy hdr_len = skb->len; 22890e395b3cSRahul Lakkireddy data_len = 0; 22900e395b3cSRahul Lakkireddy flits = DIV_ROUND_UP(hdr_len, 8); 22910e395b3cSRahul Lakkireddy if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND) 22920e395b3cSRahul Lakkireddy next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY; 22930e395b3cSRahul Lakkireddy else 22940e395b3cSRahul Lakkireddy next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY; 22950e395b3cSRahul Lakkireddy skip_eotx_wr = true; 22960e395b3cSRahul Lakkireddy } else { 22974846d533SRahul Lakkireddy hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb)); 22984846d533SRahul Lakkireddy data_len = skb->len - hdr_len; 22994846d533SRahul Lakkireddy flits = ethofld_calc_tx_flits(adap, skb, hdr_len); 23000e395b3cSRahul Lakkireddy } 23014846d533SRahul Lakkireddy ndesc = flits_to_desc(flits); 23024846d533SRahul Lakkireddy wrlen = flits * 8; 23034846d533SRahul Lakkireddy wrlen16 = DIV_ROUND_UP(wrlen, 16); 23044846d533SRahul Lakkireddy 23054f1d9726SRahul Lakkireddy left = txq_avail(&eohw_txq->q) - ndesc; 23064f1d9726SRahul Lakkireddy 23074f1d9726SRahul Lakkireddy /* If there are no descriptors left in hardware queues or no 23084f1d9726SRahul Lakkireddy * CPL credits left in software queues, then wait for them 23094f1d9726SRahul Lakkireddy * to come back and retry again. Note that we always request 23104f1d9726SRahul Lakkireddy * for credits update via interrupt for every half credits 23114f1d9726SRahul Lakkireddy * consumed. So, the interrupt will eventually restore the 23124f1d9726SRahul Lakkireddy * credits and invoke the Tx path again. 
23134846d533SRahul Lakkireddy */ 23144f1d9726SRahul Lakkireddy if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) { 23154f1d9726SRahul Lakkireddy ret = -ENOMEM; 23164846d533SRahul Lakkireddy goto out_unlock; 23174f1d9726SRahul Lakkireddy } 23184846d533SRahul Lakkireddy 23190e395b3cSRahul Lakkireddy if (unlikely(skip_eotx_wr)) { 23200e395b3cSRahul Lakkireddy start = (u64 *)wr; 23210e395b3cSRahul Lakkireddy eosw_txq->state = next_state; 232269422a7eSRahul Lakkireddy eosw_txq->cred -= wrlen16; 232369422a7eSRahul Lakkireddy eosw_txq->ncompl++; 232469422a7eSRahul Lakkireddy eosw_txq->last_compl = 0; 23250e395b3cSRahul Lakkireddy goto write_wr_headers; 23260e395b3cSRahul Lakkireddy } 23270e395b3cSRahul Lakkireddy 23284846d533SRahul Lakkireddy cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen); 23294846d533SRahul Lakkireddy cntrl = hwcsum(adap->params.chip, skb); 23304846d533SRahul Lakkireddy if (skb_vlan_tag_present(skb)) 23314846d533SRahul Lakkireddy cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); 23324846d533SRahul Lakkireddy 23334846d533SRahul Lakkireddy cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | 23344846d533SRahul Lakkireddy TXPKT_INTF_V(pi->tx_chan) | 23354846d533SRahul Lakkireddy TXPKT_PF_V(adap->pf)); 23364846d533SRahul Lakkireddy cpl->pack = 0; 23374846d533SRahul Lakkireddy cpl->len = cpu_to_be16(skb->len); 23384846d533SRahul Lakkireddy cpl->ctrl1 = cpu_to_be64(cntrl); 23394846d533SRahul Lakkireddy 23404846d533SRahul Lakkireddy start = (u64 *)(cpl + 1); 23414846d533SRahul Lakkireddy 23420e395b3cSRahul Lakkireddy write_wr_headers: 23434846d533SRahul Lakkireddy sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start, 23444846d533SRahul Lakkireddy hdr_len); 23454846d533SRahul Lakkireddy if (data_len) { 23464f1d9726SRahul Lakkireddy ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr); 23474f1d9726SRahul Lakkireddy if (unlikely(ret)) { 23484846d533SRahul Lakkireddy memset(d->addr, 0, sizeof(d->addr)); 23494846d533SRahul Lakkireddy eohw_txq->mapping_err++; 23504846d533SRahul Lakkireddy goto out_unlock; 23514846d533SRahul Lakkireddy } 23524846d533SRahul Lakkireddy 23534846d533SRahul Lakkireddy end = (u64 *)wr + flits; 23544846d533SRahul Lakkireddy if (unlikely(start > sgl)) { 23554846d533SRahul Lakkireddy left = (u8 *)end - (u8 *)eohw_txq->q.stat; 23564846d533SRahul Lakkireddy end = (void *)eohw_txq->q.desc + left; 23574846d533SRahul Lakkireddy } 23584846d533SRahul Lakkireddy 23594846d533SRahul Lakkireddy if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) { 23604846d533SRahul Lakkireddy /* If current position is already at the end of the 23614846d533SRahul Lakkireddy * txq, reset the current to point to start of the queue 23624846d533SRahul Lakkireddy * and update the end ptr as well. 
23634846d533SRahul Lakkireddy */ 23644846d533SRahul Lakkireddy left = (u8 *)end - (u8 *)eohw_txq->q.stat; 23654846d533SRahul Lakkireddy 23664846d533SRahul Lakkireddy end = (void *)eohw_txq->q.desc + left; 23674846d533SRahul Lakkireddy sgl = (void *)eohw_txq->q.desc; 23684846d533SRahul Lakkireddy } 23694846d533SRahul Lakkireddy 23704846d533SRahul Lakkireddy cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len, 23714846d533SRahul Lakkireddy d->addr); 23724846d533SRahul Lakkireddy } 23734846d533SRahul Lakkireddy 23748311f0beSRahul Lakkireddy if (skb_shinfo(skb)->gso_size) { 23758311f0beSRahul Lakkireddy if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) 23768311f0beSRahul Lakkireddy eohw_txq->uso++; 23778311f0beSRahul Lakkireddy else 23788311f0beSRahul Lakkireddy eohw_txq->tso++; 23798311f0beSRahul Lakkireddy eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs; 23808311f0beSRahul Lakkireddy } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 23818311f0beSRahul Lakkireddy eohw_txq->tx_cso++; 23828311f0beSRahul Lakkireddy } 23838311f0beSRahul Lakkireddy 23848311f0beSRahul Lakkireddy if (skb_vlan_tag_present(skb)) 23858311f0beSRahul Lakkireddy eohw_txq->vlan_ins++; 23868311f0beSRahul Lakkireddy 23874846d533SRahul Lakkireddy txq_advance(&eohw_txq->q, ndesc); 23884846d533SRahul Lakkireddy cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc); 23894846d533SRahul Lakkireddy eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); 23904846d533SRahul Lakkireddy 23914846d533SRahul Lakkireddy out_unlock: 23924846d533SRahul Lakkireddy spin_unlock(&eohw_txq->lock); 23934f1d9726SRahul Lakkireddy return ret; 23944846d533SRahul Lakkireddy } 23954846d533SRahul Lakkireddy 23964846d533SRahul Lakkireddy static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq) 23974846d533SRahul Lakkireddy { 23984846d533SRahul Lakkireddy struct sk_buff *skb; 23994f1d9726SRahul Lakkireddy int pktcount, ret; 24004846d533SRahul Lakkireddy 24014846d533SRahul Lakkireddy switch (eosw_txq->state) { 24024846d533SRahul Lakkireddy case CXGB4_EO_STATE_ACTIVE: 24030e395b3cSRahul Lakkireddy case CXGB4_EO_STATE_FLOWC_OPEN_SEND: 24040e395b3cSRahul Lakkireddy case CXGB4_EO_STATE_FLOWC_CLOSE_SEND: 24054846d533SRahul Lakkireddy pktcount = eosw_txq->pidx - eosw_txq->last_pidx; 24064846d533SRahul Lakkireddy if (pktcount < 0) 24074846d533SRahul Lakkireddy pktcount += eosw_txq->ndesc; 24084846d533SRahul Lakkireddy break; 24090e395b3cSRahul Lakkireddy case CXGB4_EO_STATE_FLOWC_OPEN_REPLY: 24100e395b3cSRahul Lakkireddy case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY: 24114846d533SRahul Lakkireddy case CXGB4_EO_STATE_CLOSED: 24124846d533SRahul Lakkireddy default: 24134846d533SRahul Lakkireddy return; 2414272630feSRahul Lakkireddy } 24154846d533SRahul Lakkireddy 24164846d533SRahul Lakkireddy while (pktcount--) { 24174846d533SRahul Lakkireddy skb = eosw_txq_peek(eosw_txq); 24184846d533SRahul Lakkireddy if (!skb) { 24194846d533SRahul Lakkireddy eosw_txq_advance_index(&eosw_txq->last_pidx, 1, 24204846d533SRahul Lakkireddy eosw_txq->ndesc); 24214846d533SRahul Lakkireddy continue; 24224846d533SRahul Lakkireddy } 24234846d533SRahul Lakkireddy 24244f1d9726SRahul Lakkireddy ret = ethofld_hard_xmit(dev, eosw_txq); 24254f1d9726SRahul Lakkireddy if (ret) 24264f1d9726SRahul Lakkireddy break; 24274846d533SRahul Lakkireddy } 24284846d533SRahul Lakkireddy } 24294846d533SRahul Lakkireddy 2430b1396c2bSRahul Lakkireddy static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb, 2431b1396c2bSRahul Lakkireddy struct net_device *dev) 2432b1396c2bSRahul Lakkireddy { 
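/* Editorial sketch, not part of the driver: ethofld_xmit() above (and
 * reclaim_completed_tx_imm() earlier) measure how far one ring index is
 * ahead of another by taking a signed difference and adding the ring size
 * back when the result is negative. The helper name below is invented
 * purely for illustration.
 */
static inline int sketch_ring_entries_pending(int head, int tail, int size)
{
	int pending = head - tail;	/* may go negative across a wrap */

	return pending < 0 ? pending + size : pending;
}

/* Worked example: head = 2, tail = 61, size = 64  ->  (2 - 61) + 64 = 5. */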
24334846d533SRahul Lakkireddy struct cxgb4_tc_port_mqprio *tc_port_mqprio; 24344846d533SRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev); 24354846d533SRahul Lakkireddy struct adapter *adap = netdev2adap(dev); 24364846d533SRahul Lakkireddy struct sge_eosw_txq *eosw_txq; 24374846d533SRahul Lakkireddy u32 qid; 2438b1396c2bSRahul Lakkireddy int ret; 2439b1396c2bSRahul Lakkireddy 2440b1396c2bSRahul Lakkireddy ret = cxgb4_validate_skb(skb, dev, ETH_HLEN); 2441b1396c2bSRahul Lakkireddy if (ret) 2442b1396c2bSRahul Lakkireddy goto out_free; 2443b1396c2bSRahul Lakkireddy 24444846d533SRahul Lakkireddy tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; 24454846d533SRahul Lakkireddy qid = skb_get_queue_mapping(skb) - pi->nqsets; 24464846d533SRahul Lakkireddy eosw_txq = &tc_port_mqprio->eosw_txq[qid]; 24474846d533SRahul Lakkireddy spin_lock_bh(&eosw_txq->lock); 24484846d533SRahul Lakkireddy if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) 24494846d533SRahul Lakkireddy goto out_unlock; 24504846d533SRahul Lakkireddy 24514846d533SRahul Lakkireddy ret = eosw_txq_enqueue(eosw_txq, skb); 24524846d533SRahul Lakkireddy if (ret) 24534846d533SRahul Lakkireddy goto out_unlock; 24544846d533SRahul Lakkireddy 24554846d533SRahul Lakkireddy /* SKB is queued for processing until credits are available. 24564846d533SRahul Lakkireddy * So, call the destructor now and we'll free the skb later 24574846d533SRahul Lakkireddy * after it has been successfully transmitted. 24584846d533SRahul Lakkireddy */ 24594846d533SRahul Lakkireddy skb_orphan(skb); 24604846d533SRahul Lakkireddy 24614846d533SRahul Lakkireddy eosw_txq_advance(eosw_txq, 1); 24624846d533SRahul Lakkireddy ethofld_xmit(dev, eosw_txq); 24634846d533SRahul Lakkireddy spin_unlock_bh(&eosw_txq->lock); 24644846d533SRahul Lakkireddy return NETDEV_TX_OK; 24654846d533SRahul Lakkireddy 24664846d533SRahul Lakkireddy out_unlock: 24674846d533SRahul Lakkireddy spin_unlock_bh(&eosw_txq->lock); 2468b1396c2bSRahul Lakkireddy out_free: 2469b1396c2bSRahul Lakkireddy dev_kfree_skb_any(skb); 2470b1396c2bSRahul Lakkireddy return NETDEV_TX_OK; 2471b1396c2bSRahul Lakkireddy } 2472b1396c2bSRahul Lakkireddy 2473d5fbda61SArjun Vynipadath netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) 2474d5fbda61SArjun Vynipadath { 2475d5fbda61SArjun Vynipadath struct port_info *pi = netdev_priv(dev); 2476b1396c2bSRahul Lakkireddy u16 qid = skb_get_queue_mapping(skb); 2477d5fbda61SArjun Vynipadath 2478d5fbda61SArjun Vynipadath if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) 2479d5fbda61SArjun Vynipadath return cxgb4_vf_eth_xmit(skb, dev); 2480d5fbda61SArjun Vynipadath 2481b1396c2bSRahul Lakkireddy if (unlikely(qid >= pi->nqsets)) 2482b1396c2bSRahul Lakkireddy return cxgb4_ethofld_xmit(skb, dev); 2483b1396c2bSRahul Lakkireddy 2484030c9882SRahul Lakkireddy if (is_ptp_enabled(skb, dev)) { 2485030c9882SRahul Lakkireddy struct adapter *adap = netdev2adap(dev); 2486030c9882SRahul Lakkireddy netdev_tx_t ret; 2487030c9882SRahul Lakkireddy 2488030c9882SRahul Lakkireddy spin_lock(&adap->ptp_lock); 2489030c9882SRahul Lakkireddy ret = cxgb4_eth_xmit(skb, dev); 2490030c9882SRahul Lakkireddy spin_unlock(&adap->ptp_lock); 2491030c9882SRahul Lakkireddy return ret; 2492030c9882SRahul Lakkireddy } 2493030c9882SRahul Lakkireddy 2494d5fbda61SArjun Vynipadath return cxgb4_eth_xmit(skb, dev); 2495d5fbda61SArjun Vynipadath } 2496d5fbda61SArjun Vynipadath 249769422a7eSRahul Lakkireddy static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq) 249869422a7eSRahul Lakkireddy { 
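/* Editorial sketch, not part of the driver: t4_start_xmit() above steers a
 * packet by its netdev Tx queue number. The first pi->nqsets queues of a
 * port are the regular Ethernet queue sets; anything at or beyond nqsets
 * selects one of the per-port ETHOFLD (mqprio) software queues, exactly as
 * cxgb4_ethofld_xmit() does with skb_get_queue_mapping(skb) - pi->nqsets.
 * The helper below uses an invented name and just restates that split.
 */
static inline bool sketch_pick_eosw_queue(unsigned int txq_id,
					  unsigned int nqsets,
					  unsigned int *eosw_qid)
{
	if (txq_id < nqsets)
		return false;			/* regular Ethernet Tx queue */

	*eosw_qid = txq_id - nqsets;		/* index into eosw_txq[] */
	return true;
}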
249969422a7eSRahul Lakkireddy int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; 250069422a7eSRahul Lakkireddy int pidx = eosw_txq->pidx; 250169422a7eSRahul Lakkireddy struct sk_buff *skb; 250269422a7eSRahul Lakkireddy 250369422a7eSRahul Lakkireddy if (!pktcount) 250469422a7eSRahul Lakkireddy return; 250569422a7eSRahul Lakkireddy 250669422a7eSRahul Lakkireddy if (pktcount < 0) 250769422a7eSRahul Lakkireddy pktcount += eosw_txq->ndesc; 250869422a7eSRahul Lakkireddy 250969422a7eSRahul Lakkireddy while (pktcount--) { 251069422a7eSRahul Lakkireddy pidx--; 251169422a7eSRahul Lakkireddy if (pidx < 0) 251269422a7eSRahul Lakkireddy pidx += eosw_txq->ndesc; 251369422a7eSRahul Lakkireddy 251469422a7eSRahul Lakkireddy skb = eosw_txq->desc[pidx].skb; 251569422a7eSRahul Lakkireddy if (skb) { 251669422a7eSRahul Lakkireddy dev_consume_skb_any(skb); 251769422a7eSRahul Lakkireddy eosw_txq->desc[pidx].skb = NULL; 251869422a7eSRahul Lakkireddy eosw_txq->inuse--; 251969422a7eSRahul Lakkireddy } 252069422a7eSRahul Lakkireddy } 252169422a7eSRahul Lakkireddy 252269422a7eSRahul Lakkireddy eosw_txq->pidx = eosw_txq->last_pidx + 1; 252369422a7eSRahul Lakkireddy } 252469422a7eSRahul Lakkireddy 2525f7917c00SJeff Kirsher /** 25260e395b3cSRahul Lakkireddy * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc. 252729bbf5d7SRahul Lakkireddy * @dev: netdevice 252829bbf5d7SRahul Lakkireddy * @eotid: ETHOFLD tid to bind/unbind 252929bbf5d7SRahul Lakkireddy * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid 25300e395b3cSRahul Lakkireddy * 25310e395b3cSRahul Lakkireddy * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class. 25320e395b3cSRahul Lakkireddy * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from 25330e395b3cSRahul Lakkireddy * a traffic class. 25340e395b3cSRahul Lakkireddy */ 25350e395b3cSRahul Lakkireddy int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc) 25360e395b3cSRahul Lakkireddy { 25370e395b3cSRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev); 25380e395b3cSRahul Lakkireddy struct adapter *adap = netdev2adap(dev); 25390e395b3cSRahul Lakkireddy enum sge_eosw_state next_state; 25400e395b3cSRahul Lakkireddy struct sge_eosw_txq *eosw_txq; 25410e395b3cSRahul Lakkireddy u32 len, len16, nparams = 6; 25420e395b3cSRahul Lakkireddy struct fw_flowc_wr *flowc; 25430e395b3cSRahul Lakkireddy struct eotid_entry *entry; 25440e395b3cSRahul Lakkireddy struct sge_ofld_rxq *rxq; 25450e395b3cSRahul Lakkireddy struct sk_buff *skb; 25460e395b3cSRahul Lakkireddy int ret = 0; 25470e395b3cSRahul Lakkireddy 2548a422d5ffSGustavo A. R. 
Silva len = struct_size(flowc, mnemval, nparams); 25490e395b3cSRahul Lakkireddy len16 = DIV_ROUND_UP(len, 16); 25500e395b3cSRahul Lakkireddy 25510e395b3cSRahul Lakkireddy entry = cxgb4_lookup_eotid(&adap->tids, eotid); 25520e395b3cSRahul Lakkireddy if (!entry) 25530e395b3cSRahul Lakkireddy return -ENOMEM; 25540e395b3cSRahul Lakkireddy 25550e395b3cSRahul Lakkireddy eosw_txq = (struct sge_eosw_txq *)entry->data; 25560e395b3cSRahul Lakkireddy if (!eosw_txq) 25570e395b3cSRahul Lakkireddy return -ENOMEM; 25580e395b3cSRahul Lakkireddy 25593822d067SRahul Lakkireddy if (!(adap->flags & CXGB4_FW_OK)) { 25603822d067SRahul Lakkireddy /* Don't stall caller when access to FW is lost */ 25613822d067SRahul Lakkireddy complete(&eosw_txq->completion); 25623822d067SRahul Lakkireddy return -EIO; 25633822d067SRahul Lakkireddy } 25643822d067SRahul Lakkireddy 25650e395b3cSRahul Lakkireddy skb = alloc_skb(len, GFP_KERNEL); 25660e395b3cSRahul Lakkireddy if (!skb) 25670e395b3cSRahul Lakkireddy return -ENOMEM; 25680e395b3cSRahul Lakkireddy 25690e395b3cSRahul Lakkireddy spin_lock_bh(&eosw_txq->lock); 25700e395b3cSRahul Lakkireddy if (tc != FW_SCHED_CLS_NONE) { 25710e395b3cSRahul Lakkireddy if (eosw_txq->state != CXGB4_EO_STATE_CLOSED) 257252bfcdd8SÍñigo Huguet goto out_free_skb; 25730e395b3cSRahul Lakkireddy 25740e395b3cSRahul Lakkireddy next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND; 25750e395b3cSRahul Lakkireddy } else { 25760e395b3cSRahul Lakkireddy if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) 257752bfcdd8SÍñigo Huguet goto out_free_skb; 25780e395b3cSRahul Lakkireddy 25790e395b3cSRahul Lakkireddy next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND; 25800e395b3cSRahul Lakkireddy } 25810e395b3cSRahul Lakkireddy 25820e395b3cSRahul Lakkireddy flowc = __skb_put(skb, len); 25830e395b3cSRahul Lakkireddy memset(flowc, 0, len); 25840e395b3cSRahul Lakkireddy 25850e395b3cSRahul Lakkireddy rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid]; 25860e395b3cSRahul Lakkireddy flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) | 25870e395b3cSRahul Lakkireddy FW_WR_FLOWID_V(eosw_txq->hwtid)); 25880e395b3cSRahul Lakkireddy flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | 25890e395b3cSRahul Lakkireddy FW_FLOWC_WR_NPARAMS_V(nparams) | 25900e395b3cSRahul Lakkireddy FW_WR_COMPL_V(1)); 25910e395b3cSRahul Lakkireddy flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 25920e395b3cSRahul Lakkireddy flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf)); 25930e395b3cSRahul Lakkireddy flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 25940e395b3cSRahul Lakkireddy flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan); 25950e395b3cSRahul Lakkireddy flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 25960e395b3cSRahul Lakkireddy flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan); 25970e395b3cSRahul Lakkireddy flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 25980e395b3cSRahul Lakkireddy flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id); 25990e395b3cSRahul Lakkireddy flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 26000e395b3cSRahul Lakkireddy flowc->mnemval[4].val = cpu_to_be32(tc); 26010e395b3cSRahul Lakkireddy flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE; 26020e395b3cSRahul Lakkireddy flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ? 
26030e395b3cSRahul Lakkireddy FW_FLOWC_MNEM_EOSTATE_CLOSING : 26040e395b3cSRahul Lakkireddy FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); 26050e395b3cSRahul Lakkireddy 260669422a7eSRahul Lakkireddy /* Free up any pending skbs to ensure there's room for 260769422a7eSRahul Lakkireddy * termination FLOWC. 260869422a7eSRahul Lakkireddy */ 260969422a7eSRahul Lakkireddy if (tc == FW_SCHED_CLS_NONE) 261069422a7eSRahul Lakkireddy eosw_txq_flush_pending_skbs(eosw_txq); 26110e395b3cSRahul Lakkireddy 26120e395b3cSRahul Lakkireddy ret = eosw_txq_enqueue(eosw_txq, skb); 261352bfcdd8SÍñigo Huguet if (ret) 261452bfcdd8SÍñigo Huguet goto out_free_skb; 26150e395b3cSRahul Lakkireddy 26160e395b3cSRahul Lakkireddy eosw_txq->state = next_state; 26170e395b3cSRahul Lakkireddy eosw_txq->flowc_idx = eosw_txq->pidx; 26180e395b3cSRahul Lakkireddy eosw_txq_advance(eosw_txq, 1); 26190e395b3cSRahul Lakkireddy ethofld_xmit(dev, eosw_txq); 26200e395b3cSRahul Lakkireddy 262152bfcdd8SÍñigo Huguet spin_unlock_bh(&eosw_txq->lock); 262252bfcdd8SÍñigo Huguet return 0; 262352bfcdd8SÍñigo Huguet 262452bfcdd8SÍñigo Huguet out_free_skb: 262552bfcdd8SÍñigo Huguet dev_consume_skb_any(skb); 26260e395b3cSRahul Lakkireddy spin_unlock_bh(&eosw_txq->lock); 26270e395b3cSRahul Lakkireddy return ret; 26280e395b3cSRahul Lakkireddy } 26290e395b3cSRahul Lakkireddy 26300e395b3cSRahul Lakkireddy /** 2631f7917c00SJeff Kirsher * is_imm - check whether a packet can be sent as immediate data 2632f7917c00SJeff Kirsher * @skb: the packet 2633f7917c00SJeff Kirsher * 2634f7917c00SJeff Kirsher * Returns true if a packet can be sent as a WR with immediate data. 2635f7917c00SJeff Kirsher */ 2636f7917c00SJeff Kirsher static inline int is_imm(const struct sk_buff *skb) 2637f7917c00SJeff Kirsher { 2638f7917c00SJeff Kirsher return skb->len <= MAX_CTRL_WR_LEN; 2639f7917c00SJeff Kirsher } 2640f7917c00SJeff Kirsher 2641f7917c00SJeff Kirsher /** 2642f7917c00SJeff Kirsher * ctrlq_check_stop - check if a control queue is full and should stop 2643f7917c00SJeff Kirsher * @q: the queue 2644f7917c00SJeff Kirsher * @wr: most recent WR written to the queue 2645f7917c00SJeff Kirsher * 2646f7917c00SJeff Kirsher * Check if a control queue has become full and should be stopped. 2647f7917c00SJeff Kirsher * We clean up control queue descriptors very lazily, only when we are out. 2648f7917c00SJeff Kirsher * If the queue is still full after reclaiming any completed descriptors 2649f7917c00SJeff Kirsher * we suspend it and have the last WR wake it up. 
2650f7917c00SJeff Kirsher */ 2651f7917c00SJeff Kirsher static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) 2652f7917c00SJeff Kirsher { 2653f7917c00SJeff Kirsher reclaim_completed_tx_imm(&q->q); 2654f7917c00SJeff Kirsher if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { 2655e2ac9628SHariprasad Shenai wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); 2656f7917c00SJeff Kirsher q->q.stops++; 2657f7917c00SJeff Kirsher q->full = 1; 2658f7917c00SJeff Kirsher } 2659f7917c00SJeff Kirsher } 2660f7917c00SJeff Kirsher 26617235ffaeSVishal Kulkarni #define CXGB4_SELFTEST_LB_STR "CHELSIO_SELFTEST" 26627235ffaeSVishal Kulkarni 26637235ffaeSVishal Kulkarni int cxgb4_selftest_lb_pkt(struct net_device *netdev) 26647235ffaeSVishal Kulkarni { 26657235ffaeSVishal Kulkarni struct port_info *pi = netdev_priv(netdev); 26667235ffaeSVishal Kulkarni struct adapter *adap = pi->adapter; 26677235ffaeSVishal Kulkarni struct cxgb4_ethtool_lb_test *lb; 26687235ffaeSVishal Kulkarni int ret, i = 0, pkt_len, credits; 26697235ffaeSVishal Kulkarni struct fw_eth_tx_pkt_wr *wr; 26707235ffaeSVishal Kulkarni struct cpl_tx_pkt_core *cpl; 26717235ffaeSVishal Kulkarni u32 ctrl0, ndesc, flits; 26727235ffaeSVishal Kulkarni struct sge_eth_txq *q; 26737235ffaeSVishal Kulkarni u8 *sgl; 26747235ffaeSVishal Kulkarni 26757235ffaeSVishal Kulkarni pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR); 26767235ffaeSVishal Kulkarni 267733595642SGanji Aravind flits = DIV_ROUND_UP(pkt_len + sizeof(*cpl) + sizeof(*wr), 267833595642SGanji Aravind sizeof(__be64)); 26797235ffaeSVishal Kulkarni ndesc = flits_to_desc(flits); 26807235ffaeSVishal Kulkarni 26817235ffaeSVishal Kulkarni lb = &pi->ethtool_lb; 26827235ffaeSVishal Kulkarni lb->loopback = 1; 26837235ffaeSVishal Kulkarni 26847235ffaeSVishal Kulkarni q = &adap->sge.ethtxq[pi->first_qset]; 2685c650e048SGanji Aravind __netif_tx_lock(q->txq, smp_processor_id()); 26867235ffaeSVishal Kulkarni 26877235ffaeSVishal Kulkarni reclaim_completed_tx(adap, &q->q, -1, true); 26887235ffaeSVishal Kulkarni credits = txq_avail(&q->q) - ndesc; 2689c650e048SGanji Aravind if (unlikely(credits < 0)) { 2690c650e048SGanji Aravind __netif_tx_unlock(q->txq); 26917235ffaeSVishal Kulkarni return -ENOMEM; 2692c650e048SGanji Aravind } 26937235ffaeSVishal Kulkarni 26947235ffaeSVishal Kulkarni wr = (void *)&q->q.desc[q->q.pidx]; 26957235ffaeSVishal Kulkarni memset(wr, 0, sizeof(struct tx_desc)); 26967235ffaeSVishal Kulkarni 26977235ffaeSVishal Kulkarni wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | 26987235ffaeSVishal Kulkarni FW_WR_IMMDLEN_V(pkt_len + 26997235ffaeSVishal Kulkarni sizeof(*cpl))); 27007235ffaeSVishal Kulkarni wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2))); 27017235ffaeSVishal Kulkarni wr->r3 = cpu_to_be64(0); 27027235ffaeSVishal Kulkarni 27037235ffaeSVishal Kulkarni cpl = (void *)(wr + 1); 27047235ffaeSVishal Kulkarni sgl = (u8 *)(cpl + 1); 27057235ffaeSVishal Kulkarni 27067235ffaeSVishal Kulkarni ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) | 27077235ffaeSVishal Kulkarni TXPKT_INTF_V(pi->tx_chan + 4); 27087235ffaeSVishal Kulkarni 27097235ffaeSVishal Kulkarni cpl->ctrl0 = htonl(ctrl0); 27107235ffaeSVishal Kulkarni cpl->pack = htons(0); 27117235ffaeSVishal Kulkarni cpl->len = htons(pkt_len); 27127235ffaeSVishal Kulkarni cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F); 27137235ffaeSVishal Kulkarni 27147235ffaeSVishal Kulkarni eth_broadcast_addr(sgl); 27157235ffaeSVishal Kulkarni i += ETH_ALEN; 27167235ffaeSVishal Kulkarni 
ether_addr_copy(&sgl[i], netdev->dev_addr); 27177235ffaeSVishal Kulkarni i += ETH_ALEN; 27187235ffaeSVishal Kulkarni 27197235ffaeSVishal Kulkarni snprintf(&sgl[i], sizeof(CXGB4_SELFTEST_LB_STR), "%s", 27207235ffaeSVishal Kulkarni CXGB4_SELFTEST_LB_STR); 27217235ffaeSVishal Kulkarni 27227235ffaeSVishal Kulkarni init_completion(&lb->completion); 27237235ffaeSVishal Kulkarni txq_advance(&q->q, ndesc); 27247235ffaeSVishal Kulkarni cxgb4_ring_tx_db(adap, &q->q, ndesc); 2725c650e048SGanji Aravind __netif_tx_unlock(q->txq); 27267235ffaeSVishal Kulkarni 27277235ffaeSVishal Kulkarni /* wait for the pkt to return */ 27287235ffaeSVishal Kulkarni ret = wait_for_completion_timeout(&lb->completion, 10 * HZ); 27297235ffaeSVishal Kulkarni if (!ret) 27307235ffaeSVishal Kulkarni ret = -ETIMEDOUT; 27317235ffaeSVishal Kulkarni else 27327235ffaeSVishal Kulkarni ret = lb->result; 27337235ffaeSVishal Kulkarni 27347235ffaeSVishal Kulkarni lb->loopback = 0; 27357235ffaeSVishal Kulkarni 27367235ffaeSVishal Kulkarni return ret; 27377235ffaeSVishal Kulkarni } 27387235ffaeSVishal Kulkarni 2739f7917c00SJeff Kirsher /** 2740f7917c00SJeff Kirsher * ctrl_xmit - send a packet through an SGE control Tx queue 2741f7917c00SJeff Kirsher * @q: the control queue 2742f7917c00SJeff Kirsher * @skb: the packet 2743f7917c00SJeff Kirsher * 2744f7917c00SJeff Kirsher * Send a packet through an SGE control Tx queue. Packets sent through 2745f7917c00SJeff Kirsher * a control queue must fit entirely as immediate data. 2746f7917c00SJeff Kirsher */ 2747f7917c00SJeff Kirsher static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) 2748f7917c00SJeff Kirsher { 2749f7917c00SJeff Kirsher unsigned int ndesc; 2750f7917c00SJeff Kirsher struct fw_wr_hdr *wr; 2751f7917c00SJeff Kirsher 2752f7917c00SJeff Kirsher if (unlikely(!is_imm(skb))) { 2753f7917c00SJeff Kirsher WARN_ON(1); 2754f7917c00SJeff Kirsher dev_kfree_skb(skb); 2755f7917c00SJeff Kirsher return NET_XMIT_DROP; 2756f7917c00SJeff Kirsher } 2757f7917c00SJeff Kirsher 2758f7917c00SJeff Kirsher ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); 2759f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 2760f7917c00SJeff Kirsher 2761f7917c00SJeff Kirsher if (unlikely(q->full)) { 2762f7917c00SJeff Kirsher skb->priority = ndesc; /* save for restart */ 2763f7917c00SJeff Kirsher __skb_queue_tail(&q->sendq, skb); 2764f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 2765f7917c00SJeff Kirsher return NET_XMIT_CN; 2766f7917c00SJeff Kirsher } 2767f7917c00SJeff Kirsher 2768f7917c00SJeff Kirsher wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; 2769a6ec572bSAtul Gupta cxgb4_inline_tx_skb(skb, &q->q, wr); 2770f7917c00SJeff Kirsher 2771f7917c00SJeff Kirsher txq_advance(&q->q, ndesc); 2772f7917c00SJeff Kirsher if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) 2773f7917c00SJeff Kirsher ctrlq_check_stop(q, wr); 2774f7917c00SJeff Kirsher 2775a6ec572bSAtul Gupta cxgb4_ring_tx_db(q->adap, &q->q, ndesc); 2776f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 2777f7917c00SJeff Kirsher 2778f7917c00SJeff Kirsher kfree_skb(skb); 2779f7917c00SJeff Kirsher return NET_XMIT_SUCCESS; 2780f7917c00SJeff Kirsher } 2781f7917c00SJeff Kirsher 2782f7917c00SJeff Kirsher /** 2783f7917c00SJeff Kirsher * restart_ctrlq - restart a suspended control queue 27846660de07SAllen Pais * @t: pointer to the tasklet associated with this handler 2785f7917c00SJeff Kirsher * 2786f7917c00SJeff Kirsher * Resumes transmission on a suspended Tx control queue. 
2787f7917c00SJeff Kirsher */ 27886660de07SAllen Pais static void restart_ctrlq(struct tasklet_struct *t) 2789f7917c00SJeff Kirsher { 2790f7917c00SJeff Kirsher struct sk_buff *skb; 2791f7917c00SJeff Kirsher unsigned int written = 0; 27926660de07SAllen Pais struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk); 2793f7917c00SJeff Kirsher 2794f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 2795f7917c00SJeff Kirsher reclaim_completed_tx_imm(&q->q); 2796f7917c00SJeff Kirsher BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ 2797f7917c00SJeff Kirsher 2798f7917c00SJeff Kirsher while ((skb = __skb_dequeue(&q->sendq)) != NULL) { 2799f7917c00SJeff Kirsher struct fw_wr_hdr *wr; 2800f7917c00SJeff Kirsher unsigned int ndesc = skb->priority; /* previously saved */ 2801f7917c00SJeff Kirsher 2802a4011fd4SHariprasad Shenai written += ndesc; 2803a4011fd4SHariprasad Shenai /* Write descriptors and free skbs outside the lock to limit 2804f7917c00SJeff Kirsher * wait times. q->full is still set so new skbs will be queued. 2805f7917c00SJeff Kirsher */ 2806a4011fd4SHariprasad Shenai wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; 2807a4011fd4SHariprasad Shenai txq_advance(&q->q, ndesc); 2808f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 2809f7917c00SJeff Kirsher 2810a6ec572bSAtul Gupta cxgb4_inline_tx_skb(skb, &q->q, wr); 2811f7917c00SJeff Kirsher kfree_skb(skb); 2812f7917c00SJeff Kirsher 2813f7917c00SJeff Kirsher if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { 2814f7917c00SJeff Kirsher unsigned long old = q->q.stops; 2815f7917c00SJeff Kirsher 2816f7917c00SJeff Kirsher ctrlq_check_stop(q, wr); 2817f7917c00SJeff Kirsher if (q->q.stops != old) { /* suspended anew */ 2818f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 2819f7917c00SJeff Kirsher goto ringdb; 2820f7917c00SJeff Kirsher } 2821f7917c00SJeff Kirsher } 2822f7917c00SJeff Kirsher if (written > 16) { 2823a6ec572bSAtul Gupta cxgb4_ring_tx_db(q->adap, &q->q, written); 2824f7917c00SJeff Kirsher written = 0; 2825f7917c00SJeff Kirsher } 2826f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 2827f7917c00SJeff Kirsher } 2828f7917c00SJeff Kirsher q->full = 0; 2829a6ec572bSAtul Gupta ringdb: 2830a6ec572bSAtul Gupta if (written) 2831a6ec572bSAtul Gupta cxgb4_ring_tx_db(q->adap, &q->q, written); 2832f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 2833f7917c00SJeff Kirsher } 2834f7917c00SJeff Kirsher 2835f7917c00SJeff Kirsher /** 2836f7917c00SJeff Kirsher * t4_mgmt_tx - send a management message 2837f7917c00SJeff Kirsher * @adap: the adapter 2838f7917c00SJeff Kirsher * @skb: the packet containing the management message 2839f7917c00SJeff Kirsher * 2840f7917c00SJeff Kirsher * Send a management message through control queue 0. 2841f7917c00SJeff Kirsher */ 2842f7917c00SJeff Kirsher int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) 2843f7917c00SJeff Kirsher { 2844f7917c00SJeff Kirsher int ret; 2845f7917c00SJeff Kirsher 2846f7917c00SJeff Kirsher local_bh_disable(); 2847f7917c00SJeff Kirsher ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); 2848f7917c00SJeff Kirsher local_bh_enable(); 2849f7917c00SJeff Kirsher return ret; 2850f7917c00SJeff Kirsher } 2851f7917c00SJeff Kirsher 2852f7917c00SJeff Kirsher /** 2853f7917c00SJeff Kirsher * is_ofld_imm - check whether a packet can be sent as immediate data 2854f7917c00SJeff Kirsher * @skb: the packet 2855f7917c00SJeff Kirsher * 2856f7917c00SJeff Kirsher * Returns true if a packet can be sent as an offload WR with immediate 28572355a677SAyush Sawal * data. 
28582355a677SAyush Sawal * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field. 28592355a677SAyush Sawal * However, FW_ULPTX_WR commands have a 256 byte immediate only 28602355a677SAyush Sawal * payload limit. 2861f7917c00SJeff Kirsher */ 2862f7917c00SJeff Kirsher static inline int is_ofld_imm(const struct sk_buff *skb) 2863f7917c00SJeff Kirsher { 28642f47d580SHarsh Jain struct work_request_hdr *req = (struct work_request_hdr *)skb->data; 28652f47d580SHarsh Jain unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); 28662f47d580SHarsh Jain 28672355a677SAyush Sawal if (unlikely(opcode == FW_ULPTX_WR)) 28682355a677SAyush Sawal return skb->len <= MAX_IMM_ULPTX_WR_LEN; 28692355a677SAyush Sawal else if (opcode == FW_CRYPTO_LOOKASIDE_WR) 28702f47d580SHarsh Jain return skb->len <= SGE_MAX_WR_LEN; 28712f47d580SHarsh Jain else 28722355a677SAyush Sawal return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN; 2873f7917c00SJeff Kirsher } 2874f7917c00SJeff Kirsher 2875f7917c00SJeff Kirsher /** 2876f7917c00SJeff Kirsher * calc_tx_flits_ofld - calculate # of flits for an offload packet 2877f7917c00SJeff Kirsher * @skb: the packet 2878f7917c00SJeff Kirsher * 2879f7917c00SJeff Kirsher * Returns the number of flits needed for the given offload packet. 2880f7917c00SJeff Kirsher * These packets are already fully constructed and no additional headers 2881f7917c00SJeff Kirsher * will be added. 2882f7917c00SJeff Kirsher */ 2883f7917c00SJeff Kirsher static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) 2884f7917c00SJeff Kirsher { 2885f7917c00SJeff Kirsher unsigned int flits, cnt; 2886f7917c00SJeff Kirsher 2887f7917c00SJeff Kirsher if (is_ofld_imm(skb)) 2888f7917c00SJeff Kirsher return DIV_ROUND_UP(skb->len, 8); 2889f7917c00SJeff Kirsher 2890f7917c00SJeff Kirsher flits = skb_transport_offset(skb) / 8U; /* headers */ 2891f7917c00SJeff Kirsher cnt = skb_shinfo(skb)->nr_frags; 289215dd16c2SLi RongQing if (skb_tail_pointer(skb) != skb_transport_header(skb)) 2893f7917c00SJeff Kirsher cnt++; 2894f7917c00SJeff Kirsher return flits + sgl_len(cnt); 2895f7917c00SJeff Kirsher } 2896f7917c00SJeff Kirsher 2897f7917c00SJeff Kirsher /** 2898f7917c00SJeff Kirsher * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion 2899f7917c00SJeff Kirsher * @q: the queue to stop 2900f7917c00SJeff Kirsher * 2901f7917c00SJeff Kirsher * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting 2902f7917c00SJeff Kirsher * inability to map packets. A periodic timer attempts to restart 2903f7917c00SJeff Kirsher * queues so marked. 2904f7917c00SJeff Kirsher */ 2905ab677ff4SHariprasad Shenai static void txq_stop_maperr(struct sge_uld_txq *q) 2906f7917c00SJeff Kirsher { 2907f7917c00SJeff Kirsher q->mapping_err++; 2908f7917c00SJeff Kirsher q->q.stops++; 2909f7917c00SJeff Kirsher set_bit(q->q.cntxt_id - q->adap->sge.egr_start, 2910f7917c00SJeff Kirsher q->adap->sge.txq_maperr); 2911f7917c00SJeff Kirsher } 2912f7917c00SJeff Kirsher 2913f7917c00SJeff Kirsher /** 2914f7917c00SJeff Kirsher * ofldtxq_stop - stop an offload Tx queue that has become full 2915f7917c00SJeff Kirsher * @q: the queue to stop 2916e383f248SAtul Gupta * @wr: the Work Request causing the queue to become full 2917f7917c00SJeff Kirsher * 2918f7917c00SJeff Kirsher * Stops an offload Tx queue that has become full and modifies the packet 2919f7917c00SJeff Kirsher * being written to request a wakeup. 
2920f7917c00SJeff Kirsher */ 2921e383f248SAtul Gupta static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr) 2922f7917c00SJeff Kirsher { 2923e2ac9628SHariprasad Shenai wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); 2924f7917c00SJeff Kirsher q->q.stops++; 2925f7917c00SJeff Kirsher q->full = 1; 2926f7917c00SJeff Kirsher } 2927f7917c00SJeff Kirsher 2928f7917c00SJeff Kirsher /** 2929126fca64SHariprasad Shenai * service_ofldq - service/restart a suspended offload queue 2930f7917c00SJeff Kirsher * @q: the offload queue 2931f7917c00SJeff Kirsher * 2932126fca64SHariprasad Shenai * Services an offload Tx queue by moving packets from its Pending Send 2933126fca64SHariprasad Shenai * Queue to the Hardware TX ring. The function starts and ends with the 2934126fca64SHariprasad Shenai * Send Queue locked, but drops the lock while putting the skb at the 2935126fca64SHariprasad Shenai * head of the Send Queue onto the Hardware TX Ring. Dropping the lock 2936126fca64SHariprasad Shenai * allows more skbs to be added to the Send Queue by other threads. 2937126fca64SHariprasad Shenai * The packet being processed at the head of the Pending Send Queue is 2938126fca64SHariprasad Shenai * left on the queue in case we experience DMA Mapping errors, etc. 2939126fca64SHariprasad Shenai * and need to give up and restart later. 2940126fca64SHariprasad Shenai * 2941126fca64SHariprasad Shenai * service_ofldq() can be thought of as a task which opportunistically 2942126fca64SHariprasad Shenai * uses other threads execution contexts. We use the Offload Queue 2943126fca64SHariprasad Shenai * boolean "service_ofldq_running" to make sure that only one instance 2944126fca64SHariprasad Shenai * is ever running at a time ... 2945f7917c00SJeff Kirsher */ 2946ab677ff4SHariprasad Shenai static void service_ofldq(struct sge_uld_txq *q) 2947cae9566aSJules Irenge __must_hold(&q->sendq.lock) 2948f7917c00SJeff Kirsher { 29498d0557d2SHariprasad Shenai u64 *pos, *before, *end; 2950f7917c00SJeff Kirsher int credits; 2951f7917c00SJeff Kirsher struct sk_buff *skb; 29528d0557d2SHariprasad Shenai struct sge_txq *txq; 29538d0557d2SHariprasad Shenai unsigned int left; 2954f7917c00SJeff Kirsher unsigned int written = 0; 2955f7917c00SJeff Kirsher unsigned int flits, ndesc; 2956f7917c00SJeff Kirsher 2957126fca64SHariprasad Shenai /* If another thread is currently in service_ofldq() processing the 2958126fca64SHariprasad Shenai * Pending Send Queue then there's nothing to do. Otherwise, flag 2959126fca64SHariprasad Shenai * that we're doing the work and continue. Examining/modifying 2960126fca64SHariprasad Shenai * the Offload Queue boolean "service_ofldq_running" must be done 2961126fca64SHariprasad Shenai * while holding the Pending Send Queue Lock. 2962126fca64SHariprasad Shenai */ 2963126fca64SHariprasad Shenai if (q->service_ofldq_running) 2964126fca64SHariprasad Shenai return; 2965126fca64SHariprasad Shenai q->service_ofldq_running = true; 2966126fca64SHariprasad Shenai 2967f7917c00SJeff Kirsher while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { 2968126fca64SHariprasad Shenai /* We drop the lock while we're working with the skb at the 2969126fca64SHariprasad Shenai * head of the Pending Send Queue. This allows more skbs to 2970126fca64SHariprasad Shenai * be added to the Pending Send Queue while we're working on 2971126fca64SHariprasad Shenai * this one. 
We don't need to lock to guard the TX Ring 2972126fca64SHariprasad Shenai * updates because only one thread of execution is ever 2973126fca64SHariprasad Shenai * allowed into service_ofldq() at a time. 2974f7917c00SJeff Kirsher */ 2975f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 2976f7917c00SJeff Kirsher 2977a6ec572bSAtul Gupta cxgb4_reclaim_completed_tx(q->adap, &q->q, false); 2978f7917c00SJeff Kirsher 2979f7917c00SJeff Kirsher flits = skb->priority; /* previously saved */ 2980f7917c00SJeff Kirsher ndesc = flits_to_desc(flits); 2981f7917c00SJeff Kirsher credits = txq_avail(&q->q) - ndesc; 2982f7917c00SJeff Kirsher BUG_ON(credits < 0); 2983f7917c00SJeff Kirsher if (unlikely(credits < TXQ_STOP_THRES)) 2984e383f248SAtul Gupta ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data); 2985f7917c00SJeff Kirsher 2986f7917c00SJeff Kirsher pos = (u64 *)&q->q.desc[q->q.pidx]; 2987f7917c00SJeff Kirsher if (is_ofld_imm(skb)) 2988a6ec572bSAtul Gupta cxgb4_inline_tx_skb(skb, &q->q, pos); 2989a6ec572bSAtul Gupta else if (cxgb4_map_skb(q->adap->pdev_dev, skb, 2990f7917c00SJeff Kirsher (dma_addr_t *)skb->head)) { 2991f7917c00SJeff Kirsher txq_stop_maperr(q); 2992f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 2993f7917c00SJeff Kirsher break; 2994f7917c00SJeff Kirsher } else { 2995f7917c00SJeff Kirsher int last_desc, hdr_len = skb_transport_offset(skb); 2996f7917c00SJeff Kirsher 29978d0557d2SHariprasad Shenai /* The WR headers may not fit within one descriptor. 29988d0557d2SHariprasad Shenai * So we need to deal with wrap-around here. 29998d0557d2SHariprasad Shenai */ 30008d0557d2SHariprasad Shenai before = (u64 *)pos; 30018d0557d2SHariprasad Shenai end = (u64 *)pos + flits; 30028d0557d2SHariprasad Shenai txq = &q->q; 30038d0557d2SHariprasad Shenai pos = (void *)inline_tx_skb_header(skb, &q->q, 30048d0557d2SHariprasad Shenai (void *)pos, 30058d0557d2SHariprasad Shenai hdr_len); 30068d0557d2SHariprasad Shenai if (before > (u64 *)pos) { 30078d0557d2SHariprasad Shenai left = (u8 *)end - (u8 *)txq->stat; 30088d0557d2SHariprasad Shenai end = (void *)txq->desc + left; 30098d0557d2SHariprasad Shenai } 30108d0557d2SHariprasad Shenai 30118d0557d2SHariprasad Shenai /* If current position is already at the end of the 30128d0557d2SHariprasad Shenai * ofld queue, reset the current to point to 30138d0557d2SHariprasad Shenai * start of the queue and update the end ptr as well. 
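 * (txq->stat sits just past the last descriptor in the ring, so
 * pos == (u64 *)txq->stat means the write position has run off the
 * end of the ring and must wrap back to txq->desc.)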
30148d0557d2SHariprasad Shenai */ 30158d0557d2SHariprasad Shenai if (pos == (u64 *)txq->stat) { 30168d0557d2SHariprasad Shenai left = (u8 *)end - (u8 *)txq->stat; 30178d0557d2SHariprasad Shenai end = (void *)txq->desc + left; 30188d0557d2SHariprasad Shenai pos = (void *)txq->desc; 30198d0557d2SHariprasad Shenai } 30208d0557d2SHariprasad Shenai 3021a6ec572bSAtul Gupta cxgb4_write_sgl(skb, &q->q, (void *)pos, 30228d0557d2SHariprasad Shenai end, hdr_len, 3023f7917c00SJeff Kirsher (dma_addr_t *)skb->head); 3024f7917c00SJeff Kirsher #ifdef CONFIG_NEED_DMA_MAP_STATE 3025f7917c00SJeff Kirsher skb->dev = q->adap->port[0]; 3026f7917c00SJeff Kirsher skb->destructor = deferred_unmap_destructor; 3027f7917c00SJeff Kirsher #endif 3028f7917c00SJeff Kirsher last_desc = q->q.pidx + ndesc - 1; 3029f7917c00SJeff Kirsher if (last_desc >= q->q.size) 3030f7917c00SJeff Kirsher last_desc -= q->q.size; 3031f7917c00SJeff Kirsher q->q.sdesc[last_desc].skb = skb; 3032f7917c00SJeff Kirsher } 3033f7917c00SJeff Kirsher 3034f7917c00SJeff Kirsher txq_advance(&q->q, ndesc); 3035f7917c00SJeff Kirsher written += ndesc; 3036f7917c00SJeff Kirsher if (unlikely(written > 32)) { 3037a6ec572bSAtul Gupta cxgb4_ring_tx_db(q->adap, &q->q, written); 3038f7917c00SJeff Kirsher written = 0; 3039f7917c00SJeff Kirsher } 3040f7917c00SJeff Kirsher 3041126fca64SHariprasad Shenai /* Reacquire the Pending Send Queue Lock so we can unlink the 3042126fca64SHariprasad Shenai * skb we've just successfully transferred to the TX Ring and 3043126fca64SHariprasad Shenai * loop for the next skb which may be at the head of the 3044126fca64SHariprasad Shenai * Pending Send Queue. 3045126fca64SHariprasad Shenai */ 3046f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 3047f7917c00SJeff Kirsher __skb_unlink(skb, &q->sendq); 3048f7917c00SJeff Kirsher if (is_ofld_imm(skb)) 3049f7917c00SJeff Kirsher kfree_skb(skb); 3050f7917c00SJeff Kirsher } 3051f7917c00SJeff Kirsher if (likely(written)) 3052a6ec572bSAtul Gupta cxgb4_ring_tx_db(q->adap, &q->q, written); 3053126fca64SHariprasad Shenai 3054126fca64SHariprasad Shenai /*Indicate that no thread is processing the Pending Send Queue 3055126fca64SHariprasad Shenai * currently. 3056126fca64SHariprasad Shenai */ 3057126fca64SHariprasad Shenai q->service_ofldq_running = false; 3058f7917c00SJeff Kirsher } 3059f7917c00SJeff Kirsher 3060f7917c00SJeff Kirsher /** 3061f7917c00SJeff Kirsher * ofld_xmit - send a packet through an offload queue 3062f7917c00SJeff Kirsher * @q: the Tx offload queue 3063f7917c00SJeff Kirsher * @skb: the packet 3064f7917c00SJeff Kirsher * 3065f7917c00SJeff Kirsher * Send an offload packet through an SGE offload queue. 3066f7917c00SJeff Kirsher */ 3067ab677ff4SHariprasad Shenai static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb) 3068f7917c00SJeff Kirsher { 3069f7917c00SJeff Kirsher skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ 3070f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 3071126fca64SHariprasad Shenai 3072126fca64SHariprasad Shenai /* Queue the new skb onto the Offload Queue's Pending Send Queue. If 3073126fca64SHariprasad Shenai * that results in this new skb being the only one on the queue, start 3074126fca64SHariprasad Shenai * servicing it. If there are other skbs already on the list, then 3075126fca64SHariprasad Shenai * either the queue is currently being processed or it's been stopped 3076126fca64SHariprasad Shenai * for some reason and it'll be restarted at a later time. 
Restart 3077126fca64SHariprasad Shenai * paths are triggered by events like experiencing a DMA Mapping Error 3078126fca64SHariprasad Shenai * or filling the Hardware TX Ring. 3079126fca64SHariprasad Shenai */ 3080f7917c00SJeff Kirsher __skb_queue_tail(&q->sendq, skb); 3081f7917c00SJeff Kirsher if (q->sendq.qlen == 1) 3082f7917c00SJeff Kirsher service_ofldq(q); 3083126fca64SHariprasad Shenai 3084f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 3085f7917c00SJeff Kirsher return NET_XMIT_SUCCESS; 3086f7917c00SJeff Kirsher } 3087f7917c00SJeff Kirsher 3088f7917c00SJeff Kirsher /** 3089f7917c00SJeff Kirsher * restart_ofldq - restart a suspended offload queue 30906660de07SAllen Pais * @t: pointer to the tasklet associated with this handler 3091f7917c00SJeff Kirsher * 3092f7917c00SJeff Kirsher * Resumes transmission on a suspended Tx offload queue. 3093f7917c00SJeff Kirsher */ 30946660de07SAllen Pais static void restart_ofldq(struct tasklet_struct *t) 3095f7917c00SJeff Kirsher { 30966660de07SAllen Pais struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk); 3097f7917c00SJeff Kirsher 3098f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 3099f7917c00SJeff Kirsher q->full = 0; /* the queue actually is completely empty now */ 3100f7917c00SJeff Kirsher service_ofldq(q); 3101f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 3102f7917c00SJeff Kirsher } 3103f7917c00SJeff Kirsher 3104f7917c00SJeff Kirsher /** 3105f7917c00SJeff Kirsher * skb_txq - return the Tx queue an offload packet should use 3106f7917c00SJeff Kirsher * @skb: the packet 3107f7917c00SJeff Kirsher * 3108f7917c00SJeff Kirsher * Returns the Tx queue an offload packet should use as indicated by bits 3109f7917c00SJeff Kirsher * 1-15 in the packet's queue_mapping. 3110f7917c00SJeff Kirsher */ 3111f7917c00SJeff Kirsher static inline unsigned int skb_txq(const struct sk_buff *skb) 3112f7917c00SJeff Kirsher { 3113f7917c00SJeff Kirsher return skb->queue_mapping >> 1; 3114f7917c00SJeff Kirsher } 3115f7917c00SJeff Kirsher 3116f7917c00SJeff Kirsher /** 3117f7917c00SJeff Kirsher * is_ctrl_pkt - return whether an offload packet is a control packet 3118f7917c00SJeff Kirsher * @skb: the packet 3119f7917c00SJeff Kirsher * 3120f7917c00SJeff Kirsher * Returns whether an offload packet should use an OFLD or a CTRL 3121f7917c00SJeff Kirsher * Tx queue as indicated by bit 0 in the packet's queue_mapping. 
3122f7917c00SJeff Kirsher */ 3123f7917c00SJeff Kirsher static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) 3124f7917c00SJeff Kirsher { 3125f7917c00SJeff Kirsher return skb->queue_mapping & 1; 3126f7917c00SJeff Kirsher } 3127f7917c00SJeff Kirsher 3128ab677ff4SHariprasad Shenai static inline int uld_send(struct adapter *adap, struct sk_buff *skb, 3129ab677ff4SHariprasad Shenai unsigned int tx_uld_type) 3130f7917c00SJeff Kirsher { 3131ab677ff4SHariprasad Shenai struct sge_uld_txq_info *txq_info; 3132ab677ff4SHariprasad Shenai struct sge_uld_txq *txq; 3133f7917c00SJeff Kirsher unsigned int idx = skb_txq(skb); 3134f7917c00SJeff Kirsher 31354fe44dd7SKumar Sanghvi if (unlikely(is_ctrl_pkt(skb))) { 31364fe44dd7SKumar Sanghvi /* Single ctrl queue is a requirement for LE workaround path */ 31374fe44dd7SKumar Sanghvi if (adap->tids.nsftids) 31384fe44dd7SKumar Sanghvi idx = 0; 3139f7917c00SJeff Kirsher return ctrl_xmit(&adap->sge.ctrlq[idx], skb); 31404fe44dd7SKumar Sanghvi } 31410d4b729dSArjun V 31420d4b729dSArjun V txq_info = adap->sge.uld_txq_info[tx_uld_type]; 31430d4b729dSArjun V if (unlikely(!txq_info)) { 31440d4b729dSArjun V WARN_ON(true); 3145e6827d1aSNavid Emamdoost kfree_skb(skb); 31460d4b729dSArjun V return NET_XMIT_DROP; 31470d4b729dSArjun V } 31480d4b729dSArjun V 31490d4b729dSArjun V txq = &txq_info->uldtxq[idx]; 3150ab677ff4SHariprasad Shenai return ofld_xmit(txq, skb); 3151f7917c00SJeff Kirsher } 3152f7917c00SJeff Kirsher 3153f7917c00SJeff Kirsher /** 3154f7917c00SJeff Kirsher * t4_ofld_send - send an offload packet 3155f7917c00SJeff Kirsher * @adap: the adapter 3156f7917c00SJeff Kirsher * @skb: the packet 3157f7917c00SJeff Kirsher * 3158f7917c00SJeff Kirsher * Sends an offload packet. We use the packet queue_mapping to select the 3159f7917c00SJeff Kirsher * appropriate Tx queue as follows: bit 0 indicates whether the packet 3160f7917c00SJeff Kirsher * should be sent as regular or control, bits 1-15 select the queue. 3161f7917c00SJeff Kirsher */ 3162f7917c00SJeff Kirsher int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) 3163f7917c00SJeff Kirsher { 3164f7917c00SJeff Kirsher int ret; 3165f7917c00SJeff Kirsher 3166f7917c00SJeff Kirsher local_bh_disable(); 3167ab677ff4SHariprasad Shenai ret = uld_send(adap, skb, CXGB4_TX_OFLD); 3168f7917c00SJeff Kirsher local_bh_enable(); 3169f7917c00SJeff Kirsher return ret; 3170f7917c00SJeff Kirsher } 3171f7917c00SJeff Kirsher 3172f7917c00SJeff Kirsher /** 3173f7917c00SJeff Kirsher * cxgb4_ofld_send - send an offload packet 3174f7917c00SJeff Kirsher * @dev: the net device 3175f7917c00SJeff Kirsher * @skb: the packet 3176f7917c00SJeff Kirsher * 3177f7917c00SJeff Kirsher * Sends an offload packet. This is an exported version of @t4_ofld_send, 3178f7917c00SJeff Kirsher * intended for ULDs. 
3179f7917c00SJeff Kirsher */ 3180f7917c00SJeff Kirsher int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) 3181f7917c00SJeff Kirsher { 3182f7917c00SJeff Kirsher return t4_ofld_send(netdev2adap(dev), skb); 3183f7917c00SJeff Kirsher } 3184f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb4_ofld_send); 3185f7917c00SJeff Kirsher 3186e383f248SAtul Gupta static void *inline_tx_header(const void *src, 3187e383f248SAtul Gupta const struct sge_txq *q, 3188e383f248SAtul Gupta void *pos, int length) 3189e383f248SAtul Gupta { 3190e383f248SAtul Gupta int left = (void *)q->stat - pos; 3191e383f248SAtul Gupta u64 *p; 3192e383f248SAtul Gupta 3193e383f248SAtul Gupta if (likely(length <= left)) { 3194e383f248SAtul Gupta memcpy(pos, src, length); 3195e383f248SAtul Gupta pos += length; 3196e383f248SAtul Gupta } else { 3197e383f248SAtul Gupta memcpy(pos, src, left); 3198e383f248SAtul Gupta memcpy(q->desc, src + left, length - left); 3199e383f248SAtul Gupta pos = (void *)q->desc + (length - left); 3200e383f248SAtul Gupta } 3201e383f248SAtul Gupta /* 0-pad to multiple of 16 */ 3202e383f248SAtul Gupta p = PTR_ALIGN(pos, 8); 3203e383f248SAtul Gupta if ((uintptr_t)p & 8) { 3204e383f248SAtul Gupta *p = 0; 3205e383f248SAtul Gupta return p + 1; 3206e383f248SAtul Gupta } 3207e383f248SAtul Gupta return p; 3208e383f248SAtul Gupta } 3209e383f248SAtul Gupta 3210e383f248SAtul Gupta /** 3211e383f248SAtul Gupta * ofld_xmit_direct - copy a WR into offload queue 3212e383f248SAtul Gupta * @q: the Tx offload queue 3213e383f248SAtul Gupta * @src: location of WR 3214e383f248SAtul Gupta * @len: WR length 3215e383f248SAtul Gupta * 3216e383f248SAtul Gupta * Copy an immediate WR into an uncontended SGE offload queue. 3217e383f248SAtul Gupta */ 3218e383f248SAtul Gupta static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src, 3219e383f248SAtul Gupta unsigned int len) 3220e383f248SAtul Gupta { 3221e383f248SAtul Gupta unsigned int ndesc; 3222e383f248SAtul Gupta int credits; 3223e383f248SAtul Gupta u64 *pos; 3224e383f248SAtul Gupta 3225e383f248SAtul Gupta /* Use the lower limit as the cut-off */ 3226e383f248SAtul Gupta if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) { 3227e383f248SAtul Gupta WARN_ON(1); 3228e383f248SAtul Gupta return NET_XMIT_DROP; 3229e383f248SAtul Gupta } 3230e383f248SAtul Gupta 3231e383f248SAtul Gupta /* Don't return NET_XMIT_CN here as the current 3232e383f248SAtul Gupta * implementation doesn't queue the request 3233e383f248SAtul Gupta * using an skb when the following conditions aren't met. 3234e383f248SAtul Gupta */ 3235e383f248SAtul Gupta if (!spin_trylock(&q->sendq.lock)) 3236e383f248SAtul Gupta return NET_XMIT_DROP; 3237e383f248SAtul Gupta 3238e383f248SAtul Gupta if (q->full || !skb_queue_empty(&q->sendq) || 3239e383f248SAtul Gupta q->service_ofldq_running) { 3240e383f248SAtul Gupta spin_unlock(&q->sendq.lock); 3241e383f248SAtul Gupta return NET_XMIT_DROP; 3242e383f248SAtul Gupta } 3243e383f248SAtul Gupta ndesc = flits_to_desc(DIV_ROUND_UP(len, 8)); 3244e383f248SAtul Gupta credits = txq_avail(&q->q) - ndesc; 3245e383f248SAtul Gupta pos = (u64 *)&q->q.desc[q->q.pidx]; 3246e383f248SAtul Gupta 3247e383f248SAtul Gupta /* ofldtxq_stop modifies WR header in-situ */ 3248e383f248SAtul Gupta inline_tx_header(src, &q->q, pos, len); 3249e383f248SAtul Gupta if (unlikely(credits < TXQ_STOP_THRES)) 3250e383f248SAtul Gupta ofldtxq_stop(q, (struct fw_wr_hdr *)pos); 3251e383f248SAtul Gupta txq_advance(&q->q, ndesc); 3252e383f248SAtul Gupta cxgb4_ring_tx_db(q->adap, &q->q, ndesc); 3253e383f248SAtul Gupta 3254e383f248SAtul Gupta
spin_unlock(&q->sendq.lock); 3255e383f248SAtul Gupta return NET_XMIT_SUCCESS; 3256e383f248SAtul Gupta } 3257e383f248SAtul Gupta 3258e383f248SAtul Gupta int cxgb4_immdata_send(struct net_device *dev, unsigned int idx, 3259e383f248SAtul Gupta const void *src, unsigned int len) 3260e383f248SAtul Gupta { 3261e383f248SAtul Gupta struct sge_uld_txq_info *txq_info; 3262e383f248SAtul Gupta struct sge_uld_txq *txq; 3263e383f248SAtul Gupta struct adapter *adap; 3264e383f248SAtul Gupta int ret; 3265e383f248SAtul Gupta 3266e383f248SAtul Gupta adap = netdev2adap(dev); 3267e383f248SAtul Gupta 3268e383f248SAtul Gupta local_bh_disable(); 3269e383f248SAtul Gupta txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; 3270e383f248SAtul Gupta if (unlikely(!txq_info)) { 3271e383f248SAtul Gupta WARN_ON(true); 3272e383f248SAtul Gupta local_bh_enable(); 3273e383f248SAtul Gupta return NET_XMIT_DROP; 3274e383f248SAtul Gupta } 3275e383f248SAtul Gupta txq = &txq_info->uldtxq[idx]; 3276e383f248SAtul Gupta 3277e383f248SAtul Gupta ret = ofld_xmit_direct(txq, src, len); 3278e383f248SAtul Gupta local_bh_enable(); 3279e383f248SAtul Gupta return net_xmit_eval(ret); 3280e383f248SAtul Gupta } 3281e383f248SAtul Gupta EXPORT_SYMBOL(cxgb4_immdata_send); 3282e383f248SAtul Gupta 3283ab677ff4SHariprasad Shenai /** 3284ab677ff4SHariprasad Shenai * t4_crypto_send - send crypto packet 3285ab677ff4SHariprasad Shenai * @adap: the adapter 3286ab677ff4SHariprasad Shenai * @skb: the packet 3287ab677ff4SHariprasad Shenai * 3288ab677ff4SHariprasad Shenai * Sends crypto packet. We use the packet queue_mapping to select the 3289ab677ff4SHariprasad Shenai * appropriate Tx queue as follows: bit 0 indicates whether the packet 3290ab677ff4SHariprasad Shenai * should be sent as regular or control, bits 1-15 select the queue. 3291ab677ff4SHariprasad Shenai */ 3292ab677ff4SHariprasad Shenai static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb) 3293ab677ff4SHariprasad Shenai { 3294ab677ff4SHariprasad Shenai int ret; 3295ab677ff4SHariprasad Shenai 3296ab677ff4SHariprasad Shenai local_bh_disable(); 3297ab677ff4SHariprasad Shenai ret = uld_send(adap, skb, CXGB4_TX_CRYPTO); 3298ab677ff4SHariprasad Shenai local_bh_enable(); 3299ab677ff4SHariprasad Shenai return ret; 3300ab677ff4SHariprasad Shenai } 3301ab677ff4SHariprasad Shenai 3302ab677ff4SHariprasad Shenai /** 3303ab677ff4SHariprasad Shenai * cxgb4_crypto_send - send crypto packet 3304ab677ff4SHariprasad Shenai * @dev: the net device 3305ab677ff4SHariprasad Shenai * @skb: the packet 3306ab677ff4SHariprasad Shenai * 3307ab677ff4SHariprasad Shenai * Sends crypto packet. This is an exported version of @t4_crypto_send, 3308ab677ff4SHariprasad Shenai * intended for ULDs. 
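 *
 * This path differs from cxgb4_ofld_send() only in the ULD Tx queue set
 * it targets (CXGB4_TX_CRYPTO rather than CXGB4_TX_OFLD); the same
 * queue_mapping convention, bit 0 for control and bits 1-15 for the
 * queue index, still applies.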
3309ab677ff4SHariprasad Shenai */ 3310ab677ff4SHariprasad Shenai int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb) 3311ab677ff4SHariprasad Shenai { 3312ab677ff4SHariprasad Shenai return t4_crypto_send(netdev2adap(dev), skb); 3313ab677ff4SHariprasad Shenai } 3314ab677ff4SHariprasad Shenai EXPORT_SYMBOL(cxgb4_crypto_send); 3315ab677ff4SHariprasad Shenai 3316e91b0f24SIan Campbell static inline void copy_frags(struct sk_buff *skb, 3317f7917c00SJeff Kirsher const struct pkt_gl *gl, unsigned int offset) 3318f7917c00SJeff Kirsher { 3319e91b0f24SIan Campbell int i; 3320f7917c00SJeff Kirsher 3321f7917c00SJeff Kirsher /* usually there's just one frag */ 3322e91b0f24SIan Campbell __skb_fill_page_desc(skb, 0, gl->frags[0].page, 3323e91b0f24SIan Campbell gl->frags[0].offset + offset, 3324e91b0f24SIan Campbell gl->frags[0].size - offset); 3325e91b0f24SIan Campbell skb_shinfo(skb)->nr_frags = gl->nfrags; 3326e91b0f24SIan Campbell for (i = 1; i < gl->nfrags; i++) 3327e91b0f24SIan Campbell __skb_fill_page_desc(skb, i, gl->frags[i].page, 3328e91b0f24SIan Campbell gl->frags[i].offset, 3329e91b0f24SIan Campbell gl->frags[i].size); 3330f7917c00SJeff Kirsher 3331f7917c00SJeff Kirsher /* get a reference to the last page, we don't own it */ 3332e91b0f24SIan Campbell get_page(gl->frags[gl->nfrags - 1].page); 3333f7917c00SJeff Kirsher } 3334f7917c00SJeff Kirsher 3335f7917c00SJeff Kirsher /** 3336f7917c00SJeff Kirsher * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list 3337f7917c00SJeff Kirsher * @gl: the gather list 3338f7917c00SJeff Kirsher * @skb_len: size of sk_buff main body if it carries fragments 3339f7917c00SJeff Kirsher * @pull_len: amount of data to move to the sk_buff's main body 3340f7917c00SJeff Kirsher * 3341f7917c00SJeff Kirsher * Builds an sk_buff from the given packet gather list. Returns the 3342f7917c00SJeff Kirsher * sk_buff or %NULL if sk_buff allocation failed. 3343f7917c00SJeff Kirsher */ 3344f7917c00SJeff Kirsher struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, 3345f7917c00SJeff Kirsher unsigned int skb_len, unsigned int pull_len) 3346f7917c00SJeff Kirsher { 3347f7917c00SJeff Kirsher struct sk_buff *skb; 3348f7917c00SJeff Kirsher 3349f7917c00SJeff Kirsher /* 3350f7917c00SJeff Kirsher * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer 3351f7917c00SJeff Kirsher * size, which is expected since buffers are at least PAGE_SIZEd. 3352f7917c00SJeff Kirsher * In this case packets up to RX_COPY_THRES have only one fragment. 
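 *
 * As a worked example with the defaults above: a 192-byte packet
 * (<= RX_COPY_THRES) is copied wholesale into a freshly allocated linear
 * skb, whereas a 1400-byte packet passed skb_len = RX_PKT_SKB_LEN and
 * pull_len = RX_PULL_LEN ends up with 128 bytes in the linear area and
 * the remaining 1272 bytes attached as page fragments.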
3353f7917c00SJeff Kirsher */ 3354f7917c00SJeff Kirsher if (gl->tot_len <= RX_COPY_THRES) { 3355f7917c00SJeff Kirsher skb = dev_alloc_skb(gl->tot_len); 3356f7917c00SJeff Kirsher if (unlikely(!skb)) 3357f7917c00SJeff Kirsher goto out; 3358f7917c00SJeff Kirsher __skb_put(skb, gl->tot_len); 3359f7917c00SJeff Kirsher skb_copy_to_linear_data(skb, gl->va, gl->tot_len); 3360f7917c00SJeff Kirsher } else { 3361f7917c00SJeff Kirsher skb = dev_alloc_skb(skb_len); 3362f7917c00SJeff Kirsher if (unlikely(!skb)) 3363f7917c00SJeff Kirsher goto out; 3364f7917c00SJeff Kirsher __skb_put(skb, pull_len); 3365f7917c00SJeff Kirsher skb_copy_to_linear_data(skb, gl->va, pull_len); 3366f7917c00SJeff Kirsher 3367e91b0f24SIan Campbell copy_frags(skb, gl, pull_len); 3368f7917c00SJeff Kirsher skb->len = gl->tot_len; 3369f7917c00SJeff Kirsher skb->data_len = skb->len - pull_len; 3370f7917c00SJeff Kirsher skb->truesize += skb->data_len; 3371f7917c00SJeff Kirsher } 3372f7917c00SJeff Kirsher out: return skb; 3373f7917c00SJeff Kirsher } 3374f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb4_pktgl_to_skb); 3375f7917c00SJeff Kirsher 3376f7917c00SJeff Kirsher /** 3377f7917c00SJeff Kirsher * t4_pktgl_free - free a packet gather list 3378f7917c00SJeff Kirsher * @gl: the gather list 3379f7917c00SJeff Kirsher * 3380f7917c00SJeff Kirsher * Releases the pages of a packet gather list. We do not own the last 3381f7917c00SJeff Kirsher * page on the list and do not free it. 3382f7917c00SJeff Kirsher */ 3383f7917c00SJeff Kirsher static void t4_pktgl_free(const struct pkt_gl *gl) 3384f7917c00SJeff Kirsher { 3385f7917c00SJeff Kirsher int n; 3386e91b0f24SIan Campbell const struct page_frag *p; 3387f7917c00SJeff Kirsher 3388f7917c00SJeff Kirsher for (p = gl->frags, n = gl->nfrags - 1; n--; p++) 3389f7917c00SJeff Kirsher put_page(p->page); 3390f7917c00SJeff Kirsher } 3391f7917c00SJeff Kirsher 3392f7917c00SJeff Kirsher /* 3393f7917c00SJeff Kirsher * Process an MPS trace packet. Give it an unused protocol number so it won't 3394f7917c00SJeff Kirsher * be delivered to anyone and send it to the stack for capture. 
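 * The bogus protocol value (0xffff) keeps real protocol handlers from
 * claiming the frame, while delivering it via netif_receive_skb() on
 * port 0 still lets packet taps such as tcpdump observe it.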
3395f7917c00SJeff Kirsher */ 3396f7917c00SJeff Kirsher static noinline int handle_trace_pkt(struct adapter *adap, 3397f7917c00SJeff Kirsher const struct pkt_gl *gl) 3398f7917c00SJeff Kirsher { 3399f7917c00SJeff Kirsher struct sk_buff *skb; 3400f7917c00SJeff Kirsher 3401f7917c00SJeff Kirsher skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); 3402f7917c00SJeff Kirsher if (unlikely(!skb)) { 3403f7917c00SJeff Kirsher t4_pktgl_free(gl); 3404f7917c00SJeff Kirsher return 0; 3405f7917c00SJeff Kirsher } 3406f7917c00SJeff Kirsher 3407d14807ddSHariprasad Shenai if (is_t4(adap->params.chip)) 34080a57a536SSantosh Rastapur __skb_pull(skb, sizeof(struct cpl_trace_pkt)); 34090a57a536SSantosh Rastapur else 34100a57a536SSantosh Rastapur __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); 34110a57a536SSantosh Rastapur 3412f7917c00SJeff Kirsher skb_reset_mac_header(skb); 3413f7917c00SJeff Kirsher skb->protocol = htons(0xffff); 3414f7917c00SJeff Kirsher skb->dev = adap->port[0]; 3415f7917c00SJeff Kirsher netif_receive_skb(skb); 3416f7917c00SJeff Kirsher return 0; 3417f7917c00SJeff Kirsher } 3418f7917c00SJeff Kirsher 34195e2a5ebcSHariprasad Shenai /** 34205e2a5ebcSHariprasad Shenai * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp 34215e2a5ebcSHariprasad Shenai * @adap: the adapter 34225e2a5ebcSHariprasad Shenai * @hwtstamps: time stamp structure to update 34235e2a5ebcSHariprasad Shenai * @sgetstamp: 60bit iqe timestamp 34245e2a5ebcSHariprasad Shenai * 34255e2a5ebcSHariprasad Shenai * Every ingress queue entry has the 60-bit timestamp, convert that timestamp 34265e2a5ebcSHariprasad Shenai * which is in Core Clock ticks into ktime_t and assign it 34275e2a5ebcSHariprasad Shenai **/ 34285e2a5ebcSHariprasad Shenai static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap, 34295e2a5ebcSHariprasad Shenai struct skb_shared_hwtstamps *hwtstamps, 34305e2a5ebcSHariprasad Shenai u64 sgetstamp) 34315e2a5ebcSHariprasad Shenai { 34325e2a5ebcSHariprasad Shenai u64 ns; 34335e2a5ebcSHariprasad Shenai u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2); 34345e2a5ebcSHariprasad Shenai 34355e2a5ebcSHariprasad Shenai ns = div_u64(tmp, adap->params.vpd.cclk); 34365e2a5ebcSHariprasad Shenai 34375e2a5ebcSHariprasad Shenai memset(hwtstamps, 0, sizeof(*hwtstamps)); 34385e2a5ebcSHariprasad Shenai hwtstamps->hwtstamp = ns_to_ktime(ns); 34395e2a5ebcSHariprasad Shenai } 34405e2a5ebcSHariprasad Shenai 3441f7917c00SJeff Kirsher static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, 3442c50ae55eSGanesh Goudar const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len) 3443f7917c00SJeff Kirsher { 344452367a76SVipul Pandya struct adapter *adapter = rxq->rspq.adap; 344552367a76SVipul Pandya struct sge *s = &adapter->sge; 34465e2a5ebcSHariprasad Shenai struct port_info *pi; 3447f7917c00SJeff Kirsher int ret; 3448f7917c00SJeff Kirsher struct sk_buff *skb; 3449f7917c00SJeff Kirsher 3450f7917c00SJeff Kirsher skb = napi_get_frags(&rxq->rspq.napi); 3451f7917c00SJeff Kirsher if (unlikely(!skb)) { 3452f7917c00SJeff Kirsher t4_pktgl_free(gl); 3453f7917c00SJeff Kirsher rxq->stats.rx_drops++; 3454f7917c00SJeff Kirsher return; 3455f7917c00SJeff Kirsher } 3456f7917c00SJeff Kirsher 345752367a76SVipul Pandya copy_frags(skb, gl, s->pktshift); 3458c50ae55eSGanesh Goudar if (tnl_hdr_len) 3459c50ae55eSGanesh Goudar skb->csum_level = 1; 346052367a76SVipul Pandya skb->len = gl->tot_len - s->pktshift; 3461f7917c00SJeff Kirsher skb->data_len = skb->len; 3462f7917c00SJeff Kirsher skb->truesize += skb->data_len; 
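	/* do_gro() is only reached from t4_ethrx_handler() below when the
	 * RX_PKT header reported a good checksum (csum_ok), so the skb can
	 * be marked as already verified before it is handed to GRO.
	 */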
3463f7917c00SJeff Kirsher skb->ip_summed = CHECKSUM_UNNECESSARY; 3464f7917c00SJeff Kirsher skb_record_rx_queue(skb, rxq->rspq.idx); 34655e2a5ebcSHariprasad Shenai pi = netdev_priv(skb->dev); 34665e2a5ebcSHariprasad Shenai if (pi->rxtstamp) 34675e2a5ebcSHariprasad Shenai cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb), 34685e2a5ebcSHariprasad Shenai gl->sgetstamp); 3469f7917c00SJeff Kirsher if (rxq->rspq.netdev->features & NETIF_F_RXHASH) 34708264989cSTom Herbert skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, 34718264989cSTom Herbert PKT_HASH_TYPE_L3); 3472f7917c00SJeff Kirsher 3473f7917c00SJeff Kirsher if (unlikely(pkt->vlan_ex)) { 347486a9bad3SPatrick McHardy __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); 3475f7917c00SJeff Kirsher rxq->stats.vlan_ex++; 3476f7917c00SJeff Kirsher } 3477f7917c00SJeff Kirsher ret = napi_gro_frags(&rxq->rspq.napi); 3478f7917c00SJeff Kirsher if (ret == GRO_HELD) 3479f7917c00SJeff Kirsher rxq->stats.lro_pkts++; 3480f7917c00SJeff Kirsher else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) 3481f7917c00SJeff Kirsher rxq->stats.lro_merged++; 3482f7917c00SJeff Kirsher rxq->stats.pkts++; 3483f7917c00SJeff Kirsher rxq->stats.rx_cso++; 3484f7917c00SJeff Kirsher } 3485f7917c00SJeff Kirsher 3486a4569504SAtul Gupta enum { 3487a4569504SAtul Gupta RX_NON_PTP_PKT = 0, 3488a4569504SAtul Gupta RX_PTP_PKT_SUC = 1, 3489a4569504SAtul Gupta RX_PTP_PKT_ERR = 2 3490a4569504SAtul Gupta }; 3491a4569504SAtul Gupta 3492a4569504SAtul Gupta /** 3493a4569504SAtul Gupta * t4_systim_to_hwstamp - read hardware time stamp 349429bbf5d7SRahul Lakkireddy * @adapter: the adapter 3495a4569504SAtul Gupta * @skb: the packet 3496a4569504SAtul Gupta * 3497a4569504SAtul Gupta * Read Time Stamp from MPS packet and insert in skb which 3498a4569504SAtul Gupta * is forwarded to PTP application 3499a4569504SAtul Gupta */ 3500a4569504SAtul Gupta static noinline int t4_systim_to_hwstamp(struct adapter *adapter, 3501a4569504SAtul Gupta struct sk_buff *skb) 3502a4569504SAtul Gupta { 3503a4569504SAtul Gupta struct skb_shared_hwtstamps *hwtstamps; 3504a4569504SAtul Gupta struct cpl_rx_mps_pkt *cpl = NULL; 3505a4569504SAtul Gupta unsigned char *data; 3506a4569504SAtul Gupta int offset; 3507a4569504SAtul Gupta 3508a4569504SAtul Gupta cpl = (struct cpl_rx_mps_pkt *)skb->data; 3509a4569504SAtul Gupta if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) & 3510a4569504SAtul Gupta X_CPL_RX_MPS_PKT_TYPE_PTP)) 3511a4569504SAtul Gupta return RX_PTP_PKT_ERR; 3512a4569504SAtul Gupta 3513a4569504SAtul Gupta data = skb->data + sizeof(*cpl); 3514a4569504SAtul Gupta skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt)); 3515a4569504SAtul Gupta offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN; 3516a4569504SAtul Gupta if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short)) 3517a4569504SAtul Gupta return RX_PTP_PKT_ERR; 3518a4569504SAtul Gupta 3519a4569504SAtul Gupta hwtstamps = skb_hwtstamps(skb); 3520a4569504SAtul Gupta memset(hwtstamps, 0, sizeof(*hwtstamps)); 3521589b1c9cSRahul Lakkireddy hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data)); 3522a4569504SAtul Gupta 3523a4569504SAtul Gupta return RX_PTP_PKT_SUC; 3524a4569504SAtul Gupta } 3525a4569504SAtul Gupta 3526a4569504SAtul Gupta /** 3527a4569504SAtul Gupta * t4_rx_hststamp - Recv PTP Event Message 352829bbf5d7SRahul Lakkireddy * @adapter: the adapter 3529a4569504SAtul Gupta * @rsp: the response queue descriptor holding the RX_PKT message 353029bbf5d7SRahul Lakkireddy * @rxq: the response queue holding the RX_PKT 
message 3531a4569504SAtul Gupta * @skb: the packet 3532a4569504SAtul Gupta * 3533a4569504SAtul Gupta * PTP enabled and MPS packet, read HW timestamp 3534a4569504SAtul Gupta */ 3535a4569504SAtul Gupta static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp, 3536a4569504SAtul Gupta struct sge_eth_rxq *rxq, struct sk_buff *skb) 3537a4569504SAtul Gupta { 3538a4569504SAtul Gupta int ret; 3539a4569504SAtul Gupta 3540a4569504SAtul Gupta if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) && 3541a4569504SAtul Gupta !is_t4(adapter->params.chip))) { 3542a4569504SAtul Gupta ret = t4_systim_to_hwstamp(adapter, skb); 3543a4569504SAtul Gupta if (ret == RX_PTP_PKT_ERR) { 3544a4569504SAtul Gupta kfree_skb(skb); 3545a4569504SAtul Gupta rxq->stats.rx_drops++; 3546a4569504SAtul Gupta } 3547a4569504SAtul Gupta return ret; 3548a4569504SAtul Gupta } 3549a4569504SAtul Gupta return RX_NON_PTP_PKT; 3550a4569504SAtul Gupta } 3551a4569504SAtul Gupta 3552a4569504SAtul Gupta /** 3553a4569504SAtul Gupta * t4_tx_hststamp - Loopback PTP Transmit Event Message 355429bbf5d7SRahul Lakkireddy * @adapter: the adapter 3555a4569504SAtul Gupta * @skb: the packet 3556a4569504SAtul Gupta * @dev: the ingress net device 3557a4569504SAtul Gupta * 3558a4569504SAtul Gupta * Read hardware timestamp for the loopback PTP Tx event message 3559a4569504SAtul Gupta */ 3560a4569504SAtul Gupta static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb, 3561a4569504SAtul Gupta struct net_device *dev) 3562a4569504SAtul Gupta { 3563a4569504SAtul Gupta struct port_info *pi = netdev_priv(dev); 3564a4569504SAtul Gupta 3565a4569504SAtul Gupta if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) { 3566a4569504SAtul Gupta cxgb4_ptp_read_hwstamp(adapter, pi); 3567a4569504SAtul Gupta kfree_skb(skb); 3568a4569504SAtul Gupta return 0; 3569a4569504SAtul Gupta } 3570a4569504SAtul Gupta return 1; 3571a4569504SAtul Gupta } 3572a4569504SAtul Gupta 3573f7917c00SJeff Kirsher /** 3574d429005fSVishal Kulkarni * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages 3575d429005fSVishal Kulkarni * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue 3576d429005fSVishal Kulkarni * @rsp: Response Entry pointer into Response Queue 3577d429005fSVishal Kulkarni * @gl: Gather List pointer 3578d429005fSVishal Kulkarni * 3579d429005fSVishal Kulkarni * For adapters which support the SGE Doorbell Queue Timer facility, 3580d429005fSVishal Kulkarni * we configure the Ethernet TX Queues to send CIDX Updates to the 3581d429005fSVishal Kulkarni * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE 3582d429005fSVishal Kulkarni * messages. This adds a small load to PCIe Link RX bandwidth and, 3583d429005fSVishal Kulkarni * potentially, higher CPU Interrupt load, but allows us to respond 3584d429005fSVishal Kulkarni * much more quickly to the CIDX Updates. This is important for 3585d429005fSVishal Kulkarni * Upper Layer Software which isn't willing to have a large amount 3586d429005fSVishal Kulkarni * of TX Data outstanding before receiving DMA Completions. 
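 *
 * The handler below simply locates the Ethernet TX queue paired with
 * this RX queue and calls t4_sge_eth_txq_egress_update() on it, so
 * descriptor reclaim goes through the same code for both the
 * timer-driven and the CIDX-Update-driven paths.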
3587d429005fSVishal Kulkarni */ 3588d429005fSVishal Kulkarni static void t4_tx_completion_handler(struct sge_rspq *rspq, 3589d429005fSVishal Kulkarni const __be64 *rsp, 3590d429005fSVishal Kulkarni const struct pkt_gl *gl) 3591d429005fSVishal Kulkarni { 3592d429005fSVishal Kulkarni u8 opcode = ((const struct rss_header *)rsp)->opcode; 3593d429005fSVishal Kulkarni struct port_info *pi = netdev_priv(rspq->netdev); 3594d429005fSVishal Kulkarni struct adapter *adapter = rspq->adap; 3595d429005fSVishal Kulkarni struct sge *s = &adapter->sge; 3596d429005fSVishal Kulkarni struct sge_eth_txq *txq; 3597d429005fSVishal Kulkarni 3598d429005fSVishal Kulkarni /* skip RSS header */ 3599d429005fSVishal Kulkarni rsp++; 3600d429005fSVishal Kulkarni 3601d429005fSVishal Kulkarni /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. 3602d429005fSVishal Kulkarni */ 3603d429005fSVishal Kulkarni if (unlikely(opcode == CPL_FW4_MSG && 3604d429005fSVishal Kulkarni ((const struct cpl_fw4_msg *)rsp)->type == 3605d429005fSVishal Kulkarni FW_TYPE_RSSCPL)) { 3606d429005fSVishal Kulkarni rsp++; 3607d429005fSVishal Kulkarni opcode = ((const struct rss_header *)rsp)->opcode; 3608d429005fSVishal Kulkarni rsp++; 3609d429005fSVishal Kulkarni } 3610d429005fSVishal Kulkarni 3611d429005fSVishal Kulkarni if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) { 3612d429005fSVishal Kulkarni pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n", 3613d429005fSVishal Kulkarni __func__, opcode); 3614d429005fSVishal Kulkarni return; 3615d429005fSVishal Kulkarni } 3616d429005fSVishal Kulkarni 3617d429005fSVishal Kulkarni txq = &s->ethtxq[pi->first_qset + rspq->idx]; 3618b660bccbSRaju Rangoju 3619b660bccbSRaju Rangoju /* We've got the Hardware Consumer Index Update in the Egress Update 3620b660bccbSRaju Rangoju * message. These Egress Update messages will be our sole CIDX Updates 3621b660bccbSRaju Rangoju * we get since we don't want to chew up PCIe bandwidth for both Ingress 3622b660bccbSRaju Rangoju * Messages and Status Page writes. However, the code which manages 3623b660bccbSRaju Rangoju * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value 3624b660bccbSRaju Rangoju * stored in the Status Page at the end of the TX Queue. It's easiest 3625b660bccbSRaju Rangoju * to simply copy the CIDX Update value from the Egress Update message 3626b660bccbSRaju Rangoju * to the Status Page. Also note that no Endian issues need to be 3627b660bccbSRaju Rangoju * considered here since both are Big Endian and we're just copying 3628b660bccbSRaju Rangoju * bytes consistently ...
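 * On T6 and later the explicit copy below is skipped (see the chip
 * version check); those chips are expected to keep the Status Page CIDX
 * current themselves, so only T4/T5 need the software copy.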
3629b660bccbSRaju Rangoju */ 3630b660bccbSRaju Rangoju if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) { 3631b660bccbSRaju Rangoju struct cpl_sge_egr_update *egr; 3632b660bccbSRaju Rangoju 3633b660bccbSRaju Rangoju egr = (struct cpl_sge_egr_update *)rsp; 3634b660bccbSRaju Rangoju WRITE_ONCE(txq->q.stat->cidx, egr->cidx); 3635b660bccbSRaju Rangoju } 3636b660bccbSRaju Rangoju 3637d429005fSVishal Kulkarni t4_sge_eth_txq_egress_update(adapter, txq, -1); 3638d429005fSVishal Kulkarni } 3639d429005fSVishal Kulkarni 36407235ffaeSVishal Kulkarni static int cxgb4_validate_lb_pkt(struct port_info *pi, const struct pkt_gl *si) 36417235ffaeSVishal Kulkarni { 36427235ffaeSVishal Kulkarni struct adapter *adap = pi->adapter; 36437235ffaeSVishal Kulkarni struct cxgb4_ethtool_lb_test *lb; 36447235ffaeSVishal Kulkarni struct sge *s = &adap->sge; 36457235ffaeSVishal Kulkarni struct net_device *netdev; 36467235ffaeSVishal Kulkarni u8 *data; 36477235ffaeSVishal Kulkarni int i; 36487235ffaeSVishal Kulkarni 36497235ffaeSVishal Kulkarni netdev = adap->port[pi->port_id]; 36507235ffaeSVishal Kulkarni lb = &pi->ethtool_lb; 36517235ffaeSVishal Kulkarni data = si->va + s->pktshift; 36527235ffaeSVishal Kulkarni 36537235ffaeSVishal Kulkarni i = ETH_ALEN; 36547235ffaeSVishal Kulkarni if (!ether_addr_equal(data + i, netdev->dev_addr)) 36557235ffaeSVishal Kulkarni return -1; 36567235ffaeSVishal Kulkarni 36577235ffaeSVishal Kulkarni i += ETH_ALEN; 36587235ffaeSVishal Kulkarni if (strcmp(&data[i], CXGB4_SELFTEST_LB_STR)) 36597235ffaeSVishal Kulkarni lb->result = -EIO; 36607235ffaeSVishal Kulkarni 36617235ffaeSVishal Kulkarni complete(&lb->completion); 36627235ffaeSVishal Kulkarni return 0; 36637235ffaeSVishal Kulkarni } 36647235ffaeSVishal Kulkarni 3665d429005fSVishal Kulkarni /** 3666f7917c00SJeff Kirsher * t4_ethrx_handler - process an ingress ethernet packet 3667f7917c00SJeff Kirsher * @q: the response queue that received the packet 3668f7917c00SJeff Kirsher * @rsp: the response queue descriptor holding the RX_PKT message 3669f7917c00SJeff Kirsher * @si: the gather list of packet fragments 3670f7917c00SJeff Kirsher * 3671f7917c00SJeff Kirsher * Process an ingress ethernet packet and deliver it to the stack. 3672f7917c00SJeff Kirsher */ 3673f7917c00SJeff Kirsher int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, 3674f7917c00SJeff Kirsher const struct pkt_gl *si) 3675f7917c00SJeff Kirsher { 3676f7917c00SJeff Kirsher bool csum_ok; 3677f7917c00SJeff Kirsher struct sk_buff *skb; 3678f7917c00SJeff Kirsher const struct cpl_rx_pkt *pkt; 3679f7917c00SJeff Kirsher struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 3680a4569504SAtul Gupta struct adapter *adapter = q->adap; 368152367a76SVipul Pandya struct sge *s = &q->adap->sge; 3682d14807ddSHariprasad Shenai int cpl_trace_pkt = is_t4(q->adap->params.chip) ? 36830a57a536SSantosh Rastapur CPL_TRACE_PKT : CPL_TRACE_PKT_T5; 3684c50ae55eSGanesh Goudar u16 err_vec, tnl_hdr_len = 0; 368584a200b3SVarun Prakash struct port_info *pi; 3686a4569504SAtul Gupta int ret = 0; 3687f7917c00SJeff Kirsher 36887235ffaeSVishal Kulkarni pi = netdev_priv(q->netdev); 3689d429005fSVishal Kulkarni /* If we're looking at TX Queue CIDX Update, handle that separately 3690d429005fSVishal Kulkarni * and return. 
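 * Such updates arrive as CPL_SGE_EGR_UPDATE messages, possibly wrapped
 * in a CPL_FW4_MSG, which is exactly what the opcode test below checks
 * for before diverting to t4_tx_completion_handler().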
3691d429005fSVishal Kulkarni */ 3692d429005fSVishal Kulkarni if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) || 3693d429005fSVishal Kulkarni (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) { 3694d429005fSVishal Kulkarni t4_tx_completion_handler(q, rsp, si); 3695d429005fSVishal Kulkarni return 0; 3696d429005fSVishal Kulkarni } 3697d429005fSVishal Kulkarni 36980a57a536SSantosh Rastapur if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) 3699f7917c00SJeff Kirsher return handle_trace_pkt(q->adap, si); 3700f7917c00SJeff Kirsher 3701f7917c00SJeff Kirsher pkt = (const struct cpl_rx_pkt *)rsp; 37028eb9f2f9SArjun V /* Compressed error vector is enabled for T6 only */ 3703c50ae55eSGanesh Goudar if (q->adap->params.tp.rx_pkt_encap) { 37048eb9f2f9SArjun V err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); 3705c50ae55eSGanesh Goudar tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); 3706c50ae55eSGanesh Goudar } else { 37078eb9f2f9SArjun V err_vec = be16_to_cpu(pkt->err_vec); 3708c50ae55eSGanesh Goudar } 37098eb9f2f9SArjun V 37108eb9f2f9SArjun V csum_ok = pkt->csum_calc && !err_vec && 3711cca2822dSHariprasad Shenai (q->netdev->features & NETIF_F_RXCSUM); 3712992bea8eSGanesh Goudar 3713992bea8eSGanesh Goudar if (err_vec) 3714992bea8eSGanesh Goudar rxq->stats.bad_rx_pkts++; 3715992bea8eSGanesh Goudar 37167235ffaeSVishal Kulkarni if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) { 37177235ffaeSVishal Kulkarni ret = cxgb4_validate_lb_pkt(pi, si); 37187235ffaeSVishal Kulkarni if (!ret) 37197235ffaeSVishal Kulkarni return 0; 37207235ffaeSVishal Kulkarni } 37217235ffaeSVishal Kulkarni 3722c50ae55eSGanesh Goudar if (((pkt->l2info & htonl(RXF_TCP_F)) || 3723c50ae55eSGanesh Goudar tnl_hdr_len) && 3724f7917c00SJeff Kirsher (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { 3725c50ae55eSGanesh Goudar do_gro(rxq, si, pkt, tnl_hdr_len); 3726f7917c00SJeff Kirsher return 0; 3727f7917c00SJeff Kirsher } 3728f7917c00SJeff Kirsher 3729f7917c00SJeff Kirsher skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); 3730f7917c00SJeff Kirsher if (unlikely(!skb)) { 3731f7917c00SJeff Kirsher t4_pktgl_free(si); 3732f7917c00SJeff Kirsher rxq->stats.rx_drops++; 3733f7917c00SJeff Kirsher return 0; 3734f7917c00SJeff Kirsher } 3735f7917c00SJeff Kirsher 3736a4569504SAtul Gupta /* Handle PTP Event Rx packet */ 3737a4569504SAtul Gupta if (unlikely(pi->ptp_enable)) { 3738a4569504SAtul Gupta ret = t4_rx_hststamp(adapter, rsp, rxq, skb); 3739a4569504SAtul Gupta if (ret == RX_PTP_PKT_ERR) 3740a4569504SAtul Gupta return 0; 3741a4569504SAtul Gupta } 3742a4569504SAtul Gupta if (likely(!ret)) 3743a4569504SAtul Gupta __skb_pull(skb, s->pktshift); /* remove ethernet header pad */ 3744a4569504SAtul Gupta 3745a4569504SAtul Gupta /* Handle the PTP Event Tx Loopback packet */ 3746a4569504SAtul Gupta if (unlikely(pi->ptp_enable && !ret && 3747a4569504SAtul Gupta (pkt->l2info & htonl(RXF_UDP_F)) && 3748a4569504SAtul Gupta cxgb4_ptp_is_ptp_rx(skb))) { 3749a4569504SAtul Gupta if (!t4_tx_hststamp(adapter, skb, q->netdev)) 3750a4569504SAtul Gupta return 0; 3751a4569504SAtul Gupta } 3752a4569504SAtul Gupta 3753f7917c00SJeff Kirsher skb->protocol = eth_type_trans(skb, q->netdev); 3754f7917c00SJeff Kirsher skb_record_rx_queue(skb, q->idx); 3755f7917c00SJeff Kirsher if (skb->dev->features & NETIF_F_RXHASH) 37568264989cSTom Herbert skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, 37578264989cSTom Herbert PKT_HASH_TYPE_L3); 3758f7917c00SJeff Kirsher 3759f7917c00SJeff Kirsher rxq->stats.pkts++; 3760f7917c00SJeff Kirsher 37615e2a5ebcSHariprasad 
Shenai if (pi->rxtstamp) 37625e2a5ebcSHariprasad Shenai cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb), 37635e2a5ebcSHariprasad Shenai si->sgetstamp); 3764bdc590b9SHariprasad Shenai if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { 3765f7917c00SJeff Kirsher if (!pkt->ip_frag) { 3766f7917c00SJeff Kirsher skb->ip_summed = CHECKSUM_UNNECESSARY; 3767f7917c00SJeff Kirsher rxq->stats.rx_cso++; 3768bdc590b9SHariprasad Shenai } else if (pkt->l2info & htonl(RXF_IP_F)) { 3769f7917c00SJeff Kirsher __sum16 c = (__force __sum16)pkt->csum; 3770f7917c00SJeff Kirsher skb->csum = csum_unfold(c); 3771c50ae55eSGanesh Goudar 3772c50ae55eSGanesh Goudar if (tnl_hdr_len) { 3773c50ae55eSGanesh Goudar skb->ip_summed = CHECKSUM_UNNECESSARY; 3774c50ae55eSGanesh Goudar skb->csum_level = 1; 3775c50ae55eSGanesh Goudar } else { 3776f7917c00SJeff Kirsher skb->ip_summed = CHECKSUM_COMPLETE; 3777c50ae55eSGanesh Goudar } 3778f7917c00SJeff Kirsher rxq->stats.rx_cso++; 3779f7917c00SJeff Kirsher } 378084a200b3SVarun Prakash } else { 3781f7917c00SJeff Kirsher skb_checksum_none_assert(skb); 378284a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE 378384a200b3SVarun Prakash #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \ 378484a200b3SVarun Prakash RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F) 378584a200b3SVarun Prakash 378684a200b3SVarun Prakash if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { 378784a200b3SVarun Prakash if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && 378884a200b3SVarun Prakash (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { 37898eb9f2f9SArjun V if (q->adap->params.tp.rx_pkt_encap) 37908eb9f2f9SArjun V csum_ok = err_vec & 37918eb9f2f9SArjun V T6_COMPR_RXERR_SUM_F; 37928eb9f2f9SArjun V else 37938eb9f2f9SArjun V csum_ok = err_vec & RXERR_CSUM_F; 37948eb9f2f9SArjun V if (!csum_ok) 379584a200b3SVarun Prakash skb->ip_summed = CHECKSUM_UNNECESSARY; 379684a200b3SVarun Prakash } 379784a200b3SVarun Prakash } 379884a200b3SVarun Prakash 379984a200b3SVarun Prakash #undef CPL_RX_PKT_FLAGS 380084a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */ 380184a200b3SVarun Prakash } 3802f7917c00SJeff Kirsher 3803f7917c00SJeff Kirsher if (unlikely(pkt->vlan_ex)) { 380486a9bad3SPatrick McHardy __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); 3805f7917c00SJeff Kirsher rxq->stats.vlan_ex++; 3806f7917c00SJeff Kirsher } 38073a336cb1SHariprasad Shenai skb_mark_napi_id(skb, &q->napi); 3808f7917c00SJeff Kirsher netif_receive_skb(skb); 3809f7917c00SJeff Kirsher return 0; 3810f7917c00SJeff Kirsher } 3811f7917c00SJeff Kirsher 3812f7917c00SJeff Kirsher /** 3813f7917c00SJeff Kirsher * restore_rx_bufs - put back a packet's Rx buffers 3814f7917c00SJeff Kirsher * @si: the packet gather list 3815f7917c00SJeff Kirsher * @q: the SGE free list 3816f7917c00SJeff Kirsher * @frags: number of FL buffers to restore 3817f7917c00SJeff Kirsher * 3818f7917c00SJeff Kirsher * Puts back on an FL the Rx buffers associated with @si. The buffers 3819f7917c00SJeff Kirsher * have already been unmapped and are left unmapped; we mark them so to 3820f7917c00SJeff Kirsher * prevent further unmapping attempts. 3821f7917c00SJeff Kirsher * 3822f7917c00SJeff Kirsher * This function undoes a series of @unmap_rx_buf calls when we find out 3823f7917c00SJeff Kirsher * that the current packet can't be processed right away after all and we 3824f7917c00SJeff Kirsher * need to come back to it later. This is a very rare event and there's 3825f7917c00SJeff Kirsher * no effort to make this particularly efficient.
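 *
 * The only caller here is process_responses() below, which backs off
 * with the no-memory holdoff timer when the response handler could not
 * take the packet.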
3826f7917c00SJeff Kirsher */ 3827f7917c00SJeff Kirsher static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, 3828f7917c00SJeff Kirsher int frags) 3829f7917c00SJeff Kirsher { 3830f7917c00SJeff Kirsher struct rx_sw_desc *d; 3831f7917c00SJeff Kirsher 3832f7917c00SJeff Kirsher while (frags--) { 3833f7917c00SJeff Kirsher if (q->cidx == 0) 3834f7917c00SJeff Kirsher q->cidx = q->size - 1; 3835f7917c00SJeff Kirsher else 3836f7917c00SJeff Kirsher q->cidx--; 3837f7917c00SJeff Kirsher d = &q->sdesc[q->cidx]; 3838f7917c00SJeff Kirsher d->page = si->frags[frags].page; 3839f7917c00SJeff Kirsher d->dma_addr |= RX_UNMAPPED_BUF; 3840f7917c00SJeff Kirsher q->avail++; 3841f7917c00SJeff Kirsher } 3842f7917c00SJeff Kirsher } 3843f7917c00SJeff Kirsher 3844f7917c00SJeff Kirsher /** 3845f7917c00SJeff Kirsher * is_new_response - check if a response is newly written 3846f7917c00SJeff Kirsher * @r: the response descriptor 3847f7917c00SJeff Kirsher * @q: the response queue 3848f7917c00SJeff Kirsher * 3849f7917c00SJeff Kirsher * Returns true if a response descriptor contains a yet unprocessed 3850f7917c00SJeff Kirsher * response. 3851f7917c00SJeff Kirsher */ 3852f7917c00SJeff Kirsher static inline bool is_new_response(const struct rsp_ctrl *r, 3853f7917c00SJeff Kirsher const struct sge_rspq *q) 3854f7917c00SJeff Kirsher { 38551ecc7b7aSHariprasad Shenai return (r->type_gen >> RSPD_GEN_S) == q->gen; 3856f7917c00SJeff Kirsher } 3857f7917c00SJeff Kirsher 3858f7917c00SJeff Kirsher /** 3859f7917c00SJeff Kirsher * rspq_next - advance to the next entry in a response queue 3860f7917c00SJeff Kirsher * @q: the queue 3861f7917c00SJeff Kirsher * 3862f7917c00SJeff Kirsher * Updates the state of a response queue to advance it to the next entry. 3863f7917c00SJeff Kirsher */ 3864f7917c00SJeff Kirsher static inline void rspq_next(struct sge_rspq *q) 3865f7917c00SJeff Kirsher { 3866f7917c00SJeff Kirsher q->cur_desc = (void *)q->cur_desc + q->iqe_len; 3867f7917c00SJeff Kirsher if (unlikely(++q->cidx == q->size)) { 3868f7917c00SJeff Kirsher q->cidx = 0; 3869f7917c00SJeff Kirsher q->gen ^= 1; 3870f7917c00SJeff Kirsher q->cur_desc = q->desc; 3871f7917c00SJeff Kirsher } 3872f7917c00SJeff Kirsher } 3873f7917c00SJeff Kirsher 3874f7917c00SJeff Kirsher /** 3875f7917c00SJeff Kirsher * process_responses - process responses from an SGE response queue 3876f7917c00SJeff Kirsher * @q: the ingress queue to process 3877f7917c00SJeff Kirsher * @budget: how many responses can be processed in this round 3878f7917c00SJeff Kirsher * 3879f7917c00SJeff Kirsher * Process responses from an SGE response queue up to the supplied budget. 3880f7917c00SJeff Kirsher * Responses include received packets as well as control messages from FW 3881f7917c00SJeff Kirsher * or HW. 3882f7917c00SJeff Kirsher * 3883f7917c00SJeff Kirsher * Additionally choose the interrupt holdoff time for the next interrupt 3884f7917c00SJeff Kirsher * on this queue. If the system is under memory shortage use a fairly 3885f7917c00SJeff Kirsher * long delay to help recovery. 
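 *
 * Returns the number of responses actually processed, which the NAPI
 * handler below compares against its budget to decide whether the queue
 * can be taken off the poll list and its interrupts re-armed.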
3886f7917c00SJeff Kirsher */ 3887f7917c00SJeff Kirsher static int process_responses(struct sge_rspq *q, int budget) 3888f7917c00SJeff Kirsher { 3889f7917c00SJeff Kirsher int ret, rsp_type; 3890f7917c00SJeff Kirsher int budget_left = budget; 3891f7917c00SJeff Kirsher const struct rsp_ctrl *rc; 3892f7917c00SJeff Kirsher struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 389352367a76SVipul Pandya struct adapter *adapter = q->adap; 389452367a76SVipul Pandya struct sge *s = &adapter->sge; 3895f7917c00SJeff Kirsher 3896f7917c00SJeff Kirsher while (likely(budget_left)) { 3897f7917c00SJeff Kirsher rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 38982337ba42SVarun Prakash if (!is_new_response(rc, q)) { 38992337ba42SVarun Prakash if (q->flush_handler) 39002337ba42SVarun Prakash q->flush_handler(q); 3901f7917c00SJeff Kirsher break; 39022337ba42SVarun Prakash } 3903f7917c00SJeff Kirsher 3904019be1cfSAlexander Duyck dma_rmb(); 39051ecc7b7aSHariprasad Shenai rsp_type = RSPD_TYPE_G(rc->type_gen); 39061ecc7b7aSHariprasad Shenai if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) { 3907e91b0f24SIan Campbell struct page_frag *fp; 3908f7917c00SJeff Kirsher struct pkt_gl si; 3909f7917c00SJeff Kirsher const struct rx_sw_desc *rsd; 3910f7917c00SJeff Kirsher u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; 3911f7917c00SJeff Kirsher 39121ecc7b7aSHariprasad Shenai if (len & RSPD_NEWBUF_F) { 3913f7917c00SJeff Kirsher if (likely(q->offset > 0)) { 3914f7917c00SJeff Kirsher free_rx_bufs(q->adap, &rxq->fl, 1); 3915f7917c00SJeff Kirsher q->offset = 0; 3916f7917c00SJeff Kirsher } 39171ecc7b7aSHariprasad Shenai len = RSPD_LEN_G(len); 3918f7917c00SJeff Kirsher } 3919f7917c00SJeff Kirsher si.tot_len = len; 3920f7917c00SJeff Kirsher 3921f7917c00SJeff Kirsher /* gather packet fragments */ 3922f7917c00SJeff Kirsher for (frags = 0, fp = si.frags; ; frags++, fp++) { 3923f7917c00SJeff Kirsher rsd = &rxq->fl.sdesc[rxq->fl.cidx]; 392452367a76SVipul Pandya bufsz = get_buf_size(adapter, rsd); 3925f7917c00SJeff Kirsher fp->page = rsd->page; 3926e91b0f24SIan Campbell fp->offset = q->offset; 3927e91b0f24SIan Campbell fp->size = min(bufsz, len); 3928e91b0f24SIan Campbell len -= fp->size; 3929f7917c00SJeff Kirsher if (!len) 3930f7917c00SJeff Kirsher break; 3931f7917c00SJeff Kirsher unmap_rx_buf(q->adap, &rxq->fl); 3932f7917c00SJeff Kirsher } 3933f7917c00SJeff Kirsher 39345e2a5ebcSHariprasad Shenai si.sgetstamp = SGE_TIMESTAMP_G( 39355e2a5ebcSHariprasad Shenai be64_to_cpu(rc->last_flit)); 3936f7917c00SJeff Kirsher /* 3937f7917c00SJeff Kirsher * Last buffer remains mapped so explicitly make it 3938f7917c00SJeff Kirsher * coherent for CPU access. 
3939f7917c00SJeff Kirsher */ 3940f7917c00SJeff Kirsher dma_sync_single_for_cpu(q->adap->pdev_dev, 3941f7917c00SJeff Kirsher get_buf_addr(rsd), 3942e91b0f24SIan Campbell fp->size, DMA_FROM_DEVICE); 3943f7917c00SJeff Kirsher 3944f7917c00SJeff Kirsher si.va = page_address(si.frags[0].page) + 3945e91b0f24SIan Campbell si.frags[0].offset; 3946f7917c00SJeff Kirsher prefetch(si.va); 3947f7917c00SJeff Kirsher 3948f7917c00SJeff Kirsher si.nfrags = frags + 1; 3949f7917c00SJeff Kirsher ret = q->handler(q, q->cur_desc, &si); 3950f7917c00SJeff Kirsher if (likely(ret == 0)) 395152367a76SVipul Pandya q->offset += ALIGN(fp->size, s->fl_align); 3952f7917c00SJeff Kirsher else 3953f7917c00SJeff Kirsher restore_rx_bufs(&si, &rxq->fl, frags); 39541ecc7b7aSHariprasad Shenai } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) { 3955f7917c00SJeff Kirsher ret = q->handler(q, q->cur_desc, NULL); 3956f7917c00SJeff Kirsher } else { 3957f7917c00SJeff Kirsher ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); 3958f7917c00SJeff Kirsher } 3959f7917c00SJeff Kirsher 3960f7917c00SJeff Kirsher if (unlikely(ret)) { 3961f7917c00SJeff Kirsher /* couldn't process descriptor, back off for recovery */ 39621ecc7b7aSHariprasad Shenai q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX); 3963f7917c00SJeff Kirsher break; 3964f7917c00SJeff Kirsher } 3965f7917c00SJeff Kirsher 3966f7917c00SJeff Kirsher rspq_next(q); 3967f7917c00SJeff Kirsher budget_left--; 3968f7917c00SJeff Kirsher } 3969f7917c00SJeff Kirsher 3970da08e425SHariprasad Shenai if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16) 3971f7917c00SJeff Kirsher __refill_fl(q->adap, &rxq->fl); 3972f7917c00SJeff Kirsher return budget - budget_left; 3973f7917c00SJeff Kirsher } 3974f7917c00SJeff Kirsher 3975f7917c00SJeff Kirsher /** 3976f7917c00SJeff Kirsher * napi_rx_handler - the NAPI handler for Rx processing 3977f7917c00SJeff Kirsher * @napi: the napi instance 3978f7917c00SJeff Kirsher * @budget: how many packets we can process in this round 3979f7917c00SJeff Kirsher * 3980f7917c00SJeff Kirsher * Handler for new data events when using NAPI. This does not need any 3981f7917c00SJeff Kirsher * locking or protection from interrupts as data interrupts are off at 3982f7917c00SJeff Kirsher * this point and other adapter interrupts do not interfere (the latter 3983f7917c00SJeff Kirsher * is not a concern at all with MSI-X as non-data interrupts then have 3984f7917c00SJeff Kirsher * a separate handler).
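 *
 * When adaptive RX coalescing is enabled, the holdoff timer index is
 * nudged one step up after a poll that consumed more than the per-timer
 * packet quota and one step down otherwise, clamped to the valid SGE
 * timer range, before the queue's GTS doorbell is written.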
3985f7917c00SJeff Kirsher */ 3986f7917c00SJeff Kirsher static int napi_rx_handler(struct napi_struct *napi, int budget) 3987f7917c00SJeff Kirsher { 3988f7917c00SJeff Kirsher unsigned int params; 3989f7917c00SJeff Kirsher struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); 39903a336cb1SHariprasad Shenai int work_done; 3991d63a6dcfSHariprasad Shenai u32 val; 3992f7917c00SJeff Kirsher 39933a336cb1SHariprasad Shenai work_done = process_responses(q, budget); 3994f7917c00SJeff Kirsher if (likely(work_done < budget)) { 3995e553ec3fSHariprasad Shenai int timer_index; 3996e553ec3fSHariprasad Shenai 3997812787b8SHariprasad Shenai napi_complete_done(napi, work_done); 39981ecc7b7aSHariprasad Shenai timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); 3999e553ec3fSHariprasad Shenai 4000e553ec3fSHariprasad Shenai if (q->adaptive_rx) { 4001e553ec3fSHariprasad Shenai if (work_done > max(timer_pkt_quota[timer_index], 4002e553ec3fSHariprasad Shenai MIN_NAPI_WORK)) 4003e553ec3fSHariprasad Shenai timer_index = (timer_index + 1); 4004e553ec3fSHariprasad Shenai else 4005e553ec3fSHariprasad Shenai timer_index = timer_index - 1; 4006e553ec3fSHariprasad Shenai 4007e553ec3fSHariprasad Shenai timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); 40081ecc7b7aSHariprasad Shenai q->next_intr_params = 40091ecc7b7aSHariprasad Shenai QINTR_TIMER_IDX_V(timer_index) | 40101ecc7b7aSHariprasad Shenai QINTR_CNT_EN_V(0); 4011e553ec3fSHariprasad Shenai params = q->next_intr_params; 4012e553ec3fSHariprasad Shenai } else { 4013f7917c00SJeff Kirsher params = q->next_intr_params; 4014f7917c00SJeff Kirsher q->next_intr_params = q->intr_params; 4015e553ec3fSHariprasad Shenai } 4016f7917c00SJeff Kirsher } else 40171ecc7b7aSHariprasad Shenai params = QINTR_TIMER_IDX_V(7); 4018f7917c00SJeff Kirsher 4019f612b815SHariprasad Shenai val = CIDXINC_V(work_done) | SEINTARM_V(params); 4020df64e4d3SHariprasad Shenai 4021df64e4d3SHariprasad Shenai /* If we don't have access to the new User GTS (T5+), use the old 4022df64e4d3SHariprasad Shenai * doorbell mechanism; otherwise use the new BAR2 mechanism. 
4023df64e4d3SHariprasad Shenai */ 4024df64e4d3SHariprasad Shenai if (unlikely(q->bar2_addr == NULL)) { 4025f612b815SHariprasad Shenai t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), 4026f612b815SHariprasad Shenai val | INGRESSQID_V((u32)q->cntxt_id)); 4027d63a6dcfSHariprasad Shenai } else { 4028f612b815SHariprasad Shenai writel(val | INGRESSQID_V(q->bar2_qid), 4029df64e4d3SHariprasad Shenai q->bar2_addr + SGE_UDB_GTS); 4030d63a6dcfSHariprasad Shenai wmb(); 4031d63a6dcfSHariprasad Shenai } 4032f7917c00SJeff Kirsher return work_done; 4033f7917c00SJeff Kirsher } 4034f7917c00SJeff Kirsher 40350eb484eeSAllen Pais void cxgb4_ethofld_restart(struct tasklet_struct *t) 4036b1396c2bSRahul Lakkireddy { 40370eb484eeSAllen Pais struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t, 40380eb484eeSAllen Pais qresume_tsk); 4039b1396c2bSRahul Lakkireddy int pktcount; 4040b1396c2bSRahul Lakkireddy 4041b1396c2bSRahul Lakkireddy spin_lock(&eosw_txq->lock); 4042b1396c2bSRahul Lakkireddy pktcount = eosw_txq->cidx - eosw_txq->last_cidx; 4043b1396c2bSRahul Lakkireddy if (pktcount < 0) 4044b1396c2bSRahul Lakkireddy pktcount += eosw_txq->ndesc; 4045b1396c2bSRahul Lakkireddy 40464846d533SRahul Lakkireddy if (pktcount) { 4047b1396c2bSRahul Lakkireddy cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev), 4048b1396c2bSRahul Lakkireddy eosw_txq, pktcount); 40494846d533SRahul Lakkireddy eosw_txq->inuse -= pktcount; 40504846d533SRahul Lakkireddy } 40514846d533SRahul Lakkireddy 40524846d533SRahul Lakkireddy /* There may be some packets waiting for completions. So, 40534846d533SRahul Lakkireddy * attempt to send these packets now. 40544846d533SRahul Lakkireddy */ 40554846d533SRahul Lakkireddy ethofld_xmit(eosw_txq->netdev, eosw_txq); 4056b1396c2bSRahul Lakkireddy spin_unlock(&eosw_txq->lock); 4057b1396c2bSRahul Lakkireddy } 4058b1396c2bSRahul Lakkireddy 40594846d533SRahul Lakkireddy /* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions 40604846d533SRahul Lakkireddy * @q: the response queue that received the packet 40614846d533SRahul Lakkireddy * @rsp: the response queue descriptor holding the CPL message 40624846d533SRahul Lakkireddy * @si: the gather list of packet fragments 40634846d533SRahul Lakkireddy * 40644846d533SRahul Lakkireddy * Process an ETHOFLD Tx completion. Increment the cidx here, but 40654846d533SRahul Lakkireddy * free up the descriptors in a tasklet later.
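 *
 * The credit count carried in the CPL_FW4_ACK is in 16-byte units, so
 * the loop below charges each completed skb DIV_ROUND_UP(flits * 8, 16)
 * credits, which is intended to mirror what was charged at transmit time.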
40664846d533SRahul Lakkireddy */ 40674846d533SRahul Lakkireddy int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp, 40684846d533SRahul Lakkireddy const struct pkt_gl *si) 40694846d533SRahul Lakkireddy { 40704846d533SRahul Lakkireddy u8 opcode = ((const struct rss_header *)rsp)->opcode; 40714846d533SRahul Lakkireddy 40724846d533SRahul Lakkireddy /* skip RSS header */ 40734846d533SRahul Lakkireddy rsp++; 40744846d533SRahul Lakkireddy 40754846d533SRahul Lakkireddy if (opcode == CPL_FW4_ACK) { 40764846d533SRahul Lakkireddy const struct cpl_fw4_ack *cpl; 40774846d533SRahul Lakkireddy struct sge_eosw_txq *eosw_txq; 40784846d533SRahul Lakkireddy struct eotid_entry *entry; 40794846d533SRahul Lakkireddy struct sk_buff *skb; 40804846d533SRahul Lakkireddy u32 hdr_len, eotid; 40814846d533SRahul Lakkireddy u8 flits, wrlen16; 40824846d533SRahul Lakkireddy int credits; 40834846d533SRahul Lakkireddy 40844846d533SRahul Lakkireddy cpl = (const struct cpl_fw4_ack *)rsp; 40854846d533SRahul Lakkireddy eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) - 40864846d533SRahul Lakkireddy q->adap->tids.eotid_base; 40874846d533SRahul Lakkireddy entry = cxgb4_lookup_eotid(&q->adap->tids, eotid); 40884846d533SRahul Lakkireddy if (!entry) 40894846d533SRahul Lakkireddy goto out_done; 40904846d533SRahul Lakkireddy 40914846d533SRahul Lakkireddy eosw_txq = (struct sge_eosw_txq *)entry->data; 40924846d533SRahul Lakkireddy if (!eosw_txq) 40934846d533SRahul Lakkireddy goto out_done; 40944846d533SRahul Lakkireddy 40954846d533SRahul Lakkireddy spin_lock(&eosw_txq->lock); 40964846d533SRahul Lakkireddy credits = cpl->credits; 40974846d533SRahul Lakkireddy while (credits > 0) { 40984846d533SRahul Lakkireddy skb = eosw_txq->desc[eosw_txq->cidx].skb; 40994846d533SRahul Lakkireddy if (!skb) 41004846d533SRahul Lakkireddy break; 41014846d533SRahul Lakkireddy 41020e395b3cSRahul Lakkireddy if (unlikely((eosw_txq->state == 41030e395b3cSRahul Lakkireddy CXGB4_EO_STATE_FLOWC_OPEN_REPLY || 41040e395b3cSRahul Lakkireddy eosw_txq->state == 41050e395b3cSRahul Lakkireddy CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) && 41060e395b3cSRahul Lakkireddy eosw_txq->cidx == eosw_txq->flowc_idx)) { 41070e395b3cSRahul Lakkireddy flits = DIV_ROUND_UP(skb->len, 8); 41080e395b3cSRahul Lakkireddy if (eosw_txq->state == 41090e395b3cSRahul Lakkireddy CXGB4_EO_STATE_FLOWC_OPEN_REPLY) 41100e395b3cSRahul Lakkireddy eosw_txq->state = CXGB4_EO_STATE_ACTIVE; 41110e395b3cSRahul Lakkireddy else 41120e395b3cSRahul Lakkireddy eosw_txq->state = CXGB4_EO_STATE_CLOSED; 41130e395b3cSRahul Lakkireddy complete(&eosw_txq->completion); 41140e395b3cSRahul Lakkireddy } else { 41150e395b3cSRahul Lakkireddy hdr_len = eth_get_headlen(eosw_txq->netdev, 41160e395b3cSRahul Lakkireddy skb->data, 41174846d533SRahul Lakkireddy skb_headlen(skb)); 41180e395b3cSRahul Lakkireddy flits = ethofld_calc_tx_flits(q->adap, skb, 41190e395b3cSRahul Lakkireddy hdr_len); 41200e395b3cSRahul Lakkireddy } 41214846d533SRahul Lakkireddy eosw_txq_advance_index(&eosw_txq->cidx, 1, 41224846d533SRahul Lakkireddy eosw_txq->ndesc); 41234846d533SRahul Lakkireddy wrlen16 = DIV_ROUND_UP(flits * 8, 16); 41244846d533SRahul Lakkireddy credits -= wrlen16; 41254846d533SRahul Lakkireddy } 41264846d533SRahul Lakkireddy 41274846d533SRahul Lakkireddy eosw_txq->cred += cpl->credits; 41284846d533SRahul Lakkireddy eosw_txq->ncompl--; 41294846d533SRahul Lakkireddy 41304846d533SRahul Lakkireddy spin_unlock(&eosw_txq->lock); 41314846d533SRahul Lakkireddy 41324846d533SRahul Lakkireddy /* Schedule a tasklet to reclaim SKBs and 
restart ETHOFLD Tx, 41334846d533SRahul Lakkireddy * if there were packets waiting for completion. 41344846d533SRahul Lakkireddy */ 41354846d533SRahul Lakkireddy tasklet_schedule(&eosw_txq->qresume_tsk); 41364846d533SRahul Lakkireddy } 41374846d533SRahul Lakkireddy 41384846d533SRahul Lakkireddy out_done: 41394846d533SRahul Lakkireddy return 0; 41404846d533SRahul Lakkireddy } 41414846d533SRahul Lakkireddy 4142f7917c00SJeff Kirsher /* 4143f7917c00SJeff Kirsher * The MSI-X interrupt handler for an SGE response queue. 4144f7917c00SJeff Kirsher */ 4145f7917c00SJeff Kirsher irqreturn_t t4_sge_intr_msix(int irq, void *cookie) 4146f7917c00SJeff Kirsher { 4147f7917c00SJeff Kirsher struct sge_rspq *q = cookie; 4148f7917c00SJeff Kirsher 4149f7917c00SJeff Kirsher napi_schedule(&q->napi); 4150f7917c00SJeff Kirsher return IRQ_HANDLED; 4151f7917c00SJeff Kirsher } 4152f7917c00SJeff Kirsher 4153f7917c00SJeff Kirsher /* 4154f7917c00SJeff Kirsher * Process the indirect interrupt entries in the interrupt queue and kick off 4155f7917c00SJeff Kirsher * NAPI for each queue that has generated an entry. 4156f7917c00SJeff Kirsher */ 4157f7917c00SJeff Kirsher static unsigned int process_intrq(struct adapter *adap) 4158f7917c00SJeff Kirsher { 4159f7917c00SJeff Kirsher unsigned int credits; 4160f7917c00SJeff Kirsher const struct rsp_ctrl *rc; 4161f7917c00SJeff Kirsher struct sge_rspq *q = &adap->sge.intrq; 4162d63a6dcfSHariprasad Shenai u32 val; 4163f7917c00SJeff Kirsher 4164f7917c00SJeff Kirsher spin_lock(&adap->sge.intrq_lock); 4165f7917c00SJeff Kirsher for (credits = 0; ; credits++) { 4166f7917c00SJeff Kirsher rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 4167f7917c00SJeff Kirsher if (!is_new_response(rc, q)) 4168f7917c00SJeff Kirsher break; 4169f7917c00SJeff Kirsher 4170019be1cfSAlexander Duyck dma_rmb(); 41711ecc7b7aSHariprasad Shenai if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) { 4172f7917c00SJeff Kirsher unsigned int qid = ntohl(rc->pldbuflen_qid); 4173f7917c00SJeff Kirsher 4174f7917c00SJeff Kirsher qid -= adap->sge.ingr_start; 4175f7917c00SJeff Kirsher napi_schedule(&adap->sge.ingr_map[qid]->napi); 4176f7917c00SJeff Kirsher } 4177f7917c00SJeff Kirsher 4178f7917c00SJeff Kirsher rspq_next(q); 4179f7917c00SJeff Kirsher } 4180f7917c00SJeff Kirsher 4181f612b815SHariprasad Shenai val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); 4182df64e4d3SHariprasad Shenai 4183df64e4d3SHariprasad Shenai /* If we don't have access to the new User GTS (T5+), use the old 4184df64e4d3SHariprasad Shenai * doorbell mechanism; otherwise use the new BAR2 mechanism. 4185df64e4d3SHariprasad Shenai */ 4186df64e4d3SHariprasad Shenai if (unlikely(q->bar2_addr == NULL)) { 4187f612b815SHariprasad Shenai t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), 4188f612b815SHariprasad Shenai val | INGRESSQID_V(q->cntxt_id)); 4189d63a6dcfSHariprasad Shenai } else { 4190f612b815SHariprasad Shenai writel(val | INGRESSQID_V(q->bar2_qid), 4191df64e4d3SHariprasad Shenai q->bar2_addr + SGE_UDB_GTS); 4192d63a6dcfSHariprasad Shenai wmb(); 4193d63a6dcfSHariprasad Shenai } 4194f7917c00SJeff Kirsher spin_unlock(&adap->sge.intrq_lock); 4195f7917c00SJeff Kirsher return credits; 4196f7917c00SJeff Kirsher } 4197f7917c00SJeff Kirsher 4198f7917c00SJeff Kirsher /* 4199f7917c00SJeff Kirsher * The MSI interrupt handler, which handles data events from SGE response queues 4200f7917c00SJeff Kirsher * as well as error and other async events as they all use the same MSI vector. 
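 * Only the master PF additionally runs the slow-path handler for error
 * and other async events; every PF still drains its own interrupt queue.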
4201f7917c00SJeff Kirsher */ 4202f7917c00SJeff Kirsher static irqreturn_t t4_intr_msi(int irq, void *cookie) 4203f7917c00SJeff Kirsher { 4204f7917c00SJeff Kirsher struct adapter *adap = cookie; 4205f7917c00SJeff Kirsher 420680f61f19SArjun Vynipadath if (adap->flags & CXGB4_MASTER_PF) 4207f7917c00SJeff Kirsher t4_slow_intr_handler(adap); 4208f7917c00SJeff Kirsher process_intrq(adap); 4209f7917c00SJeff Kirsher return IRQ_HANDLED; 4210f7917c00SJeff Kirsher } 4211f7917c00SJeff Kirsher 4212f7917c00SJeff Kirsher /* 4213f7917c00SJeff Kirsher * Interrupt handler for legacy INTx interrupts. 4214f7917c00SJeff Kirsher * Handles data events from SGE response queues as well as error and other 4215f7917c00SJeff Kirsher * async events as they all use the same interrupt line. 4216f7917c00SJeff Kirsher */ 4217f7917c00SJeff Kirsher static irqreturn_t t4_intr_intx(int irq, void *cookie) 4218f7917c00SJeff Kirsher { 4219f7917c00SJeff Kirsher struct adapter *adap = cookie; 4220f7917c00SJeff Kirsher 4221f061de42SHariprasad Shenai t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); 422280f61f19SArjun Vynipadath if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) | 4223c3c7b121SHariprasad Shenai process_intrq(adap)) 4224f7917c00SJeff Kirsher return IRQ_HANDLED; 4225f7917c00SJeff Kirsher return IRQ_NONE; /* probably shared interrupt */ 4226f7917c00SJeff Kirsher } 4227f7917c00SJeff Kirsher 4228f7917c00SJeff Kirsher /** 4229f7917c00SJeff Kirsher * t4_intr_handler - select the top-level interrupt handler 4230f7917c00SJeff Kirsher * @adap: the adapter 4231f7917c00SJeff Kirsher * 4232f7917c00SJeff Kirsher * Selects the top-level interrupt handler based on the type of interrupts 4233f7917c00SJeff Kirsher * (MSI-X, MSI, or INTx). 4234f7917c00SJeff Kirsher */ 4235f7917c00SJeff Kirsher irq_handler_t t4_intr_handler(struct adapter *adap) 4236f7917c00SJeff Kirsher { 423780f61f19SArjun Vynipadath if (adap->flags & CXGB4_USING_MSIX) 4238f7917c00SJeff Kirsher return t4_sge_intr_msix; 423980f61f19SArjun Vynipadath if (adap->flags & CXGB4_USING_MSI) 4240f7917c00SJeff Kirsher return t4_intr_msi; 4241f7917c00SJeff Kirsher return t4_intr_intx; 4242f7917c00SJeff Kirsher } 4243f7917c00SJeff Kirsher 42440e23daebSKees Cook static void sge_rx_timer_cb(struct timer_list *t) 4245f7917c00SJeff Kirsher { 4246f7917c00SJeff Kirsher unsigned long m; 4247a3bfb617SHariprasad Shenai unsigned int i; 42480e23daebSKees Cook struct adapter *adap = from_timer(adap, t, sge.rx_timer); 4249f7917c00SJeff Kirsher struct sge *s = &adap->sge; 4250f7917c00SJeff Kirsher 42514b8e27a8SHariprasad Shenai for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) 4252f7917c00SJeff Kirsher for (m = s->starving_fl[i]; m; m &= m - 1) { 4253f7917c00SJeff Kirsher struct sge_eth_rxq *rxq; 4254f7917c00SJeff Kirsher unsigned int id = __ffs(m) + i * BITS_PER_LONG; 4255f7917c00SJeff Kirsher struct sge_fl *fl = s->egr_map[id]; 4256f7917c00SJeff Kirsher 4257f7917c00SJeff Kirsher clear_bit(id, s->starving_fl); 42584e857c58SPeter Zijlstra smp_mb__after_atomic(); 4259f7917c00SJeff Kirsher 4260c098b026SHariprasad Shenai if (fl_starving(adap, fl)) { 4261f7917c00SJeff Kirsher rxq = container_of(fl, struct sge_eth_rxq, fl); 4262f7917c00SJeff Kirsher if (napi_reschedule(&rxq->rspq.napi)) 4263f7917c00SJeff Kirsher fl->starving++; 4264f7917c00SJeff Kirsher else 4265f7917c00SJeff Kirsher set_bit(id, s->starving_fl); 4266f7917c00SJeff Kirsher } 4267f7917c00SJeff Kirsher } 4268a3bfb617SHariprasad Shenai /* The remainder of the SGE RX Timer Callback routine is dedicated to 
4269a3bfb617SHariprasad Shenai * global Master PF activities like checking for chip ingress stalls, 4270a3bfb617SHariprasad Shenai * etc. 42710f4d201fSKumar Sanghvi */ 427280f61f19SArjun Vynipadath if (!(adap->flags & CXGB4_MASTER_PF)) 4273a3bfb617SHariprasad Shenai goto done; 42740f4d201fSKumar Sanghvi 4275a3bfb617SHariprasad Shenai t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); 42760f4d201fSKumar Sanghvi 4277a3bfb617SHariprasad Shenai done: 4278f7917c00SJeff Kirsher mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); 4279f7917c00SJeff Kirsher } 4280f7917c00SJeff Kirsher 42810e23daebSKees Cook static void sge_tx_timer_cb(struct timer_list *t) 4282f7917c00SJeff Kirsher { 42830e23daebSKees Cook struct adapter *adap = from_timer(adap, t, sge.tx_timer); 4284f7917c00SJeff Kirsher struct sge *s = &adap->sge; 4285d429005fSVishal Kulkarni unsigned long m, period; 4286d429005fSVishal Kulkarni unsigned int i, budget; 4287f7917c00SJeff Kirsher 42884b8e27a8SHariprasad Shenai for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) 4289f7917c00SJeff Kirsher for (m = s->txq_maperr[i]; m; m &= m - 1) { 4290f7917c00SJeff Kirsher unsigned long id = __ffs(m) + i * BITS_PER_LONG; 4291ab677ff4SHariprasad Shenai struct sge_uld_txq *txq = s->egr_map[id]; 4292f7917c00SJeff Kirsher 4293f7917c00SJeff Kirsher clear_bit(id, s->txq_maperr); 4294f7917c00SJeff Kirsher tasklet_schedule(&txq->qresume_tsk); 4295f7917c00SJeff Kirsher } 4296f7917c00SJeff Kirsher 4297a4569504SAtul Gupta if (!is_t4(adap->params.chip)) { 4298a4569504SAtul Gupta struct sge_eth_txq *q = &s->ptptxq; 4299a4569504SAtul Gupta int avail; 4300a4569504SAtul Gupta 4301a4569504SAtul Gupta spin_lock(&adap->ptp_lock); 4302a4569504SAtul Gupta avail = reclaimable(&q->q); 4303a4569504SAtul Gupta 4304a4569504SAtul Gupta if (avail) { 4305a4569504SAtul Gupta free_tx_desc(adap, &q->q, avail, false); 4306a4569504SAtul Gupta q->q.in_use -= avail; 4307a4569504SAtul Gupta } 4308a4569504SAtul Gupta spin_unlock(&adap->ptp_lock); 4309a4569504SAtul Gupta } 4310a4569504SAtul Gupta 4311f7917c00SJeff Kirsher budget = MAX_TIMER_TX_RECLAIM; 4312f7917c00SJeff Kirsher i = s->ethtxq_rover; 4313f7917c00SJeff Kirsher do { 4314d429005fSVishal Kulkarni budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i], 4315d429005fSVishal Kulkarni budget); 4316d429005fSVishal Kulkarni if (!budget) 4317d429005fSVishal Kulkarni break; 4318f7917c00SJeff Kirsher 4319f7917c00SJeff Kirsher if (++i >= s->ethqsets) 4320f7917c00SJeff Kirsher i = 0; 4321d429005fSVishal Kulkarni } while (i != s->ethtxq_rover); 4322f7917c00SJeff Kirsher s->ethtxq_rover = i; 4323d429005fSVishal Kulkarni 4324d429005fSVishal Kulkarni if (budget == 0) { 4325d429005fSVishal Kulkarni /* If we found too many reclaimable packets schedule a timer 4326d429005fSVishal Kulkarni * in the near future to continue where we left off. 4327d429005fSVishal Kulkarni */ 4328d429005fSVishal Kulkarni period = 2; 4329d429005fSVishal Kulkarni } else { 4330d429005fSVishal Kulkarni /* We reclaimed all reclaimable TX Descriptors, so reschedule 4331d429005fSVishal Kulkarni * at the normal period. 
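 *
 * The callback is therefore self-pacing; the rearm below boils down to:
 *
 *	period = budget ? TX_QCHECK_PERIOD : 2;
 *	mod_timer(&s->tx_timer, jiffies + period);
 *
 * so an adapter with a reclaim backlog is revisited within a couple of
 * jiffies while an idle one is only polled every TX_QCHECK_PERIOD.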
4332d429005fSVishal Kulkarni */ 4333d429005fSVishal Kulkarni period = TX_QCHECK_PERIOD; 4334d429005fSVishal Kulkarni } 4335d429005fSVishal Kulkarni 4336d429005fSVishal Kulkarni mod_timer(&s->tx_timer, jiffies + period); 4337f7917c00SJeff Kirsher } 4338f7917c00SJeff Kirsher 4339d63a6dcfSHariprasad Shenai /** 4340df64e4d3SHariprasad Shenai * bar2_address - return the BAR2 address for an SGE Queue's Registers 4341df64e4d3SHariprasad Shenai * @adapter: the adapter 4342df64e4d3SHariprasad Shenai * @qid: the SGE Queue ID 4343df64e4d3SHariprasad Shenai * @qtype: the SGE Queue Type (Egress or Ingress) 4344df64e4d3SHariprasad Shenai * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues 4345d63a6dcfSHariprasad Shenai * 4346df64e4d3SHariprasad Shenai * Returns the BAR2 address for the SGE Queue Registers associated with 4347df64e4d3SHariprasad Shenai * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also 4348df64e4d3SHariprasad Shenai * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE 4349df64e4d3SHariprasad Shenai * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" 4350df64e4d3SHariprasad Shenai * Registers are supported (e.g. the Write Combining Doorbell Buffer). 4351d63a6dcfSHariprasad Shenai */ 4352df64e4d3SHariprasad Shenai static void __iomem *bar2_address(struct adapter *adapter, 4353df64e4d3SHariprasad Shenai unsigned int qid, 4354df64e4d3SHariprasad Shenai enum t4_bar2_qtype qtype, 4355df64e4d3SHariprasad Shenai unsigned int *pbar2_qid) 4356d63a6dcfSHariprasad Shenai { 4357df64e4d3SHariprasad Shenai u64 bar2_qoffset; 4358df64e4d3SHariprasad Shenai int ret; 4359d63a6dcfSHariprasad Shenai 4360e0456717SLinus Torvalds ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0, 4361df64e4d3SHariprasad Shenai &bar2_qoffset, pbar2_qid); 4362df64e4d3SHariprasad Shenai if (ret) 4363df64e4d3SHariprasad Shenai return NULL; 4364d63a6dcfSHariprasad Shenai 4365df64e4d3SHariprasad Shenai return adapter->bar2 + bar2_qoffset; 4366d63a6dcfSHariprasad Shenai } 4367d63a6dcfSHariprasad Shenai 4368145ef8a5SHariprasad Shenai /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0 4369145ef8a5SHariprasad Shenai * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map 4370145ef8a5SHariprasad Shenai */ 4371f7917c00SJeff Kirsher int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, 4372f7917c00SJeff Kirsher struct net_device *dev, int intr_idx, 43732337ba42SVarun Prakash struct sge_fl *fl, rspq_handler_t hnd, 43742337ba42SVarun Prakash rspq_flush_handler_t flush_hnd, int cong) 4375f7917c00SJeff Kirsher { 4376f7917c00SJeff Kirsher int ret, flsz = 0; 4377f7917c00SJeff Kirsher struct fw_iq_cmd c; 437852367a76SVipul Pandya struct sge *s = &adap->sge; 4379f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 438080f61f19SArjun Vynipadath int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING); 4381f7917c00SJeff Kirsher 4382f7917c00SJeff Kirsher /* Size needs to be multiple of 16, including status entry. 
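 * For example, a requested size of 1000 entries is rounded up to 1008
 * here; one entry is subtracted again later (iq->size--) to account for
 * the status entry.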
*/ 4383f7917c00SJeff Kirsher iq->size = roundup(iq->size, 16); 4384f7917c00SJeff Kirsher 4385f7917c00SJeff Kirsher iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, 43860ac5b708SHariprasad Shenai &iq->phys_addr, NULL, 0, 43870ac5b708SHariprasad Shenai dev_to_node(adap->pdev_dev)); 4388f7917c00SJeff Kirsher if (!iq->desc) 4389f7917c00SJeff Kirsher return -ENOMEM; 4390f7917c00SJeff Kirsher 4391f7917c00SJeff Kirsher memset(&c, 0, sizeof(c)); 4392e2ac9628SHariprasad Shenai c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | 4393e2ac9628SHariprasad Shenai FW_CMD_WRITE_F | FW_CMD_EXEC_F | 4394b2612722SHariprasad Shenai FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); 43956e4b51a6SHariprasad Shenai c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | 4396f7917c00SJeff Kirsher FW_LEN16(c)); 43976e4b51a6SHariprasad Shenai c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | 43986e4b51a6SHariprasad Shenai FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | 43991ecc7b7aSHariprasad Shenai FW_IQ_CMD_IQANDST_V(intr_idx < 0) | 44001ecc7b7aSHariprasad Shenai FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) | 44016e4b51a6SHariprasad Shenai FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx : 4402f7917c00SJeff Kirsher -intr_idx - 1)); 44036e4b51a6SHariprasad Shenai c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | 44046e4b51a6SHariprasad Shenai FW_IQ_CMD_IQGTSMODE_F | 44056e4b51a6SHariprasad Shenai FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | 44066e4b51a6SHariprasad Shenai FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); 4407f7917c00SJeff Kirsher c.iqsize = htons(iq->size); 4408f7917c00SJeff Kirsher c.iqaddr = cpu_to_be64(iq->phys_addr); 4409145ef8a5SHariprasad Shenai if (cong >= 0) 44108dce04f1SArjun Vynipadath c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F | 44118dce04f1SArjun Vynipadath FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC 44128dce04f1SArjun Vynipadath : FW_IQ_IQTYPE_OFLD)); 4413f7917c00SJeff Kirsher 4414f7917c00SJeff Kirsher if (fl) { 4415d429005fSVishal Kulkarni unsigned int chip_ver = 4416d429005fSVishal Kulkarni CHELSIO_CHIP_VERSION(adap->params.chip); 44173ccc6cf7SHariprasad Shenai 441813432997SHariprasad Shenai /* Allocate the ring for the hardware free list (with space 441913432997SHariprasad Shenai * for its status page) along with the associated software 442013432997SHariprasad Shenai * descriptor ring. The free list size needs to be a multiple 442113432997SHariprasad Shenai * of the Egress Queue Unit and at least 2 Egress Units larger 442213432997SHariprasad Shenai * than the SGE's Egress Congestion Threshold 442313432997SHariprasad Shenai * (fl_starve_thres - 1).
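 *
 * As a rough worked example (the threshold is chip and configuration
 * dependent): with an EGRTHRESHOLD of 64 pointer pairs, fl_starve_thres
 * is 2 * 64 + 1 = 129, so a requested free list of 72 entries is bumped
 * to 129 - 1 + 2 * 8 = 144 below and then rounded up to a multiple of 8
 * (144 already is one).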
442413432997SHariprasad Shenai */ 442513432997SHariprasad Shenai if (fl->size < s->fl_starve_thres - 1 + 2 * 8) 442613432997SHariprasad Shenai fl->size = s->fl_starve_thres - 1 + 2 * 8; 4427f7917c00SJeff Kirsher fl->size = roundup(fl->size, 8); 4428f7917c00SJeff Kirsher fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), 4429f7917c00SJeff Kirsher sizeof(struct rx_sw_desc), &fl->addr, 44300ac5b708SHariprasad Shenai &fl->sdesc, s->stat_len, 44310ac5b708SHariprasad Shenai dev_to_node(adap->pdev_dev)); 4432f7917c00SJeff Kirsher if (!fl->desc) 4433f7917c00SJeff Kirsher goto fl_nomem; 4434f7917c00SJeff Kirsher 443552367a76SVipul Pandya flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); 4436145ef8a5SHariprasad Shenai c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | 4437b0ba9d5fSCasey Leedom FW_IQ_CMD_FL0FETCHRO_V(relaxed) | 4438b0ba9d5fSCasey Leedom FW_IQ_CMD_FL0DATARO_V(relaxed) | 44396e4b51a6SHariprasad Shenai FW_IQ_CMD_FL0PADEN_F); 4440145ef8a5SHariprasad Shenai if (cong >= 0) 4441145ef8a5SHariprasad Shenai c.iqns_to_fl0congen |= 4442145ef8a5SHariprasad Shenai htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) | 4443145ef8a5SHariprasad Shenai FW_IQ_CMD_FL0CONGCIF_F | 4444145ef8a5SHariprasad Shenai FW_IQ_CMD_FL0CONGEN_F); 4445edadad80SHariprasad Shenai /* In T6, for egress queue type FL there is internal overhead 4446edadad80SHariprasad Shenai * of 16B for header going into FLM module. Hence the maximum 4447edadad80SHariprasad Shenai * allowed burst size is 448 bytes. For T4/T5, the hardware 4448edadad80SHariprasad Shenai * doesn't coalesce fetch requests if more than 64 bytes of 4449edadad80SHariprasad Shenai * Free List pointers are provided, so we use a 128-byte Fetch 4450edadad80SHariprasad Shenai * Burst Minimum there (T6 implements coalescing so we can use 4451edadad80SHariprasad Shenai * the smaller 64-byte value there). 4452edadad80SHariprasad Shenai */ 44531ecc7b7aSHariprasad Shenai c.fl0dcaen_to_fl0cidxfthresh = 4454d429005fSVishal Kulkarni htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ? 4455edadad80SHariprasad Shenai FETCHBURSTMIN_128B_X : 4456d429005fSVishal Kulkarni FETCHBURSTMIN_64B_T6_X) | 4457d429005fSVishal Kulkarni FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ? 
44583ccc6cf7SHariprasad Shenai FETCHBURSTMAX_512B_X : 44593ccc6cf7SHariprasad Shenai FETCHBURSTMAX_256B_X)); 4460f7917c00SJeff Kirsher c.fl0size = htons(flsz); 4461f7917c00SJeff Kirsher c.fl0addr = cpu_to_be64(fl->addr); 4462f7917c00SJeff Kirsher } 4463f7917c00SJeff Kirsher 4464b2612722SHariprasad Shenai ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 4465f7917c00SJeff Kirsher if (ret) 4466f7917c00SJeff Kirsher goto err; 4467f7917c00SJeff Kirsher 4468f7917c00SJeff Kirsher netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); 4469f7917c00SJeff Kirsher iq->cur_desc = iq->desc; 4470f7917c00SJeff Kirsher iq->cidx = 0; 4471f7917c00SJeff Kirsher iq->gen = 1; 4472f7917c00SJeff Kirsher iq->next_intr_params = iq->intr_params; 4473f7917c00SJeff Kirsher iq->cntxt_id = ntohs(c.iqid); 4474f7917c00SJeff Kirsher iq->abs_id = ntohs(c.physiqid); 4475df64e4d3SHariprasad Shenai iq->bar2_addr = bar2_address(adap, 4476df64e4d3SHariprasad Shenai iq->cntxt_id, 4477df64e4d3SHariprasad Shenai T4_BAR2_QTYPE_INGRESS, 4478df64e4d3SHariprasad Shenai &iq->bar2_qid); 4479f7917c00SJeff Kirsher iq->size--; /* subtract status entry */ 4480f7917c00SJeff Kirsher iq->netdev = dev; 4481f7917c00SJeff Kirsher iq->handler = hnd; 44822337ba42SVarun Prakash iq->flush_handler = flush_hnd; 44832337ba42SVarun Prakash 44842337ba42SVarun Prakash memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr)); 44852337ba42SVarun Prakash skb_queue_head_init(&iq->lro_mgr.lroq); 4486f7917c00SJeff Kirsher 4487f7917c00SJeff Kirsher /* set offset to -1 to distinguish ingress queues without FL */ 4488f7917c00SJeff Kirsher iq->offset = fl ? 0 : -1; 4489f7917c00SJeff Kirsher 4490f7917c00SJeff Kirsher adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; 4491f7917c00SJeff Kirsher 4492f7917c00SJeff Kirsher if (fl) { 4493f7917c00SJeff Kirsher fl->cntxt_id = ntohs(c.fl0id); 4494f7917c00SJeff Kirsher fl->avail = fl->pend_cred = 0; 4495f7917c00SJeff Kirsher fl->pidx = fl->cidx = 0; 4496f7917c00SJeff Kirsher fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; 4497f7917c00SJeff Kirsher adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; 4498d63a6dcfSHariprasad Shenai 4499df64e4d3SHariprasad Shenai /* Note, we must initialize the BAR2 Free List User Doorbell 4500df64e4d3SHariprasad Shenai * information before refilling the Free List! 4501d63a6dcfSHariprasad Shenai */ 4502df64e4d3SHariprasad Shenai fl->bar2_addr = bar2_address(adap, 4503df64e4d3SHariprasad Shenai fl->cntxt_id, 4504df64e4d3SHariprasad Shenai T4_BAR2_QTYPE_EGRESS, 4505df64e4d3SHariprasad Shenai &fl->bar2_qid); 4506f7917c00SJeff Kirsher refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); 4507f7917c00SJeff Kirsher } 4508b8b1ae99SHariprasad Shenai 4509b8b1ae99SHariprasad Shenai /* For T5 and later we attempt to set up the Congestion Manager values 4510b8b1ae99SHariprasad Shenai * of the new RX Ethernet Queue. This should really be handled by 4511b8b1ae99SHariprasad Shenai * firmware because it's more complex than any host driver wants to 4512b8b1ae99SHariprasad Shenai * get involved with and it's different per chip and this is almost 4513b8b1ae99SHariprasad Shenai * certainly wrong. Firmware would be wrong as well, but it would be 4514b8b1ae99SHariprasad Shenai * a lot easier to fix in one place ... For now we do something very 4515b8b1ae99SHariprasad Shenai * simple (and hopefully less wrong). 
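 *
 * To illustrate the channel map encoding done below (the shift width
 * comes from adap->params.arch.cng_ch_bits_log and is chip dependent):
 * assuming cong = 0x5 (channels 0 and 2) and cng_ch_bits_log = 2, the
 * loop produces ch_map = (1 << 0) | (1 << 8) = 0x101, i.e. one bit per
 * congested channel, spaced 1 << cng_ch_bits_log bit positions apart.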
4516b8b1ae99SHariprasad Shenai */ 4517b8b1ae99SHariprasad Shenai if (!is_t4(adap->params.chip) && cong >= 0) { 45182216d014SHariprasad Shenai u32 param, val, ch_map = 0; 4519b8b1ae99SHariprasad Shenai int i; 45202216d014SHariprasad Shenai u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; 4521b8b1ae99SHariprasad Shenai 4522b8b1ae99SHariprasad Shenai param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | 4523b8b1ae99SHariprasad Shenai FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 4524b8b1ae99SHariprasad Shenai FW_PARAMS_PARAM_YZ_V(iq->cntxt_id)); 4525b8b1ae99SHariprasad Shenai if (cong == 0) { 4526b8b1ae99SHariprasad Shenai val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X); 4527b8b1ae99SHariprasad Shenai } else { 4528b8b1ae99SHariprasad Shenai val = 4529b8b1ae99SHariprasad Shenai CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X); 4530b8b1ae99SHariprasad Shenai for (i = 0; i < 4; i++) { 4531b8b1ae99SHariprasad Shenai if (cong & (1 << i)) 45322216d014SHariprasad Shenai ch_map |= 1 << (i << cng_ch_bits_log); 4533b8b1ae99SHariprasad Shenai } 45342216d014SHariprasad Shenai val |= CONMCTXT_CNGCHMAP_V(ch_map); 4535b8b1ae99SHariprasad Shenai } 4536b2612722SHariprasad Shenai ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, 4537b8b1ae99SHariprasad Shenai ¶m, &val); 4538b8b1ae99SHariprasad Shenai if (ret) 4539b8b1ae99SHariprasad Shenai dev_warn(adap->pdev_dev, "Failed to set Congestion" 4540b8b1ae99SHariprasad Shenai " Manager Context for Ingress Queue %d: %d\n", 4541b8b1ae99SHariprasad Shenai iq->cntxt_id, -ret); 4542b8b1ae99SHariprasad Shenai } 4543b8b1ae99SHariprasad Shenai 4544f7917c00SJeff Kirsher return 0; 4545f7917c00SJeff Kirsher 4546f7917c00SJeff Kirsher fl_nomem: 4547f7917c00SJeff Kirsher ret = -ENOMEM; 4548f7917c00SJeff Kirsher err: 4549f7917c00SJeff Kirsher if (iq->desc) { 4550f7917c00SJeff Kirsher dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, 4551f7917c00SJeff Kirsher iq->desc, iq->phys_addr); 4552f7917c00SJeff Kirsher iq->desc = NULL; 4553f7917c00SJeff Kirsher } 4554f7917c00SJeff Kirsher if (fl && fl->desc) { 4555f7917c00SJeff Kirsher kfree(fl->sdesc); 4556f7917c00SJeff Kirsher fl->sdesc = NULL; 4557f7917c00SJeff Kirsher dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), 4558f7917c00SJeff Kirsher fl->desc, fl->addr); 4559f7917c00SJeff Kirsher fl->desc = NULL; 4560f7917c00SJeff Kirsher } 4561f7917c00SJeff Kirsher return ret; 4562f7917c00SJeff Kirsher } 4563f7917c00SJeff Kirsher 4564f7917c00SJeff Kirsher static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) 4565f7917c00SJeff Kirsher { 456622adfe0aSSantosh Rastapur q->cntxt_id = id; 4567df64e4d3SHariprasad Shenai q->bar2_addr = bar2_address(adap, 4568df64e4d3SHariprasad Shenai q->cntxt_id, 4569df64e4d3SHariprasad Shenai T4_BAR2_QTYPE_EGRESS, 4570df64e4d3SHariprasad Shenai &q->bar2_qid); 4571f7917c00SJeff Kirsher q->in_use = 0; 4572f7917c00SJeff Kirsher q->cidx = q->pidx = 0; 4573f7917c00SJeff Kirsher q->stops = q->restarts = 0; 4574f7917c00SJeff Kirsher q->stat = (void *)&q->desc[q->size]; 45753069ee9bSVipul Pandya spin_lock_init(&q->db_lock); 4576f7917c00SJeff Kirsher adap->sge.egr_map[id - adap->sge.egr_start] = q; 4577f7917c00SJeff Kirsher } 4578f7917c00SJeff Kirsher 4579d429005fSVishal Kulkarni /** 4580d429005fSVishal Kulkarni * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue 4581d429005fSVishal Kulkarni * @adap: the adapter 4582d429005fSVishal Kulkarni * @txq: the SGE Ethernet TX Queue to initialize 4583d429005fSVishal Kulkarni * @dev: the Linux Network Device 
4584d429005fSVishal Kulkarni * @netdevq: the corresponding Linux TX Queue 4585d429005fSVishal Kulkarni * @iqid: the Ingress Queue to which to deliver CIDX Update messages 4586d429005fSVishal Kulkarni * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers 4587d429005fSVishal Kulkarni */ 4588f7917c00SJeff Kirsher int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, 4589f7917c00SJeff Kirsher struct net_device *dev, struct netdev_queue *netdevq, 4590d429005fSVishal Kulkarni unsigned int iqid, u8 dbqt) 4591f7917c00SJeff Kirsher { 4592d429005fSVishal Kulkarni unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); 4593f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 4594d429005fSVishal Kulkarni struct sge *s = &adap->sge; 4595d429005fSVishal Kulkarni struct fw_eq_eth_cmd c; 4596d429005fSVishal Kulkarni int ret, nentries; 4597f7917c00SJeff Kirsher 4598f7917c00SJeff Kirsher /* Add status entries */ 459952367a76SVipul Pandya nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); 4600f7917c00SJeff Kirsher 4601f7917c00SJeff Kirsher txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, 4602f7917c00SJeff Kirsher sizeof(struct tx_desc), sizeof(struct tx_sw_desc), 460352367a76SVipul Pandya &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, 4604f7917c00SJeff Kirsher netdev_queue_numa_node_read(netdevq)); 4605f7917c00SJeff Kirsher if (!txq->q.desc) 4606f7917c00SJeff Kirsher return -ENOMEM; 4607f7917c00SJeff Kirsher 4608f7917c00SJeff Kirsher memset(&c, 0, sizeof(c)); 4609e2ac9628SHariprasad Shenai c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | 4610e2ac9628SHariprasad Shenai FW_CMD_WRITE_F | FW_CMD_EXEC_F | 4611b2612722SHariprasad Shenai FW_EQ_ETH_CMD_PFN_V(adap->pf) | 46126e4b51a6SHariprasad Shenai FW_EQ_ETH_CMD_VFN_V(0)); 46136e4b51a6SHariprasad Shenai c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | 46146e4b51a6SHariprasad Shenai FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); 4615d429005fSVishal Kulkarni 4616d429005fSVishal Kulkarni /* For TX Ethernet Queues using the SGE Doorbell Queue Timer 4617d429005fSVishal Kulkarni * mechanism, we use Ingress Queue messages for Hardware Consumer 4618d429005fSVishal Kulkarni * Index Updates on the TX Queue. Otherwise we have the Hardware 4619d429005fSVishal Kulkarni * write the CIDX Updates into the Status Page at the end of the 4620d429005fSVishal Kulkarni * TX Queue. 4621d429005fSVishal Kulkarni */ 4622b660bccbSRaju Rangoju c.autoequiqe_to_viid = htonl(((chip_ver <= CHELSIO_T5) ? 4623b660bccbSRaju Rangoju FW_EQ_ETH_CMD_AUTOEQUIQE_F : 4624b660bccbSRaju Rangoju FW_EQ_ETH_CMD_AUTOEQUEQE_F) | 46256e4b51a6SHariprasad Shenai FW_EQ_ETH_CMD_VIID_V(pi->viid)); 4626d429005fSVishal Kulkarni 46271ecc7b7aSHariprasad Shenai c.fetchszm_to_iqid = 4628b660bccbSRaju Rangoju htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V((chip_ver <= CHELSIO_T5) ? 4629b660bccbSRaju Rangoju HOSTFCMODE_INGRESS_QUEUE_X : 4630b660bccbSRaju Rangoju HOSTFCMODE_STATUS_PAGE_X) | 46316e4b51a6SHariprasad Shenai FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | 46321ecc7b7aSHariprasad Shenai FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid)); 4633d429005fSVishal Kulkarni 4634d429005fSVishal Kulkarni /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */ 46351ecc7b7aSHariprasad Shenai c.dcaen_to_eqsize = 4636d429005fSVishal Kulkarni htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 4637d429005fSVishal Kulkarni ? 
FETCHBURSTMIN_64B_X 4638d429005fSVishal Kulkarni : FETCHBURSTMIN_64B_T6_X) | 46391ecc7b7aSHariprasad Shenai FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 46401ecc7b7aSHariprasad Shenai FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 4641b660bccbSRaju Rangoju FW_EQ_ETH_CMD_CIDXFTHRESHO_V(chip_ver == CHELSIO_T5) | 46426e4b51a6SHariprasad Shenai FW_EQ_ETH_CMD_EQSIZE_V(nentries)); 4643d429005fSVishal Kulkarni 4644f7917c00SJeff Kirsher c.eqaddr = cpu_to_be64(txq->q.phys_addr); 4645f7917c00SJeff Kirsher 4646d429005fSVishal Kulkarni /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the 4647d429005fSVishal Kulkarni * currently configured Timer Index. This can be changed later via an 4648d429005fSVishal Kulkarni * ethtool -C tx-usecs {Timer Val} command. Note that the SGE 4649d429005fSVishal Kulkarni * Doorbell Queue mode is currently automatically enabled in the 4650d429005fSVishal Kulkarni * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ... 4651d429005fSVishal Kulkarni */ 4652d429005fSVishal Kulkarni if (dbqt) 4653d429005fSVishal Kulkarni c.timeren_timerix = 4654d429005fSVishal Kulkarni cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F | 4655d429005fSVishal Kulkarni FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); 4656d429005fSVishal Kulkarni 4657b2612722SHariprasad Shenai ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 4658f7917c00SJeff Kirsher if (ret) { 4659f7917c00SJeff Kirsher kfree(txq->q.sdesc); 4660f7917c00SJeff Kirsher txq->q.sdesc = NULL; 4661f7917c00SJeff Kirsher dma_free_coherent(adap->pdev_dev, 4662f7917c00SJeff Kirsher nentries * sizeof(struct tx_desc), 4663f7917c00SJeff Kirsher txq->q.desc, txq->q.phys_addr); 4664f7917c00SJeff Kirsher txq->q.desc = NULL; 4665f7917c00SJeff Kirsher return ret; 4666f7917c00SJeff Kirsher } 4667f7917c00SJeff Kirsher 4668ab677ff4SHariprasad Shenai txq->q.q_type = CXGB4_TXQ_ETH; 46696e4b51a6SHariprasad Shenai init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); 4670f7917c00SJeff Kirsher txq->txq = netdevq; 46711a2a14fbSRahul Lakkireddy txq->tso = 0; 46721a2a14fbSRahul Lakkireddy txq->uso = 0; 46731a2a14fbSRahul Lakkireddy txq->tx_cso = 0; 46741a2a14fbSRahul Lakkireddy txq->vlan_ins = 0; 4675f7917c00SJeff Kirsher txq->mapping_err = 0; 4676d429005fSVishal Kulkarni txq->dbqt = dbqt; 4677d429005fSVishal Kulkarni 4678f7917c00SJeff Kirsher return 0; 4679f7917c00SJeff Kirsher } 4680f7917c00SJeff Kirsher 4681f7917c00SJeff Kirsher int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, 4682f7917c00SJeff Kirsher struct net_device *dev, unsigned int iqid, 4683f7917c00SJeff Kirsher unsigned int cmplqid) 4684f7917c00SJeff Kirsher { 4685d429005fSVishal Kulkarni unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); 4686f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 4687d429005fSVishal Kulkarni struct sge *s = &adap->sge; 4688d429005fSVishal Kulkarni struct fw_eq_ctrl_cmd c; 4689d429005fSVishal Kulkarni int ret, nentries; 4690f7917c00SJeff Kirsher 4691f7917c00SJeff Kirsher /* Add status entries */ 469252367a76SVipul Pandya nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); 4693f7917c00SJeff Kirsher 4694f7917c00SJeff Kirsher txq->q.desc = alloc_ring(adap->pdev_dev, nentries, 4695f7917c00SJeff Kirsher sizeof(struct tx_desc), 0, &txq->q.phys_addr, 4696982b81ebSHariprasad Shenai NULL, 0, dev_to_node(adap->pdev_dev)); 4697f7917c00SJeff Kirsher if (!txq->q.desc) 4698f7917c00SJeff Kirsher return -ENOMEM; 4699f7917c00SJeff Kirsher 4700e2ac9628SHariprasad Shenai c.op_to_vfn =
htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | 4701e2ac9628SHariprasad Shenai FW_CMD_WRITE_F | FW_CMD_EXEC_F | 4702b2612722SHariprasad Shenai FW_EQ_CTRL_CMD_PFN_V(adap->pf) | 47036e4b51a6SHariprasad Shenai FW_EQ_CTRL_CMD_VFN_V(0)); 47046e4b51a6SHariprasad Shenai c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | 47056e4b51a6SHariprasad Shenai FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); 47066e4b51a6SHariprasad Shenai c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); 4707f7917c00SJeff Kirsher c.physeqid_pkd = htonl(0); 47081ecc7b7aSHariprasad Shenai c.fetchszm_to_iqid = 47091ecc7b7aSHariprasad Shenai htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | 47106e4b51a6SHariprasad Shenai FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | 47111ecc7b7aSHariprasad Shenai FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid)); 47121ecc7b7aSHariprasad Shenai c.dcaen_to_eqsize = 4713d429005fSVishal Kulkarni htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 4714d429005fSVishal Kulkarni ? FETCHBURSTMIN_64B_X 4715d429005fSVishal Kulkarni : FETCHBURSTMIN_64B_T6_X) | 47161ecc7b7aSHariprasad Shenai FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 47171ecc7b7aSHariprasad Shenai FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 47186e4b51a6SHariprasad Shenai FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); 4719f7917c00SJeff Kirsher c.eqaddr = cpu_to_be64(txq->q.phys_addr); 4720f7917c00SJeff Kirsher 4721b2612722SHariprasad Shenai ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 4722f7917c00SJeff Kirsher if (ret) { 4723f7917c00SJeff Kirsher dma_free_coherent(adap->pdev_dev, 4724f7917c00SJeff Kirsher nentries * sizeof(struct tx_desc), 4725f7917c00SJeff Kirsher txq->q.desc, txq->q.phys_addr); 4726f7917c00SJeff Kirsher txq->q.desc = NULL; 4727f7917c00SJeff Kirsher return ret; 4728f7917c00SJeff Kirsher } 4729f7917c00SJeff Kirsher 4730ab677ff4SHariprasad Shenai txq->q.q_type = CXGB4_TXQ_CTRL; 47316e4b51a6SHariprasad Shenai init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); 4732f7917c00SJeff Kirsher txq->adap = adap; 4733f7917c00SJeff Kirsher skb_queue_head_init(&txq->sendq); 47346660de07SAllen Pais tasklet_setup(&txq->qresume_tsk, restart_ctrlq); 4735f7917c00SJeff Kirsher txq->full = 0; 4736f7917c00SJeff Kirsher return 0; 4737f7917c00SJeff Kirsher } 4738f7917c00SJeff Kirsher 47390fbc81b3SHariprasad Shenai int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid, 47400fbc81b3SHariprasad Shenai unsigned int cmplqid) 47410fbc81b3SHariprasad Shenai { 47420fbc81b3SHariprasad Shenai u32 param, val; 47430fbc81b3SHariprasad Shenai 47440fbc81b3SHariprasad Shenai param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | 47450fbc81b3SHariprasad Shenai FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) | 47460fbc81b3SHariprasad Shenai FW_PARAMS_PARAM_YZ_V(eqid)); 47470fbc81b3SHariprasad Shenai val = cmplqid; 47480fbc81b3SHariprasad Shenai return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); 47490fbc81b3SHariprasad Shenai } 47500fbc81b3SHariprasad Shenai 47512d0cb84dSRahul Lakkireddy static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q, 47522d0cb84dSRahul Lakkireddy struct net_device *dev, u32 cmd, u32 iqid) 4753f7917c00SJeff Kirsher { 4754d429005fSVishal Kulkarni unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); 4755f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 47562d0cb84dSRahul Lakkireddy struct sge *s = &adap->sge; 47572d0cb84dSRahul Lakkireddy struct fw_eq_ofld_cmd c; 47582d0cb84dSRahul Lakkireddy u32 fb_min, nentries; 
47592d0cb84dSRahul Lakkireddy int ret; 4760f7917c00SJeff Kirsher 4761f7917c00SJeff Kirsher /* Add status entries */ 47622d0cb84dSRahul Lakkireddy nentries = q->size + s->stat_len / sizeof(struct tx_desc); 47632d0cb84dSRahul Lakkireddy q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc), 47642d0cb84dSRahul Lakkireddy sizeof(struct tx_sw_desc), &q->phys_addr, 47652d0cb84dSRahul Lakkireddy &q->sdesc, s->stat_len, NUMA_NO_NODE); 47662d0cb84dSRahul Lakkireddy if (!q->desc) 4767f7917c00SJeff Kirsher return -ENOMEM; 4768f7917c00SJeff Kirsher 47692d0cb84dSRahul Lakkireddy if (chip_ver <= CHELSIO_T5) 47702d0cb84dSRahul Lakkireddy fb_min = FETCHBURSTMIN_64B_X; 47712d0cb84dSRahul Lakkireddy else 47722d0cb84dSRahul Lakkireddy fb_min = FETCHBURSTMIN_64B_T6_X; 47732d0cb84dSRahul Lakkireddy 4774f7917c00SJeff Kirsher memset(&c, 0, sizeof(c)); 4775ab677ff4SHariprasad Shenai c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F | 4776e2ac9628SHariprasad Shenai FW_CMD_WRITE_F | FW_CMD_EXEC_F | 4777b2612722SHariprasad Shenai FW_EQ_OFLD_CMD_PFN_V(adap->pf) | 47786e4b51a6SHariprasad Shenai FW_EQ_OFLD_CMD_VFN_V(0)); 47796e4b51a6SHariprasad Shenai c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | 47806e4b51a6SHariprasad Shenai FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); 47811ecc7b7aSHariprasad Shenai c.fetchszm_to_iqid = 47821ecc7b7aSHariprasad Shenai htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | 47836e4b51a6SHariprasad Shenai FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | 47841ecc7b7aSHariprasad Shenai FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid)); 47851ecc7b7aSHariprasad Shenai c.dcaen_to_eqsize = 47862d0cb84dSRahul Lakkireddy htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) | 47871ecc7b7aSHariprasad Shenai FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 47881ecc7b7aSHariprasad Shenai FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 47896e4b51a6SHariprasad Shenai FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); 47902d0cb84dSRahul Lakkireddy c.eqaddr = cpu_to_be64(q->phys_addr); 4791f7917c00SJeff Kirsher 4792b2612722SHariprasad Shenai ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 4793f7917c00SJeff Kirsher if (ret) { 47942d0cb84dSRahul Lakkireddy kfree(q->sdesc); 47952d0cb84dSRahul Lakkireddy q->sdesc = NULL; 4796f7917c00SJeff Kirsher dma_free_coherent(adap->pdev_dev, 4797f7917c00SJeff Kirsher nentries * sizeof(struct tx_desc), 47982d0cb84dSRahul Lakkireddy q->desc, q->phys_addr); 47992d0cb84dSRahul Lakkireddy q->desc = NULL; 4800f7917c00SJeff Kirsher return ret; 4801f7917c00SJeff Kirsher } 4802f7917c00SJeff Kirsher 48032d0cb84dSRahul Lakkireddy init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); 48042d0cb84dSRahul Lakkireddy return 0; 48052d0cb84dSRahul Lakkireddy } 48062d0cb84dSRahul Lakkireddy 48072d0cb84dSRahul Lakkireddy int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, 48082d0cb84dSRahul Lakkireddy struct net_device *dev, unsigned int iqid, 48092d0cb84dSRahul Lakkireddy unsigned int uld_type) 48102d0cb84dSRahul Lakkireddy { 48112d0cb84dSRahul Lakkireddy u32 cmd = FW_EQ_OFLD_CMD; 48122d0cb84dSRahul Lakkireddy int ret; 48132d0cb84dSRahul Lakkireddy 48142d0cb84dSRahul Lakkireddy if (unlikely(uld_type == CXGB4_TX_CRYPTO)) 48152d0cb84dSRahul Lakkireddy cmd = FW_EQ_CTRL_CMD; 48162d0cb84dSRahul Lakkireddy 48172d0cb84dSRahul Lakkireddy ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid); 48182d0cb84dSRahul Lakkireddy if (ret) 48192d0cb84dSRahul Lakkireddy return ret; 48202d0cb84dSRahul Lakkireddy 4821ab677ff4SHariprasad Shenai txq->q.q_type = 
CXGB4_TXQ_ULD; 4822f7917c00SJeff Kirsher txq->adap = adap; 4823f7917c00SJeff Kirsher skb_queue_head_init(&txq->sendq); 48246660de07SAllen Pais tasklet_setup(&txq->qresume_tsk, restart_ofldq); 4825f7917c00SJeff Kirsher txq->full = 0; 4826f7917c00SJeff Kirsher txq->mapping_err = 0; 4827f7917c00SJeff Kirsher return 0; 4828f7917c00SJeff Kirsher } 4829f7917c00SJeff Kirsher 48302d0cb84dSRahul Lakkireddy int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq, 48312d0cb84dSRahul Lakkireddy struct net_device *dev, u32 iqid) 48322d0cb84dSRahul Lakkireddy { 48332d0cb84dSRahul Lakkireddy int ret; 48342d0cb84dSRahul Lakkireddy 48352d0cb84dSRahul Lakkireddy ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid); 48362d0cb84dSRahul Lakkireddy if (ret) 48372d0cb84dSRahul Lakkireddy return ret; 48382d0cb84dSRahul Lakkireddy 48392d0cb84dSRahul Lakkireddy txq->q.q_type = CXGB4_TXQ_ULD; 48402d0cb84dSRahul Lakkireddy spin_lock_init(&txq->lock); 48412d0cb84dSRahul Lakkireddy txq->adap = adap; 48422d0cb84dSRahul Lakkireddy txq->tso = 0; 48438311f0beSRahul Lakkireddy txq->uso = 0; 48442d0cb84dSRahul Lakkireddy txq->tx_cso = 0; 48452d0cb84dSRahul Lakkireddy txq->vlan_ins = 0; 48462d0cb84dSRahul Lakkireddy txq->mapping_err = 0; 48472d0cb84dSRahul Lakkireddy return 0; 48482d0cb84dSRahul Lakkireddy } 48492d0cb84dSRahul Lakkireddy 4850ab677ff4SHariprasad Shenai void free_txq(struct adapter *adap, struct sge_txq *q) 4851f7917c00SJeff Kirsher { 485252367a76SVipul Pandya struct sge *s = &adap->sge; 485352367a76SVipul Pandya 4854f7917c00SJeff Kirsher dma_free_coherent(adap->pdev_dev, 485552367a76SVipul Pandya q->size * sizeof(struct tx_desc) + s->stat_len, 4856f7917c00SJeff Kirsher q->desc, q->phys_addr); 4857f7917c00SJeff Kirsher q->cntxt_id = 0; 4858f7917c00SJeff Kirsher q->sdesc = NULL; 4859f7917c00SJeff Kirsher q->desc = NULL; 4860f7917c00SJeff Kirsher } 4861f7917c00SJeff Kirsher 486294cdb8bbSHariprasad Shenai void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, 4863f7917c00SJeff Kirsher struct sge_fl *fl) 4864f7917c00SJeff Kirsher { 486552367a76SVipul Pandya struct sge *s = &adap->sge; 4866f7917c00SJeff Kirsher unsigned int fl_id = fl ? 
fl->cntxt_id : 0xffff; 4867f7917c00SJeff Kirsher 4868f7917c00SJeff Kirsher adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; 4869b2612722SHariprasad Shenai t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 4870f7917c00SJeff Kirsher rq->cntxt_id, fl_id, 0xffff); 4871f7917c00SJeff Kirsher dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, 4872f7917c00SJeff Kirsher rq->desc, rq->phys_addr); 4873f7917c00SJeff Kirsher netif_napi_del(&rq->napi); 4874f7917c00SJeff Kirsher rq->netdev = NULL; 4875f7917c00SJeff Kirsher rq->cntxt_id = rq->abs_id = 0; 4876f7917c00SJeff Kirsher rq->desc = NULL; 4877f7917c00SJeff Kirsher 4878f7917c00SJeff Kirsher if (fl) { 4879f7917c00SJeff Kirsher free_rx_bufs(adap, fl, fl->avail); 488052367a76SVipul Pandya dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, 4881f7917c00SJeff Kirsher fl->desc, fl->addr); 4882f7917c00SJeff Kirsher kfree(fl->sdesc); 4883f7917c00SJeff Kirsher fl->sdesc = NULL; 4884f7917c00SJeff Kirsher fl->cntxt_id = 0; 4885f7917c00SJeff Kirsher fl->desc = NULL; 4886f7917c00SJeff Kirsher } 4887f7917c00SJeff Kirsher } 4888f7917c00SJeff Kirsher 4889f7917c00SJeff Kirsher /** 48905fa76694SHariprasad Shenai * t4_free_ofld_rxqs - free a block of consecutive Rx queues 48915fa76694SHariprasad Shenai * @adap: the adapter 48925fa76694SHariprasad Shenai * @n: number of queues 48935fa76694SHariprasad Shenai * @q: pointer to first queue 48945fa76694SHariprasad Shenai * 48955fa76694SHariprasad Shenai * Release the resources of a consecutive block of offload Rx queues. 48965fa76694SHariprasad Shenai */ 48975fa76694SHariprasad Shenai void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) 48985fa76694SHariprasad Shenai { 48995fa76694SHariprasad Shenai for ( ; n; n--, q++) 49005fa76694SHariprasad Shenai if (q->rspq.desc) 49015fa76694SHariprasad Shenai free_rspq_fl(adap, &q->rspq, 49025fa76694SHariprasad Shenai q->fl.size ? &q->fl : NULL); 49035fa76694SHariprasad Shenai } 49045fa76694SHariprasad Shenai 49052d0cb84dSRahul Lakkireddy void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq) 49062d0cb84dSRahul Lakkireddy { 49072d0cb84dSRahul Lakkireddy if (txq->q.desc) { 49082d0cb84dSRahul Lakkireddy t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, 49092d0cb84dSRahul Lakkireddy txq->q.cntxt_id); 49102d0cb84dSRahul Lakkireddy free_tx_desc(adap, &txq->q, txq->q.in_use, false); 49112d0cb84dSRahul Lakkireddy kfree(txq->q.sdesc); 49122d0cb84dSRahul Lakkireddy free_txq(adap, &txq->q); 49132d0cb84dSRahul Lakkireddy } 49142d0cb84dSRahul Lakkireddy } 49152d0cb84dSRahul Lakkireddy 49165fa76694SHariprasad Shenai /** 4917f7917c00SJeff Kirsher * t4_free_sge_resources - free SGE resources 4918f7917c00SJeff Kirsher * @adap: the adapter 4919f7917c00SJeff Kirsher * 4920f7917c00SJeff Kirsher * Frees resources used by the SGE queue sets. 
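 *
 * Ethernet Rx queues are first quiesced with t4_iq_stop() so that they
 * drain; only then are the response queues, free lists, Tx queues and
 * the control/PTP queues released and the reverse egress queue map
 * cleared.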
4921f7917c00SJeff Kirsher */ 4922f7917c00SJeff Kirsher void t4_free_sge_resources(struct adapter *adap) 4923f7917c00SJeff Kirsher { 4924f7917c00SJeff Kirsher int i; 4925ebf4dc2bSHariprasad Shenai struct sge_eth_rxq *eq; 4926ebf4dc2bSHariprasad Shenai struct sge_eth_txq *etq; 4927ebf4dc2bSHariprasad Shenai 4928ebf4dc2bSHariprasad Shenai /* stop all Rx queues in order to start them draining */ 4929ebf4dc2bSHariprasad Shenai for (i = 0; i < adap->sge.ethqsets; i++) { 4930ebf4dc2bSHariprasad Shenai eq = &adap->sge.ethrxq[i]; 4931ebf4dc2bSHariprasad Shenai if (eq->rspq.desc) 4932ebf4dc2bSHariprasad Shenai t4_iq_stop(adap, adap->mbox, adap->pf, 0, 4933ebf4dc2bSHariprasad Shenai FW_IQ_TYPE_FL_INT_CAP, 4934ebf4dc2bSHariprasad Shenai eq->rspq.cntxt_id, 4935ebf4dc2bSHariprasad Shenai eq->fl.size ? eq->fl.cntxt_id : 0xffff, 4936ebf4dc2bSHariprasad Shenai 0xffff); 4937ebf4dc2bSHariprasad Shenai } 4938f7917c00SJeff Kirsher 4939f7917c00SJeff Kirsher /* clean up Ethernet Tx/Rx queues */ 4940ebf4dc2bSHariprasad Shenai for (i = 0; i < adap->sge.ethqsets; i++) { 4941ebf4dc2bSHariprasad Shenai eq = &adap->sge.ethrxq[i]; 4942f7917c00SJeff Kirsher if (eq->rspq.desc) 49435fa76694SHariprasad Shenai free_rspq_fl(adap, &eq->rspq, 49445fa76694SHariprasad Shenai eq->fl.size ? &eq->fl : NULL); 494576c3a552SRahul Lakkireddy if (eq->msix) { 494676c3a552SRahul Lakkireddy cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx); 494776c3a552SRahul Lakkireddy eq->msix = NULL; 494876c3a552SRahul Lakkireddy } 4949ebf4dc2bSHariprasad Shenai 4950ebf4dc2bSHariprasad Shenai etq = &adap->sge.ethtxq[i]; 4951f7917c00SJeff Kirsher if (etq->q.desc) { 4952b2612722SHariprasad Shenai t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 4953f7917c00SJeff Kirsher etq->q.cntxt_id); 4954fbe80776SHariprasad Shenai __netif_tx_lock_bh(etq->txq); 4955f7917c00SJeff Kirsher free_tx_desc(adap, &etq->q, etq->q.in_use, true); 4956fbe80776SHariprasad Shenai __netif_tx_unlock_bh(etq->txq); 4957f7917c00SJeff Kirsher kfree(etq->q.sdesc); 4958f7917c00SJeff Kirsher free_txq(adap, &etq->q); 4959f7917c00SJeff Kirsher } 4960f7917c00SJeff Kirsher } 4961f7917c00SJeff Kirsher 4962f7917c00SJeff Kirsher /* clean up control Tx queues */ 4963f7917c00SJeff Kirsher for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { 4964f7917c00SJeff Kirsher struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; 4965f7917c00SJeff Kirsher 4966f7917c00SJeff Kirsher if (cq->q.desc) { 4967f7917c00SJeff Kirsher tasklet_kill(&cq->qresume_tsk); 4968b2612722SHariprasad Shenai t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, 4969f7917c00SJeff Kirsher cq->q.cntxt_id); 4970f7917c00SJeff Kirsher __skb_queue_purge(&cq->sendq); 4971f7917c00SJeff Kirsher free_txq(adap, &cq->q); 4972f7917c00SJeff Kirsher } 4973f7917c00SJeff Kirsher } 4974f7917c00SJeff Kirsher 497576c3a552SRahul Lakkireddy if (adap->sge.fw_evtq.desc) { 4976f7917c00SJeff Kirsher free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); 497776c3a552SRahul Lakkireddy if (adap->sge.fwevtq_msix_idx >= 0) 497876c3a552SRahul Lakkireddy cxgb4_free_msix_idx_in_bmap(adap, 497976c3a552SRahul Lakkireddy adap->sge.fwevtq_msix_idx); 498076c3a552SRahul Lakkireddy } 498176c3a552SRahul Lakkireddy 498276c3a552SRahul Lakkireddy if (adap->sge.nd_msix_idx >= 0) 498376c3a552SRahul Lakkireddy cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx); 4984f7917c00SJeff Kirsher 4985f7917c00SJeff Kirsher if (adap->sge.intrq.desc) 4986f7917c00SJeff Kirsher free_rspq_fl(adap, &adap->sge.intrq, NULL); 4987f7917c00SJeff Kirsher 4988a4569504SAtul Gupta if (!is_t4(adap->params.chip)) { 
4989a4569504SAtul Gupta etq = &adap->sge.ptptxq; 4990a4569504SAtul Gupta if (etq->q.desc) { 4991a4569504SAtul Gupta t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 4992a4569504SAtul Gupta etq->q.cntxt_id); 4993a4569504SAtul Gupta spin_lock_bh(&adap->ptp_lock); 4994a4569504SAtul Gupta free_tx_desc(adap, &etq->q, etq->q.in_use, true); 4995a4569504SAtul Gupta spin_unlock_bh(&adap->ptp_lock); 4996a4569504SAtul Gupta kfree(etq->q.sdesc); 4997a4569504SAtul Gupta free_txq(adap, &etq->q); 4998a4569504SAtul Gupta } 4999a4569504SAtul Gupta } 5000a4569504SAtul Gupta 5001f7917c00SJeff Kirsher /* clear the reverse egress queue map */ 50024b8e27a8SHariprasad Shenai memset(adap->sge.egr_map, 0, 50034b8e27a8SHariprasad Shenai adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); 5004f7917c00SJeff Kirsher } 5005f7917c00SJeff Kirsher 5006f7917c00SJeff Kirsher void t4_sge_start(struct adapter *adap) 5007f7917c00SJeff Kirsher { 5008f7917c00SJeff Kirsher adap->sge.ethtxq_rover = 0; 5009f7917c00SJeff Kirsher mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); 5010f7917c00SJeff Kirsher mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); 5011f7917c00SJeff Kirsher } 5012f7917c00SJeff Kirsher 5013f7917c00SJeff Kirsher /** 5014f7917c00SJeff Kirsher * t4_sge_stop - disable SGE operation 5015f7917c00SJeff Kirsher * @adap: the adapter 5016f7917c00SJeff Kirsher * 5017f7917c00SJeff Kirsher * Stop tasklets and timers associated with the DMA engine. Note that 5018f7917c00SJeff Kirsher * this is effective only if measures have been taken to disable any HW 5019f7917c00SJeff Kirsher * events that may restart them. 5020f7917c00SJeff Kirsher */ 5021f7917c00SJeff Kirsher void t4_sge_stop(struct adapter *adap) 5022f7917c00SJeff Kirsher { 5023f7917c00SJeff Kirsher int i; 5024f7917c00SJeff Kirsher struct sge *s = &adap->sge; 5025f7917c00SJeff Kirsher 5026f7917c00SJeff Kirsher if (s->rx_timer.function) 5027f7917c00SJeff Kirsher del_timer_sync(&s->rx_timer); 5028f7917c00SJeff Kirsher if (s->tx_timer.function) 5029f7917c00SJeff Kirsher del_timer_sync(&s->tx_timer); 5030f7917c00SJeff Kirsher 5031ab677ff4SHariprasad Shenai if (is_offload(adap)) { 5032ab677ff4SHariprasad Shenai struct sge_uld_txq_info *txq_info; 5033f7917c00SJeff Kirsher 5034ab677ff4SHariprasad Shenai txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; 5035ab677ff4SHariprasad Shenai if (txq_info) { 5036ab677ff4SHariprasad Shenai struct sge_uld_txq *txq = txq_info->uldtxq; 5037ab677ff4SHariprasad Shenai 5038ab677ff4SHariprasad Shenai for_each_ofldtxq(&adap->sge, i) { 5039ab677ff4SHariprasad Shenai if (txq->q.desc) 5040ab677ff4SHariprasad Shenai tasklet_kill(&txq->qresume_tsk); 5041f7917c00SJeff Kirsher } 5042ab677ff4SHariprasad Shenai } 5043ab677ff4SHariprasad Shenai } 5044ab677ff4SHariprasad Shenai 5045ab677ff4SHariprasad Shenai if (is_pci_uld(adap)) { 5046ab677ff4SHariprasad Shenai struct sge_uld_txq_info *txq_info; 5047ab677ff4SHariprasad Shenai 5048ab677ff4SHariprasad Shenai txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; 5049ab677ff4SHariprasad Shenai if (txq_info) { 5050ab677ff4SHariprasad Shenai struct sge_uld_txq *txq = txq_info->uldtxq; 5051ab677ff4SHariprasad Shenai 5052ab677ff4SHariprasad Shenai for_each_ofldtxq(&adap->sge, i) { 5053ab677ff4SHariprasad Shenai if (txq->q.desc) 5054ab677ff4SHariprasad Shenai tasklet_kill(&txq->qresume_tsk); 5055ab677ff4SHariprasad Shenai } 5056ab677ff4SHariprasad Shenai } 5057ab677ff4SHariprasad Shenai } 5058ab677ff4SHariprasad Shenai 5059f7917c00SJeff Kirsher for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { 
5060f7917c00SJeff Kirsher struct sge_ctrl_txq *cq = &s->ctrlq[i]; 5061f7917c00SJeff Kirsher 5062f7917c00SJeff Kirsher if (cq->q.desc) 5063f7917c00SJeff Kirsher tasklet_kill(&cq->qresume_tsk); 5064f7917c00SJeff Kirsher } 5065f7917c00SJeff Kirsher } 5066f7917c00SJeff Kirsher 5067f7917c00SJeff Kirsher /** 506806640310SHariprasad Shenai * t4_sge_init_soft - grab core SGE values needed by SGE code 5069f7917c00SJeff Kirsher * @adap: the adapter 5070f7917c00SJeff Kirsher * 507106640310SHariprasad Shenai * We need to grab the SGE operating parameters that we need to have 507206640310SHariprasad Shenai * in order to do our job and make sure we can live with them. 5073f7917c00SJeff Kirsher */ 5074f7917c00SJeff Kirsher 507552367a76SVipul Pandya static int t4_sge_init_soft(struct adapter *adap) 507652367a76SVipul Pandya { 507752367a76SVipul Pandya struct sge *s = &adap->sge; 507852367a76SVipul Pandya u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu; 507952367a76SVipul Pandya u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; 508052367a76SVipul Pandya u32 ingress_rx_threshold; 508152367a76SVipul Pandya 508252367a76SVipul Pandya /* 508352367a76SVipul Pandya * Verify that CPL messages are going to the Ingress Queue for 508452367a76SVipul Pandya * process_responses() and that only packet data is going to the 508552367a76SVipul Pandya * Free Lists. 508652367a76SVipul Pandya */ 5087f612b815SHariprasad Shenai if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) != 5088f612b815SHariprasad Shenai RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) { 508952367a76SVipul Pandya dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); 509052367a76SVipul Pandya return -EINVAL; 509152367a76SVipul Pandya } 509252367a76SVipul Pandya 509352367a76SVipul Pandya /* 509452367a76SVipul Pandya * Validate the Host Buffer Register Array indices that we want to 509552367a76SVipul Pandya * use ... 509652367a76SVipul Pandya * 509752367a76SVipul Pandya * XXX Note that we should really read through the Host Buffer Size 509852367a76SVipul Pandya * XXX register array and find the indices of the Buffer Sizes which 509952367a76SVipul Pandya * XXX meet our needs! 510052367a76SVipul Pandya */ 510152367a76SVipul Pandya #define READ_FL_BUF(x) \ 5102f612b815SHariprasad Shenai t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32)) 510352367a76SVipul Pandya 510452367a76SVipul Pandya fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); 510552367a76SVipul Pandya fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); 510652367a76SVipul Pandya fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); 510752367a76SVipul Pandya fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); 510852367a76SVipul Pandya 510992ddcc7bSKumar Sanghvi /* We only bother using the Large Page logic if the Large Page Buffer 511092ddcc7bSKumar Sanghvi * is larger than our Page Size Buffer. 511192ddcc7bSKumar Sanghvi */ 511292ddcc7bSKumar Sanghvi if (fl_large_pg <= fl_small_pg) 511392ddcc7bSKumar Sanghvi fl_large_pg = 0; 511492ddcc7bSKumar Sanghvi 511552367a76SVipul Pandya #undef READ_FL_BUF 511652367a76SVipul Pandya 511792ddcc7bSKumar Sanghvi /* The Page Size Buffer must be exactly equal to our Page Size and the 511892ddcc7bSKumar Sanghvi * Large Page Size Buffer should be 0 (per above) or a power of 2. 
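 *
 * For example, on a system with 4KB pages and the large-page Free List
 * buffer size programmed to 64KB, fl_large_pg is 65536 and the code
 * below selects fl_pg_order = ilog2(65536) - 12 = 4, i.e. order-4
 * allocations for the large free-list buffers.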
511992ddcc7bSKumar Sanghvi */ 512052367a76SVipul Pandya if (fl_small_pg != PAGE_SIZE || 512192ddcc7bSKumar Sanghvi (fl_large_pg & (fl_large_pg-1)) != 0) { 512252367a76SVipul Pandya dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", 512352367a76SVipul Pandya fl_small_pg, fl_large_pg); 512452367a76SVipul Pandya return -EINVAL; 512552367a76SVipul Pandya } 512652367a76SVipul Pandya if (fl_large_pg) 512752367a76SVipul Pandya s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; 512852367a76SVipul Pandya 512952367a76SVipul Pandya if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) || 513052367a76SVipul Pandya fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { 513152367a76SVipul Pandya dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", 513252367a76SVipul Pandya fl_small_mtu, fl_large_mtu); 513352367a76SVipul Pandya return -EINVAL; 513452367a76SVipul Pandya } 513552367a76SVipul Pandya 513652367a76SVipul Pandya /* 513752367a76SVipul Pandya * Retrieve our RX interrupt holdoff timer values and counter 513852367a76SVipul Pandya * threshold values from the SGE parameters. 513952367a76SVipul Pandya */ 5140f061de42SHariprasad Shenai timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A); 5141f061de42SHariprasad Shenai timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A); 5142f061de42SHariprasad Shenai timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A); 514352367a76SVipul Pandya s->timer_val[0] = core_ticks_to_us(adap, 5144f061de42SHariprasad Shenai TIMERVALUE0_G(timer_value_0_and_1)); 514552367a76SVipul Pandya s->timer_val[1] = core_ticks_to_us(adap, 5146f061de42SHariprasad Shenai TIMERVALUE1_G(timer_value_0_and_1)); 514752367a76SVipul Pandya s->timer_val[2] = core_ticks_to_us(adap, 5148f061de42SHariprasad Shenai TIMERVALUE2_G(timer_value_2_and_3)); 514952367a76SVipul Pandya s->timer_val[3] = core_ticks_to_us(adap, 5150f061de42SHariprasad Shenai TIMERVALUE3_G(timer_value_2_and_3)); 515152367a76SVipul Pandya s->timer_val[4] = core_ticks_to_us(adap, 5152f061de42SHariprasad Shenai TIMERVALUE4_G(timer_value_4_and_5)); 515352367a76SVipul Pandya s->timer_val[5] = core_ticks_to_us(adap, 5154f061de42SHariprasad Shenai TIMERVALUE5_G(timer_value_4_and_5)); 515552367a76SVipul Pandya 5156f612b815SHariprasad Shenai ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A); 5157f612b815SHariprasad Shenai s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); 5158f612b815SHariprasad Shenai s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); 5159f612b815SHariprasad Shenai s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); 5160f612b815SHariprasad Shenai s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); 516152367a76SVipul Pandya 516252367a76SVipul Pandya return 0; 516352367a76SVipul Pandya } 516452367a76SVipul Pandya 516506640310SHariprasad Shenai /** 516606640310SHariprasad Shenai * t4_sge_init - initialize SGE 516706640310SHariprasad Shenai * @adap: the adapter 516806640310SHariprasad Shenai * 516906640310SHariprasad Shenai * Perform low-level SGE code initialization needed every time after a 517006640310SHariprasad Shenai * chip reset. 
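 *
 * This reads back the parameters programmed into the SGE (packet shift,
 * status page length, free list alignment), validates the soft settings
 * via t4_sge_init_soft(), derives fl_starve_thres from the egress
 * congestion threshold, and sets up the Rx/Tx administrative timers
 * (they are armed later by t4_sge_start()).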
517152367a76SVipul Pandya */ 517252367a76SVipul Pandya int t4_sge_init(struct adapter *adap) 517352367a76SVipul Pandya { 517452367a76SVipul Pandya struct sge *s = &adap->sge; 5175acac5962SHariprasad Shenai u32 sge_control, sge_conm_ctrl; 5176c2b955e0SKumar Sanghvi int ret, egress_threshold; 517752367a76SVipul Pandya 517852367a76SVipul Pandya /* 517952367a76SVipul Pandya * Ingress Padding Boundary and Egress Status Page Size are set up by 518052367a76SVipul Pandya * t4_fixup_host_params(). 518152367a76SVipul Pandya */ 5182f612b815SHariprasad Shenai sge_control = t4_read_reg(adap, SGE_CONTROL_A); 5183f612b815SHariprasad Shenai s->pktshift = PKTSHIFT_G(sge_control); 5184f612b815SHariprasad Shenai s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; 5185ce8f407aSHariprasad Shenai 5186acac5962SHariprasad Shenai s->fl_align = t4_fl_pkt_align(adap); 518752367a76SVipul Pandya ret = t4_sge_init_soft(adap); 518852367a76SVipul Pandya if (ret < 0) 518952367a76SVipul Pandya return ret; 519052367a76SVipul Pandya 519152367a76SVipul Pandya /* 519252367a76SVipul Pandya * A FL with <= fl_starve_thres buffers is starving and a periodic 519352367a76SVipul Pandya * timer will attempt to refill it. This needs to be larger than the 519452367a76SVipul Pandya * SGE's Egress Congestion Threshold. If it isn't, then we can get 519552367a76SVipul Pandya * stuck waiting for new packets while the SGE is waiting for us to 519652367a76SVipul Pandya * give it more Free List entries. (Note that the SGE's Egress 5197c2b955e0SKumar Sanghvi * Congestion Threshold is in units of 2 Free List pointers.) For T4, 5198c2b955e0SKumar Sanghvi * there was only a single field to control this. For T5 there's the 5199c2b955e0SKumar Sanghvi * original field which now only applies to Unpacked Mode Free List 5200c2b955e0SKumar Sanghvi * buffers and a new field which only applies to Packed Mode Free List 5201c2b955e0SKumar Sanghvi * buffers. 520252367a76SVipul Pandya */ 5203f612b815SHariprasad Shenai sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A); 5204676d6a75SHariprasad Shenai switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { 5205676d6a75SHariprasad Shenai case CHELSIO_T4: 5206f612b815SHariprasad Shenai egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl); 5207676d6a75SHariprasad Shenai break; 5208676d6a75SHariprasad Shenai case CHELSIO_T5: 5209f612b815SHariprasad Shenai egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl); 5210676d6a75SHariprasad Shenai break; 5211676d6a75SHariprasad Shenai case CHELSIO_T6: 5212676d6a75SHariprasad Shenai egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl); 5213676d6a75SHariprasad Shenai break; 5214676d6a75SHariprasad Shenai default: 5215676d6a75SHariprasad Shenai dev_err(adap->pdev_dev, "Unsupported Chip version %d\n", 5216676d6a75SHariprasad Shenai CHELSIO_CHIP_VERSION(adap->params.chip)); 5217676d6a75SHariprasad Shenai return -EINVAL; 5218676d6a75SHariprasad Shenai } 5219c2b955e0SKumar Sanghvi s->fl_starve_thres = 2*egress_threshold + 1; 522052367a76SVipul Pandya 5221a3bfb617SHariprasad Shenai t4_idma_monitor_init(adap, &s->idma_monitor); 5222a3bfb617SHariprasad Shenai 52231ecc7b7aSHariprasad Shenai /* Set up timers used for recurring callbacks to process RX and TX 52241ecc7b7aSHariprasad Shenai * administrative tasks.
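 *
 * sge_rx_timer_cb() revisits the free lists flagged in the starving_fl
 * bitmap (using the fl_starve_thres value derived above) and, on the
 * Master PF, runs the ingress DMA monitor; sge_tx_timer_cb() reclaims
 * completed Tx descriptors.  Each callback rearms its own timer.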
52251ecc7b7aSHariprasad Shenai */ 52260e23daebSKees Cook timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); 52270e23daebSKees Cook timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); 5228a3bfb617SHariprasad Shenai 5229f7917c00SJeff Kirsher spin_lock_init(&s->intrq_lock); 523052367a76SVipul Pandya 523152367a76SVipul Pandya return 0; 5232f7917c00SJeff Kirsher } 5233
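/* The timer callbacks above share one idiom worth calling out: walking a
 * multi-word bitmap by clearing the lowest set bit with m &= (m - 1) and
 * recovering its index with find-first-set.  The stand-alone user-space
 * sketch below (not driver code; the sample bitmap contents and the
 * EX_BITS_PER_LONG macro are made up for illustration, and a compiler
 * builtin stands in for the kernel's __ffs()) shows the same walk.
 */
#include <stdio.h>

#define EX_BITS_PER_LONG (8 * (int)sizeof(unsigned long))

int main(void)
{
	/* Pretend queues 1, 5 and EX_BITS_PER_LONG + 3 need service. */
	unsigned long starving[2] = { (1UL << 1) | (1UL << 5), 1UL << 3 };
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned long m;

		for (m = starving[i]; m; m &= m - 1) {
			unsigned int id = __builtin_ctzl(m) + i * EX_BITS_PER_LONG;

			printf("service queue %u\n", id);
		}
	}
	return 0;
}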