/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"

/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512

/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive, we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
 */
#define MAX_TX_RECLAIM 16

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

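/*
 * Packets whose Work Request (WR and CPL headers plus payload) fits within
 * MAX_IMM_TX_PKT_LEN bytes are copied straight into the Tx descriptor ring
 * as immediate data instead of being DMA mapped; see is_eth_imm() and
 * cxgb4_inline_tx_skb() below for how this limit is applied.
 */
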
/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

struct rx_sw_desc {                /* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)

/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
 * Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};

static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK  1

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}

/**
 * txq_avail - return the number of available slots in a Tx queue
 * @q: the Tx queue
 *
 * Returns the number of descriptors in a Tx queue available to write new
 * packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 * fl_cap - return the capacity of a free-buffer list
 * @fl: the FL
 *
 * Returns the capacity of a free-buffer list.  The capacity is less than
 * the size because one descriptor needs to be left unpopulated, otherwise
 * HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}

/**
 * fl_starving - return whether a Free List is starving.
 * @adapter: pointer to the adapter
 * @fl: the Free List
 *
 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
 * threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
		  dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL(cxgb4_map_skb);

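/*
 * cxgb4_map_skb() is exported, together with cxgb4_write_sgl() and
 * cxgb4_ring_tx_db() below, so that upper-layer drivers sitting on top of
 * cxgb4 can build their own work requests.  A simplified transmit sequence
 * in such a driver might look roughly like:
 *
 *	struct sge_txq *tq = ...;	(the caller's Tx queue)
 *	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 *
 *	if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0))
 *		goto out_free;
 *	cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
 *	cxgb4_ring_tx_db(adap, tq, ndesc);
 *
 * where sgl, end and ndesc come from the caller's own WR layout.
 */
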
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}

/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 * @skb: the packet
 *
 * This is the packet destructor used for Tx packets that need to remain
 * mapped until they are freed rather than until their Tx descriptors are
 * freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adapter: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
		  unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}

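/*
 * Note that reclaimable() works off the consumer index which the SGE writes
 * back into the queue's status entry (q->stat), so it only counts
 * descriptors the hardware has actually finished processing.
 */
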
/**
 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
 * @adap: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue locked.
 */
inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				       bool unmap)
{
	int avail = reclaimable(q);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adap, q, avail, unmap);
		q->in_use -= avail;
	}
}
EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);

static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG_ON(1);
	}

	return buf_size;
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @adap: the adapter
 * @q: the SGE free list to free buffers from
 * @n: how many buffers to free
 *
 * Release the next @n buffers on an SGE free-buffer Rx queue.  The
 * buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(adap, d),
				       PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}

/**
 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 * @adap: the adapter
 * @q: the SGE free list
 *
 * Unmap the current buffer on an SGE free-buffer Rx queue.  The
 * buffer must be made inaccessible to HW before calling this function.
 *
 * This is similar to @free_rx_bufs above but does not free the buffer.
 * Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= PIDX_V(q->pend_cred / 8);
		else
			val |= PIDX_T5_V(q->pend_cred / 8);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(q->bar2_addr == NULL)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}

static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}

/**
 * refill_fl - refill an SGE Rx buffer ring
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for the allocations
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity.  If afterwards the queue is
 * found critically low mark it as starving in the bitmap of starving FLs.
 *
 * Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct sge *s = &adap->sge;
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	int node;

#ifdef CONFIG_DEBUG_FS
	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
		goto out;
#endif

	gfp |= __GFP_NOWARN;
	node = dev_to_node(adap->pdev_dev);

	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << s->fl_pg_order,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, s->fl_pg_order);
			q->mapping_err++;
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_PG_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		pg = alloc_pages_node(node, gfp, 0);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			q->mapping_err++;
			goto out;
		}
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		smp_wmb();
		q->low++;
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @dev: the PCI device's core device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 * @stat_size: extra space in HW ring for status information
 * @node: preferred node for memory allocations
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the bus address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
	 * (n-1) is odd ...
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

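/*
 * A quick worked example of the above: an skb with a linear header and two
 * page fragments needs a 3-entry gather list, so sgl_len(3) = (3 * 2) / 2 +
 * (2 & 1) + 2 = 5 flits, i.e. 40 bytes of descriptor space.
 */
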
/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

/**
 * is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @skb: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit as
 * immediate data. Return value corresponds to headroom required.
 */
static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
	int hdrlen = 0;

	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
	    chip_ver > CHELSIO_T5) {
		hdrlen = sizeof(struct cpl_tx_tnl_lso);
		hdrlen += sizeof(struct cpl_tx_pkt_core);
	} else {
		hdrlen = skb_shinfo(skb)->gso_size ?
			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
		hdrlen += sizeof(struct cpl_tx_pkt);
	}
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}

/**
 * calc_tx_flits - calculate the number of flits for a packet Tx WR
 * @skb: the packet
 *
 * Returns the number of flits needed for a Tx WR for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb, chip_ver);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */

	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size) {
		if (skb->encapsulation && chip_ver > CHELSIO_T5)
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_tnl_lso);
		else
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_pkt_lso_core);

		hdrlen += sizeof(struct cpl_tx_pkt_core);
		flits += (hdrlen / sizeof(__be64));
	} else {
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	}
	return flits;
}

/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	return flits_to_desc(calc_tx_flits(skb, chip_ver));
}

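/*
 * Rough example (assuming the usual 16-byte fw_eth_tx_pkt_wr and
 * cpl_tx_pkt_core layouts): a non-GSO skb with a linear part and two page
 * fragments takes sgl_len(3) = 5 flits of gather list plus 4 flits of WR/CPL
 * headers, i.e. 9 flits, which flits_to_desc() rounds up to 2 Tx descriptors
 * of 8 flits (64 bytes) each.
 */
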
/**
 * cxgb4_write_sgl - populate a scatter/gather list for a packet
 * @skb: the packet
 * @q: the Tx queue we are writing into
 * @sgl: starting location for writing the SGL
 * @end: points right after the end of the SGL
 * @start: start offset into skb main-body data to include in the SGL
 * @addr: the list of bus addresses for the SGL elements
 *
 * Generates a gather list for the buffers that make up a packet.
 * The caller must provide adequate space for the SGL that will be written.
 * The SGL includes all of the packet's page fragments and the data in its
 * main body except for the first @start bytes.  @sgl must be 16-byte
 * aligned and within a Tx descriptor with available space.  @end points
 * right after the end of the SGL but does not account for any potential
 * wrap around, i.e., @end > @sgl.
 */
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		     const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)            /* 0-pad to multiple of 16 */
		*end = 0;
}
EXPORT_SYMBOL(cxgb4_write_sgl);

/* This function copies 64 byte coalesced work request to
 * memory mapped BAR2 space. For coalesced WR SGE fetches
 * data from the FIFO instead of from Host.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}

/**
 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 * @n: number of new descriptors to give to HW
 *
 * Ring the doorbell for a Tx queue.
 */
inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);
		unsigned long flags;

		/* For T4 we need to participate in the Doorbell Recovery
		 * mechanism.
		 */
		spin_lock_irqsave(&q->db_lock, flags);
		if (!q->db_disabled)
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     QID_V(q->cntxt_id) | val);
		else
			q->db_pidx_inc += n;
		q->db_pidx = q->pidx;
		spin_unlock_irqrestore(&q->db_lock, flags);
	} else {
		u32 val = PIDX_T5_V(n);

		/* T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & DBPRIO_F);

		/* If we're only writing a single TX Descriptor and we can use
		 * Inferred QID registers, we can use the Write Combining
		 * Gather Buffer; otherwise we use the simple doorbell.
		 */
		if (n == 1 && q->bar2_qid == 0) {
			int index = (q->pidx
				     ? (q->pidx - 1)
				     : (q->size - 1));
			u64 *wr = (u64 *)&q->desc[index];

			cxgb_pio_copy((u64 __iomem *)
				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
				      wr);
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);
		}

		/* This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		wmb();
	}
}
EXPORT_SYMBOL(cxgb4_ring_tx_db);

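/*
 * The Write Combining path above relies on a Tx descriptor being exactly
 * 8 flits (64 bytes), which is what cxgb_pio_copy() pushes through the
 * SGE_UDB_WCDOORBELL window, so a single-descriptor Work Request can reach
 * the chip without a separate doorbell-triggered DMA read of the ring.
 */
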
/**
 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
 * @skb: the packet
 * @q: the Tx queue where the packet will be inlined
 * @pos: starting position in the Tx queue where to inline the packet
 *
 * Inline a packet's contents directly into Tx descriptors, starting at
 * the given position within the Tx DMA ring.
 * Most of the complexity of this operation is dealing with wrap arounds
 * in the middle of the packet we want to inline.
 */
void cxgb4_inline_tx_skb(const struct sk_buff *skb,
			 const struct sge_txq *q, void *pos)
{
	int left = (void *)q->stat - pos;
	u64 *p;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}

	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}
EXPORT_SYMBOL(cxgb4_inline_tx_skb);

static void *inline_tx_skb_header(const struct sk_buff *skb,
				  const struct sge_txq *q, void *pos,
				  int length)
{
	u64 *p;
	int left = (void *)q->stat - pos;

	if (likely(length <= left)) {
		memcpy(pos, skb->data, length);
		pos += length;
	} else {
		memcpy(pos, skb->data, left);
		memcpy(q->desc, skb->data + left, length - left);
		pos = (void *)q->desc + (length - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8) {
		*p = 0;
		return p + 1;
	}
	return p;
}

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
	int csum_type;
	bool inner_hdr_csum = false;
	u16 proto, ver;

	if (skb->encapsulation &&
	    (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
		inner_hdr_csum = true;

	if (inner_hdr_csum) {
		ver = inner_ip_hdr(skb)->version;
		proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
			inner_ipv6_hdr(skb)->nexthdr;
	} else {
		ver = ip_hdr(skb)->version;
		proto = (ver == 4) ? ip_hdr(skb)->protocol :
ip_hdr(skb)->protocol : 1089c50ae55eSGanesh Goudar ipv6_hdr(skb)->nexthdr; 1090c50ae55eSGanesh Goudar } 1091c50ae55eSGanesh Goudar 1092c50ae55eSGanesh Goudar if (ver == 4) { 1093c50ae55eSGanesh Goudar if (proto == IPPROTO_TCP) 1094f7917c00SJeff Kirsher csum_type = TX_CSUM_TCPIP; 1095c50ae55eSGanesh Goudar else if (proto == IPPROTO_UDP) 1096f7917c00SJeff Kirsher csum_type = TX_CSUM_UDPIP; 1097f7917c00SJeff Kirsher else { 1098f7917c00SJeff Kirsher nocsum: /* 1099f7917c00SJeff Kirsher * unknown protocol, disable HW csum 1100f7917c00SJeff Kirsher * and hope a bad packet is detected 1101f7917c00SJeff Kirsher */ 11021ecc7b7aSHariprasad Shenai return TXPKT_L4CSUM_DIS_F; 1103f7917c00SJeff Kirsher } 1104f7917c00SJeff Kirsher } else { 1105f7917c00SJeff Kirsher /* 1106f7917c00SJeff Kirsher * this doesn't work with extension headers 1107f7917c00SJeff Kirsher */ 1108c50ae55eSGanesh Goudar if (proto == IPPROTO_TCP) 1109f7917c00SJeff Kirsher csum_type = TX_CSUM_TCPIP6; 1110c50ae55eSGanesh Goudar else if (proto == IPPROTO_UDP) 1111f7917c00SJeff Kirsher csum_type = TX_CSUM_UDPIP6; 1112f7917c00SJeff Kirsher else 1113f7917c00SJeff Kirsher goto nocsum; 1114f7917c00SJeff Kirsher } 1115f7917c00SJeff Kirsher 11163ccc6cf7SHariprasad Shenai if (likely(csum_type >= TX_CSUM_TCPIP)) { 1117c50ae55eSGanesh Goudar int eth_hdr_len, l4_len; 1118c50ae55eSGanesh Goudar u64 hdr_len; 1119c50ae55eSGanesh Goudar 1120c50ae55eSGanesh Goudar if (inner_hdr_csum) { 1121c50ae55eSGanesh Goudar /* This allows checksum offload for all encapsulated 1122c50ae55eSGanesh Goudar * packets like GRE etc.. 1123c50ae55eSGanesh Goudar */ 1124c50ae55eSGanesh Goudar l4_len = skb_inner_network_header_len(skb); 1125c50ae55eSGanesh Goudar eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN; 1126c50ae55eSGanesh Goudar } else { 1127c50ae55eSGanesh Goudar l4_len = skb_network_header_len(skb); 1128c50ae55eSGanesh Goudar eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; 1129c50ae55eSGanesh Goudar } 1130c50ae55eSGanesh Goudar hdr_len = TXPKT_IPHDR_LEN_V(l4_len); 11313ccc6cf7SHariprasad Shenai 11323ccc6cf7SHariprasad Shenai if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) 11333ccc6cf7SHariprasad Shenai hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len); 11343ccc6cf7SHariprasad Shenai else 11353ccc6cf7SHariprasad Shenai hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len); 11363ccc6cf7SHariprasad Shenai return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len; 11373ccc6cf7SHariprasad Shenai } else { 1138f7917c00SJeff Kirsher int start = skb_transport_offset(skb); 1139f7917c00SJeff Kirsher 11401ecc7b7aSHariprasad Shenai return TXPKT_CSUM_TYPE_V(csum_type) | 11411ecc7b7aSHariprasad Shenai TXPKT_CSUM_START_V(start) | 11421ecc7b7aSHariprasad Shenai TXPKT_CSUM_LOC_V(start + skb->csum_offset); 1143f7917c00SJeff Kirsher } 1144f7917c00SJeff Kirsher } 1145f7917c00SJeff Kirsher 1146f7917c00SJeff Kirsher static void eth_txq_stop(struct sge_eth_txq *q) 1147f7917c00SJeff Kirsher { 1148f7917c00SJeff Kirsher netif_tx_stop_queue(q->txq); 1149f7917c00SJeff Kirsher q->q.stops++; 1150f7917c00SJeff Kirsher } 1151f7917c00SJeff Kirsher 1152f7917c00SJeff Kirsher static inline void txq_advance(struct sge_txq *q, unsigned int n) 1153f7917c00SJeff Kirsher { 1154f7917c00SJeff Kirsher q->in_use += n; 1155f7917c00SJeff Kirsher q->pidx += n; 1156f7917c00SJeff Kirsher if (q->pidx >= q->size) 1157f7917c00SJeff Kirsher q->pidx -= q->size; 1158f7917c00SJeff Kirsher } 1159f7917c00SJeff Kirsher 116084a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE 116184a200b3SVarun Prakash static inline int 116284a200b3SVarun 
Prakash cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, 116384a200b3SVarun Prakash const struct port_info *pi, u64 *cntrl) 116484a200b3SVarun Prakash { 116584a200b3SVarun Prakash const struct cxgb_fcoe *fcoe = &pi->fcoe; 116684a200b3SVarun Prakash 116784a200b3SVarun Prakash if (!(fcoe->flags & CXGB_FCOE_ENABLED)) 116884a200b3SVarun Prakash return 0; 116984a200b3SVarun Prakash 117084a200b3SVarun Prakash if (skb->protocol != htons(ETH_P_FCOE)) 117184a200b3SVarun Prakash return 0; 117284a200b3SVarun Prakash 117384a200b3SVarun Prakash skb_reset_mac_header(skb); 117484a200b3SVarun Prakash skb->mac_len = sizeof(struct ethhdr); 117584a200b3SVarun Prakash 117684a200b3SVarun Prakash skb_set_network_header(skb, skb->mac_len); 117784a200b3SVarun Prakash skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); 117884a200b3SVarun Prakash 117984a200b3SVarun Prakash if (!cxgb_fcoe_sof_eof_supported(adap, skb)) 118084a200b3SVarun Prakash return -ENOTSUPP; 118184a200b3SVarun Prakash 118284a200b3SVarun Prakash /* FC CRC offload */ 11831ecc7b7aSHariprasad Shenai *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) | 11841ecc7b7aSHariprasad Shenai TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F | 11851ecc7b7aSHariprasad Shenai TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) | 11861ecc7b7aSHariprasad Shenai TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) | 11871ecc7b7aSHariprasad Shenai TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END); 118884a200b3SVarun Prakash return 0; 118984a200b3SVarun Prakash } 119084a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */ 119184a200b3SVarun Prakash 1192d0a1299cSGanesh Goudar /* Returns tunnel type if hardware supports offloading of the same. 1193d0a1299cSGanesh Goudar * It is called only for T5 and onwards. 1194d0a1299cSGanesh Goudar */ 1195d0a1299cSGanesh Goudar enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb) 1196d0a1299cSGanesh Goudar { 1197d0a1299cSGanesh Goudar u8 l4_hdr = 0; 1198d0a1299cSGanesh Goudar enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; 1199d0a1299cSGanesh Goudar struct port_info *pi = netdev_priv(skb->dev); 1200d0a1299cSGanesh Goudar struct adapter *adapter = pi->adapter; 1201d0a1299cSGanesh Goudar 1202d0a1299cSGanesh Goudar if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || 1203d0a1299cSGanesh Goudar skb->inner_protocol != htons(ETH_P_TEB)) 1204d0a1299cSGanesh Goudar return tnl_type; 1205d0a1299cSGanesh Goudar 1206d0a1299cSGanesh Goudar switch (vlan_get_protocol(skb)) { 1207d0a1299cSGanesh Goudar case htons(ETH_P_IP): 1208d0a1299cSGanesh Goudar l4_hdr = ip_hdr(skb)->protocol; 1209d0a1299cSGanesh Goudar break; 1210d0a1299cSGanesh Goudar case htons(ETH_P_IPV6): 1211d0a1299cSGanesh Goudar l4_hdr = ipv6_hdr(skb)->nexthdr; 1212d0a1299cSGanesh Goudar break; 1213d0a1299cSGanesh Goudar default: 1214d0a1299cSGanesh Goudar return tnl_type; 1215d0a1299cSGanesh Goudar } 1216d0a1299cSGanesh Goudar 1217d0a1299cSGanesh Goudar switch (l4_hdr) { 1218d0a1299cSGanesh Goudar case IPPROTO_UDP: 1219d0a1299cSGanesh Goudar if (adapter->vxlan_port == udp_hdr(skb)->dest) 1220d0a1299cSGanesh Goudar tnl_type = TX_TNL_TYPE_VXLAN; 1221c746fc0eSGanesh Goudar else if (adapter->geneve_port == udp_hdr(skb)->dest) 1222c746fc0eSGanesh Goudar tnl_type = TX_TNL_TYPE_GENEVE; 1223d0a1299cSGanesh Goudar break; 1224d0a1299cSGanesh Goudar default: 1225d0a1299cSGanesh Goudar return tnl_type; 1226d0a1299cSGanesh Goudar } 1227d0a1299cSGanesh Goudar 1228d0a1299cSGanesh Goudar return tnl_type; 1229d0a1299cSGanesh Goudar } 1230d0a1299cSGanesh Goudar 1231d0a1299cSGanesh 
Goudar static inline void t6_fill_tnl_lso(struct sk_buff *skb, 1232d0a1299cSGanesh Goudar struct cpl_tx_tnl_lso *tnl_lso, 1233d0a1299cSGanesh Goudar enum cpl_tx_tnl_lso_type tnl_type) 1234d0a1299cSGanesh Goudar { 1235d0a1299cSGanesh Goudar u32 val; 1236d0a1299cSGanesh Goudar int in_eth_xtra_len; 1237d0a1299cSGanesh Goudar int l3hdr_len = skb_network_header_len(skb); 1238d0a1299cSGanesh Goudar int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; 1239d0a1299cSGanesh Goudar const struct skb_shared_info *ssi = skb_shinfo(skb); 1240d0a1299cSGanesh Goudar bool v6 = (ip_hdr(skb)->version == 6); 1241d0a1299cSGanesh Goudar 1242d0a1299cSGanesh Goudar val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) | 1243d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_FIRST_F | 1244d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_LAST_F | 1245d0a1299cSGanesh Goudar (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) | 1246d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) | 1247d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) | 1248d0a1299cSGanesh Goudar (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) | 1249d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_IPLENSETOUT_F | 1250d0a1299cSGanesh Goudar (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F); 1251d0a1299cSGanesh Goudar tnl_lso->op_to_IpIdSplitOut = htonl(val); 1252d0a1299cSGanesh Goudar 1253d0a1299cSGanesh Goudar tnl_lso->IpIdOffsetOut = 0; 1254d0a1299cSGanesh Goudar 1255d0a1299cSGanesh Goudar /* Get the tunnel header length */ 1256d0a1299cSGanesh Goudar val = skb_inner_mac_header(skb) - skb_mac_header(skb); 1257d0a1299cSGanesh Goudar in_eth_xtra_len = skb_inner_network_header(skb) - 1258d0a1299cSGanesh Goudar skb_inner_mac_header(skb) - ETH_HLEN; 1259d0a1299cSGanesh Goudar 1260d0a1299cSGanesh Goudar switch (tnl_type) { 1261d0a1299cSGanesh Goudar case TX_TNL_TYPE_VXLAN: 1262c746fc0eSGanesh Goudar case TX_TNL_TYPE_GENEVE: 1263d0a1299cSGanesh Goudar tnl_lso->UdpLenSetOut_to_TnlHdrLen = 1264d0a1299cSGanesh Goudar htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F | 1265d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_UDPLENSETOUT_F); 1266d0a1299cSGanesh Goudar break; 1267d0a1299cSGanesh Goudar default: 1268d0a1299cSGanesh Goudar tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0; 1269d0a1299cSGanesh Goudar break; 1270d0a1299cSGanesh Goudar } 1271d0a1299cSGanesh Goudar 1272d0a1299cSGanesh Goudar tnl_lso->UdpLenSetOut_to_TnlHdrLen |= 1273d0a1299cSGanesh Goudar htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) | 1274d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type)); 1275d0a1299cSGanesh Goudar 1276d0a1299cSGanesh Goudar tnl_lso->r1 = 0; 1277d0a1299cSGanesh Goudar 1278d0a1299cSGanesh Goudar val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) | 1279d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) | 1280d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) | 1281d0a1299cSGanesh Goudar CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4); 1282d0a1299cSGanesh Goudar tnl_lso->Flow_to_TcpHdrLen = htonl(val); 1283d0a1299cSGanesh Goudar 1284d0a1299cSGanesh Goudar tnl_lso->IpIdOffset = htons(0); 1285d0a1299cSGanesh Goudar 1286d0a1299cSGanesh Goudar tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size)); 1287d0a1299cSGanesh Goudar tnl_lso->TCPSeqOffset = htonl(0); 1288d0a1299cSGanesh Goudar tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len)); 1289d0a1299cSGanesh Goudar } 1290d0a1299cSGanesh Goudar 1291f7917c00SJeff Kirsher /** 1292f7917c00SJeff Kirsher * t4_eth_xmit - add a packet to an Ethernet Tx queue 1293f7917c00SJeff Kirsher * @skb: the packet 
1294f7917c00SJeff Kirsher * @dev: the egress net device 1295f7917c00SJeff Kirsher * 1296f7917c00SJeff Kirsher * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. 1297f7917c00SJeff Kirsher */ 1298f7917c00SJeff Kirsher netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) 1299f7917c00SJeff Kirsher { 1300a4569504SAtul Gupta u32 wr_mid, ctrl0, op; 1301c50ae55eSGanesh Goudar u64 cntrl, *end, *sgl; 1302f7917c00SJeff Kirsher int qidx, credits; 1303f7917c00SJeff Kirsher unsigned int flits, ndesc; 1304f7917c00SJeff Kirsher struct adapter *adap; 1305f7917c00SJeff Kirsher struct sge_eth_txq *q; 1306f7917c00SJeff Kirsher const struct port_info *pi; 1307f7917c00SJeff Kirsher struct fw_eth_tx_pkt_wr *wr; 1308f7917c00SJeff Kirsher struct cpl_tx_pkt_core *cpl; 1309f7917c00SJeff Kirsher const struct skb_shared_info *ssi; 1310f7917c00SJeff Kirsher dma_addr_t addr[MAX_SKB_FRAGS + 1]; 13110034b298SKumar Sanghvi bool immediate = false; 1312637d3e99SHariprasad Shenai int len, max_pkt_len; 1313a4569504SAtul Gupta bool ptp_enabled = is_ptp_enabled(skb, dev); 1314d0a1299cSGanesh Goudar unsigned int chip_ver; 1315d0a1299cSGanesh Goudar enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; 1316d0a1299cSGanesh Goudar 131784a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE 131884a200b3SVarun Prakash int err; 131984a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */ 1320f7917c00SJeff Kirsher 1321f7917c00SJeff Kirsher /* 1322f7917c00SJeff Kirsher * The chip min packet length is 10 octets but play safe and reject 1323f7917c00SJeff Kirsher * anything shorter than an Ethernet header. 1324f7917c00SJeff Kirsher */ 1325f7917c00SJeff Kirsher if (unlikely(skb->len < ETH_HLEN)) { 1326a7525198SEric W. Biederman out_free: dev_kfree_skb_any(skb); 1327f7917c00SJeff Kirsher return NETDEV_TX_OK; 1328f7917c00SJeff Kirsher } 1329f7917c00SJeff Kirsher 1330637d3e99SHariprasad Shenai /* Discard the packet if the length is greater than mtu */ 1331637d3e99SHariprasad Shenai max_pkt_len = ETH_HLEN + dev->mtu; 13328d09e6b8SHariprasad Shenai if (skb_vlan_tagged(skb)) 1333637d3e99SHariprasad Shenai max_pkt_len += VLAN_HLEN; 1334637d3e99SHariprasad Shenai if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) 1335637d3e99SHariprasad Shenai goto out_free; 1336637d3e99SHariprasad Shenai 1337f7917c00SJeff Kirsher pi = netdev_priv(dev); 1338f7917c00SJeff Kirsher adap = pi->adapter; 1339a6ec572bSAtul Gupta ssi = skb_shinfo(skb); 1340a6ec572bSAtul Gupta #ifdef CONFIG_CHELSIO_IPSEC_INLINE 1341a6ec572bSAtul Gupta if (xfrm_offload(skb) && !ssi->gso_size) 1342a6ec572bSAtul Gupta return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev); 1343a6ec572bSAtul Gupta #endif /* CHELSIO_IPSEC_INLINE */ 1344a6ec572bSAtul Gupta 1345f7917c00SJeff Kirsher qidx = skb_get_queue_mapping(skb); 1346a4569504SAtul Gupta if (ptp_enabled) { 1347a4569504SAtul Gupta spin_lock(&adap->ptp_lock); 1348a4569504SAtul Gupta if (!(adap->ptp_tx_skb)) { 1349a4569504SAtul Gupta skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1350a4569504SAtul Gupta adap->ptp_tx_skb = skb_get(skb); 1351a4569504SAtul Gupta } else { 1352a4569504SAtul Gupta spin_unlock(&adap->ptp_lock); 1353a4569504SAtul Gupta goto out_free; 1354a4569504SAtul Gupta } 1355a4569504SAtul Gupta q = &adap->sge.ptptxq; 1356a4569504SAtul Gupta } else { 1357f7917c00SJeff Kirsher q = &adap->sge.ethtxq[qidx + pi->first_qset]; 1358a4569504SAtul Gupta } 1359a4569504SAtul Gupta skb_tx_timestamp(skb); 1360f7917c00SJeff Kirsher 1361a6ec572bSAtul Gupta cxgb4_reclaim_completed_tx(adap, 
&q->q, true); 13621ecc7b7aSHariprasad Shenai cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; 136384a200b3SVarun Prakash 136484a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE 136584a200b3SVarun Prakash err = cxgb_fcoe_offload(skb, adap, pi, &cntrl); 1366a4569504SAtul Gupta if (unlikely(err == -ENOTSUPP)) { 1367a4569504SAtul Gupta if (ptp_enabled) 1368a4569504SAtul Gupta spin_unlock(&adap->ptp_lock); 136984a200b3SVarun Prakash goto out_free; 1370a4569504SAtul Gupta } 137184a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */ 1372f7917c00SJeff Kirsher 1373d0a1299cSGanesh Goudar chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); 1374d0a1299cSGanesh Goudar flits = calc_tx_flits(skb, chip_ver); 1375f7917c00SJeff Kirsher ndesc = flits_to_desc(flits); 1376f7917c00SJeff Kirsher credits = txq_avail(&q->q) - ndesc; 1377f7917c00SJeff Kirsher 1378f7917c00SJeff Kirsher if (unlikely(credits < 0)) { 1379f7917c00SJeff Kirsher eth_txq_stop(q); 1380f7917c00SJeff Kirsher dev_err(adap->pdev_dev, 1381f7917c00SJeff Kirsher "%s: Tx ring %u full while queue awake!\n", 1382f7917c00SJeff Kirsher dev->name, qidx); 1383a4569504SAtul Gupta if (ptp_enabled) 1384a4569504SAtul Gupta spin_unlock(&adap->ptp_lock); 1385f7917c00SJeff Kirsher return NETDEV_TX_BUSY; 1386f7917c00SJeff Kirsher } 1387f7917c00SJeff Kirsher 1388d0a1299cSGanesh Goudar if (is_eth_imm(skb, chip_ver)) 13890034b298SKumar Sanghvi immediate = true; 13900034b298SKumar Sanghvi 1391d0a1299cSGanesh Goudar if (skb->encapsulation && chip_ver > CHELSIO_T5) 1392d0a1299cSGanesh Goudar tnl_type = cxgb_encap_offload_supported(skb); 1393d0a1299cSGanesh Goudar 13940034b298SKumar Sanghvi if (!immediate && 1395a6ec572bSAtul Gupta unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) { 1396f7917c00SJeff Kirsher q->mapping_err++; 1397a4569504SAtul Gupta if (ptp_enabled) 1398a4569504SAtul Gupta spin_unlock(&adap->ptp_lock); 1399f7917c00SJeff Kirsher goto out_free; 1400f7917c00SJeff Kirsher } 1401f7917c00SJeff Kirsher 1402e2ac9628SHariprasad Shenai wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); 1403f7917c00SJeff Kirsher if (unlikely(credits < ETHTXQ_STOP_THRES)) { 1404f7917c00SJeff Kirsher eth_txq_stop(q); 1405e2ac9628SHariprasad Shenai wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; 1406f7917c00SJeff Kirsher } 1407f7917c00SJeff Kirsher 1408f7917c00SJeff Kirsher wr = (void *)&q->q.desc[q->q.pidx]; 1409f7917c00SJeff Kirsher wr->equiq_to_len16 = htonl(wr_mid); 1410f7917c00SJeff Kirsher wr->r3 = cpu_to_be64(0); 1411f7917c00SJeff Kirsher end = (u64 *)wr + flits; 1412f7917c00SJeff Kirsher 14130034b298SKumar Sanghvi len = immediate ? 
skb->len : 0; 1414a6076fcdSGanesh Goudar len += sizeof(*cpl); 1415f7917c00SJeff Kirsher if (ssi->gso_size) { 1416a6076fcdSGanesh Goudar struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 1417f7917c00SJeff Kirsher bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; 1418f7917c00SJeff Kirsher int l3hdr_len = skb_network_header_len(skb); 1419f7917c00SJeff Kirsher int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; 1420d0a1299cSGanesh Goudar struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1); 1421f7917c00SJeff Kirsher 1422d0a1299cSGanesh Goudar if (tnl_type) 1423d0a1299cSGanesh Goudar len += sizeof(*tnl_lso); 1424d0a1299cSGanesh Goudar else 14250034b298SKumar Sanghvi len += sizeof(*lso); 1426d0a1299cSGanesh Goudar 1427e2ac9628SHariprasad Shenai wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | 1428e2ac9628SHariprasad Shenai FW_WR_IMMDLEN_V(len)); 1429d0a1299cSGanesh Goudar if (tnl_type) { 1430d0a1299cSGanesh Goudar struct iphdr *iph = ip_hdr(skb); 1431d0a1299cSGanesh Goudar 1432d0a1299cSGanesh Goudar t6_fill_tnl_lso(skb, tnl_lso, tnl_type); 1433d0a1299cSGanesh Goudar cpl = (void *)(tnl_lso + 1); 1434d0a1299cSGanesh Goudar /* Driver is expected to compute partial checksum that 1435d0a1299cSGanesh Goudar * does not include the IP Total Length. 1436d0a1299cSGanesh Goudar */ 1437d0a1299cSGanesh Goudar if (iph->version == 4) { 1438d0a1299cSGanesh Goudar iph->check = 0; 1439d0a1299cSGanesh Goudar iph->tot_len = 0; 1440d0a1299cSGanesh Goudar iph->check = (u16)(~ip_fast_csum((u8 *)iph, 1441d0a1299cSGanesh Goudar iph->ihl)); 1442d0a1299cSGanesh Goudar } 1443d0a1299cSGanesh Goudar if (skb->ip_summed == CHECKSUM_PARTIAL) 1444d0a1299cSGanesh Goudar cntrl = hwcsum(adap->params.chip, skb); 1445d0a1299cSGanesh Goudar } else { 1446a6076fcdSGanesh Goudar lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | 14471ecc7b7aSHariprasad Shenai LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | 14481ecc7b7aSHariprasad Shenai LSO_IPV6_V(v6) | 14491ecc7b7aSHariprasad Shenai LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | 14501ecc7b7aSHariprasad Shenai LSO_IPHDR_LEN_V(l3hdr_len / 4) | 14511ecc7b7aSHariprasad Shenai LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); 1452a6076fcdSGanesh Goudar lso->ipid_ofst = htons(0); 1453a6076fcdSGanesh Goudar lso->mss = htons(ssi->gso_size); 1454a6076fcdSGanesh Goudar lso->seqno_offset = htonl(0); 14557207c0d1SHariprasad Shenai if (is_t4(adap->params.chip)) 1456a6076fcdSGanesh Goudar lso->len = htonl(skb->len); 14577207c0d1SHariprasad Shenai else 1458a6076fcdSGanesh Goudar lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); 1459f7917c00SJeff Kirsher cpl = (void *)(lso + 1); 14603ccc6cf7SHariprasad Shenai 1461d0a1299cSGanesh Goudar if (CHELSIO_CHIP_VERSION(adap->params.chip) 1462d0a1299cSGanesh Goudar <= CHELSIO_T5) 14633ccc6cf7SHariprasad Shenai cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); 14643ccc6cf7SHariprasad Shenai else 14653ccc6cf7SHariprasad Shenai cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); 14663ccc6cf7SHariprasad Shenai 14673ccc6cf7SHariprasad Shenai cntrl |= TXPKT_CSUM_TYPE_V(v6 ? 14683ccc6cf7SHariprasad Shenai TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | 14693ccc6cf7SHariprasad Shenai TXPKT_IPHDR_LEN_V(l3hdr_len); 1470d0a1299cSGanesh Goudar } 1471c50ae55eSGanesh Goudar sgl = (u64 *)(cpl + 1); /* sgl start here */ 1472c50ae55eSGanesh Goudar if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { 1473c50ae55eSGanesh Goudar /* If current position is already at the end of the 1474c50ae55eSGanesh Goudar * txq, reset the current to point to start of the queue 1475c50ae55eSGanesh Goudar * and update the end ptr as well. 
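 * (Editor's note, summarising the code just below: q->q.stat sits one slot
 *  past the last usable descriptor, so when the CPL ends exactly at
 *  q->q.stat the SGL must begin back at q->q.desc, and "end" is rebased by
 *  the same distance so the limit later handed to cxgb4_write_sgl() still
 *  describes the same amount of ring space.)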
1476c50ae55eSGanesh Goudar */ 1477c50ae55eSGanesh Goudar if (sgl == (u64 *)q->q.stat) { 1478c50ae55eSGanesh Goudar int left = (u8 *)end - (u8 *)q->q.stat; 1479c50ae55eSGanesh Goudar 1480c50ae55eSGanesh Goudar end = (void *)q->q.desc + left; 1481c50ae55eSGanesh Goudar sgl = (void *)q->q.desc; 1482c50ae55eSGanesh Goudar } 1483c50ae55eSGanesh Goudar } 1484f7917c00SJeff Kirsher q->tso++; 1485f7917c00SJeff Kirsher q->tx_cso += ssi->gso_segs; 1486f7917c00SJeff Kirsher } else { 1487a4569504SAtul Gupta if (ptp_enabled) 1488a4569504SAtul Gupta op = FW_PTP_TX_PKT_WR; 1489a4569504SAtul Gupta else 1490a4569504SAtul Gupta op = FW_ETH_TX_PKT_WR; 1491a4569504SAtul Gupta wr->op_immdlen = htonl(FW_WR_OP_V(op) | 1492e2ac9628SHariprasad Shenai FW_WR_IMMDLEN_V(len)); 1493f7917c00SJeff Kirsher cpl = (void *)(wr + 1); 1494c50ae55eSGanesh Goudar sgl = (u64 *)(cpl + 1); 1495f7917c00SJeff Kirsher if (skb->ip_summed == CHECKSUM_PARTIAL) { 14963ccc6cf7SHariprasad Shenai cntrl = hwcsum(adap->params.chip, skb) | 14973ccc6cf7SHariprasad Shenai TXPKT_IPCSUM_DIS_F; 1498f7917c00SJeff Kirsher q->tx_cso++; 149984a200b3SVarun Prakash } 1500f7917c00SJeff Kirsher } 1501f7917c00SJeff Kirsher 1502df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) { 1503f7917c00SJeff Kirsher q->vlan_ins++; 15041ecc7b7aSHariprasad Shenai cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); 150584a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE 150684a200b3SVarun Prakash if (skb->protocol == htons(ETH_P_FCOE)) 15071ecc7b7aSHariprasad Shenai cntrl |= TXPKT_VLAN_V( 150884a200b3SVarun Prakash ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); 150984a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */ 1510f7917c00SJeff Kirsher } 1511f7917c00SJeff Kirsher 1512397665daSAnish Bhatt ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | 1513397665daSAnish Bhatt TXPKT_PF_V(adap->pf); 1514a4569504SAtul Gupta if (ptp_enabled) 1515a4569504SAtul Gupta ctrl0 |= TXPKT_TSTAMP_F; 1516397665daSAnish Bhatt #ifdef CONFIG_CHELSIO_T4_DCB 1517397665daSAnish Bhatt if (is_t4(adap->params.chip)) 1518397665daSAnish Bhatt ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio); 1519397665daSAnish Bhatt else 1520397665daSAnish Bhatt ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio); 1521397665daSAnish Bhatt #endif 1522397665daSAnish Bhatt cpl->ctrl0 = htonl(ctrl0); 1523f7917c00SJeff Kirsher cpl->pack = htons(0); 1524f7917c00SJeff Kirsher cpl->len = htons(skb->len); 1525f7917c00SJeff Kirsher cpl->ctrl1 = cpu_to_be64(cntrl); 1526f7917c00SJeff Kirsher 15270034b298SKumar Sanghvi if (immediate) { 1528c50ae55eSGanesh Goudar cxgb4_inline_tx_skb(skb, &q->q, sgl); 1529a7525198SEric W. 
Biederman dev_consume_skb_any(skb); 1530f7917c00SJeff Kirsher } else { 1531f7917c00SJeff Kirsher int last_desc; 1532f7917c00SJeff Kirsher 1533c50ae55eSGanesh Goudar cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr); 1534f7917c00SJeff Kirsher skb_orphan(skb); 1535f7917c00SJeff Kirsher 1536f7917c00SJeff Kirsher last_desc = q->q.pidx + ndesc - 1; 1537f7917c00SJeff Kirsher if (last_desc >= q->q.size) 1538f7917c00SJeff Kirsher last_desc -= q->q.size; 1539f7917c00SJeff Kirsher q->q.sdesc[last_desc].skb = skb; 1540a6076fcdSGanesh Goudar q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl; 1541f7917c00SJeff Kirsher } 1542f7917c00SJeff Kirsher 1543f7917c00SJeff Kirsher txq_advance(&q->q, ndesc); 1544f7917c00SJeff Kirsher 1545a6ec572bSAtul Gupta cxgb4_ring_tx_db(adap, &q->q, ndesc); 1546a4569504SAtul Gupta if (ptp_enabled) 1547a4569504SAtul Gupta spin_unlock(&adap->ptp_lock); 1548f7917c00SJeff Kirsher return NETDEV_TX_OK; 1549f7917c00SJeff Kirsher } 1550f7917c00SJeff Kirsher 1551f7917c00SJeff Kirsher /** 1552f7917c00SJeff Kirsher * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs 1553f7917c00SJeff Kirsher * @q: the SGE control Tx queue 1554f7917c00SJeff Kirsher * 1555a6ec572bSAtul Gupta * This is a variant of cxgb4_reclaim_completed_tx() that is used 1556a6ec572bSAtul Gupta * for Tx queues that send only immediate data (presently just 1557a6ec572bSAtul Gupta * the control queues) and thus do not have any sk_buffs to release. 1558f7917c00SJeff Kirsher */ 1559f7917c00SJeff Kirsher static inline void reclaim_completed_tx_imm(struct sge_txq *q) 1560f7917c00SJeff Kirsher { 15616aa7de05SMark Rutland int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); 1562f7917c00SJeff Kirsher int reclaim = hw_cidx - q->cidx; 1563f7917c00SJeff Kirsher 1564f7917c00SJeff Kirsher if (reclaim < 0) 1565f7917c00SJeff Kirsher reclaim += q->size; 1566f7917c00SJeff Kirsher 1567f7917c00SJeff Kirsher q->in_use -= reclaim; 1568f7917c00SJeff Kirsher q->cidx = hw_cidx; 1569f7917c00SJeff Kirsher } 1570f7917c00SJeff Kirsher 1571f7917c00SJeff Kirsher /** 1572f7917c00SJeff Kirsher * is_imm - check whether a packet can be sent as immediate data 1573f7917c00SJeff Kirsher * @skb: the packet 1574f7917c00SJeff Kirsher * 1575f7917c00SJeff Kirsher * Returns true if a packet can be sent as a WR with immediate data. 1576f7917c00SJeff Kirsher */ 1577f7917c00SJeff Kirsher static inline int is_imm(const struct sk_buff *skb) 1578f7917c00SJeff Kirsher { 1579f7917c00SJeff Kirsher return skb->len <= MAX_CTRL_WR_LEN; 1580f7917c00SJeff Kirsher } 1581f7917c00SJeff Kirsher 1582f7917c00SJeff Kirsher /** 1583f7917c00SJeff Kirsher * ctrlq_check_stop - check if a control queue is full and should stop 1584f7917c00SJeff Kirsher * @q: the queue 1585f7917c00SJeff Kirsher * @wr: most recent WR written to the queue 1586f7917c00SJeff Kirsher * 1587f7917c00SJeff Kirsher * Check if a control queue has become full and should be stopped. 1588f7917c00SJeff Kirsher * We clean up control queue descriptors very lazily, only when we are out. 1589f7917c00SJeff Kirsher * If the queue is still full after reclaiming any completed descriptors 1590f7917c00SJeff Kirsher * we suspend it and have the last WR wake it up. 
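 * (Editor's note, a hedged summary of the wake-up mechanism: the
 *  FW_WR_EQUEQ_F/FW_WR_EQUIQ_F bits set below request an egress-queue
 *  update from the hardware once this last WR is consumed, which is what
 *  eventually leads to restart_ctrlq() draining the backlog in q->sendq.)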
1591f7917c00SJeff Kirsher */ 1592f7917c00SJeff Kirsher static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) 1593f7917c00SJeff Kirsher { 1594f7917c00SJeff Kirsher reclaim_completed_tx_imm(&q->q); 1595f7917c00SJeff Kirsher if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { 1596e2ac9628SHariprasad Shenai wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); 1597f7917c00SJeff Kirsher q->q.stops++; 1598f7917c00SJeff Kirsher q->full = 1; 1599f7917c00SJeff Kirsher } 1600f7917c00SJeff Kirsher } 1601f7917c00SJeff Kirsher 1602f7917c00SJeff Kirsher /** 1603f7917c00SJeff Kirsher * ctrl_xmit - send a packet through an SGE control Tx queue 1604f7917c00SJeff Kirsher * @q: the control queue 1605f7917c00SJeff Kirsher * @skb: the packet 1606f7917c00SJeff Kirsher * 1607f7917c00SJeff Kirsher * Send a packet through an SGE control Tx queue. Packets sent through 1608f7917c00SJeff Kirsher * a control queue must fit entirely as immediate data. 1609f7917c00SJeff Kirsher */ 1610f7917c00SJeff Kirsher static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) 1611f7917c00SJeff Kirsher { 1612f7917c00SJeff Kirsher unsigned int ndesc; 1613f7917c00SJeff Kirsher struct fw_wr_hdr *wr; 1614f7917c00SJeff Kirsher 1615f7917c00SJeff Kirsher if (unlikely(!is_imm(skb))) { 1616f7917c00SJeff Kirsher WARN_ON(1); 1617f7917c00SJeff Kirsher dev_kfree_skb(skb); 1618f7917c00SJeff Kirsher return NET_XMIT_DROP; 1619f7917c00SJeff Kirsher } 1620f7917c00SJeff Kirsher 1621f7917c00SJeff Kirsher ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); 1622f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 1623f7917c00SJeff Kirsher 1624f7917c00SJeff Kirsher if (unlikely(q->full)) { 1625f7917c00SJeff Kirsher skb->priority = ndesc; /* save for restart */ 1626f7917c00SJeff Kirsher __skb_queue_tail(&q->sendq, skb); 1627f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 1628f7917c00SJeff Kirsher return NET_XMIT_CN; 1629f7917c00SJeff Kirsher } 1630f7917c00SJeff Kirsher 1631f7917c00SJeff Kirsher wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; 1632a6ec572bSAtul Gupta cxgb4_inline_tx_skb(skb, &q->q, wr); 1633f7917c00SJeff Kirsher 1634f7917c00SJeff Kirsher txq_advance(&q->q, ndesc); 1635f7917c00SJeff Kirsher if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) 1636f7917c00SJeff Kirsher ctrlq_check_stop(q, wr); 1637f7917c00SJeff Kirsher 1638a6ec572bSAtul Gupta cxgb4_ring_tx_db(q->adap, &q->q, ndesc); 1639f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 1640f7917c00SJeff Kirsher 1641f7917c00SJeff Kirsher kfree_skb(skb); 1642f7917c00SJeff Kirsher return NET_XMIT_SUCCESS; 1643f7917c00SJeff Kirsher } 1644f7917c00SJeff Kirsher 1645f7917c00SJeff Kirsher /** 1646f7917c00SJeff Kirsher * restart_ctrlq - restart a suspended control queue 1647f7917c00SJeff Kirsher * @data: the control queue to restart 1648f7917c00SJeff Kirsher * 1649f7917c00SJeff Kirsher * Resumes transmission on a suspended Tx control queue. 
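 * (Editor's note, summarising the body below: the queue is drained from
 *  q->sendq under the queue lock, the lock is dropped around each copy
 *  into the ring, the doorbell is rung in batches once more than 16
 *  descriptors have been written, and suspension is re-checked after every
 *  WR so the loop can stop early if the ring fills up again.)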
1650f7917c00SJeff Kirsher */ 1651f7917c00SJeff Kirsher static void restart_ctrlq(unsigned long data) 1652f7917c00SJeff Kirsher { 1653f7917c00SJeff Kirsher struct sk_buff *skb; 1654f7917c00SJeff Kirsher unsigned int written = 0; 1655f7917c00SJeff Kirsher struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; 1656f7917c00SJeff Kirsher 1657f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 1658f7917c00SJeff Kirsher reclaim_completed_tx_imm(&q->q); 1659f7917c00SJeff Kirsher BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ 1660f7917c00SJeff Kirsher 1661f7917c00SJeff Kirsher while ((skb = __skb_dequeue(&q->sendq)) != NULL) { 1662f7917c00SJeff Kirsher struct fw_wr_hdr *wr; 1663f7917c00SJeff Kirsher unsigned int ndesc = skb->priority; /* previously saved */ 1664f7917c00SJeff Kirsher 1665a4011fd4SHariprasad Shenai written += ndesc; 1666a4011fd4SHariprasad Shenai /* Write descriptors and free skbs outside the lock to limit 1667f7917c00SJeff Kirsher * wait times. q->full is still set so new skbs will be queued. 1668f7917c00SJeff Kirsher */ 1669a4011fd4SHariprasad Shenai wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; 1670a4011fd4SHariprasad Shenai txq_advance(&q->q, ndesc); 1671f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 1672f7917c00SJeff Kirsher 1673a6ec572bSAtul Gupta cxgb4_inline_tx_skb(skb, &q->q, wr); 1674f7917c00SJeff Kirsher kfree_skb(skb); 1675f7917c00SJeff Kirsher 1676f7917c00SJeff Kirsher if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { 1677f7917c00SJeff Kirsher unsigned long old = q->q.stops; 1678f7917c00SJeff Kirsher 1679f7917c00SJeff Kirsher ctrlq_check_stop(q, wr); 1680f7917c00SJeff Kirsher if (q->q.stops != old) { /* suspended anew */ 1681f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 1682f7917c00SJeff Kirsher goto ringdb; 1683f7917c00SJeff Kirsher } 1684f7917c00SJeff Kirsher } 1685f7917c00SJeff Kirsher if (written > 16) { 1686a6ec572bSAtul Gupta cxgb4_ring_tx_db(q->adap, &q->q, written); 1687f7917c00SJeff Kirsher written = 0; 1688f7917c00SJeff Kirsher } 1689f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 1690f7917c00SJeff Kirsher } 1691f7917c00SJeff Kirsher q->full = 0; 1692a6ec572bSAtul Gupta ringdb: 1693a6ec572bSAtul Gupta if (written) 1694a6ec572bSAtul Gupta cxgb4_ring_tx_db(q->adap, &q->q, written); 1695f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 1696f7917c00SJeff Kirsher } 1697f7917c00SJeff Kirsher 1698f7917c00SJeff Kirsher /** 1699f7917c00SJeff Kirsher * t4_mgmt_tx - send a management message 1700f7917c00SJeff Kirsher * @adap: the adapter 1701f7917c00SJeff Kirsher * @skb: the packet containing the management message 1702f7917c00SJeff Kirsher * 1703f7917c00SJeff Kirsher * Send a management message through control queue 0. 1704f7917c00SJeff Kirsher */ 1705f7917c00SJeff Kirsher int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) 1706f7917c00SJeff Kirsher { 1707f7917c00SJeff Kirsher int ret; 1708f7917c00SJeff Kirsher 1709f7917c00SJeff Kirsher local_bh_disable(); 1710f7917c00SJeff Kirsher ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); 1711f7917c00SJeff Kirsher local_bh_enable(); 1712f7917c00SJeff Kirsher return ret; 1713f7917c00SJeff Kirsher } 1714f7917c00SJeff Kirsher 1715f7917c00SJeff Kirsher /** 1716f7917c00SJeff Kirsher * is_ofld_imm - check whether a packet can be sent as immediate data 1717f7917c00SJeff Kirsher * @skb: the packet 1718f7917c00SJeff Kirsher * 1719f7917c00SJeff Kirsher * Returns true if a packet can be sent as an offload WR with immediate 1720f7917c00SJeff Kirsher * data. We currently use the same limit as for Ethernet packets. 
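 * (Editor's note: the sentence above does not cover the crypto exception
 *  visible in the code below -- FW_CRYPTO_LOOKASIDE_WR work requests may
 *  carry up to SGE_MAX_WR_LEN of immediate data, while every other offload
 *  packet keeps using the Ethernet limit MAX_IMM_TX_PKT_LEN.)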
1721f7917c00SJeff Kirsher */ 1722f7917c00SJeff Kirsher static inline int is_ofld_imm(const struct sk_buff *skb) 1723f7917c00SJeff Kirsher { 17242f47d580SHarsh Jain struct work_request_hdr *req = (struct work_request_hdr *)skb->data; 17252f47d580SHarsh Jain unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); 17262f47d580SHarsh Jain 17272f47d580SHarsh Jain if (opcode == FW_CRYPTO_LOOKASIDE_WR) 17282f47d580SHarsh Jain return skb->len <= SGE_MAX_WR_LEN; 17292f47d580SHarsh Jain else 1730f7917c00SJeff Kirsher return skb->len <= MAX_IMM_TX_PKT_LEN; 1731f7917c00SJeff Kirsher } 1732f7917c00SJeff Kirsher 1733f7917c00SJeff Kirsher /** 1734f7917c00SJeff Kirsher * calc_tx_flits_ofld - calculate # of flits for an offload packet 1735f7917c00SJeff Kirsher * @skb: the packet 1736f7917c00SJeff Kirsher * 1737f7917c00SJeff Kirsher * Returns the number of flits needed for the given offload packet. 1738f7917c00SJeff Kirsher * These packets are already fully constructed and no additional headers 1739f7917c00SJeff Kirsher * will be added. 1740f7917c00SJeff Kirsher */ 1741f7917c00SJeff Kirsher static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) 1742f7917c00SJeff Kirsher { 1743f7917c00SJeff Kirsher unsigned int flits, cnt; 1744f7917c00SJeff Kirsher 1745f7917c00SJeff Kirsher if (is_ofld_imm(skb)) 1746f7917c00SJeff Kirsher return DIV_ROUND_UP(skb->len, 8); 1747f7917c00SJeff Kirsher 1748f7917c00SJeff Kirsher flits = skb_transport_offset(skb) / 8U; /* headers */ 1749f7917c00SJeff Kirsher cnt = skb_shinfo(skb)->nr_frags; 175015dd16c2SLi RongQing if (skb_tail_pointer(skb) != skb_transport_header(skb)) 1751f7917c00SJeff Kirsher cnt++; 1752f7917c00SJeff Kirsher return flits + sgl_len(cnt); 1753f7917c00SJeff Kirsher } 1754f7917c00SJeff Kirsher 1755f7917c00SJeff Kirsher /** 1756f7917c00SJeff Kirsher * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion 1757f7917c00SJeff Kirsher * @adap: the adapter 1758f7917c00SJeff Kirsher * @q: the queue to stop 1759f7917c00SJeff Kirsher * 1760f7917c00SJeff Kirsher * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting 1761f7917c00SJeff Kirsher * inability to map packets. A periodic timer attempts to restart 1762f7917c00SJeff Kirsher * queues so marked. 1763f7917c00SJeff Kirsher */ 1764ab677ff4SHariprasad Shenai static void txq_stop_maperr(struct sge_uld_txq *q) 1765f7917c00SJeff Kirsher { 1766f7917c00SJeff Kirsher q->mapping_err++; 1767f7917c00SJeff Kirsher q->q.stops++; 1768f7917c00SJeff Kirsher set_bit(q->q.cntxt_id - q->adap->sge.egr_start, 1769f7917c00SJeff Kirsher q->adap->sge.txq_maperr); 1770f7917c00SJeff Kirsher } 1771f7917c00SJeff Kirsher 1772f7917c00SJeff Kirsher /** 1773f7917c00SJeff Kirsher * ofldtxq_stop - stop an offload Tx queue that has become full 1774f7917c00SJeff Kirsher * @q: the queue to stop 1775e383f248SAtul Gupta * @wr: the Work Request causing the queue to become full 1776f7917c00SJeff Kirsher * 1777f7917c00SJeff Kirsher * Stops an offload Tx queue that has become full and modifies the packet 1778f7917c00SJeff Kirsher * being written to request a wakeup. 
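 * (Editor's note: unlike the Ethernet Tx path, the queue stays logically
 *  stopped once q->full is set here; only restart_ofldq() clears it, so in
 *  the meantime new skbs simply accumulate on q->sendq.)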
1779f7917c00SJeff Kirsher */ 1780e383f248SAtul Gupta static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr) 1781f7917c00SJeff Kirsher { 1782e2ac9628SHariprasad Shenai wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); 1783f7917c00SJeff Kirsher q->q.stops++; 1784f7917c00SJeff Kirsher q->full = 1; 1785f7917c00SJeff Kirsher } 1786f7917c00SJeff Kirsher 1787f7917c00SJeff Kirsher /** 1788126fca64SHariprasad Shenai * service_ofldq - service/restart a suspended offload queue 1789f7917c00SJeff Kirsher * @q: the offload queue 1790f7917c00SJeff Kirsher * 1791126fca64SHariprasad Shenai * Services an offload Tx queue by moving packets from its Pending Send 1792126fca64SHariprasad Shenai * Queue to the Hardware TX ring. The function starts and ends with the 1793126fca64SHariprasad Shenai * Send Queue locked, but drops the lock while putting the skb at the 1794126fca64SHariprasad Shenai * head of the Send Queue onto the Hardware TX Ring. Dropping the lock 1795126fca64SHariprasad Shenai * allows more skbs to be added to the Send Queue by other threads. 1796126fca64SHariprasad Shenai * The packet being processed at the head of the Pending Send Queue is 1797126fca64SHariprasad Shenai * left on the queue in case we experience DMA Mapping errors, etc. 1798126fca64SHariprasad Shenai * and need to give up and restart later. 1799126fca64SHariprasad Shenai * 1800126fca64SHariprasad Shenai * service_ofldq() can be thought of as a task which opportunistically 1801126fca64SHariprasad Shenai * uses other threads execution contexts. We use the Offload Queue 1802126fca64SHariprasad Shenai * boolean "service_ofldq_running" to make sure that only one instance 1803126fca64SHariprasad Shenai * is ever running at a time ... 1804f7917c00SJeff Kirsher */ 1805ab677ff4SHariprasad Shenai static void service_ofldq(struct sge_uld_txq *q) 1806f7917c00SJeff Kirsher { 18078d0557d2SHariprasad Shenai u64 *pos, *before, *end; 1808f7917c00SJeff Kirsher int credits; 1809f7917c00SJeff Kirsher struct sk_buff *skb; 18108d0557d2SHariprasad Shenai struct sge_txq *txq; 18118d0557d2SHariprasad Shenai unsigned int left; 1812f7917c00SJeff Kirsher unsigned int written = 0; 1813f7917c00SJeff Kirsher unsigned int flits, ndesc; 1814f7917c00SJeff Kirsher 1815126fca64SHariprasad Shenai /* If another thread is currently in service_ofldq() processing the 1816126fca64SHariprasad Shenai * Pending Send Queue then there's nothing to do. Otherwise, flag 1817126fca64SHariprasad Shenai * that we're doing the work and continue. Examining/modifying 1818126fca64SHariprasad Shenai * the Offload Queue boolean "service_ofldq_running" must be done 1819126fca64SHariprasad Shenai * while holding the Pending Send Queue Lock. 1820126fca64SHariprasad Shenai */ 1821126fca64SHariprasad Shenai if (q->service_ofldq_running) 1822126fca64SHariprasad Shenai return; 1823126fca64SHariprasad Shenai q->service_ofldq_running = true; 1824126fca64SHariprasad Shenai 1825f7917c00SJeff Kirsher while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { 1826126fca64SHariprasad Shenai /* We drop the lock while we're working with the skb at the 1827126fca64SHariprasad Shenai * head of the Pending Send Queue. This allows more skbs to 1828126fca64SHariprasad Shenai * be added to the Pending Send Queue while we're working on 1829126fca64SHariprasad Shenai * this one. We don't need to lock to guard the TX Ring 1830126fca64SHariprasad Shenai * updates because only one thread of execution is ever 1831126fca64SHariprasad Shenai * allowed into service_ofldq() at a time. 
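 * (Editor's note: callers are expected to hold q->sendq.lock when calling
 *  service_ofldq() -- both ofld_xmit() and restart_ofldq() below do -- and
 *  the service_ofldq_running flag is what keeps a second caller from
 *  re-entering the loop while that lock is temporarily dropped.)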
1832f7917c00SJeff Kirsher */ 1833f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 1834f7917c00SJeff Kirsher 1835a6ec572bSAtul Gupta cxgb4_reclaim_completed_tx(q->adap, &q->q, false); 1836f7917c00SJeff Kirsher 1837f7917c00SJeff Kirsher flits = skb->priority; /* previously saved */ 1838f7917c00SJeff Kirsher ndesc = flits_to_desc(flits); 1839f7917c00SJeff Kirsher credits = txq_avail(&q->q) - ndesc; 1840f7917c00SJeff Kirsher BUG_ON(credits < 0); 1841f7917c00SJeff Kirsher if (unlikely(credits < TXQ_STOP_THRES)) 1842e383f248SAtul Gupta ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data); 1843f7917c00SJeff Kirsher 1844f7917c00SJeff Kirsher pos = (u64 *)&q->q.desc[q->q.pidx]; 1845f7917c00SJeff Kirsher if (is_ofld_imm(skb)) 1846a6ec572bSAtul Gupta cxgb4_inline_tx_skb(skb, &q->q, pos); 1847a6ec572bSAtul Gupta else if (cxgb4_map_skb(q->adap->pdev_dev, skb, 1848f7917c00SJeff Kirsher (dma_addr_t *)skb->head)) { 1849f7917c00SJeff Kirsher txq_stop_maperr(q); 1850f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 1851f7917c00SJeff Kirsher break; 1852f7917c00SJeff Kirsher } else { 1853f7917c00SJeff Kirsher int last_desc, hdr_len = skb_transport_offset(skb); 1854f7917c00SJeff Kirsher 18558d0557d2SHariprasad Shenai /* The WR headers may not fit within one descriptor. 18568d0557d2SHariprasad Shenai * So we need to deal with wrap-around here. 18578d0557d2SHariprasad Shenai */ 18588d0557d2SHariprasad Shenai before = (u64 *)pos; 18598d0557d2SHariprasad Shenai end = (u64 *)pos + flits; 18608d0557d2SHariprasad Shenai txq = &q->q; 18618d0557d2SHariprasad Shenai pos = (void *)inline_tx_skb_header(skb, &q->q, 18628d0557d2SHariprasad Shenai (void *)pos, 18638d0557d2SHariprasad Shenai hdr_len); 18648d0557d2SHariprasad Shenai if (before > (u64 *)pos) { 18658d0557d2SHariprasad Shenai left = (u8 *)end - (u8 *)txq->stat; 18668d0557d2SHariprasad Shenai end = (void *)txq->desc + left; 18678d0557d2SHariprasad Shenai } 18688d0557d2SHariprasad Shenai 18698d0557d2SHariprasad Shenai /* If current position is already at the end of the 18708d0557d2SHariprasad Shenai * ofld queue, reset the current to point to 18718d0557d2SHariprasad Shenai * start of the queue and update the end ptr as well. 
18728d0557d2SHariprasad Shenai */ 18738d0557d2SHariprasad Shenai if (pos == (u64 *)txq->stat) { 18748d0557d2SHariprasad Shenai left = (u8 *)end - (u8 *)txq->stat; 18758d0557d2SHariprasad Shenai end = (void *)txq->desc + left; 18768d0557d2SHariprasad Shenai pos = (void *)txq->desc; 18778d0557d2SHariprasad Shenai } 18788d0557d2SHariprasad Shenai 1879a6ec572bSAtul Gupta cxgb4_write_sgl(skb, &q->q, (void *)pos, 18808d0557d2SHariprasad Shenai end, hdr_len, 1881f7917c00SJeff Kirsher (dma_addr_t *)skb->head); 1882f7917c00SJeff Kirsher #ifdef CONFIG_NEED_DMA_MAP_STATE 1883f7917c00SJeff Kirsher skb->dev = q->adap->port[0]; 1884f7917c00SJeff Kirsher skb->destructor = deferred_unmap_destructor; 1885f7917c00SJeff Kirsher #endif 1886f7917c00SJeff Kirsher last_desc = q->q.pidx + ndesc - 1; 1887f7917c00SJeff Kirsher if (last_desc >= q->q.size) 1888f7917c00SJeff Kirsher last_desc -= q->q.size; 1889f7917c00SJeff Kirsher q->q.sdesc[last_desc].skb = skb; 1890f7917c00SJeff Kirsher } 1891f7917c00SJeff Kirsher 1892f7917c00SJeff Kirsher txq_advance(&q->q, ndesc); 1893f7917c00SJeff Kirsher written += ndesc; 1894f7917c00SJeff Kirsher if (unlikely(written > 32)) { 1895a6ec572bSAtul Gupta cxgb4_ring_tx_db(q->adap, &q->q, written); 1896f7917c00SJeff Kirsher written = 0; 1897f7917c00SJeff Kirsher } 1898f7917c00SJeff Kirsher 1899126fca64SHariprasad Shenai /* Reacquire the Pending Send Queue Lock so we can unlink the 1900126fca64SHariprasad Shenai * skb we've just successfully transferred to the TX Ring and 1901126fca64SHariprasad Shenai * loop for the next skb which may be at the head of the 1902126fca64SHariprasad Shenai * Pending Send Queue. 1903126fca64SHariprasad Shenai */ 1904f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 1905f7917c00SJeff Kirsher __skb_unlink(skb, &q->sendq); 1906f7917c00SJeff Kirsher if (is_ofld_imm(skb)) 1907f7917c00SJeff Kirsher kfree_skb(skb); 1908f7917c00SJeff Kirsher } 1909f7917c00SJeff Kirsher if (likely(written)) 1910a6ec572bSAtul Gupta cxgb4_ring_tx_db(q->adap, &q->q, written); 1911126fca64SHariprasad Shenai 1912126fca64SHariprasad Shenai /*Indicate that no thread is processing the Pending Send Queue 1913126fca64SHariprasad Shenai * currently. 1914126fca64SHariprasad Shenai */ 1915126fca64SHariprasad Shenai q->service_ofldq_running = false; 1916f7917c00SJeff Kirsher } 1917f7917c00SJeff Kirsher 1918f7917c00SJeff Kirsher /** 1919f7917c00SJeff Kirsher * ofld_xmit - send a packet through an offload queue 1920f7917c00SJeff Kirsher * @q: the Tx offload queue 1921f7917c00SJeff Kirsher * @skb: the packet 1922f7917c00SJeff Kirsher * 1923f7917c00SJeff Kirsher * Send an offload packet through an SGE offload queue. 1924f7917c00SJeff Kirsher */ 1925ab677ff4SHariprasad Shenai static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb) 1926f7917c00SJeff Kirsher { 1927f7917c00SJeff Kirsher skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ 1928f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 1929126fca64SHariprasad Shenai 1930126fca64SHariprasad Shenai /* Queue the new skb onto the Offload Queue's Pending Send Queue. If 1931126fca64SHariprasad Shenai * that results in this new skb being the only one on the queue, start 1932126fca64SHariprasad Shenai * servicing it. If there are other skbs already on the list, then 1933126fca64SHariprasad Shenai * either the queue is currently being processed or it's been stopped 1934126fca64SHariprasad Shenai * for some reason and it'll be restarted at a later time. 
Restart 1935126fca64SHariprasad Shenai * paths are triggered by events like experiencing a DMA Mapping Error 1936126fca64SHariprasad Shenai * or filling the Hardware TX Ring. 1937126fca64SHariprasad Shenai */ 1938f7917c00SJeff Kirsher __skb_queue_tail(&q->sendq, skb); 1939f7917c00SJeff Kirsher if (q->sendq.qlen == 1) 1940f7917c00SJeff Kirsher service_ofldq(q); 1941126fca64SHariprasad Shenai 1942f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 1943f7917c00SJeff Kirsher return NET_XMIT_SUCCESS; 1944f7917c00SJeff Kirsher } 1945f7917c00SJeff Kirsher 1946f7917c00SJeff Kirsher /** 1947f7917c00SJeff Kirsher * restart_ofldq - restart a suspended offload queue 1948f7917c00SJeff Kirsher * @data: the offload queue to restart 1949f7917c00SJeff Kirsher * 1950f7917c00SJeff Kirsher * Resumes transmission on a suspended Tx offload queue. 1951f7917c00SJeff Kirsher */ 1952f7917c00SJeff Kirsher static void restart_ofldq(unsigned long data) 1953f7917c00SJeff Kirsher { 1954ab677ff4SHariprasad Shenai struct sge_uld_txq *q = (struct sge_uld_txq *)data; 1955f7917c00SJeff Kirsher 1956f7917c00SJeff Kirsher spin_lock(&q->sendq.lock); 1957f7917c00SJeff Kirsher q->full = 0; /* the queue actually is completely empty now */ 1958f7917c00SJeff Kirsher service_ofldq(q); 1959f7917c00SJeff Kirsher spin_unlock(&q->sendq.lock); 1960f7917c00SJeff Kirsher } 1961f7917c00SJeff Kirsher 1962f7917c00SJeff Kirsher /** 1963f7917c00SJeff Kirsher * skb_txq - return the Tx queue an offload packet should use 1964f7917c00SJeff Kirsher * @skb: the packet 1965f7917c00SJeff Kirsher * 1966f7917c00SJeff Kirsher * Returns the Tx queue an offload packet should use as indicated by bits 1967f7917c00SJeff Kirsher * 1-15 in the packet's queue_mapping. 1968f7917c00SJeff Kirsher */ 1969f7917c00SJeff Kirsher static inline unsigned int skb_txq(const struct sk_buff *skb) 1970f7917c00SJeff Kirsher { 1971f7917c00SJeff Kirsher return skb->queue_mapping >> 1; 1972f7917c00SJeff Kirsher } 1973f7917c00SJeff Kirsher 1974f7917c00SJeff Kirsher /** 1975f7917c00SJeff Kirsher * is_ctrl_pkt - return whether an offload packet is a control packet 1976f7917c00SJeff Kirsher * @skb: the packet 1977f7917c00SJeff Kirsher * 1978f7917c00SJeff Kirsher * Returns whether an offload packet should use an OFLD or a CTRL 1979f7917c00SJeff Kirsher * Tx queue as indicated by bit 0 in the packet's queue_mapping. 
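 * (Editor's note, an illustrative sketch with hypothetical names: a sender
 *  would encode the mapping as
 *
 *	skb->queue_mapping = (txq_idx << 1) | use_ctrl_queue;
 *
 *  so that skb_txq() above recovers txq_idx and is_ctrl_pkt() recovers the
 *  control-vs-offload selector examined by uld_send() below.)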
1980f7917c00SJeff Kirsher */ 1981f7917c00SJeff Kirsher static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) 1982f7917c00SJeff Kirsher { 1983f7917c00SJeff Kirsher return skb->queue_mapping & 1; 1984f7917c00SJeff Kirsher } 1985f7917c00SJeff Kirsher 1986ab677ff4SHariprasad Shenai static inline int uld_send(struct adapter *adap, struct sk_buff *skb, 1987ab677ff4SHariprasad Shenai unsigned int tx_uld_type) 1988f7917c00SJeff Kirsher { 1989ab677ff4SHariprasad Shenai struct sge_uld_txq_info *txq_info; 1990ab677ff4SHariprasad Shenai struct sge_uld_txq *txq; 1991f7917c00SJeff Kirsher unsigned int idx = skb_txq(skb); 1992f7917c00SJeff Kirsher 19934fe44dd7SKumar Sanghvi if (unlikely(is_ctrl_pkt(skb))) { 19944fe44dd7SKumar Sanghvi /* Single ctrl queue is a requirement for LE workaround path */ 19954fe44dd7SKumar Sanghvi if (adap->tids.nsftids) 19964fe44dd7SKumar Sanghvi idx = 0; 1997f7917c00SJeff Kirsher return ctrl_xmit(&adap->sge.ctrlq[idx], skb); 19984fe44dd7SKumar Sanghvi } 19990d4b729dSArjun V 20000d4b729dSArjun V txq_info = adap->sge.uld_txq_info[tx_uld_type]; 20010d4b729dSArjun V if (unlikely(!txq_info)) { 20020d4b729dSArjun V WARN_ON(true); 20030d4b729dSArjun V return NET_XMIT_DROP; 20040d4b729dSArjun V } 20050d4b729dSArjun V 20060d4b729dSArjun V txq = &txq_info->uldtxq[idx]; 2007ab677ff4SHariprasad Shenai return ofld_xmit(txq, skb); 2008f7917c00SJeff Kirsher } 2009f7917c00SJeff Kirsher 2010f7917c00SJeff Kirsher /** 2011f7917c00SJeff Kirsher * t4_ofld_send - send an offload packet 2012f7917c00SJeff Kirsher * @adap: the adapter 2013f7917c00SJeff Kirsher * @skb: the packet 2014f7917c00SJeff Kirsher * 2015f7917c00SJeff Kirsher * Sends an offload packet. We use the packet queue_mapping to select the 2016f7917c00SJeff Kirsher * appropriate Tx queue as follows: bit 0 indicates whether the packet 2017f7917c00SJeff Kirsher * should be sent as regular or control, bits 1-15 select the queue. 2018f7917c00SJeff Kirsher */ 2019f7917c00SJeff Kirsher int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) 2020f7917c00SJeff Kirsher { 2021f7917c00SJeff Kirsher int ret; 2022f7917c00SJeff Kirsher 2023f7917c00SJeff Kirsher local_bh_disable(); 2024ab677ff4SHariprasad Shenai ret = uld_send(adap, skb, CXGB4_TX_OFLD); 2025f7917c00SJeff Kirsher local_bh_enable(); 2026f7917c00SJeff Kirsher return ret; 2027f7917c00SJeff Kirsher } 2028f7917c00SJeff Kirsher 2029f7917c00SJeff Kirsher /** 2030f7917c00SJeff Kirsher * cxgb4_ofld_send - send an offload packet 2031f7917c00SJeff Kirsher * @dev: the net device 2032f7917c00SJeff Kirsher * @skb: the packet 2033f7917c00SJeff Kirsher * 2034f7917c00SJeff Kirsher * Sends an offload packet. This is an exported version of @t4_ofld_send, 2035f7917c00SJeff Kirsher * intended for ULDs. 
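 * (Editor's note, an illustrative usage sketch only; the queue index and
 *  device variable are hypothetical:
 *
 *	skb->queue_mapping = (my_ofld_qidx << 1);
 *	ret = cxgb4_ofld_send(netdev, skb);
 *
 *  Bit 0 is left clear to select an offload rather than a control queue,
 *  and the return value is the NET_XMIT_* result of the underlying
 *  ofld_xmit() or ctrl_xmit() call.)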
2036f7917c00SJeff Kirsher */ 2037f7917c00SJeff Kirsher int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) 2038f7917c00SJeff Kirsher { 2039f7917c00SJeff Kirsher return t4_ofld_send(netdev2adap(dev), skb); 2040f7917c00SJeff Kirsher } 2041f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb4_ofld_send); 2042f7917c00SJeff Kirsher 2043e383f248SAtul Gupta static void *inline_tx_header(const void *src, 2044e383f248SAtul Gupta const struct sge_txq *q, 2045e383f248SAtul Gupta void *pos, int length) 2046e383f248SAtul Gupta { 2047e383f248SAtul Gupta int left = (void *)q->stat - pos; 2048e383f248SAtul Gupta u64 *p; 2049e383f248SAtul Gupta 2050e383f248SAtul Gupta if (likely(length <= left)) { 2051e383f248SAtul Gupta memcpy(pos, src, length); 2052e383f248SAtul Gupta pos += length; 2053e383f248SAtul Gupta } else { 2054e383f248SAtul Gupta memcpy(pos, src, left); 2055e383f248SAtul Gupta memcpy(q->desc, src + left, length - left); 2056e383f248SAtul Gupta pos = (void *)q->desc + (length - left); 2057e383f248SAtul Gupta } 2058e383f248SAtul Gupta /* 0-pad to multiple of 16 */ 2059e383f248SAtul Gupta p = PTR_ALIGN(pos, 8); 2060e383f248SAtul Gupta if ((uintptr_t)p & 8) { 2061e383f248SAtul Gupta *p = 0; 2062e383f248SAtul Gupta return p + 1; 2063e383f248SAtul Gupta } 2064e383f248SAtul Gupta return p; 2065e383f248SAtul Gupta } 2066e383f248SAtul Gupta 2067e383f248SAtul Gupta /** 2068e383f248SAtul Gupta * ofld_xmit_direct - copy a WR into offload queue 2069e383f248SAtul Gupta * @q: the Tx offload queue 2070e383f248SAtul Gupta * @src: location of WR 2071e383f248SAtul Gupta * @len: WR length 2072e383f248SAtul Gupta * 2073e383f248SAtul Gupta * Copy an immediate WR into an uncontended SGE offload queue. 2074e383f248SAtul Gupta */ 2075e383f248SAtul Gupta static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src, 2076e383f248SAtul Gupta unsigned int len) 2077e383f248SAtul Gupta { 2078e383f248SAtul Gupta unsigned int ndesc; 2079e383f248SAtul Gupta int credits; 2080e383f248SAtul Gupta u64 *pos; 2081e383f248SAtul Gupta 2082e383f248SAtul Gupta /* Use the lower limit as the cut-off */ 2083e383f248SAtul Gupta if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) { 2084e383f248SAtul Gupta WARN_ON(1); 2085e383f248SAtul Gupta return NET_XMIT_DROP; 2086e383f248SAtul Gupta } 2087e383f248SAtul Gupta 2088e383f248SAtul Gupta /* Don't return NET_XMIT_CN here as the current 2089e383f248SAtul Gupta * implementation doesn't queue the request 2090e383f248SAtul Gupta * using an skb when the following conditions not met 2091e383f248SAtul Gupta */ 2092e383f248SAtul Gupta if (!spin_trylock(&q->sendq.lock)) 2093e383f248SAtul Gupta return NET_XMIT_DROP; 2094e383f248SAtul Gupta 2095e383f248SAtul Gupta if (q->full || !skb_queue_empty(&q->sendq) || 2096e383f248SAtul Gupta q->service_ofldq_running) { 2097e383f248SAtul Gupta spin_unlock(&q->sendq.lock); 2098e383f248SAtul Gupta return NET_XMIT_DROP; 2099e383f248SAtul Gupta } 2100e383f248SAtul Gupta ndesc = flits_to_desc(DIV_ROUND_UP(len, 8)); 2101e383f248SAtul Gupta credits = txq_avail(&q->q) - ndesc; 2102e383f248SAtul Gupta pos = (u64 *)&q->q.desc[q->q.pidx]; 2103e383f248SAtul Gupta 2104e383f248SAtul Gupta /* ofldtxq_stop modifies WR header in-situ */ 2105e383f248SAtul Gupta inline_tx_header(src, &q->q, pos, len); 2106e383f248SAtul Gupta if (unlikely(credits < TXQ_STOP_THRES)) 2107e383f248SAtul Gupta ofldtxq_stop(q, (struct fw_wr_hdr *)pos); 2108e383f248SAtul Gupta txq_advance(&q->q, ndesc); 2109e383f248SAtul Gupta cxgb4_ring_tx_db(q->adap, &q->q, ndesc); 2110e383f248SAtul Gupta 2111e383f248SAtul Gupta 
spin_unlock(&q->sendq.lock); 2112e383f248SAtul Gupta return NET_XMIT_SUCCESS; 2113e383f248SAtul Gupta } 2114e383f248SAtul Gupta 2115e383f248SAtul Gupta int cxgb4_immdata_send(struct net_device *dev, unsigned int idx, 2116e383f248SAtul Gupta const void *src, unsigned int len) 2117e383f248SAtul Gupta { 2118e383f248SAtul Gupta struct sge_uld_txq_info *txq_info; 2119e383f248SAtul Gupta struct sge_uld_txq *txq; 2120e383f248SAtul Gupta struct adapter *adap; 2121e383f248SAtul Gupta int ret; 2122e383f248SAtul Gupta 2123e383f248SAtul Gupta adap = netdev2adap(dev); 2124e383f248SAtul Gupta 2125e383f248SAtul Gupta local_bh_disable(); 2126e383f248SAtul Gupta txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; 2127e383f248SAtul Gupta if (unlikely(!txq_info)) { 2128e383f248SAtul Gupta WARN_ON(true); 2129e383f248SAtul Gupta local_bh_enable(); 2130e383f248SAtul Gupta return NET_XMIT_DROP; 2131e383f248SAtul Gupta } 2132e383f248SAtul Gupta txq = &txq_info->uldtxq[idx]; 2133e383f248SAtul Gupta 2134e383f248SAtul Gupta ret = ofld_xmit_direct(txq, src, len); 2135e383f248SAtul Gupta local_bh_enable(); 2136e383f248SAtul Gupta return net_xmit_eval(ret); 2137e383f248SAtul Gupta } 2138e383f248SAtul Gupta EXPORT_SYMBOL(cxgb4_immdata_send); 2139e383f248SAtul Gupta 2140ab677ff4SHariprasad Shenai /** 2141ab677ff4SHariprasad Shenai * t4_crypto_send - send crypto packet 2142ab677ff4SHariprasad Shenai * @adap: the adapter 2143ab677ff4SHariprasad Shenai * @skb: the packet 2144ab677ff4SHariprasad Shenai * 2145ab677ff4SHariprasad Shenai * Sends crypto packet. We use the packet queue_mapping to select the 2146ab677ff4SHariprasad Shenai * appropriate Tx queue as follows: bit 0 indicates whether the packet 2147ab677ff4SHariprasad Shenai * should be sent as regular or control, bits 1-15 select the queue. 2148ab677ff4SHariprasad Shenai */ 2149ab677ff4SHariprasad Shenai static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb) 2150ab677ff4SHariprasad Shenai { 2151ab677ff4SHariprasad Shenai int ret; 2152ab677ff4SHariprasad Shenai 2153ab677ff4SHariprasad Shenai local_bh_disable(); 2154ab677ff4SHariprasad Shenai ret = uld_send(adap, skb, CXGB4_TX_CRYPTO); 2155ab677ff4SHariprasad Shenai local_bh_enable(); 2156ab677ff4SHariprasad Shenai return ret; 2157ab677ff4SHariprasad Shenai } 2158ab677ff4SHariprasad Shenai 2159ab677ff4SHariprasad Shenai /** 2160ab677ff4SHariprasad Shenai * cxgb4_crypto_send - send crypto packet 2161ab677ff4SHariprasad Shenai * @dev: the net device 2162ab677ff4SHariprasad Shenai * @skb: the packet 2163ab677ff4SHariprasad Shenai * 2164ab677ff4SHariprasad Shenai * Sends crypto packet. This is an exported version of @t4_crypto_send, 2165ab677ff4SHariprasad Shenai * intended for ULDs. 
2166ab677ff4SHariprasad Shenai */ 2167ab677ff4SHariprasad Shenai int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb) 2168ab677ff4SHariprasad Shenai { 2169ab677ff4SHariprasad Shenai return t4_crypto_send(netdev2adap(dev), skb); 2170ab677ff4SHariprasad Shenai } 2171ab677ff4SHariprasad Shenai EXPORT_SYMBOL(cxgb4_crypto_send); 2172ab677ff4SHariprasad Shenai 2173e91b0f24SIan Campbell static inline void copy_frags(struct sk_buff *skb, 2174f7917c00SJeff Kirsher const struct pkt_gl *gl, unsigned int offset) 2175f7917c00SJeff Kirsher { 2176e91b0f24SIan Campbell int i; 2177f7917c00SJeff Kirsher 2178f7917c00SJeff Kirsher /* usually there's just one frag */ 2179e91b0f24SIan Campbell __skb_fill_page_desc(skb, 0, gl->frags[0].page, 2180e91b0f24SIan Campbell gl->frags[0].offset + offset, 2181e91b0f24SIan Campbell gl->frags[0].size - offset); 2182e91b0f24SIan Campbell skb_shinfo(skb)->nr_frags = gl->nfrags; 2183e91b0f24SIan Campbell for (i = 1; i < gl->nfrags; i++) 2184e91b0f24SIan Campbell __skb_fill_page_desc(skb, i, gl->frags[i].page, 2185e91b0f24SIan Campbell gl->frags[i].offset, 2186e91b0f24SIan Campbell gl->frags[i].size); 2187f7917c00SJeff Kirsher 2188f7917c00SJeff Kirsher /* get a reference to the last page, we don't own it */ 2189e91b0f24SIan Campbell get_page(gl->frags[gl->nfrags - 1].page); 2190f7917c00SJeff Kirsher } 2191f7917c00SJeff Kirsher 2192f7917c00SJeff Kirsher /** 2193f7917c00SJeff Kirsher * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list 2194f7917c00SJeff Kirsher * @gl: the gather list 2195f7917c00SJeff Kirsher * @skb_len: size of sk_buff main body if it carries fragments 2196f7917c00SJeff Kirsher * @pull_len: amount of data to move to the sk_buff's main body 2197f7917c00SJeff Kirsher * 2198f7917c00SJeff Kirsher * Builds an sk_buff from the given packet gather list. Returns the 2199f7917c00SJeff Kirsher * sk_buff or %NULL if sk_buff allocation failed. 2200f7917c00SJeff Kirsher */ 2201f7917c00SJeff Kirsher struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, 2202f7917c00SJeff Kirsher unsigned int skb_len, unsigned int pull_len) 2203f7917c00SJeff Kirsher { 2204f7917c00SJeff Kirsher struct sk_buff *skb; 2205f7917c00SJeff Kirsher 2206f7917c00SJeff Kirsher /* 2207f7917c00SJeff Kirsher * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer 2208f7917c00SJeff Kirsher * size, which is expected since buffers are at least PAGE_SIZEd. 2209f7917c00SJeff Kirsher * In this case packets up to RX_COPY_THRES have only one fragment. 
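 * (Editor's note, summarising the two paths below: gather lists of at most
 *  RX_COPY_THRES bytes are copied wholesale into a freshly allocated skb,
 *  while larger ones get only @pull_len bytes copied into the linear area
 *  and the rest of the data attached as page fragments via copy_frags().)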
2210f7917c00SJeff Kirsher */ 2211f7917c00SJeff Kirsher if (gl->tot_len <= RX_COPY_THRES) { 2212f7917c00SJeff Kirsher skb = dev_alloc_skb(gl->tot_len); 2213f7917c00SJeff Kirsher if (unlikely(!skb)) 2214f7917c00SJeff Kirsher goto out; 2215f7917c00SJeff Kirsher __skb_put(skb, gl->tot_len); 2216f7917c00SJeff Kirsher skb_copy_to_linear_data(skb, gl->va, gl->tot_len); 2217f7917c00SJeff Kirsher } else { 2218f7917c00SJeff Kirsher skb = dev_alloc_skb(skb_len); 2219f7917c00SJeff Kirsher if (unlikely(!skb)) 2220f7917c00SJeff Kirsher goto out; 2221f7917c00SJeff Kirsher __skb_put(skb, pull_len); 2222f7917c00SJeff Kirsher skb_copy_to_linear_data(skb, gl->va, pull_len); 2223f7917c00SJeff Kirsher 2224e91b0f24SIan Campbell copy_frags(skb, gl, pull_len); 2225f7917c00SJeff Kirsher skb->len = gl->tot_len; 2226f7917c00SJeff Kirsher skb->data_len = skb->len - pull_len; 2227f7917c00SJeff Kirsher skb->truesize += skb->data_len; 2228f7917c00SJeff Kirsher } 2229f7917c00SJeff Kirsher out: return skb; 2230f7917c00SJeff Kirsher } 2231f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb4_pktgl_to_skb); 2232f7917c00SJeff Kirsher 2233f7917c00SJeff Kirsher /** 2234f7917c00SJeff Kirsher * t4_pktgl_free - free a packet gather list 2235f7917c00SJeff Kirsher * @gl: the gather list 2236f7917c00SJeff Kirsher * 2237f7917c00SJeff Kirsher * Releases the pages of a packet gather list. We do not own the last 2238f7917c00SJeff Kirsher * page on the list and do not free it. 2239f7917c00SJeff Kirsher */ 2240f7917c00SJeff Kirsher static void t4_pktgl_free(const struct pkt_gl *gl) 2241f7917c00SJeff Kirsher { 2242f7917c00SJeff Kirsher int n; 2243e91b0f24SIan Campbell const struct page_frag *p; 2244f7917c00SJeff Kirsher 2245f7917c00SJeff Kirsher for (p = gl->frags, n = gl->nfrags - 1; n--; p++) 2246f7917c00SJeff Kirsher put_page(p->page); 2247f7917c00SJeff Kirsher } 2248f7917c00SJeff Kirsher 2249f7917c00SJeff Kirsher /* 2250f7917c00SJeff Kirsher * Process an MPS trace packet. Give it an unused protocol number so it won't 2251f7917c00SJeff Kirsher * be delivered to anyone and send it to the stack for capture. 
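 * The leading CPL trace header (cpl_trace_pkt, or cpl_t5_trace_pkt on later
 * chips) is stripped before the skb is handed to netif_receive_skb().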
2252f7917c00SJeff Kirsher */ 2253f7917c00SJeff Kirsher static noinline int handle_trace_pkt(struct adapter *adap, 2254f7917c00SJeff Kirsher const struct pkt_gl *gl) 2255f7917c00SJeff Kirsher { 2256f7917c00SJeff Kirsher struct sk_buff *skb; 2257f7917c00SJeff Kirsher 2258f7917c00SJeff Kirsher skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); 2259f7917c00SJeff Kirsher if (unlikely(!skb)) { 2260f7917c00SJeff Kirsher t4_pktgl_free(gl); 2261f7917c00SJeff Kirsher return 0; 2262f7917c00SJeff Kirsher } 2263f7917c00SJeff Kirsher 2264d14807ddSHariprasad Shenai if (is_t4(adap->params.chip)) 22650a57a536SSantosh Rastapur __skb_pull(skb, sizeof(struct cpl_trace_pkt)); 22660a57a536SSantosh Rastapur else 22670a57a536SSantosh Rastapur __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); 22680a57a536SSantosh Rastapur 2269f7917c00SJeff Kirsher skb_reset_mac_header(skb); 2270f7917c00SJeff Kirsher skb->protocol = htons(0xffff); 2271f7917c00SJeff Kirsher skb->dev = adap->port[0]; 2272f7917c00SJeff Kirsher netif_receive_skb(skb); 2273f7917c00SJeff Kirsher return 0; 2274f7917c00SJeff Kirsher } 2275f7917c00SJeff Kirsher 22765e2a5ebcSHariprasad Shenai /** 22775e2a5ebcSHariprasad Shenai * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp 22785e2a5ebcSHariprasad Shenai * @adap: the adapter 22795e2a5ebcSHariprasad Shenai * @hwtstamps: time stamp structure to update 22805e2a5ebcSHariprasad Shenai * @sgetstamp: 60bit iqe timestamp 22815e2a5ebcSHariprasad Shenai * 22825e2a5ebcSHariprasad Shenai * Every ingress queue entry has the 60-bit timestamp, convert that timestamp 22835e2a5ebcSHariprasad Shenai * which is in Core Clock ticks into ktime_t and assign it 22845e2a5ebcSHariprasad Shenai **/ 22855e2a5ebcSHariprasad Shenai static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap, 22865e2a5ebcSHariprasad Shenai struct skb_shared_hwtstamps *hwtstamps, 22875e2a5ebcSHariprasad Shenai u64 sgetstamp) 22885e2a5ebcSHariprasad Shenai { 22895e2a5ebcSHariprasad Shenai u64 ns; 22905e2a5ebcSHariprasad Shenai u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2); 22915e2a5ebcSHariprasad Shenai 22925e2a5ebcSHariprasad Shenai ns = div_u64(tmp, adap->params.vpd.cclk); 22935e2a5ebcSHariprasad Shenai 22945e2a5ebcSHariprasad Shenai memset(hwtstamps, 0, sizeof(*hwtstamps)); 22955e2a5ebcSHariprasad Shenai hwtstamps->hwtstamp = ns_to_ktime(ns); 22965e2a5ebcSHariprasad Shenai } 22975e2a5ebcSHariprasad Shenai 2298f7917c00SJeff Kirsher static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, 2299c50ae55eSGanesh Goudar const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len) 2300f7917c00SJeff Kirsher { 230152367a76SVipul Pandya struct adapter *adapter = rxq->rspq.adap; 230252367a76SVipul Pandya struct sge *s = &adapter->sge; 23035e2a5ebcSHariprasad Shenai struct port_info *pi; 2304f7917c00SJeff Kirsher int ret; 2305f7917c00SJeff Kirsher struct sk_buff *skb; 2306f7917c00SJeff Kirsher 2307f7917c00SJeff Kirsher skb = napi_get_frags(&rxq->rspq.napi); 2308f7917c00SJeff Kirsher if (unlikely(!skb)) { 2309f7917c00SJeff Kirsher t4_pktgl_free(gl); 2310f7917c00SJeff Kirsher rxq->stats.rx_drops++; 2311f7917c00SJeff Kirsher return; 2312f7917c00SJeff Kirsher } 2313f7917c00SJeff Kirsher 231452367a76SVipul Pandya copy_frags(skb, gl, s->pktshift); 2315c50ae55eSGanesh Goudar if (tnl_hdr_len) 2316c50ae55eSGanesh Goudar skb->csum_level = 1; 231752367a76SVipul Pandya skb->len = gl->tot_len - s->pktshift; 2318f7917c00SJeff Kirsher skb->data_len = skb->len; 2319f7917c00SJeff Kirsher skb->truesize += skb->data_len; 
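/* The caller only invokes do_gro() when the hardware reported a good
 * checksum, so mark the skb as already verified, record the Rx queue and
 * RSS hash, and hand the fragments to napi_gro_frags() below.
 */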
2320f7917c00SJeff Kirsher skb->ip_summed = CHECKSUM_UNNECESSARY; 2321f7917c00SJeff Kirsher skb_record_rx_queue(skb, rxq->rspq.idx); 23225e2a5ebcSHariprasad Shenai pi = netdev_priv(skb->dev); 23235e2a5ebcSHariprasad Shenai if (pi->rxtstamp) 23245e2a5ebcSHariprasad Shenai cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb), 23255e2a5ebcSHariprasad Shenai gl->sgetstamp); 2326f7917c00SJeff Kirsher if (rxq->rspq.netdev->features & NETIF_F_RXHASH) 23278264989cSTom Herbert skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, 23288264989cSTom Herbert PKT_HASH_TYPE_L3); 2329f7917c00SJeff Kirsher 2330f7917c00SJeff Kirsher if (unlikely(pkt->vlan_ex)) { 233186a9bad3SPatrick McHardy __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); 2332f7917c00SJeff Kirsher rxq->stats.vlan_ex++; 2333f7917c00SJeff Kirsher } 2334f7917c00SJeff Kirsher ret = napi_gro_frags(&rxq->rspq.napi); 2335f7917c00SJeff Kirsher if (ret == GRO_HELD) 2336f7917c00SJeff Kirsher rxq->stats.lro_pkts++; 2337f7917c00SJeff Kirsher else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) 2338f7917c00SJeff Kirsher rxq->stats.lro_merged++; 2339f7917c00SJeff Kirsher rxq->stats.pkts++; 2340f7917c00SJeff Kirsher rxq->stats.rx_cso++; 2341f7917c00SJeff Kirsher } 2342f7917c00SJeff Kirsher 2343a4569504SAtul Gupta enum { 2344a4569504SAtul Gupta RX_NON_PTP_PKT = 0, 2345a4569504SAtul Gupta RX_PTP_PKT_SUC = 1, 2346a4569504SAtul Gupta RX_PTP_PKT_ERR = 2 2347a4569504SAtul Gupta }; 2348a4569504SAtul Gupta 2349a4569504SAtul Gupta /** 2350a4569504SAtul Gupta * t4_systim_to_hwstamp - read hardware time stamp 2351a4569504SAtul Gupta * @adap: the adapter 2352a4569504SAtul Gupta * @skb: the packet 2353a4569504SAtul Gupta * 2354a4569504SAtul Gupta * Read Time Stamp from MPS packet and insert in skb which 2355a4569504SAtul Gupta * is forwarded to PTP application 2356a4569504SAtul Gupta */ 2357a4569504SAtul Gupta static noinline int t4_systim_to_hwstamp(struct adapter *adapter, 2358a4569504SAtul Gupta struct sk_buff *skb) 2359a4569504SAtul Gupta { 2360a4569504SAtul Gupta struct skb_shared_hwtstamps *hwtstamps; 2361a4569504SAtul Gupta struct cpl_rx_mps_pkt *cpl = NULL; 2362a4569504SAtul Gupta unsigned char *data; 2363a4569504SAtul Gupta int offset; 2364a4569504SAtul Gupta 2365a4569504SAtul Gupta cpl = (struct cpl_rx_mps_pkt *)skb->data; 2366a4569504SAtul Gupta if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) & 2367a4569504SAtul Gupta X_CPL_RX_MPS_PKT_TYPE_PTP)) 2368a4569504SAtul Gupta return RX_PTP_PKT_ERR; 2369a4569504SAtul Gupta 2370a4569504SAtul Gupta data = skb->data + sizeof(*cpl); 2371a4569504SAtul Gupta skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt)); 2372a4569504SAtul Gupta offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN; 2373a4569504SAtul Gupta if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short)) 2374a4569504SAtul Gupta return RX_PTP_PKT_ERR; 2375a4569504SAtul Gupta 2376a4569504SAtul Gupta hwtstamps = skb_hwtstamps(skb); 2377a4569504SAtul Gupta memset(hwtstamps, 0, sizeof(*hwtstamps)); 2378a4569504SAtul Gupta hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data))); 2379a4569504SAtul Gupta 2380a4569504SAtul Gupta return RX_PTP_PKT_SUC; 2381a4569504SAtul Gupta } 2382a4569504SAtul Gupta 2383a4569504SAtul Gupta /** 2384a4569504SAtul Gupta * t4_rx_hststamp - Recv PTP Event Message 2385a4569504SAtul Gupta * @adap: the adapter 2386a4569504SAtul Gupta * @rsp: the response queue descriptor holding the RX_PKT message 2387a4569504SAtul Gupta * @skb: the packet 2388a4569504SAtul Gupta * 2389a4569504SAtul Gupta * PTP 
enabled and MPS packet, read HW timestamp 2390a4569504SAtul Gupta */ 2391a4569504SAtul Gupta static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp, 2392a4569504SAtul Gupta struct sge_eth_rxq *rxq, struct sk_buff *skb) 2393a4569504SAtul Gupta { 2394a4569504SAtul Gupta int ret; 2395a4569504SAtul Gupta 2396a4569504SAtul Gupta if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) && 2397a4569504SAtul Gupta !is_t4(adapter->params.chip))) { 2398a4569504SAtul Gupta ret = t4_systim_to_hwstamp(adapter, skb); 2399a4569504SAtul Gupta if (ret == RX_PTP_PKT_ERR) { 2400a4569504SAtul Gupta kfree_skb(skb); 2401a4569504SAtul Gupta rxq->stats.rx_drops++; 2402a4569504SAtul Gupta } 2403a4569504SAtul Gupta return ret; 2404a4569504SAtul Gupta } 2405a4569504SAtul Gupta return RX_NON_PTP_PKT; 2406a4569504SAtul Gupta } 2407a4569504SAtul Gupta 2408a4569504SAtul Gupta /** 2409a4569504SAtul Gupta * t4_tx_hststamp - Loopback PTP Transmit Event Message 2410a4569504SAtul Gupta * @adap: the adapter 2411a4569504SAtul Gupta * @skb: the packet 2412a4569504SAtul Gupta * @dev: the ingress net device 2413a4569504SAtul Gupta * 2414a4569504SAtul Gupta * Read hardware timestamp for the loopback PTP Tx event message 2415a4569504SAtul Gupta */ 2416a4569504SAtul Gupta static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb, 2417a4569504SAtul Gupta struct net_device *dev) 2418a4569504SAtul Gupta { 2419a4569504SAtul Gupta struct port_info *pi = netdev_priv(dev); 2420a4569504SAtul Gupta 2421a4569504SAtul Gupta if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) { 2422a4569504SAtul Gupta cxgb4_ptp_read_hwstamp(adapter, pi); 2423a4569504SAtul Gupta kfree_skb(skb); 2424a4569504SAtul Gupta return 0; 2425a4569504SAtul Gupta } 2426a4569504SAtul Gupta return 1; 2427a4569504SAtul Gupta } 2428a4569504SAtul Gupta 2429f7917c00SJeff Kirsher /** 2430f7917c00SJeff Kirsher * t4_ethrx_handler - process an ingress ethernet packet 2431f7917c00SJeff Kirsher * @q: the response queue that received the packet 2432f7917c00SJeff Kirsher * @rsp: the response queue descriptor holding the RX_PKT message 2433f7917c00SJeff Kirsher * @si: the gather list of packet fragments 2434f7917c00SJeff Kirsher * 2435f7917c00SJeff Kirsher * Process an ingress ethernet packet and deliver it to the stack. 2436f7917c00SJeff Kirsher */ 2437f7917c00SJeff Kirsher int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, 2438f7917c00SJeff Kirsher const struct pkt_gl *si) 2439f7917c00SJeff Kirsher { 2440f7917c00SJeff Kirsher bool csum_ok; 2441f7917c00SJeff Kirsher struct sk_buff *skb; 2442f7917c00SJeff Kirsher const struct cpl_rx_pkt *pkt; 2443f7917c00SJeff Kirsher struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 2444a4569504SAtul Gupta struct adapter *adapter = q->adap; 244552367a76SVipul Pandya struct sge *s = &q->adap->sge; 2446d14807ddSHariprasad Shenai int cpl_trace_pkt = is_t4(q->adap->params.chip) ? 
24470a57a536SSantosh Rastapur CPL_TRACE_PKT : CPL_TRACE_PKT_T5; 2448c50ae55eSGanesh Goudar u16 err_vec, tnl_hdr_len = 0; 244984a200b3SVarun Prakash struct port_info *pi; 2450a4569504SAtul Gupta int ret = 0; 2451f7917c00SJeff Kirsher 24520a57a536SSantosh Rastapur if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) 2453f7917c00SJeff Kirsher return handle_trace_pkt(q->adap, si); 2454f7917c00SJeff Kirsher 2455f7917c00SJeff Kirsher pkt = (const struct cpl_rx_pkt *)rsp; 24568eb9f2f9SArjun V /* Compressed error vector is enabled for T6 only */ 2457c50ae55eSGanesh Goudar if (q->adap->params.tp.rx_pkt_encap) { 24588eb9f2f9SArjun V err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); 2459c50ae55eSGanesh Goudar tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); 2460c50ae55eSGanesh Goudar } else { 24618eb9f2f9SArjun V err_vec = be16_to_cpu(pkt->err_vec); 2462c50ae55eSGanesh Goudar } 24638eb9f2f9SArjun V 24648eb9f2f9SArjun V csum_ok = pkt->csum_calc && !err_vec && 2465cca2822dSHariprasad Shenai (q->netdev->features & NETIF_F_RXCSUM); 2466c50ae55eSGanesh Goudar if (((pkt->l2info & htonl(RXF_TCP_F)) || 2467c50ae55eSGanesh Goudar tnl_hdr_len) && 2468f7917c00SJeff Kirsher (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { 2469c50ae55eSGanesh Goudar do_gro(rxq, si, pkt, tnl_hdr_len); 2470f7917c00SJeff Kirsher return 0; 2471f7917c00SJeff Kirsher } 2472f7917c00SJeff Kirsher 2473f7917c00SJeff Kirsher skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); 2474f7917c00SJeff Kirsher if (unlikely(!skb)) { 2475f7917c00SJeff Kirsher t4_pktgl_free(si); 2476f7917c00SJeff Kirsher rxq->stats.rx_drops++; 2477f7917c00SJeff Kirsher return 0; 2478f7917c00SJeff Kirsher } 2479a4569504SAtul Gupta pi = netdev_priv(q->netdev); 2480f7917c00SJeff Kirsher 2481a4569504SAtul Gupta /* Handle PTP Event Rx packet */ 2482a4569504SAtul Gupta if (unlikely(pi->ptp_enable)) { 2483a4569504SAtul Gupta ret = t4_rx_hststamp(adapter, rsp, rxq, skb); 2484a4569504SAtul Gupta if (ret == RX_PTP_PKT_ERR) 2485a4569504SAtul Gupta return 0; 2486a4569504SAtul Gupta } 2487a4569504SAtul Gupta if (likely(!ret)) 2488a4569504SAtul Gupta __skb_pull(skb, s->pktshift); /* remove ethernet header pad */ 2489a4569504SAtul Gupta 2490a4569504SAtul Gupta /* Handle the PTP Event Tx Loopback packet */ 2491a4569504SAtul Gupta if (unlikely(pi->ptp_enable && !ret && 2492a4569504SAtul Gupta (pkt->l2info & htonl(RXF_UDP_F)) && 2493a4569504SAtul Gupta cxgb4_ptp_is_ptp_rx(skb))) { 2494a4569504SAtul Gupta if (!t4_tx_hststamp(adapter, skb, q->netdev)) 2495a4569504SAtul Gupta return 0; 2496a4569504SAtul Gupta } 2497a4569504SAtul Gupta 2498f7917c00SJeff Kirsher skb->protocol = eth_type_trans(skb, q->netdev); 2499f7917c00SJeff Kirsher skb_record_rx_queue(skb, q->idx); 2500f7917c00SJeff Kirsher if (skb->dev->features & NETIF_F_RXHASH) 25018264989cSTom Herbert skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, 25028264989cSTom Herbert PKT_HASH_TYPE_L3); 2503f7917c00SJeff Kirsher 2504f7917c00SJeff Kirsher rxq->stats.pkts++; 2505f7917c00SJeff Kirsher 25065e2a5ebcSHariprasad Shenai if (pi->rxtstamp) 25075e2a5ebcSHariprasad Shenai cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb), 25085e2a5ebcSHariprasad Shenai si->sgetstamp); 2509bdc590b9SHariprasad Shenai if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { 2510f7917c00SJeff Kirsher if (!pkt->ip_frag) { 2511f7917c00SJeff Kirsher skb->ip_summed = CHECKSUM_UNNECESSARY; 2512f7917c00SJeff Kirsher rxq->stats.rx_cso++; 2513bdc590b9SHariprasad Shenai } else if (pkt->l2info & htonl(RXF_IP_F)) { 
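/* IP fragment: the hardware cannot validate the full L4 checksum, so
 * pass the raw checksum up (CHECKSUM_COMPLETE, or CHECKSUM_UNNECESSARY
 * with csum_level set for tunnelled traffic) and let the stack finish
 * the verification.
 */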
2514f7917c00SJeff Kirsher __sum16 c = (__force __sum16)pkt->csum;
2515f7917c00SJeff Kirsher skb->csum = csum_unfold(c);
2516c50ae55eSGanesh Goudar
2517c50ae55eSGanesh Goudar if (tnl_hdr_len) {
2518c50ae55eSGanesh Goudar skb->ip_summed = CHECKSUM_UNNECESSARY;
2519c50ae55eSGanesh Goudar skb->csum_level = 1;
2520c50ae55eSGanesh Goudar } else {
2521f7917c00SJeff Kirsher skb->ip_summed = CHECKSUM_COMPLETE;
2522c50ae55eSGanesh Goudar }
2523f7917c00SJeff Kirsher rxq->stats.rx_cso++;
2524f7917c00SJeff Kirsher }
252584a200b3SVarun Prakash } else {
2526f7917c00SJeff Kirsher skb_checksum_none_assert(skb);
252784a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE
252884a200b3SVarun Prakash #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
252984a200b3SVarun Prakash RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
253084a200b3SVarun Prakash
253184a200b3SVarun Prakash if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
253284a200b3SVarun Prakash if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
253384a200b3SVarun Prakash (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
25348eb9f2f9SArjun V if (q->adap->params.tp.rx_pkt_encap)
25358eb9f2f9SArjun V csum_ok = err_vec &
25368eb9f2f9SArjun V T6_COMPR_RXERR_SUM_F;
25378eb9f2f9SArjun V else
25388eb9f2f9SArjun V csum_ok = err_vec & RXERR_CSUM_F;
25398eb9f2f9SArjun V if (!csum_ok)
254084a200b3SVarun Prakash skb->ip_summed = CHECKSUM_UNNECESSARY;
254184a200b3SVarun Prakash }
254284a200b3SVarun Prakash }
254384a200b3SVarun Prakash
254484a200b3SVarun Prakash #undef CPL_RX_PKT_FLAGS
254584a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */
254684a200b3SVarun Prakash }
2547f7917c00SJeff Kirsher
2548f7917c00SJeff Kirsher if (unlikely(pkt->vlan_ex)) {
254986a9bad3SPatrick McHardy __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2550f7917c00SJeff Kirsher rxq->stats.vlan_ex++;
2551f7917c00SJeff Kirsher }
25523a336cb1SHariprasad Shenai skb_mark_napi_id(skb, &q->napi);
2553f7917c00SJeff Kirsher netif_receive_skb(skb);
2554f7917c00SJeff Kirsher return 0;
2555f7917c00SJeff Kirsher }
2556f7917c00SJeff Kirsher
2557f7917c00SJeff Kirsher /**
2558f7917c00SJeff Kirsher * restore_rx_bufs - put back a packet's Rx buffers
2559f7917c00SJeff Kirsher * @si: the packet gather list
2560f7917c00SJeff Kirsher * @q: the SGE free list
2561f7917c00SJeff Kirsher * @frags: number of FL buffers to restore
2562f7917c00SJeff Kirsher *
2563f7917c00SJeff Kirsher * Puts back on an FL the Rx buffers associated with @si. The buffers
2564f7917c00SJeff Kirsher * have already been unmapped and are left unmapped; we mark them as such
2565f7917c00SJeff Kirsher * to prevent further unmapping attempts.
2566f7917c00SJeff Kirsher *
2567f7917c00SJeff Kirsher * This function undoes a series of @unmap_rx_buf calls when we find out
2568f7917c00SJeff Kirsher * that the current packet can't be processed right away after all and we
2569f7917c00SJeff Kirsher * need to come back to it later. This is a very rare event and there's
2570f7917c00SJeff Kirsher * no effort to make this particularly efficient.
2571f7917c00SJeff Kirsher */ 2572f7917c00SJeff Kirsher static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, 2573f7917c00SJeff Kirsher int frags) 2574f7917c00SJeff Kirsher { 2575f7917c00SJeff Kirsher struct rx_sw_desc *d; 2576f7917c00SJeff Kirsher 2577f7917c00SJeff Kirsher while (frags--) { 2578f7917c00SJeff Kirsher if (q->cidx == 0) 2579f7917c00SJeff Kirsher q->cidx = q->size - 1; 2580f7917c00SJeff Kirsher else 2581f7917c00SJeff Kirsher q->cidx--; 2582f7917c00SJeff Kirsher d = &q->sdesc[q->cidx]; 2583f7917c00SJeff Kirsher d->page = si->frags[frags].page; 2584f7917c00SJeff Kirsher d->dma_addr |= RX_UNMAPPED_BUF; 2585f7917c00SJeff Kirsher q->avail++; 2586f7917c00SJeff Kirsher } 2587f7917c00SJeff Kirsher } 2588f7917c00SJeff Kirsher 2589f7917c00SJeff Kirsher /** 2590f7917c00SJeff Kirsher * is_new_response - check if a response is newly written 2591f7917c00SJeff Kirsher * @r: the response descriptor 2592f7917c00SJeff Kirsher * @q: the response queue 2593f7917c00SJeff Kirsher * 2594f7917c00SJeff Kirsher * Returns true if a response descriptor contains a yet unprocessed 2595f7917c00SJeff Kirsher * response. 2596f7917c00SJeff Kirsher */ 2597f7917c00SJeff Kirsher static inline bool is_new_response(const struct rsp_ctrl *r, 2598f7917c00SJeff Kirsher const struct sge_rspq *q) 2599f7917c00SJeff Kirsher { 26001ecc7b7aSHariprasad Shenai return (r->type_gen >> RSPD_GEN_S) == q->gen; 2601f7917c00SJeff Kirsher } 2602f7917c00SJeff Kirsher 2603f7917c00SJeff Kirsher /** 2604f7917c00SJeff Kirsher * rspq_next - advance to the next entry in a response queue 2605f7917c00SJeff Kirsher * @q: the queue 2606f7917c00SJeff Kirsher * 2607f7917c00SJeff Kirsher * Updates the state of a response queue to advance it to the next entry. 2608f7917c00SJeff Kirsher */ 2609f7917c00SJeff Kirsher static inline void rspq_next(struct sge_rspq *q) 2610f7917c00SJeff Kirsher { 2611f7917c00SJeff Kirsher q->cur_desc = (void *)q->cur_desc + q->iqe_len; 2612f7917c00SJeff Kirsher if (unlikely(++q->cidx == q->size)) { 2613f7917c00SJeff Kirsher q->cidx = 0; 2614f7917c00SJeff Kirsher q->gen ^= 1; 2615f7917c00SJeff Kirsher q->cur_desc = q->desc; 2616f7917c00SJeff Kirsher } 2617f7917c00SJeff Kirsher } 2618f7917c00SJeff Kirsher 2619f7917c00SJeff Kirsher /** 2620f7917c00SJeff Kirsher * process_responses - process responses from an SGE response queue 2621f7917c00SJeff Kirsher * @q: the ingress queue to process 2622f7917c00SJeff Kirsher * @budget: how many responses can be processed in this round 2623f7917c00SJeff Kirsher * 2624f7917c00SJeff Kirsher * Process responses from an SGE response queue up to the supplied budget. 2625f7917c00SJeff Kirsher * Responses include received packets as well as control messages from FW 2626f7917c00SJeff Kirsher * or HW. 2627f7917c00SJeff Kirsher * 2628f7917c00SJeff Kirsher * Additionally choose the interrupt holdoff time for the next interrupt 2629f7917c00SJeff Kirsher * on this queue. If the system is under memory shortage use a fairly 2630f7917c00SJeff Kirsher * long delay to help recovery. 
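 * Returns the number of responses that were processed.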
2631f7917c00SJeff Kirsher */ 2632f7917c00SJeff Kirsher static int process_responses(struct sge_rspq *q, int budget) 2633f7917c00SJeff Kirsher { 2634f7917c00SJeff Kirsher int ret, rsp_type; 2635f7917c00SJeff Kirsher int budget_left = budget; 2636f7917c00SJeff Kirsher const struct rsp_ctrl *rc; 2637f7917c00SJeff Kirsher struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 263852367a76SVipul Pandya struct adapter *adapter = q->adap; 263952367a76SVipul Pandya struct sge *s = &adapter->sge; 2640f7917c00SJeff Kirsher 2641f7917c00SJeff Kirsher while (likely(budget_left)) { 2642f7917c00SJeff Kirsher rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 26432337ba42SVarun Prakash if (!is_new_response(rc, q)) { 26442337ba42SVarun Prakash if (q->flush_handler) 26452337ba42SVarun Prakash q->flush_handler(q); 2646f7917c00SJeff Kirsher break; 26472337ba42SVarun Prakash } 2648f7917c00SJeff Kirsher 2649019be1cfSAlexander Duyck dma_rmb(); 26501ecc7b7aSHariprasad Shenai rsp_type = RSPD_TYPE_G(rc->type_gen); 26511ecc7b7aSHariprasad Shenai if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) { 2652e91b0f24SIan Campbell struct page_frag *fp; 2653f7917c00SJeff Kirsher struct pkt_gl si; 2654f7917c00SJeff Kirsher const struct rx_sw_desc *rsd; 2655f7917c00SJeff Kirsher u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; 2656f7917c00SJeff Kirsher 26571ecc7b7aSHariprasad Shenai if (len & RSPD_NEWBUF_F) { 2658f7917c00SJeff Kirsher if (likely(q->offset > 0)) { 2659f7917c00SJeff Kirsher free_rx_bufs(q->adap, &rxq->fl, 1); 2660f7917c00SJeff Kirsher q->offset = 0; 2661f7917c00SJeff Kirsher } 26621ecc7b7aSHariprasad Shenai len = RSPD_LEN_G(len); 2663f7917c00SJeff Kirsher } 2664f7917c00SJeff Kirsher si.tot_len = len; 2665f7917c00SJeff Kirsher 2666f7917c00SJeff Kirsher /* gather packet fragments */ 2667f7917c00SJeff Kirsher for (frags = 0, fp = si.frags; ; frags++, fp++) { 2668f7917c00SJeff Kirsher rsd = &rxq->fl.sdesc[rxq->fl.cidx]; 266952367a76SVipul Pandya bufsz = get_buf_size(adapter, rsd); 2670f7917c00SJeff Kirsher fp->page = rsd->page; 2671e91b0f24SIan Campbell fp->offset = q->offset; 2672e91b0f24SIan Campbell fp->size = min(bufsz, len); 2673e91b0f24SIan Campbell len -= fp->size; 2674f7917c00SJeff Kirsher if (!len) 2675f7917c00SJeff Kirsher break; 2676f7917c00SJeff Kirsher unmap_rx_buf(q->adap, &rxq->fl); 2677f7917c00SJeff Kirsher } 2678f7917c00SJeff Kirsher 26795e2a5ebcSHariprasad Shenai si.sgetstamp = SGE_TIMESTAMP_G( 26805e2a5ebcSHariprasad Shenai be64_to_cpu(rc->last_flit)); 2681f7917c00SJeff Kirsher /* 2682f7917c00SJeff Kirsher * Last buffer remains mapped so explicitly make it 2683f7917c00SJeff Kirsher * coherent for CPU access. 
2684f7917c00SJeff Kirsher */
2685f7917c00SJeff Kirsher dma_sync_single_for_cpu(q->adap->pdev_dev,
2686f7917c00SJeff Kirsher get_buf_addr(rsd),
2687e91b0f24SIan Campbell fp->size, DMA_FROM_DEVICE);
2688f7917c00SJeff Kirsher
2689f7917c00SJeff Kirsher si.va = page_address(si.frags[0].page) +
2690e91b0f24SIan Campbell si.frags[0].offset;
2691f7917c00SJeff Kirsher prefetch(si.va);
2692f7917c00SJeff Kirsher
2693f7917c00SJeff Kirsher si.nfrags = frags + 1;
2694f7917c00SJeff Kirsher ret = q->handler(q, q->cur_desc, &si);
2695f7917c00SJeff Kirsher if (likely(ret == 0))
269652367a76SVipul Pandya q->offset += ALIGN(fp->size, s->fl_align);
2697f7917c00SJeff Kirsher else
2698f7917c00SJeff Kirsher restore_rx_bufs(&si, &rxq->fl, frags);
26991ecc7b7aSHariprasad Shenai } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
2700f7917c00SJeff Kirsher ret = q->handler(q, q->cur_desc, NULL);
2701f7917c00SJeff Kirsher } else {
2702f7917c00SJeff Kirsher ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
2703f7917c00SJeff Kirsher }
2704f7917c00SJeff Kirsher
2705f7917c00SJeff Kirsher if (unlikely(ret)) {
2706f7917c00SJeff Kirsher /* couldn't process descriptor, back off for recovery */
27071ecc7b7aSHariprasad Shenai q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
2708f7917c00SJeff Kirsher break;
2709f7917c00SJeff Kirsher }
2710f7917c00SJeff Kirsher
2711f7917c00SJeff Kirsher rspq_next(q);
2712f7917c00SJeff Kirsher budget_left--;
2713f7917c00SJeff Kirsher }
2714f7917c00SJeff Kirsher
2715da08e425SHariprasad Shenai if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
2716f7917c00SJeff Kirsher __refill_fl(q->adap, &rxq->fl);
2717f7917c00SJeff Kirsher return budget - budget_left;
2718f7917c00SJeff Kirsher }
2719f7917c00SJeff Kirsher
2720f7917c00SJeff Kirsher /**
2721f7917c00SJeff Kirsher * napi_rx_handler - the NAPI handler for Rx processing
2722f7917c00SJeff Kirsher * @napi: the napi instance
2723f7917c00SJeff Kirsher * @budget: how many packets we can process in this round
2724f7917c00SJeff Kirsher *
2725f7917c00SJeff Kirsher * Handler for new data events when using NAPI. This does not need any
2726f7917c00SJeff Kirsher * locking or protection from interrupts as data interrupts are off at
2727f7917c00SJeff Kirsher * this point and other adapter interrupts do not interfere (the latter
2728f7917c00SJeff Kirsher * is not a concern at all with MSI-X as non-data interrupts then have
2729f7917c00SJeff Kirsher * a separate handler).
2730f7917c00SJeff Kirsher */ 2731f7917c00SJeff Kirsher static int napi_rx_handler(struct napi_struct *napi, int budget) 2732f7917c00SJeff Kirsher { 2733f7917c00SJeff Kirsher unsigned int params; 2734f7917c00SJeff Kirsher struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); 27353a336cb1SHariprasad Shenai int work_done; 2736d63a6dcfSHariprasad Shenai u32 val; 2737f7917c00SJeff Kirsher 27383a336cb1SHariprasad Shenai work_done = process_responses(q, budget); 2739f7917c00SJeff Kirsher if (likely(work_done < budget)) { 2740e553ec3fSHariprasad Shenai int timer_index; 2741e553ec3fSHariprasad Shenai 2742812787b8SHariprasad Shenai napi_complete_done(napi, work_done); 27431ecc7b7aSHariprasad Shenai timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); 2744e553ec3fSHariprasad Shenai 2745e553ec3fSHariprasad Shenai if (q->adaptive_rx) { 2746e553ec3fSHariprasad Shenai if (work_done > max(timer_pkt_quota[timer_index], 2747e553ec3fSHariprasad Shenai MIN_NAPI_WORK)) 2748e553ec3fSHariprasad Shenai timer_index = (timer_index + 1); 2749e553ec3fSHariprasad Shenai else 2750e553ec3fSHariprasad Shenai timer_index = timer_index - 1; 2751e553ec3fSHariprasad Shenai 2752e553ec3fSHariprasad Shenai timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); 27531ecc7b7aSHariprasad Shenai q->next_intr_params = 27541ecc7b7aSHariprasad Shenai QINTR_TIMER_IDX_V(timer_index) | 27551ecc7b7aSHariprasad Shenai QINTR_CNT_EN_V(0); 2756e553ec3fSHariprasad Shenai params = q->next_intr_params; 2757e553ec3fSHariprasad Shenai } else { 2758f7917c00SJeff Kirsher params = q->next_intr_params; 2759f7917c00SJeff Kirsher q->next_intr_params = q->intr_params; 2760e553ec3fSHariprasad Shenai } 2761f7917c00SJeff Kirsher } else 27621ecc7b7aSHariprasad Shenai params = QINTR_TIMER_IDX_V(7); 2763f7917c00SJeff Kirsher 2764f612b815SHariprasad Shenai val = CIDXINC_V(work_done) | SEINTARM_V(params); 2765df64e4d3SHariprasad Shenai 2766df64e4d3SHariprasad Shenai /* If we don't have access to the new User GTS (T5+), use the old 2767df64e4d3SHariprasad Shenai * doorbell mechanism; otherwise use the new BAR2 mechanism. 2768df64e4d3SHariprasad Shenai */ 2769df64e4d3SHariprasad Shenai if (unlikely(q->bar2_addr == NULL)) { 2770f612b815SHariprasad Shenai t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), 2771f612b815SHariprasad Shenai val | INGRESSQID_V((u32)q->cntxt_id)); 2772d63a6dcfSHariprasad Shenai } else { 2773f612b815SHariprasad Shenai writel(val | INGRESSQID_V(q->bar2_qid), 2774df64e4d3SHariprasad Shenai q->bar2_addr + SGE_UDB_GTS); 2775d63a6dcfSHariprasad Shenai wmb(); 2776d63a6dcfSHariprasad Shenai } 2777f7917c00SJeff Kirsher return work_done; 2778f7917c00SJeff Kirsher } 2779f7917c00SJeff Kirsher 2780f7917c00SJeff Kirsher /* 2781f7917c00SJeff Kirsher * The MSI-X interrupt handler for an SGE response queue. 2782f7917c00SJeff Kirsher */ 2783f7917c00SJeff Kirsher irqreturn_t t4_sge_intr_msix(int irq, void *cookie) 2784f7917c00SJeff Kirsher { 2785f7917c00SJeff Kirsher struct sge_rspq *q = cookie; 2786f7917c00SJeff Kirsher 2787f7917c00SJeff Kirsher napi_schedule(&q->napi); 2788f7917c00SJeff Kirsher return IRQ_HANDLED; 2789f7917c00SJeff Kirsher } 2790f7917c00SJeff Kirsher 2791f7917c00SJeff Kirsher /* 2792f7917c00SJeff Kirsher * Process the indirect interrupt entries in the interrupt queue and kick off 2793f7917c00SJeff Kirsher * NAPI for each queue that has generated an entry. 
2794f7917c00SJeff Kirsher */ 2795f7917c00SJeff Kirsher static unsigned int process_intrq(struct adapter *adap) 2796f7917c00SJeff Kirsher { 2797f7917c00SJeff Kirsher unsigned int credits; 2798f7917c00SJeff Kirsher const struct rsp_ctrl *rc; 2799f7917c00SJeff Kirsher struct sge_rspq *q = &adap->sge.intrq; 2800d63a6dcfSHariprasad Shenai u32 val; 2801f7917c00SJeff Kirsher 2802f7917c00SJeff Kirsher spin_lock(&adap->sge.intrq_lock); 2803f7917c00SJeff Kirsher for (credits = 0; ; credits++) { 2804f7917c00SJeff Kirsher rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 2805f7917c00SJeff Kirsher if (!is_new_response(rc, q)) 2806f7917c00SJeff Kirsher break; 2807f7917c00SJeff Kirsher 2808019be1cfSAlexander Duyck dma_rmb(); 28091ecc7b7aSHariprasad Shenai if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) { 2810f7917c00SJeff Kirsher unsigned int qid = ntohl(rc->pldbuflen_qid); 2811f7917c00SJeff Kirsher 2812f7917c00SJeff Kirsher qid -= adap->sge.ingr_start; 2813f7917c00SJeff Kirsher napi_schedule(&adap->sge.ingr_map[qid]->napi); 2814f7917c00SJeff Kirsher } 2815f7917c00SJeff Kirsher 2816f7917c00SJeff Kirsher rspq_next(q); 2817f7917c00SJeff Kirsher } 2818f7917c00SJeff Kirsher 2819f612b815SHariprasad Shenai val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); 2820df64e4d3SHariprasad Shenai 2821df64e4d3SHariprasad Shenai /* If we don't have access to the new User GTS (T5+), use the old 2822df64e4d3SHariprasad Shenai * doorbell mechanism; otherwise use the new BAR2 mechanism. 2823df64e4d3SHariprasad Shenai */ 2824df64e4d3SHariprasad Shenai if (unlikely(q->bar2_addr == NULL)) { 2825f612b815SHariprasad Shenai t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), 2826f612b815SHariprasad Shenai val | INGRESSQID_V(q->cntxt_id)); 2827d63a6dcfSHariprasad Shenai } else { 2828f612b815SHariprasad Shenai writel(val | INGRESSQID_V(q->bar2_qid), 2829df64e4d3SHariprasad Shenai q->bar2_addr + SGE_UDB_GTS); 2830d63a6dcfSHariprasad Shenai wmb(); 2831d63a6dcfSHariprasad Shenai } 2832f7917c00SJeff Kirsher spin_unlock(&adap->sge.intrq_lock); 2833f7917c00SJeff Kirsher return credits; 2834f7917c00SJeff Kirsher } 2835f7917c00SJeff Kirsher 2836f7917c00SJeff Kirsher /* 2837f7917c00SJeff Kirsher * The MSI interrupt handler, which handles data events from SGE response queues 2838f7917c00SJeff Kirsher * as well as error and other async events as they all use the same MSI vector. 2839f7917c00SJeff Kirsher */ 2840f7917c00SJeff Kirsher static irqreturn_t t4_intr_msi(int irq, void *cookie) 2841f7917c00SJeff Kirsher { 2842f7917c00SJeff Kirsher struct adapter *adap = cookie; 2843f7917c00SJeff Kirsher 2844c3c7b121SHariprasad Shenai if (adap->flags & MASTER_PF) 2845f7917c00SJeff Kirsher t4_slow_intr_handler(adap); 2846f7917c00SJeff Kirsher process_intrq(adap); 2847f7917c00SJeff Kirsher return IRQ_HANDLED; 2848f7917c00SJeff Kirsher } 2849f7917c00SJeff Kirsher 2850f7917c00SJeff Kirsher /* 2851f7917c00SJeff Kirsher * Interrupt handler for legacy INTx interrupts. 2852f7917c00SJeff Kirsher * Handles data events from SGE response queues as well as error and other 2853f7917c00SJeff Kirsher * async events as they all use the same interrupt line. 
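 * Returns IRQ_HANDLED if any work was found, otherwise IRQ_NONE since the
 * interrupt line may be shared.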
2854f7917c00SJeff Kirsher */ 2855f7917c00SJeff Kirsher static irqreturn_t t4_intr_intx(int irq, void *cookie) 2856f7917c00SJeff Kirsher { 2857f7917c00SJeff Kirsher struct adapter *adap = cookie; 2858f7917c00SJeff Kirsher 2859f061de42SHariprasad Shenai t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); 2860c3c7b121SHariprasad Shenai if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) | 2861c3c7b121SHariprasad Shenai process_intrq(adap)) 2862f7917c00SJeff Kirsher return IRQ_HANDLED; 2863f7917c00SJeff Kirsher return IRQ_NONE; /* probably shared interrupt */ 2864f7917c00SJeff Kirsher } 2865f7917c00SJeff Kirsher 2866f7917c00SJeff Kirsher /** 2867f7917c00SJeff Kirsher * t4_intr_handler - select the top-level interrupt handler 2868f7917c00SJeff Kirsher * @adap: the adapter 2869f7917c00SJeff Kirsher * 2870f7917c00SJeff Kirsher * Selects the top-level interrupt handler based on the type of interrupts 2871f7917c00SJeff Kirsher * (MSI-X, MSI, or INTx). 2872f7917c00SJeff Kirsher */ 2873f7917c00SJeff Kirsher irq_handler_t t4_intr_handler(struct adapter *adap) 2874f7917c00SJeff Kirsher { 2875f7917c00SJeff Kirsher if (adap->flags & USING_MSIX) 2876f7917c00SJeff Kirsher return t4_sge_intr_msix; 2877f7917c00SJeff Kirsher if (adap->flags & USING_MSI) 2878f7917c00SJeff Kirsher return t4_intr_msi; 2879f7917c00SJeff Kirsher return t4_intr_intx; 2880f7917c00SJeff Kirsher } 2881f7917c00SJeff Kirsher 28820e23daebSKees Cook static void sge_rx_timer_cb(struct timer_list *t) 2883f7917c00SJeff Kirsher { 2884f7917c00SJeff Kirsher unsigned long m; 2885a3bfb617SHariprasad Shenai unsigned int i; 28860e23daebSKees Cook struct adapter *adap = from_timer(adap, t, sge.rx_timer); 2887f7917c00SJeff Kirsher struct sge *s = &adap->sge; 2888f7917c00SJeff Kirsher 28894b8e27a8SHariprasad Shenai for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) 2890f7917c00SJeff Kirsher for (m = s->starving_fl[i]; m; m &= m - 1) { 2891f7917c00SJeff Kirsher struct sge_eth_rxq *rxq; 2892f7917c00SJeff Kirsher unsigned int id = __ffs(m) + i * BITS_PER_LONG; 2893f7917c00SJeff Kirsher struct sge_fl *fl = s->egr_map[id]; 2894f7917c00SJeff Kirsher 2895f7917c00SJeff Kirsher clear_bit(id, s->starving_fl); 28964e857c58SPeter Zijlstra smp_mb__after_atomic(); 2897f7917c00SJeff Kirsher 2898c098b026SHariprasad Shenai if (fl_starving(adap, fl)) { 2899f7917c00SJeff Kirsher rxq = container_of(fl, struct sge_eth_rxq, fl); 2900f7917c00SJeff Kirsher if (napi_reschedule(&rxq->rspq.napi)) 2901f7917c00SJeff Kirsher fl->starving++; 2902f7917c00SJeff Kirsher else 2903f7917c00SJeff Kirsher set_bit(id, s->starving_fl); 2904f7917c00SJeff Kirsher } 2905f7917c00SJeff Kirsher } 2906a3bfb617SHariprasad Shenai /* The remainder of the SGE RX Timer Callback routine is dedicated to 2907a3bfb617SHariprasad Shenai * global Master PF activities like checking for chip ingress stalls, 2908a3bfb617SHariprasad Shenai * etc. 
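 * Currently that just means calling t4_idma_monitor() below to check the
 * SGE Ingress DMA state for stalls.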
29090f4d201fSKumar Sanghvi */ 2910a3bfb617SHariprasad Shenai if (!(adap->flags & MASTER_PF)) 2911a3bfb617SHariprasad Shenai goto done; 29120f4d201fSKumar Sanghvi 2913a3bfb617SHariprasad Shenai t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); 29140f4d201fSKumar Sanghvi 2915a3bfb617SHariprasad Shenai done: 2916f7917c00SJeff Kirsher mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); 2917f7917c00SJeff Kirsher } 2918f7917c00SJeff Kirsher 29190e23daebSKees Cook static void sge_tx_timer_cb(struct timer_list *t) 2920f7917c00SJeff Kirsher { 2921f7917c00SJeff Kirsher unsigned long m; 2922f7917c00SJeff Kirsher unsigned int i, budget; 29230e23daebSKees Cook struct adapter *adap = from_timer(adap, t, sge.tx_timer); 2924f7917c00SJeff Kirsher struct sge *s = &adap->sge; 2925f7917c00SJeff Kirsher 29264b8e27a8SHariprasad Shenai for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) 2927f7917c00SJeff Kirsher for (m = s->txq_maperr[i]; m; m &= m - 1) { 2928f7917c00SJeff Kirsher unsigned long id = __ffs(m) + i * BITS_PER_LONG; 2929ab677ff4SHariprasad Shenai struct sge_uld_txq *txq = s->egr_map[id]; 2930f7917c00SJeff Kirsher 2931f7917c00SJeff Kirsher clear_bit(id, s->txq_maperr); 2932f7917c00SJeff Kirsher tasklet_schedule(&txq->qresume_tsk); 2933f7917c00SJeff Kirsher } 2934f7917c00SJeff Kirsher 2935a4569504SAtul Gupta if (!is_t4(adap->params.chip)) { 2936a4569504SAtul Gupta struct sge_eth_txq *q = &s->ptptxq; 2937a4569504SAtul Gupta int avail; 2938a4569504SAtul Gupta 2939a4569504SAtul Gupta spin_lock(&adap->ptp_lock); 2940a4569504SAtul Gupta avail = reclaimable(&q->q); 2941a4569504SAtul Gupta 2942a4569504SAtul Gupta if (avail) { 2943a4569504SAtul Gupta free_tx_desc(adap, &q->q, avail, false); 2944a4569504SAtul Gupta q->q.in_use -= avail; 2945a4569504SAtul Gupta } 2946a4569504SAtul Gupta spin_unlock(&adap->ptp_lock); 2947a4569504SAtul Gupta } 2948a4569504SAtul Gupta 2949f7917c00SJeff Kirsher budget = MAX_TIMER_TX_RECLAIM; 2950f7917c00SJeff Kirsher i = s->ethtxq_rover; 2951f7917c00SJeff Kirsher do { 2952f7917c00SJeff Kirsher struct sge_eth_txq *q = &s->ethtxq[i]; 2953f7917c00SJeff Kirsher 2954f7917c00SJeff Kirsher if (q->q.in_use && 2955f7917c00SJeff Kirsher time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && 2956f7917c00SJeff Kirsher __netif_tx_trylock(q->txq)) { 2957f7917c00SJeff Kirsher int avail = reclaimable(&q->q); 2958f7917c00SJeff Kirsher 2959f7917c00SJeff Kirsher if (avail) { 2960f7917c00SJeff Kirsher if (avail > budget) 2961f7917c00SJeff Kirsher avail = budget; 2962f7917c00SJeff Kirsher 2963f7917c00SJeff Kirsher free_tx_desc(adap, &q->q, avail, true); 2964f7917c00SJeff Kirsher q->q.in_use -= avail; 2965f7917c00SJeff Kirsher budget -= avail; 2966f7917c00SJeff Kirsher } 2967f7917c00SJeff Kirsher __netif_tx_unlock(q->txq); 2968f7917c00SJeff Kirsher } 2969f7917c00SJeff Kirsher 2970f7917c00SJeff Kirsher if (++i >= s->ethqsets) 2971f7917c00SJeff Kirsher i = 0; 2972f7917c00SJeff Kirsher } while (budget && i != s->ethtxq_rover); 2973f7917c00SJeff Kirsher s->ethtxq_rover = i; 2974f7917c00SJeff Kirsher mod_timer(&s->tx_timer, jiffies + (budget ? 
TX_QCHECK_PERIOD : 2)); 2975f7917c00SJeff Kirsher } 2976f7917c00SJeff Kirsher 2977d63a6dcfSHariprasad Shenai /** 2978df64e4d3SHariprasad Shenai * bar2_address - return the BAR2 address for an SGE Queue's Registers 2979df64e4d3SHariprasad Shenai * @adapter: the adapter 2980df64e4d3SHariprasad Shenai * @qid: the SGE Queue ID 2981df64e4d3SHariprasad Shenai * @qtype: the SGE Queue Type (Egress or Ingress) 2982df64e4d3SHariprasad Shenai * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues 2983d63a6dcfSHariprasad Shenai * 2984df64e4d3SHariprasad Shenai * Returns the BAR2 address for the SGE Queue Registers associated with 2985df64e4d3SHariprasad Shenai * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also 2986df64e4d3SHariprasad Shenai * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE 2987df64e4d3SHariprasad Shenai * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" 2988df64e4d3SHariprasad Shenai * Registers are supported (e.g. the Write Combining Doorbell Buffer). 2989d63a6dcfSHariprasad Shenai */ 2990df64e4d3SHariprasad Shenai static void __iomem *bar2_address(struct adapter *adapter, 2991df64e4d3SHariprasad Shenai unsigned int qid, 2992df64e4d3SHariprasad Shenai enum t4_bar2_qtype qtype, 2993df64e4d3SHariprasad Shenai unsigned int *pbar2_qid) 2994d63a6dcfSHariprasad Shenai { 2995df64e4d3SHariprasad Shenai u64 bar2_qoffset; 2996df64e4d3SHariprasad Shenai int ret; 2997d63a6dcfSHariprasad Shenai 2998e0456717SLinus Torvalds ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0, 2999df64e4d3SHariprasad Shenai &bar2_qoffset, pbar2_qid); 3000df64e4d3SHariprasad Shenai if (ret) 3001df64e4d3SHariprasad Shenai return NULL; 3002d63a6dcfSHariprasad Shenai 3003df64e4d3SHariprasad Shenai return adapter->bar2 + bar2_qoffset; 3004d63a6dcfSHariprasad Shenai } 3005d63a6dcfSHariprasad Shenai 3006145ef8a5SHariprasad Shenai /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0 3007145ef8a5SHariprasad Shenai * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map 3008145ef8a5SHariprasad Shenai */ 3009f7917c00SJeff Kirsher int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, 3010f7917c00SJeff Kirsher struct net_device *dev, int intr_idx, 30112337ba42SVarun Prakash struct sge_fl *fl, rspq_handler_t hnd, 30122337ba42SVarun Prakash rspq_flush_handler_t flush_hnd, int cong) 3013f7917c00SJeff Kirsher { 3014f7917c00SJeff Kirsher int ret, flsz = 0; 3015f7917c00SJeff Kirsher struct fw_iq_cmd c; 301652367a76SVipul Pandya struct sge *s = &adap->sge; 3017f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 3018b0ba9d5fSCasey Leedom int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING); 3019f7917c00SJeff Kirsher 3020f7917c00SJeff Kirsher /* Size needs to be multiple of 16, including status entry. 
*/
3021f7917c00SJeff Kirsher iq->size = roundup(iq->size, 16);
3022f7917c00SJeff Kirsher
3023f7917c00SJeff Kirsher iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
30240ac5b708SHariprasad Shenai &iq->phys_addr, NULL, 0,
30250ac5b708SHariprasad Shenai dev_to_node(adap->pdev_dev));
3026f7917c00SJeff Kirsher if (!iq->desc)
3027f7917c00SJeff Kirsher return -ENOMEM;
3028f7917c00SJeff Kirsher
3029f7917c00SJeff Kirsher memset(&c, 0, sizeof(c));
3030e2ac9628SHariprasad Shenai c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
3031e2ac9628SHariprasad Shenai FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3032b2612722SHariprasad Shenai FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
30336e4b51a6SHariprasad Shenai c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
3034f7917c00SJeff Kirsher FW_LEN16(c));
30356e4b51a6SHariprasad Shenai c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
30366e4b51a6SHariprasad Shenai FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
30371ecc7b7aSHariprasad Shenai FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
30381ecc7b7aSHariprasad Shenai FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
30396e4b51a6SHariprasad Shenai FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
3040f7917c00SJeff Kirsher -intr_idx - 1));
30416e4b51a6SHariprasad Shenai c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
30426e4b51a6SHariprasad Shenai FW_IQ_CMD_IQGTSMODE_F |
30436e4b51a6SHariprasad Shenai FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
30446e4b51a6SHariprasad Shenai FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
3045f7917c00SJeff Kirsher c.iqsize = htons(iq->size);
3046f7917c00SJeff Kirsher c.iqaddr = cpu_to_be64(iq->phys_addr);
3047145ef8a5SHariprasad Shenai if (cong >= 0)
3048145ef8a5SHariprasad Shenai c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
3049f7917c00SJeff Kirsher
3050f7917c00SJeff Kirsher if (fl) {
30513ccc6cf7SHariprasad Shenai enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
30523ccc6cf7SHariprasad Shenai
305313432997SHariprasad Shenai /* Allocate the ring for the hardware free list (with space
305413432997SHariprasad Shenai * for its status page) along with the associated software
305513432997SHariprasad Shenai * descriptor ring. The free list size needs to be a multiple
305613432997SHariprasad Shenai * of the Egress Queue Unit and at least 2 Egress Units larger
305713432997SHariprasad Shenai * than the SGE's Egress Congestion Threshold
305813432997SHariprasad Shenai * (fl_starve_thres - 1).
305913432997SHariprasad Shenai */ 306013432997SHariprasad Shenai if (fl->size < s->fl_starve_thres - 1 + 2 * 8) 306113432997SHariprasad Shenai fl->size = s->fl_starve_thres - 1 + 2 * 8; 3062f7917c00SJeff Kirsher fl->size = roundup(fl->size, 8); 3063f7917c00SJeff Kirsher fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), 3064f7917c00SJeff Kirsher sizeof(struct rx_sw_desc), &fl->addr, 30650ac5b708SHariprasad Shenai &fl->sdesc, s->stat_len, 30660ac5b708SHariprasad Shenai dev_to_node(adap->pdev_dev)); 3067f7917c00SJeff Kirsher if (!fl->desc) 3068f7917c00SJeff Kirsher goto fl_nomem; 3069f7917c00SJeff Kirsher 307052367a76SVipul Pandya flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); 3071145ef8a5SHariprasad Shenai c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | 3072b0ba9d5fSCasey Leedom FW_IQ_CMD_FL0FETCHRO_V(relaxed) | 3073b0ba9d5fSCasey Leedom FW_IQ_CMD_FL0DATARO_V(relaxed) | 30746e4b51a6SHariprasad Shenai FW_IQ_CMD_FL0PADEN_F); 3075145ef8a5SHariprasad Shenai if (cong >= 0) 3076145ef8a5SHariprasad Shenai c.iqns_to_fl0congen |= 3077145ef8a5SHariprasad Shenai htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) | 3078145ef8a5SHariprasad Shenai FW_IQ_CMD_FL0CONGCIF_F | 3079145ef8a5SHariprasad Shenai FW_IQ_CMD_FL0CONGEN_F); 3080edadad80SHariprasad Shenai /* In T6, for egress queue type FL there is internal overhead 3081edadad80SHariprasad Shenai * of 16B for header going into FLM module. Hence the maximum 3082edadad80SHariprasad Shenai * allowed burst size is 448 bytes. For T4/T5, the hardware 3083edadad80SHariprasad Shenai * doesn't coalesce fetch requests if more than 64 bytes of 3084edadad80SHariprasad Shenai * Free List pointers are provided, so we use a 128-byte Fetch 3085edadad80SHariprasad Shenai * Burst Minimum there (T6 implements coalescing so we can use 3086edadad80SHariprasad Shenai * the smaller 64-byte value there). 3087edadad80SHariprasad Shenai */ 30881ecc7b7aSHariprasad Shenai c.fl0dcaen_to_fl0cidxfthresh = 3089edadad80SHariprasad Shenai htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ? 3090edadad80SHariprasad Shenai FETCHBURSTMIN_128B_X : 3091edadad80SHariprasad Shenai FETCHBURSTMIN_64B_X) | 30923ccc6cf7SHariprasad Shenai FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? 
30933ccc6cf7SHariprasad Shenai FETCHBURSTMAX_512B_X : 30943ccc6cf7SHariprasad Shenai FETCHBURSTMAX_256B_X)); 3095f7917c00SJeff Kirsher c.fl0size = htons(flsz); 3096f7917c00SJeff Kirsher c.fl0addr = cpu_to_be64(fl->addr); 3097f7917c00SJeff Kirsher } 3098f7917c00SJeff Kirsher 3099b2612722SHariprasad Shenai ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 3100f7917c00SJeff Kirsher if (ret) 3101f7917c00SJeff Kirsher goto err; 3102f7917c00SJeff Kirsher 3103f7917c00SJeff Kirsher netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); 3104f7917c00SJeff Kirsher iq->cur_desc = iq->desc; 3105f7917c00SJeff Kirsher iq->cidx = 0; 3106f7917c00SJeff Kirsher iq->gen = 1; 3107f7917c00SJeff Kirsher iq->next_intr_params = iq->intr_params; 3108f7917c00SJeff Kirsher iq->cntxt_id = ntohs(c.iqid); 3109f7917c00SJeff Kirsher iq->abs_id = ntohs(c.physiqid); 3110df64e4d3SHariprasad Shenai iq->bar2_addr = bar2_address(adap, 3111df64e4d3SHariprasad Shenai iq->cntxt_id, 3112df64e4d3SHariprasad Shenai T4_BAR2_QTYPE_INGRESS, 3113df64e4d3SHariprasad Shenai &iq->bar2_qid); 3114f7917c00SJeff Kirsher iq->size--; /* subtract status entry */ 3115f7917c00SJeff Kirsher iq->netdev = dev; 3116f7917c00SJeff Kirsher iq->handler = hnd; 31172337ba42SVarun Prakash iq->flush_handler = flush_hnd; 31182337ba42SVarun Prakash 31192337ba42SVarun Prakash memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr)); 31202337ba42SVarun Prakash skb_queue_head_init(&iq->lro_mgr.lroq); 3121f7917c00SJeff Kirsher 3122f7917c00SJeff Kirsher /* set offset to -1 to distinguish ingress queues without FL */ 3123f7917c00SJeff Kirsher iq->offset = fl ? 0 : -1; 3124f7917c00SJeff Kirsher 3125f7917c00SJeff Kirsher adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; 3126f7917c00SJeff Kirsher 3127f7917c00SJeff Kirsher if (fl) { 3128f7917c00SJeff Kirsher fl->cntxt_id = ntohs(c.fl0id); 3129f7917c00SJeff Kirsher fl->avail = fl->pend_cred = 0; 3130f7917c00SJeff Kirsher fl->pidx = fl->cidx = 0; 3131f7917c00SJeff Kirsher fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; 3132f7917c00SJeff Kirsher adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; 3133d63a6dcfSHariprasad Shenai 3134df64e4d3SHariprasad Shenai /* Note, we must initialize the BAR2 Free List User Doorbell 3135df64e4d3SHariprasad Shenai * information before refilling the Free List! 3136d63a6dcfSHariprasad Shenai */ 3137df64e4d3SHariprasad Shenai fl->bar2_addr = bar2_address(adap, 3138df64e4d3SHariprasad Shenai fl->cntxt_id, 3139df64e4d3SHariprasad Shenai T4_BAR2_QTYPE_EGRESS, 3140df64e4d3SHariprasad Shenai &fl->bar2_qid); 3141f7917c00SJeff Kirsher refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); 3142f7917c00SJeff Kirsher } 3143b8b1ae99SHariprasad Shenai 3144b8b1ae99SHariprasad Shenai /* For T5 and later we attempt to set up the Congestion Manager values 3145b8b1ae99SHariprasad Shenai * of the new RX Ethernet Queue. This should really be handled by 3146b8b1ae99SHariprasad Shenai * firmware because it's more complex than any host driver wants to 3147b8b1ae99SHariprasad Shenai * get involved with and it's different per chip and this is almost 3148b8b1ae99SHariprasad Shenai * certainly wrong. Firmware would be wrong as well, but it would be 3149b8b1ae99SHariprasad Shenai * a lot easier to fix in one place ... For now we do something very 3150b8b1ae99SHariprasad Shenai * simple (and hopefully less wrong). 
3151b8b1ae99SHariprasad Shenai */ 3152b8b1ae99SHariprasad Shenai if (!is_t4(adap->params.chip) && cong >= 0) { 31532216d014SHariprasad Shenai u32 param, val, ch_map = 0; 3154b8b1ae99SHariprasad Shenai int i; 31552216d014SHariprasad Shenai u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; 3156b8b1ae99SHariprasad Shenai 3157b8b1ae99SHariprasad Shenai param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | 3158b8b1ae99SHariprasad Shenai FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 3159b8b1ae99SHariprasad Shenai FW_PARAMS_PARAM_YZ_V(iq->cntxt_id)); 3160b8b1ae99SHariprasad Shenai if (cong == 0) { 3161b8b1ae99SHariprasad Shenai val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X); 3162b8b1ae99SHariprasad Shenai } else { 3163b8b1ae99SHariprasad Shenai val = 3164b8b1ae99SHariprasad Shenai CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X); 3165b8b1ae99SHariprasad Shenai for (i = 0; i < 4; i++) { 3166b8b1ae99SHariprasad Shenai if (cong & (1 << i)) 31672216d014SHariprasad Shenai ch_map |= 1 << (i << cng_ch_bits_log); 3168b8b1ae99SHariprasad Shenai } 31692216d014SHariprasad Shenai val |= CONMCTXT_CNGCHMAP_V(ch_map); 3170b8b1ae99SHariprasad Shenai } 3171b2612722SHariprasad Shenai ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, 3172b8b1ae99SHariprasad Shenai ¶m, &val); 3173b8b1ae99SHariprasad Shenai if (ret) 3174b8b1ae99SHariprasad Shenai dev_warn(adap->pdev_dev, "Failed to set Congestion" 3175b8b1ae99SHariprasad Shenai " Manager Context for Ingress Queue %d: %d\n", 3176b8b1ae99SHariprasad Shenai iq->cntxt_id, -ret); 3177b8b1ae99SHariprasad Shenai } 3178b8b1ae99SHariprasad Shenai 3179f7917c00SJeff Kirsher return 0; 3180f7917c00SJeff Kirsher 3181f7917c00SJeff Kirsher fl_nomem: 3182f7917c00SJeff Kirsher ret = -ENOMEM; 3183f7917c00SJeff Kirsher err: 3184f7917c00SJeff Kirsher if (iq->desc) { 3185f7917c00SJeff Kirsher dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, 3186f7917c00SJeff Kirsher iq->desc, iq->phys_addr); 3187f7917c00SJeff Kirsher iq->desc = NULL; 3188f7917c00SJeff Kirsher } 3189f7917c00SJeff Kirsher if (fl && fl->desc) { 3190f7917c00SJeff Kirsher kfree(fl->sdesc); 3191f7917c00SJeff Kirsher fl->sdesc = NULL; 3192f7917c00SJeff Kirsher dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), 3193f7917c00SJeff Kirsher fl->desc, fl->addr); 3194f7917c00SJeff Kirsher fl->desc = NULL; 3195f7917c00SJeff Kirsher } 3196f7917c00SJeff Kirsher return ret; 3197f7917c00SJeff Kirsher } 3198f7917c00SJeff Kirsher 3199f7917c00SJeff Kirsher static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) 3200f7917c00SJeff Kirsher { 320122adfe0aSSantosh Rastapur q->cntxt_id = id; 3202df64e4d3SHariprasad Shenai q->bar2_addr = bar2_address(adap, 3203df64e4d3SHariprasad Shenai q->cntxt_id, 3204df64e4d3SHariprasad Shenai T4_BAR2_QTYPE_EGRESS, 3205df64e4d3SHariprasad Shenai &q->bar2_qid); 3206f7917c00SJeff Kirsher q->in_use = 0; 3207f7917c00SJeff Kirsher q->cidx = q->pidx = 0; 3208f7917c00SJeff Kirsher q->stops = q->restarts = 0; 3209f7917c00SJeff Kirsher q->stat = (void *)&q->desc[q->size]; 32103069ee9bSVipul Pandya spin_lock_init(&q->db_lock); 3211f7917c00SJeff Kirsher adap->sge.egr_map[id - adap->sge.egr_start] = q; 3212f7917c00SJeff Kirsher } 3213f7917c00SJeff Kirsher 3214f7917c00SJeff Kirsher int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, 3215f7917c00SJeff Kirsher struct net_device *dev, struct netdev_queue *netdevq, 3216f7917c00SJeff Kirsher unsigned int iqid) 3217f7917c00SJeff Kirsher { 3218f7917c00SJeff Kirsher int ret, nentries; 
3219f7917c00SJeff Kirsher struct fw_eq_eth_cmd c; 322052367a76SVipul Pandya struct sge *s = &adap->sge; 3221f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 3222f7917c00SJeff Kirsher 3223f7917c00SJeff Kirsher /* Add status entries */ 322452367a76SVipul Pandya nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); 3225f7917c00SJeff Kirsher 3226f7917c00SJeff Kirsher txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, 3227f7917c00SJeff Kirsher sizeof(struct tx_desc), sizeof(struct tx_sw_desc), 322852367a76SVipul Pandya &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, 3229f7917c00SJeff Kirsher netdev_queue_numa_node_read(netdevq)); 3230f7917c00SJeff Kirsher if (!txq->q.desc) 3231f7917c00SJeff Kirsher return -ENOMEM; 3232f7917c00SJeff Kirsher 3233f7917c00SJeff Kirsher memset(&c, 0, sizeof(c)); 3234e2ac9628SHariprasad Shenai c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | 3235e2ac9628SHariprasad Shenai FW_CMD_WRITE_F | FW_CMD_EXEC_F | 3236b2612722SHariprasad Shenai FW_EQ_ETH_CMD_PFN_V(adap->pf) | 32376e4b51a6SHariprasad Shenai FW_EQ_ETH_CMD_VFN_V(0)); 32386e4b51a6SHariprasad Shenai c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | 32396e4b51a6SHariprasad Shenai FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); 32406e4b51a6SHariprasad Shenai c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | 32416e4b51a6SHariprasad Shenai FW_EQ_ETH_CMD_VIID_V(pi->viid)); 32421ecc7b7aSHariprasad Shenai c.fetchszm_to_iqid = 32431ecc7b7aSHariprasad Shenai htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | 32446e4b51a6SHariprasad Shenai FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | 32451ecc7b7aSHariprasad Shenai FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid)); 32461ecc7b7aSHariprasad Shenai c.dcaen_to_eqsize = 32471ecc7b7aSHariprasad Shenai htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | 32481ecc7b7aSHariprasad Shenai FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 32491ecc7b7aSHariprasad Shenai FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 32506e4b51a6SHariprasad Shenai FW_EQ_ETH_CMD_EQSIZE_V(nentries)); 3251f7917c00SJeff Kirsher c.eqaddr = cpu_to_be64(txq->q.phys_addr); 3252f7917c00SJeff Kirsher 3253b2612722SHariprasad Shenai ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 3254f7917c00SJeff Kirsher if (ret) { 3255f7917c00SJeff Kirsher kfree(txq->q.sdesc); 3256f7917c00SJeff Kirsher txq->q.sdesc = NULL; 3257f7917c00SJeff Kirsher dma_free_coherent(adap->pdev_dev, 3258f7917c00SJeff Kirsher nentries * sizeof(struct tx_desc), 3259f7917c00SJeff Kirsher txq->q.desc, txq->q.phys_addr); 3260f7917c00SJeff Kirsher txq->q.desc = NULL; 3261f7917c00SJeff Kirsher return ret; 3262f7917c00SJeff Kirsher } 3263f7917c00SJeff Kirsher 3264ab677ff4SHariprasad Shenai txq->q.q_type = CXGB4_TXQ_ETH; 32656e4b51a6SHariprasad Shenai init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); 3266f7917c00SJeff Kirsher txq->txq = netdevq; 3267f7917c00SJeff Kirsher txq->tso = txq->tx_cso = txq->vlan_ins = 0; 3268f7917c00SJeff Kirsher txq->mapping_err = 0; 3269f7917c00SJeff Kirsher return 0; 3270f7917c00SJeff Kirsher } 3271f7917c00SJeff Kirsher 3272f7917c00SJeff Kirsher int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, 3273f7917c00SJeff Kirsher struct net_device *dev, unsigned int iqid, 3274f7917c00SJeff Kirsher unsigned int cmplqid) 3275f7917c00SJeff Kirsher { 3276f7917c00SJeff Kirsher int ret, nentries; 3277f7917c00SJeff Kirsher struct fw_eq_ctrl_cmd c; 327852367a76SVipul Pandya struct sge *s = &adap->sge; 3279f7917c00SJeff Kirsher struct port_info *pi = 
3272f7917c00SJeff Kirsher int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
3273f7917c00SJeff Kirsher 			  struct net_device *dev, unsigned int iqid,
3274f7917c00SJeff Kirsher 			  unsigned int cmplqid)
3275f7917c00SJeff Kirsher {
3276f7917c00SJeff Kirsher 	int ret, nentries;
3277f7917c00SJeff Kirsher 	struct fw_eq_ctrl_cmd c;
327852367a76SVipul Pandya 	struct sge *s = &adap->sge;
3279f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
3280f7917c00SJeff Kirsher 
3281f7917c00SJeff Kirsher 	/* Add status entries */
328252367a76SVipul Pandya 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3283f7917c00SJeff Kirsher 
3284f7917c00SJeff Kirsher 	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
3285f7917c00SJeff Kirsher 				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
3286982b81ebSHariprasad Shenai 				 NULL, 0, dev_to_node(adap->pdev_dev));
3287f7917c00SJeff Kirsher 	if (!txq->q.desc)
3288f7917c00SJeff Kirsher 		return -ENOMEM;
3289f7917c00SJeff Kirsher 
3290e2ac9628SHariprasad Shenai 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
3291e2ac9628SHariprasad Shenai 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3292b2612722SHariprasad Shenai 			    FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
32936e4b51a6SHariprasad Shenai 			    FW_EQ_CTRL_CMD_VFN_V(0));
32946e4b51a6SHariprasad Shenai 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
32956e4b51a6SHariprasad Shenai 				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
32966e4b51a6SHariprasad Shenai 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
3297f7917c00SJeff Kirsher 	c.physeqid_pkd = htonl(0);
32981ecc7b7aSHariprasad Shenai 	c.fetchszm_to_iqid =
32991ecc7b7aSHariprasad Shenai 		htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
33006e4b51a6SHariprasad Shenai 		      FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
33011ecc7b7aSHariprasad Shenai 		      FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
33021ecc7b7aSHariprasad Shenai 	c.dcaen_to_eqsize =
33031ecc7b7aSHariprasad Shenai 		htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
33041ecc7b7aSHariprasad Shenai 		      FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
33051ecc7b7aSHariprasad Shenai 		      FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
33066e4b51a6SHariprasad Shenai 		      FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
3307f7917c00SJeff Kirsher 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3308f7917c00SJeff Kirsher 
3309b2612722SHariprasad Shenai 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3310f7917c00SJeff Kirsher 	if (ret) {
3311f7917c00SJeff Kirsher 		dma_free_coherent(adap->pdev_dev,
3312f7917c00SJeff Kirsher 				  nentries * sizeof(struct tx_desc),
3313f7917c00SJeff Kirsher 				  txq->q.desc, txq->q.phys_addr);
3314f7917c00SJeff Kirsher 		txq->q.desc = NULL;
3315f7917c00SJeff Kirsher 		return ret;
3316f7917c00SJeff Kirsher 	}
3317f7917c00SJeff Kirsher 
3318ab677ff4SHariprasad Shenai 	txq->q.q_type = CXGB4_TXQ_CTRL;
33196e4b51a6SHariprasad Shenai 	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
3320f7917c00SJeff Kirsher 	txq->adap = adap;
3321f7917c00SJeff Kirsher 	skb_queue_head_init(&txq->sendq);
3322f7917c00SJeff Kirsher 	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
3323f7917c00SJeff Kirsher 	txq->full = 0;
3324f7917c00SJeff Kirsher 	return 0;
3325f7917c00SJeff Kirsher }
3326f7917c00SJeff Kirsher 
33270fbc81b3SHariprasad Shenai int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
33280fbc81b3SHariprasad Shenai 			unsigned int cmplqid)
33290fbc81b3SHariprasad Shenai {
33300fbc81b3SHariprasad Shenai 	u32 param, val;
33310fbc81b3SHariprasad Shenai 
33320fbc81b3SHariprasad Shenai 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
33330fbc81b3SHariprasad Shenai 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
33340fbc81b3SHariprasad Shenai 		 FW_PARAMS_PARAM_YZ_V(eqid));
33350fbc81b3SHariprasad Shenai 	val = cmplqid;
33360fbc81b3SHariprasad Shenai 	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
33370fbc81b3SHariprasad Shenai }
33380fbc81b3SHariprasad Shenai 
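t4_sge_mod_ctrl_txq() lets an already-allocated control egress queue be pointed at a different completion ingress queue via a DMAQ firmware parameter, without tearing the queue down. A hypothetical caller might look like the fragment below; example_rebind_ctrl_eq() and its arguments are illustrative sketch code, only t4_sge_mod_ctrl_txq() itself comes from this file, and the choice of which IDs to pass is an assumption for the example.

/* Sketch of a caller that retargets a control EQ's completions at a new
 * response queue, e.g. after a ULD brings up its own ingress queue.
 */
static int example_rebind_ctrl_eq(struct adapter *adap,
				  struct sge_ctrl_txq *ctrlq,
				  struct sge_rspq *new_rspq)
{
	/* ctrlq->q.cntxt_id was filled in by t4_sge_alloc_ctrl_txq() */
	return t4_sge_mod_ctrl_txq(adap, ctrlq->q.cntxt_id,
				   new_rspq->cntxt_id);
}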
3339ab677ff4SHariprasad Shenai int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
3340ab677ff4SHariprasad Shenai 			 struct net_device *dev, unsigned int iqid,
3341ab677ff4SHariprasad Shenai 			 unsigned int uld_type)
3342f7917c00SJeff Kirsher {
3343f7917c00SJeff Kirsher 	int ret, nentries;
3344f7917c00SJeff Kirsher 	struct fw_eq_ofld_cmd c;
334552367a76SVipul Pandya 	struct sge *s = &adap->sge;
3346f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
3347ab677ff4SHariprasad Shenai 	int cmd = FW_EQ_OFLD_CMD;
3348f7917c00SJeff Kirsher 
3349f7917c00SJeff Kirsher 	/* Add status entries */
335052367a76SVipul Pandya 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3351f7917c00SJeff Kirsher 
3352f7917c00SJeff Kirsher 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
3353f7917c00SJeff Kirsher 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
335452367a76SVipul Pandya 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
3355f7917c00SJeff Kirsher 			NUMA_NO_NODE);
3356f7917c00SJeff Kirsher 	if (!txq->q.desc)
3357f7917c00SJeff Kirsher 		return -ENOMEM;
3358f7917c00SJeff Kirsher 
3359f7917c00SJeff Kirsher 	memset(&c, 0, sizeof(c));
3360ab677ff4SHariprasad Shenai 	if (unlikely(uld_type == CXGB4_TX_CRYPTO))
3361ab677ff4SHariprasad Shenai 		cmd = FW_EQ_CTRL_CMD;
3362ab677ff4SHariprasad Shenai 	c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
3363e2ac9628SHariprasad Shenai 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3364b2612722SHariprasad Shenai 			    FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
33656e4b51a6SHariprasad Shenai 			    FW_EQ_OFLD_CMD_VFN_V(0));
33666e4b51a6SHariprasad Shenai 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
33676e4b51a6SHariprasad Shenai 				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
33681ecc7b7aSHariprasad Shenai 	c.fetchszm_to_iqid =
33691ecc7b7aSHariprasad Shenai 		htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
33706e4b51a6SHariprasad Shenai 		      FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
33711ecc7b7aSHariprasad Shenai 		      FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
33721ecc7b7aSHariprasad Shenai 	c.dcaen_to_eqsize =
33731ecc7b7aSHariprasad Shenai 		htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
33741ecc7b7aSHariprasad Shenai 		      FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
33751ecc7b7aSHariprasad Shenai 		      FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
33766e4b51a6SHariprasad Shenai 		      FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
3377f7917c00SJeff Kirsher 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3378f7917c00SJeff Kirsher 
3379b2612722SHariprasad Shenai 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3380f7917c00SJeff Kirsher 	if (ret) {
3381f7917c00SJeff Kirsher 		kfree(txq->q.sdesc);
3382f7917c00SJeff Kirsher 		txq->q.sdesc = NULL;
3383f7917c00SJeff Kirsher 		dma_free_coherent(adap->pdev_dev,
3384f7917c00SJeff Kirsher 				  nentries * sizeof(struct tx_desc),
3385f7917c00SJeff Kirsher 				  txq->q.desc, txq->q.phys_addr);
3386f7917c00SJeff Kirsher 		txq->q.desc = NULL;
3387f7917c00SJeff Kirsher 		return ret;
3388f7917c00SJeff Kirsher 	}
3389f7917c00SJeff Kirsher 
3390ab677ff4SHariprasad Shenai 	txq->q.q_type = CXGB4_TXQ_ULD;
33916e4b51a6SHariprasad Shenai 	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
3392f7917c00SJeff Kirsher 	txq->adap = adap;
3393f7917c00SJeff Kirsher 	skb_queue_head_init(&txq->sendq);
3394f7917c00SJeff Kirsher 	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
3395f7917c00SJeff Kirsher 	txq->full = 0;
3396f7917c00SJeff Kirsher 	txq->mapping_err = 0;
3397f7917c00SJeff Kirsher 	return 0;
3398f7917c00SJeff Kirsher }
3399f7917c00SJeff Kirsher 
3400ab677ff4SHariprasad Shenai void free_txq(struct adapter *adap, struct sge_txq *q)
3401f7917c00SJeff Kirsher {
340252367a76SVipul Pandya 	struct sge *s = &adap->sge;
340352367a76SVipul Pandya 
3404f7917c00SJeff Kirsher 	dma_free_coherent(adap->pdev_dev,
340552367a76SVipul Pandya 			  q->size * sizeof(struct tx_desc) + s->stat_len,
3406f7917c00SJeff Kirsher 			  q->desc, q->phys_addr);
3407f7917c00SJeff Kirsher 	q->cntxt_id = 0;
3408f7917c00SJeff Kirsher 	q->sdesc = NULL;
3409f7917c00SJeff Kirsher 	q->desc = NULL;
3410f7917c00SJeff Kirsher }
3411f7917c00SJeff Kirsher 
341294cdb8bbSHariprasad Shenai void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
3413f7917c00SJeff Kirsher 		  struct sge_fl *fl)
3414f7917c00SJeff Kirsher {
341552367a76SVipul Pandya 	struct sge *s = &adap->sge;
3416f7917c00SJeff Kirsher 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
3417f7917c00SJeff Kirsher 
3418f7917c00SJeff Kirsher 	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
3419b2612722SHariprasad Shenai 	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
3420f7917c00SJeff Kirsher 		   rq->cntxt_id, fl_id, 0xffff);
3421f7917c00SJeff Kirsher 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
3422f7917c00SJeff Kirsher 			  rq->desc, rq->phys_addr);
3423f7917c00SJeff Kirsher 	netif_napi_del(&rq->napi);
3424f7917c00SJeff Kirsher 	rq->netdev = NULL;
3425f7917c00SJeff Kirsher 	rq->cntxt_id = rq->abs_id = 0;
3426f7917c00SJeff Kirsher 	rq->desc = NULL;
3427f7917c00SJeff Kirsher 
3428f7917c00SJeff Kirsher 	if (fl) {
3429f7917c00SJeff Kirsher 		free_rx_bufs(adap, fl, fl->avail);
343052367a76SVipul Pandya 		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
3431f7917c00SJeff Kirsher 				  fl->desc, fl->addr);
3432f7917c00SJeff Kirsher 		kfree(fl->sdesc);
3433f7917c00SJeff Kirsher 		fl->sdesc = NULL;
3434f7917c00SJeff Kirsher 		fl->cntxt_id = 0;
3435f7917c00SJeff Kirsher 		fl->desc = NULL;
3436f7917c00SJeff Kirsher 	}
3437f7917c00SJeff Kirsher }
3438f7917c00SJeff Kirsher 
3439f7917c00SJeff Kirsher /**
34405fa76694SHariprasad Shenai  * t4_free_ofld_rxqs - free a block of consecutive Rx queues
34415fa76694SHariprasad Shenai  * @adap: the adapter
34425fa76694SHariprasad Shenai  * @n: number of queues
34435fa76694SHariprasad Shenai  * @q: pointer to first queue
34445fa76694SHariprasad Shenai  *
34455fa76694SHariprasad Shenai  * Release the resources of a consecutive block of offload Rx queues.
34465fa76694SHariprasad Shenai  */
34475fa76694SHariprasad Shenai void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
34485fa76694SHariprasad Shenai {
34495fa76694SHariprasad Shenai 	for ( ; n; n--, q++)
34505fa76694SHariprasad Shenai 		if (q->rspq.desc)
34515fa76694SHariprasad Shenai 			free_rspq_fl(adap, &q->rspq,
34525fa76694SHariprasad Shenai 				     q->fl.size ? &q->fl : NULL);
34535fa76694SHariprasad Shenai }
34545fa76694SHariprasad Shenai 
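t4_free_ofld_rxqs() is designed to take a whole contiguous block of offload Rx queues; entries that were never brought up are skipped because their rspq.desc stays NULL, so callers do not need their own per-queue checks. A hypothetical teardown helper is sketched below; the uld_rxqs/nrxq names are assumptions made up for the example, only t4_free_ofld_rxqs() is from this file.

/* Sketch: release every offload Rx queue a ULD allocated, in one call. */
static void example_free_uld_rxqs(struct adapter *adap,
				  struct sge_ofld_rxq *uld_rxqs, int nrxq)
{
	/* frees each response queue plus any attached free list */
	t4_free_ofld_rxqs(adap, nrxq, uld_rxqs);
}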
34555fa76694SHariprasad Shenai /**
3456f7917c00SJeff Kirsher  * t4_free_sge_resources - free SGE resources
3457f7917c00SJeff Kirsher  * @adap: the adapter
3458f7917c00SJeff Kirsher  *
3459f7917c00SJeff Kirsher  * Frees resources used by the SGE queue sets.
3460f7917c00SJeff Kirsher  */
3461f7917c00SJeff Kirsher void t4_free_sge_resources(struct adapter *adap)
3462f7917c00SJeff Kirsher {
3463f7917c00SJeff Kirsher 	int i;
3464ebf4dc2bSHariprasad Shenai 	struct sge_eth_rxq *eq;
3465ebf4dc2bSHariprasad Shenai 	struct sge_eth_txq *etq;
3466ebf4dc2bSHariprasad Shenai 
3467ebf4dc2bSHariprasad Shenai 	/* stop all Rx queues in order to start them draining */
3468ebf4dc2bSHariprasad Shenai 	for (i = 0; i < adap->sge.ethqsets; i++) {
3469ebf4dc2bSHariprasad Shenai 		eq = &adap->sge.ethrxq[i];
3470ebf4dc2bSHariprasad Shenai 		if (eq->rspq.desc)
3471ebf4dc2bSHariprasad Shenai 			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
3472ebf4dc2bSHariprasad Shenai 				   FW_IQ_TYPE_FL_INT_CAP,
3473ebf4dc2bSHariprasad Shenai 				   eq->rspq.cntxt_id,
3474ebf4dc2bSHariprasad Shenai 				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
3475ebf4dc2bSHariprasad Shenai 				   0xffff);
3476ebf4dc2bSHariprasad Shenai 	}
3477f7917c00SJeff Kirsher 
3478f7917c00SJeff Kirsher 	/* clean up Ethernet Tx/Rx queues */
3479ebf4dc2bSHariprasad Shenai 	for (i = 0; i < adap->sge.ethqsets; i++) {
3480ebf4dc2bSHariprasad Shenai 		eq = &adap->sge.ethrxq[i];
3481f7917c00SJeff Kirsher 		if (eq->rspq.desc)
34825fa76694SHariprasad Shenai 			free_rspq_fl(adap, &eq->rspq,
34835fa76694SHariprasad Shenai 				     eq->fl.size ? &eq->fl : NULL);
3484ebf4dc2bSHariprasad Shenai 
3485ebf4dc2bSHariprasad Shenai 		etq = &adap->sge.ethtxq[i];
3486f7917c00SJeff Kirsher 		if (etq->q.desc) {
3487b2612722SHariprasad Shenai 			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
3488f7917c00SJeff Kirsher 				       etq->q.cntxt_id);
3489fbe80776SHariprasad Shenai 			__netif_tx_lock_bh(etq->txq);
3490f7917c00SJeff Kirsher 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
3491fbe80776SHariprasad Shenai 			__netif_tx_unlock_bh(etq->txq);
3492f7917c00SJeff Kirsher 			kfree(etq->q.sdesc);
3493f7917c00SJeff Kirsher 			free_txq(adap, &etq->q);
3494f7917c00SJeff Kirsher 		}
3495f7917c00SJeff Kirsher 	}
3496f7917c00SJeff Kirsher 
3497f7917c00SJeff Kirsher 	/* clean up control Tx queues */
3498f7917c00SJeff Kirsher 	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
3499f7917c00SJeff Kirsher 		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
3500f7917c00SJeff Kirsher 
3501f7917c00SJeff Kirsher 		if (cq->q.desc) {
3502f7917c00SJeff Kirsher 			tasklet_kill(&cq->qresume_tsk);
3503b2612722SHariprasad Shenai 			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
3504f7917c00SJeff Kirsher 					cq->q.cntxt_id);
3505f7917c00SJeff Kirsher 			__skb_queue_purge(&cq->sendq);
3506f7917c00SJeff Kirsher 			free_txq(adap, &cq->q);
3507f7917c00SJeff Kirsher 		}
3508f7917c00SJeff Kirsher 	}
3509f7917c00SJeff Kirsher 
3510f7917c00SJeff Kirsher 	if (adap->sge.fw_evtq.desc)
3511f7917c00SJeff Kirsher 		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
3512f7917c00SJeff Kirsher 
3513f7917c00SJeff Kirsher 	if (adap->sge.intrq.desc)
3514f7917c00SJeff Kirsher 		free_rspq_fl(adap, &adap->sge.intrq, NULL);
3515f7917c00SJeff Kirsher 
3516a4569504SAtul Gupta 	if (!is_t4(adap->params.chip)) {
3517a4569504SAtul Gupta 		etq = &adap->sge.ptptxq;
3518a4569504SAtul Gupta 		if (etq->q.desc) {
3519a4569504SAtul Gupta 			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
3520a4569504SAtul Gupta 				       etq->q.cntxt_id);
3521a4569504SAtul Gupta 			spin_lock_bh(&adap->ptp_lock);
3522a4569504SAtul Gupta 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
3523a4569504SAtul Gupta 			spin_unlock_bh(&adap->ptp_lock);
3524a4569504SAtul Gupta 			kfree(etq->q.sdesc);
3525a4569504SAtul Gupta 			free_txq(adap, &etq->q);
3526a4569504SAtul Gupta 		}
3527a4569504SAtul Gupta 	}
3528a4569504SAtul Gupta 
3529f7917c00SJeff Kirsher 	/* clear the reverse egress queue map */
35304b8e27a8SHariprasad Shenai 	memset(adap->sge.egr_map, 0,
35314b8e27a8SHariprasad Shenai 	       adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
3532f7917c00SJeff Kirsher }
3533f7917c00SJeff Kirsher 
3534f7917c00SJeff Kirsher void t4_sge_start(struct adapter *adap)
3535f7917c00SJeff Kirsher {
3536f7917c00SJeff Kirsher 	adap->sge.ethtxq_rover = 0;
3537f7917c00SJeff Kirsher 	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
3538f7917c00SJeff Kirsher 	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
3539f7917c00SJeff Kirsher }
3540f7917c00SJeff Kirsher 
3541f7917c00SJeff Kirsher /**
3542f7917c00SJeff Kirsher  * t4_sge_stop - disable SGE operation
3543f7917c00SJeff Kirsher  * @adap: the adapter
3544f7917c00SJeff Kirsher  *
3545f7917c00SJeff Kirsher  * Stop tasklets and timers associated with the DMA engine. Note that
3546f7917c00SJeff Kirsher  * this is effective only if measures have been taken to disable any HW
3547f7917c00SJeff Kirsher  * events that may restart them.
3548f7917c00SJeff Kirsher  */
3549f7917c00SJeff Kirsher void t4_sge_stop(struct adapter *adap)
3550f7917c00SJeff Kirsher {
3551f7917c00SJeff Kirsher 	int i;
3552f7917c00SJeff Kirsher 	struct sge *s = &adap->sge;
3553f7917c00SJeff Kirsher 
3554f7917c00SJeff Kirsher 	if (in_interrupt())  /* actions below require waiting */
3555f7917c00SJeff Kirsher 		return;
3556f7917c00SJeff Kirsher 
3557f7917c00SJeff Kirsher 	if (s->rx_timer.function)
3558f7917c00SJeff Kirsher 		del_timer_sync(&s->rx_timer);
3559f7917c00SJeff Kirsher 	if (s->tx_timer.function)
3560f7917c00SJeff Kirsher 		del_timer_sync(&s->tx_timer);
3561f7917c00SJeff Kirsher 
3562ab677ff4SHariprasad Shenai 	if (is_offload(adap)) {
3563ab677ff4SHariprasad Shenai 		struct sge_uld_txq_info *txq_info;
3564f7917c00SJeff Kirsher 
3565ab677ff4SHariprasad Shenai 		txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
3566ab677ff4SHariprasad Shenai 		if (txq_info) {
3567ab677ff4SHariprasad Shenai 			struct sge_uld_txq *txq = txq_info->uldtxq;
3568ab677ff4SHariprasad Shenai 
3569ab677ff4SHariprasad Shenai 			for_each_ofldtxq(&adap->sge, i) {
3570ab677ff4SHariprasad Shenai 				if (txq->q.desc)
3571ab677ff4SHariprasad Shenai 					tasklet_kill(&txq->qresume_tsk);
3572f7917c00SJeff Kirsher 			}
3573ab677ff4SHariprasad Shenai 		}
3574ab677ff4SHariprasad Shenai 	}
3575ab677ff4SHariprasad Shenai 
3576ab677ff4SHariprasad Shenai 	if (is_pci_uld(adap)) {
3577ab677ff4SHariprasad Shenai 		struct sge_uld_txq_info *txq_info;
3578ab677ff4SHariprasad Shenai 
3579ab677ff4SHariprasad Shenai 		txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
3580ab677ff4SHariprasad Shenai 		if (txq_info) {
3581ab677ff4SHariprasad Shenai 			struct sge_uld_txq *txq = txq_info->uldtxq;
3582ab677ff4SHariprasad Shenai 
3583ab677ff4SHariprasad Shenai 			for_each_ofldtxq(&adap->sge, i) {
3584ab677ff4SHariprasad Shenai 				if (txq->q.desc)
3585ab677ff4SHariprasad Shenai 					tasklet_kill(&txq->qresume_tsk);
3586ab677ff4SHariprasad Shenai 			}
3587ab677ff4SHariprasad Shenai 		}
3588ab677ff4SHariprasad Shenai 	}
3589ab677ff4SHariprasad Shenai 
3590f7917c00SJeff Kirsher 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
3591f7917c00SJeff Kirsher 		struct sge_ctrl_txq *cq = &s->ctrlq[i];
3592f7917c00SJeff Kirsher 
3593f7917c00SJeff Kirsher 		if (cq->q.desc)
3594f7917c00SJeff Kirsher 			tasklet_kill(&cq->qresume_tsk);
3595f7917c00SJeff Kirsher 	}
3596f7917c00SJeff Kirsher }
3597f7917c00SJeff Kirsher 
3598f7917c00SJeff Kirsher /**
359906640310SHariprasad Shenai  * t4_sge_init_soft - grab core SGE values needed by SGE code
3600f7917c00SJeff Kirsher  * @adap: the adapter
3601f7917c00SJeff Kirsher  *
360206640310SHariprasad Shenai  * We need to grab the SGE operating parameters that we need to have
360306640310SHariprasad Shenai  * in order to do our job and make sure we can live with them.
3604f7917c00SJeff Kirsher  */
3605f7917c00SJeff Kirsher 
360652367a76SVipul Pandya static int t4_sge_init_soft(struct adapter *adap)
360752367a76SVipul Pandya {
360852367a76SVipul Pandya 	struct sge *s = &adap->sge;
360952367a76SVipul Pandya 	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
361052367a76SVipul Pandya 	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
361152367a76SVipul Pandya 	u32 ingress_rx_threshold;
361252367a76SVipul Pandya 
361352367a76SVipul Pandya 	/*
361452367a76SVipul Pandya 	 * Verify that CPL messages are going to the Ingress Queue for
361552367a76SVipul Pandya 	 * process_responses() and that only packet data is going to the
361652367a76SVipul Pandya 	 * Free Lists.
361752367a76SVipul Pandya 	 */
3618f612b815SHariprasad Shenai 	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
3619f612b815SHariprasad Shenai 	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
362052367a76SVipul Pandya 		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
362152367a76SVipul Pandya 		return -EINVAL;
362252367a76SVipul Pandya 	}
362352367a76SVipul Pandya 
362452367a76SVipul Pandya 	/*
362552367a76SVipul Pandya 	 * Validate the Host Buffer Register Array indices that we want to
362652367a76SVipul Pandya 	 * use ...
362752367a76SVipul Pandya 	 *
362852367a76SVipul Pandya 	 * XXX Note that we should really read through the Host Buffer Size
362952367a76SVipul Pandya 	 * XXX register array and find the indices of the Buffer Sizes which
363052367a76SVipul Pandya 	 * XXX meet our needs!
363152367a76SVipul Pandya 	 */
363252367a76SVipul Pandya #define READ_FL_BUF(x) \
3633f612b815SHariprasad Shenai 	t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
363452367a76SVipul Pandya 
363552367a76SVipul Pandya 	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
363652367a76SVipul Pandya 	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
363752367a76SVipul Pandya 	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
363852367a76SVipul Pandya 	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
363952367a76SVipul Pandya 
364092ddcc7bSKumar Sanghvi 	/* We only bother using the Large Page logic if the Large Page Buffer
364192ddcc7bSKumar Sanghvi 	 * is larger than our Page Size Buffer.
364292ddcc7bSKumar Sanghvi 	 */
364392ddcc7bSKumar Sanghvi 	if (fl_large_pg <= fl_small_pg)
364492ddcc7bSKumar Sanghvi 		fl_large_pg = 0;
364592ddcc7bSKumar Sanghvi 
364652367a76SVipul Pandya #undef READ_FL_BUF
364752367a76SVipul Pandya 
364892ddcc7bSKumar Sanghvi 	/* The Page Size Buffer must be exactly equal to our Page Size and the
364992ddcc7bSKumar Sanghvi 	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
365092ddcc7bSKumar Sanghvi 	 */
365152367a76SVipul Pandya 	if (fl_small_pg != PAGE_SIZE ||
365292ddcc7bSKumar Sanghvi 	    (fl_large_pg & (fl_large_pg-1)) != 0) {
365352367a76SVipul Pandya 		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
365452367a76SVipul Pandya 			fl_small_pg, fl_large_pg);
365552367a76SVipul Pandya 		return -EINVAL;
365652367a76SVipul Pandya 	}
365752367a76SVipul Pandya 	if (fl_large_pg)
365852367a76SVipul Pandya 		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
365952367a76SVipul Pandya 
366052367a76SVipul Pandya 	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
366152367a76SVipul Pandya 	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
366252367a76SVipul Pandya 		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
366352367a76SVipul Pandya 			fl_small_mtu, fl_large_mtu);
366452367a76SVipul Pandya 		return -EINVAL;
366552367a76SVipul Pandya 	}
366652367a76SVipul Pandya 
366752367a76SVipul Pandya 	/*
366852367a76SVipul Pandya 	 * Retrieve our RX interrupt holdoff timer values and counter
366952367a76SVipul Pandya 	 * threshold values from the SGE parameters.
367052367a76SVipul Pandya 	 */
3671f061de42SHariprasad Shenai 	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
3672f061de42SHariprasad Shenai 	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
3673f061de42SHariprasad Shenai 	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
367452367a76SVipul Pandya 	s->timer_val[0] = core_ticks_to_us(adap,
3675f061de42SHariprasad Shenai 		TIMERVALUE0_G(timer_value_0_and_1));
367652367a76SVipul Pandya 	s->timer_val[1] = core_ticks_to_us(adap,
3677f061de42SHariprasad Shenai 		TIMERVALUE1_G(timer_value_0_and_1));
367852367a76SVipul Pandya 	s->timer_val[2] = core_ticks_to_us(adap,
3679f061de42SHariprasad Shenai 		TIMERVALUE2_G(timer_value_2_and_3));
368052367a76SVipul Pandya 	s->timer_val[3] = core_ticks_to_us(adap,
3681f061de42SHariprasad Shenai 		TIMERVALUE3_G(timer_value_2_and_3));
368252367a76SVipul Pandya 	s->timer_val[4] = core_ticks_to_us(adap,
3683f061de42SHariprasad Shenai 		TIMERVALUE4_G(timer_value_4_and_5));
368452367a76SVipul Pandya 	s->timer_val[5] = core_ticks_to_us(adap,
3685f061de42SHariprasad Shenai 		TIMERVALUE5_G(timer_value_4_and_5));
368652367a76SVipul Pandya 
3687f612b815SHariprasad Shenai 	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
3688f612b815SHariprasad Shenai 	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
3689f612b815SHariprasad Shenai 	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
3690f612b815SHariprasad Shenai 	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
3691f612b815SHariprasad Shenai 	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
369252367a76SVipul Pandya 
369352367a76SVipul Pandya 	return 0;
369452367a76SVipul Pandya }
369552367a76SVipul Pandya 
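The fl_pg_order assignment above turns the programmed large-page free-list buffer size into a page-allocation order. A standalone sketch with assumed sizes (4 KB base pages, a 64 KB large buffer; values are illustrative, not read from a real SGE_FL_BUFFER_SIZE register):

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;			/* 4 KB pages */
	unsigned int page_size = 1u << page_shift;
	unsigned int fl_small_pg = page_size;		/* must equal PAGE_SIZE */
	unsigned int fl_large_pg = 65536;		/* must be 0 or a power of 2 */
	unsigned int fl_pg_order = 0;

	/* mirrors: s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT */
	if (fl_large_pg > fl_small_pg && !(fl_large_pg & (fl_large_pg - 1))) {
		unsigned int v = fl_large_pg, log2v = 0;

		while (v >>= 1)		/* integer log2 of the buffer size */
			log2v++;
		fl_pg_order = log2v - page_shift;
	}
	printf("fl_pg_order = %u (2^%u pages per large Rx buffer)\n",
	       fl_pg_order, fl_pg_order);
	return 0;
}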
369606640310SHariprasad Shenai /**
369706640310SHariprasad Shenai  * t4_sge_init - initialize SGE
369806640310SHariprasad Shenai  * @adap: the adapter
369906640310SHariprasad Shenai  *
370006640310SHariprasad Shenai  * Perform low-level SGE code initialization needed every time after a
370106640310SHariprasad Shenai  * chip reset.
370252367a76SVipul Pandya  */
370352367a76SVipul Pandya int t4_sge_init(struct adapter *adap)
370452367a76SVipul Pandya {
370552367a76SVipul Pandya 	struct sge *s = &adap->sge;
3706acac5962SHariprasad Shenai 	u32 sge_control, sge_conm_ctrl;
3707c2b955e0SKumar Sanghvi 	int ret, egress_threshold;
370852367a76SVipul Pandya 
370952367a76SVipul Pandya 	/*
371052367a76SVipul Pandya 	 * Ingress Padding Boundary and Egress Status Page Size are set up by
371152367a76SVipul Pandya 	 * t4_fixup_host_params().
371252367a76SVipul Pandya 	 */
3713f612b815SHariprasad Shenai 	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
3714f612b815SHariprasad Shenai 	s->pktshift = PKTSHIFT_G(sge_control);
3715f612b815SHariprasad Shenai 	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
3716ce8f407aSHariprasad Shenai 
3717acac5962SHariprasad Shenai 	s->fl_align = t4_fl_pkt_align(adap);
371852367a76SVipul Pandya 	ret = t4_sge_init_soft(adap);
371952367a76SVipul Pandya 	if (ret < 0)
372052367a76SVipul Pandya 		return ret;
372152367a76SVipul Pandya 
372252367a76SVipul Pandya 	/*
372352367a76SVipul Pandya 	 * A FL with <= fl_starve_thres buffers is starving and a periodic
372452367a76SVipul Pandya 	 * timer will attempt to refill it. This needs to be larger than the
372552367a76SVipul Pandya 	 * SGE's Egress Congestion Threshold. If it isn't, then we can get
372652367a76SVipul Pandya 	 * stuck waiting for new packets while the SGE is waiting for us to
372752367a76SVipul Pandya 	 * give it more Free List entries. (Note that the SGE's Egress
3728c2b955e0SKumar Sanghvi 	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
3729c2b955e0SKumar Sanghvi 	 * there was only a single field to control this. For T5 there's the
3730c2b955e0SKumar Sanghvi 	 * original field which now only applies to Unpacked Mode Free List
3731c2b955e0SKumar Sanghvi 	 * buffers and a new field which only applies to Packed Mode Free List
3732c2b955e0SKumar Sanghvi 	 * buffers.
373352367a76SVipul Pandya 	 */
3734f612b815SHariprasad Shenai 	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
3735676d6a75SHariprasad Shenai 	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
3736676d6a75SHariprasad Shenai 	case CHELSIO_T4:
3737f612b815SHariprasad Shenai 		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
3738676d6a75SHariprasad Shenai 		break;
3739676d6a75SHariprasad Shenai 	case CHELSIO_T5:
3740f612b815SHariprasad Shenai 		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3741676d6a75SHariprasad Shenai 		break;
3742676d6a75SHariprasad Shenai 	case CHELSIO_T6:
3743676d6a75SHariprasad Shenai 		egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3744676d6a75SHariprasad Shenai 		break;
3745676d6a75SHariprasad Shenai 	default:
3746676d6a75SHariprasad Shenai 		dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
3747676d6a75SHariprasad Shenai 			CHELSIO_CHIP_VERSION(adap->params.chip));
3748676d6a75SHariprasad Shenai 		return -EINVAL;
3749676d6a75SHariprasad Shenai 	}
3750c2b955e0SKumar Sanghvi 	s->fl_starve_thres = 2*egress_threshold + 1;
375152367a76SVipul Pandya 
3752a3bfb617SHariprasad Shenai 	t4_idma_monitor_init(adap, &s->idma_monitor);
3753a3bfb617SHariprasad Shenai 
37541ecc7b7aSHariprasad Shenai 	/* Set up timers used for recurring callbacks to process RX and TX
37551ecc7b7aSHariprasad Shenai 	 * administrative tasks.
37561ecc7b7aSHariprasad Shenai 	 */
37570e23daebSKees Cook 	timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
37580e23daebSKees Cook 	timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
3759a3bfb617SHariprasad Shenai 
3760f7917c00SJeff Kirsher 	spin_lock_init(&s->intrq_lock);
376152367a76SVipul Pandya 
376252367a76SVipul Pandya 	return 0;
3763f7917c00SJeff Kirsher }
3764
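The starvation threshold computed in t4_sge_init() only has to satisfy one relationship: it must exceed the hardware egress congestion threshold, and since that threshold is expressed in units of two free-list pointers, the driver uses 2 * threshold + 1. A standalone sketch with an assumed threshold value (the field width and value are illustrative, not read from SGE_CONM_CTRL):

#include <stdio.h>

int main(void)
{
	unsigned int egress_threshold = 64;	/* example congestion threshold, in 2-pointer units */
	unsigned int fl_starve_thres;

	/* mirrors: s->fl_starve_thres = 2 * egress_threshold + 1 */
	fl_starve_thres = 2 * egress_threshold + 1;

	printf("refill a free list once it drops to <= %u buffers\n",
	       fl_starve_thres);
	return 0;
}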