// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

/* Forward declarations for the completion handlers referenced below. */
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg);

static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);

static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);

/* Post a Tx descriptor to the queue, optionally ringing the doorbell,
 * and register cb_func/cb_arg to be invoked when the descriptor completes.
 */
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

/* Post an Rx descriptor; same contract as ionic_txq_post() but with the
 * Rx buffer debug counter bumped after the post.
 */
static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
}

/* Map an ionic queue to its netdev Tx queue (same index). */
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

/* Allocate an skb for a received packet.
 *
 * frags=true gets a page-frag skb from napi_get_frags() (len is unused in
 * that case); frags=false allocates a linear skb of the given len.
 * Returns NULL on failure after bumping the alloc_err stat.
 */
static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
					  unsigned int len, bool frags)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	netdev = lif->netdev;
	stats = q_to_rx_stats(q);

	if (frags)
		skb = napi_get_frags(&q_to_qcq(q)->napi);
	else
		skb = netdev_alloc_skb_ip_align(netdev, len);

	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	return skb;
}

/* Build a frag-based skb from the pages attached to the descriptor.
 *
 * Each page is DMA-unmapped and handed to the skb (ownership transfers;
 * page_info->page is cleared so ionic_rx_fill() will allocate a fresh
 * page for this slot).  Walks num_sg_elems + 1 buffers: the main
 * descriptor buffer plus one per SG element.
 */
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	prefetch(page_address(page_info->page) + NET_IP_ALIGN);

	skb = ionic_rx_skb_alloc(q, len, true);
	if (unlikely(!skb))
		return NULL;

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!page_info->page)) {
			/* give back the partially-built napi frag skb */
			struct napi_struct *napi = &q_to_qcq(q)->napi;

			napi->skb = NULL;
			dev_kfree_skb(skb);
			return NULL;
		}

		/* each buffer is one full page; the last frag is partial */
		frag_len = min(len, (u16)PAGE_SIZE);
		len -= frag_len;

		dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
			       PAGE_SIZE, DMA_FROM_DEVICE);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page_info->page, 0, frag_len, PAGE_SIZE);
		page_info->page = NULL;
		page_info++;
		i--;
	} while (i > 0);

	return skb;
}

/* Copy a small packet out of the first Rx page into a fresh linear skb.
 *
 * The page stays mapped and attached to the descriptor so it can be
 * recycled by ionic_rx_fill(); only a cpu<->device sync is done around
 * the memcpy.  Used when comp->len <= lif->rx_copybreak.
 */
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	skb = ionic_rx_skb_alloc(q, len, false);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(!page_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(page_info->page), len);
	dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

/* Per-descriptor Rx completion handler: build the skb (copybreak or
 * frags), fill in RSS hash / checksum / VLAN offload results from the
 * completion, and hand the packet to GRO.  Drops (and counts) packets
 * with a non-zero completion status or failed skb build.
 */
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	stats = q_to_rx_stats(q);
	netdev = q->lif->netdev;

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, cq_info);
	else
		skb = ionic_rx_frags(q, desc_info, cq_info);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM)) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
			/* NOTE(review): comp->csum is only 16 bits wide but
			 * is stored as a full __wsum for CHECKSUM_COMPLETE —
			 * presumably the HW reports a folded 1's-complement
			 * sum; confirm against the device spec.
			 */
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__wsum)le16_to_cpu(comp->csum);
			stats->csum_complete++;
		}
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	/* frag skbs were built on the napi frag context, so they go up
	 * via napi_gro_frags(); copybreak skbs go up directly
	 */
	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

/* Service one Rx completion: returns false if the completion is not yet
 * valid (color mismatch), the queue is empty, or the completion index
 * does not match the tail descriptor; otherwise cleans exactly one
 * descriptor and advances the queue tail.
 */
static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail->index == q->head->index)
		return false;

	desc_info = q->tail;
	if (desc_info->index != le16_to_cpu(comp->comp_index))
		return false;

	q->tail = desc_info->next;

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

/* Drain all pending Rx completions and return interrupt credits. */
void ionic_rx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_rx_service, NULL, NULL);

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

/* Allocate and DMA-map one Rx buffer page.
 *
 * On success *dma_addr holds the mapped address; on failure (alloc or
 * map) the relevant stat is bumped and NULL is returned.
 */
static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
					dma_addr_t *dma_addr)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct device *dev;
	struct page *page;

	netdev = lif->netdev;
	dev = lif->ionic->dev;
	stats = q_to_rx_stats(q);
	page = alloc_page(GFP_ATOMIC);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: Page alloc failed on %s!\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, *dma_addr))) {
		__free_page(page);
		net_err_ratelimited("%s: DMA single map failed on %s!\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return NULL;
	}

	return page;
}

/* Unmap and free one Rx buffer page previously set up by
 * ionic_rx_page_alloc(); a NULL page is logged and ignored.
 */
static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
			       dma_addr_t dma_addr)
{
	struct ionic_lif *lif = q->lif;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;

	if (unlikely(!page)) {
		net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
				    netdev->name, q->name);
		return;
	}

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	__free_page(page);
}

/* Refill the Rx ring with page buffers up to the available space.
 *
 * Buffers still holding a page (copybreak recycling) are re-posted
 * as-is; otherwise a fresh page is allocated and mapped for the main
 * descriptor and for each SG element needed to cover one MTU-sized
 * frame.  On allocation failure the descriptor is zeroed and the fill
 * stops early.  The doorbell is rung once at the end.
 *
 * NOTE(review): len = mtu + ETH_HLEN does not reserve room for a VLAN
 * tag (VLAN_HLEN) — confirm whether the device strips tags before DMA.
 */
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_page_info *page_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int seg_len;
	unsigned int nfrags;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN;
	nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;

	for (i = ionic_q_space_avail(q); i; i--) {
		remain_len = len;
		desc_info = q->head;
		desc = desc_info->desc;
		sg_desc = desc_info->sg_desc;
		page_info = &desc_info->pages[0];

		if (page_info->page) { /* recycle the buffer */
			ionic_rxq_post(q, false, ionic_rx_clean, NULL);
			continue;
		}

		/* fill main descriptor - pages[0] */
		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->npages = nfrags;
		page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
		if (unlikely(!page_info->page)) {
			desc->addr = 0;
			desc->len = 0;
			return;
		}
		desc->addr = cpu_to_le64(page_info->dma_addr);
		seg_len = min_t(unsigned int, PAGE_SIZE, len);
		desc->len = cpu_to_le16(seg_len);
		remain_len -= seg_len;
		page_info++;

		/* fill sg descriptors - pages[1..n] */
		for (j = 0; j < nfrags - 1; j++) {
			/* NOTE(review): this continue skips to the next sg
			 * element without advancing page_info or adjusting
			 * remain_len — verify the recycle path keeps
			 * page_info and elems[j] in step.
			 */
			if (page_info->page) /* recycle the sg buffer */
				continue;

			sg_elem = &sg_desc->elems[j];
			page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
			if (unlikely(!page_info->page)) {
				sg_elem->addr = 0;
				sg_elem->len = 0;
				return;
			}
			sg_elem->addr = cpu_to_le64(page_info->dma_addr);
			seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
			sg_elem->len = cpu_to_le16(seg_len);
			remain_len -= seg_len;
			page_info++;
		}

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	/* single doorbell for the whole batch of posted buffers */
	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head->index);
}

/* void* trampoline so ionic_rx_fill can be used as a callback. */
static void ionic_rx_fill_cb(void *arg)
{
	ionic_rx_fill(arg);
}

/* Release all buffers still posted on the Rx ring (queue teardown):
 * zero each descriptor and unmap/free every attached page.
 */
void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *cur;
	struct ionic_rxq_desc *desc;
	unsigned int i;

	for (cur = q->tail; cur != q->head; cur = cur->next) {
		desc = cur->desc;
		desc->addr = 0;
		desc->len = 0;

		for (i = 0; i < cur->npages; i++) {
			if (likely(cur->pages[i].page)) {
				ionic_rx_page_free(q, cur->pages[i].page,
						   cur->pages[i].dma_addr);
				cur->pages[i].page = NULL;
				cur->pages[i].dma_addr = 0;
			}
		}

		cur->cb_arg = NULL;
	}
}

/* NAPI poll for a combined Tx/Rx interrupt: service Tx completions
 * (bounded by lif->tx_budget) then Rx completions (bounded by budget),
 * refill the Rx ring if anything was received, and only unmask the
 * interrupt when BOTH directions came in under budget.  Credits for
 * both queues are returned on the shared interrupt.
 */
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 work_done = 0;
	u32 flags = 0;
	bool unmask;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi].qcq->cq;

	tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);
	if (rx_work_done)
		ionic_rx_fill_cb(rxcq->bound_q);

	unmask = (rx_work_done < budget) && (tx_work_done < lif->tx_budget);

	if (unmask && napi_complete_done(napi, rx_work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
		work_done = rx_work_done;
	} else {
		/* still work pending: ask to be polled again */
		work_done = budget;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);

	return work_done;
}

/* DMA-map a linear Tx buffer; returns 0 on mapping failure
 * (0 doubles as the error sentinel — callers check with
 * dma_mapping_error as well).
 */
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

/* DMA-map a piece of an skb page frag for Tx; error is reported via
 * dma_mapping_error() on the returned address.
 */
static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

/* Tx completion handler for one descriptor: unmap its buffers (decoded
 * from the descriptor command), and if this descriptor carried the skb
 * (cb_arg set), free it, credit BQL, and wake the subqueue if it had
 * been stopped for lack of descriptors.
 */
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = desc_info->desc;
	struct device *dev = q->lif->ionic->dev;
	u8 opcode, flags, nsge;
	u16 queue_index;
	unsigned int i;
	u64 addr;

	decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
			    &opcode, &flags, &nsge, &addr);

	/* use unmap_single only if either this is not TSO,
	 * or this is first descriptor of a TSO
	 */
	if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
	    flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
		dma_unmap_single(dev, (dma_addr_t)addr,
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, (dma_addr_t)addr,
			       le16_to_cpu(desc->len), DMA_TO_DEVICE);

	for (i = 0; i < nsge; i++, elem++)
		dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
			       le16_to_cpu(elem->len), DMA_TO_DEVICE);

	if (cb_arg) {
		struct sk_buff *skb = cb_arg;
		u32 len = skb->len;

		queue_index = skb_get_queue_mapping(skb);
		if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
						      queue_index))) {
			netif_wake_subqueue(q->lif->netdev, queue_index);
			q->wake++;
		}
		dev_kfree_skb_any(skb);
		stats->clean++;
		netdev_tx_completed_queue(q_to_ndq(q), 1, len);
	}
}

/* Service one Tx completion: a single completion may cover several
 * queue entries, so clean from the tail up to and including the entry
 * whose index matches comp->comp_index.
 */
static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_txq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = q->tail;
		q->tail = desc_info->next;
		ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (desc_info->index != le16_to_cpu(comp->comp_index));

	return true;
}

/* Drain all pending Tx completions and return interrupt credits. */
void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

/* Clean up Tx entries that never completed (queue teardown);
 * cq_info is NULL since there is no completion to decode.
 */
void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int done = 0;	/* NOTE(review): counted but never used */

	/* walk the not completed tx entries, if any */
	while (q->head != q->tail) {
		desc_info = q->tail;
		q->tail = desc_info->next;
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
		done++;
	}
}

/* For encapsulated TSO: zero the inner L4 checksum fields and preload
 * them with the pseudo-header checksum (IP length 0) so the device can
 * finish the per-segment checksums.
 */
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

/* Non-encapsulated variant of the above: preload the outer TCP
 * checksum with the pseudo-header sum for hardware TSO.
 */
static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

/* Fill and post one TSO descriptor.  start/done mark the first/last
 * descriptor of the TSO chain (SOT/EOT flags); only the final
 * descriptor carries the skb as completion cb_arg, records the BQL
 * bytes, and may ring the doorbell.
 */
static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (done) {
		skb_tx_timestamp(skb);
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, false, ionic_tx_clean, NULL);
	}
}

/* Return the next Tx descriptor at the queue head and point *elem at
 * its SG element list.
 */
static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
						struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
	struct ionic_txq_desc *desc = q->head->desc;

	*elem = sg_desc->elems;
	return desc;
}

/* Segment an skb for hardware TSO: preload the TCP pseudo checksum,
 * then chop the linear data (and, beyond this excerpt, the page frags)
 * into mss-sized descriptor chains.
 *
 * NOTE(review): this function continues past the end of this excerpt
 * (frag walk, stats accounting, err_out_abort unwinding via
 * rewind/abort are not visible here).
 */
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *abort = q->head;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_desc_info *rewind = abort;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int frag_left = 0;
	unsigned int offset = 0;
	unsigned int len_left;
	dma_addr_t desc_addr;
	unsigned int hdrlen;
	unsigned int nfrags;
	unsigned int seglen;
	u64 total_bytes = 0;
	u64 total_pkts = 0;
	unsigned int left;
	unsigned int len;
	unsigned int mss;
	skb_frag_t *frag;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	nfrags = skb_shinfo(skb)->nr_frags;
	len_left = skb->len - skb_headlen(skb);
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	seglen = hdrlen + mss;
	left = skb_headlen(skb);

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop skb->data up into desc segments */

	while (left > 0) {
		len = min(seglen, left);
		frag_left = seglen - len;
		desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
		if (dma_mapping_error(dev, desc_addr))
			goto err_out_abort;
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (nfrags > 0 && frag_left > 0)
			continue;
		done = (nfrags == 0 && left == 0);
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss,
				  outer_csum,
				  vlan_tci, has_vlan,
				  start, done);
7750f3154e6SShannon Nelson total_pkts++; 7760f3154e6SShannon Nelson total_bytes += start ? len : len + hdrlen; 7770f3154e6SShannon Nelson desc = ionic_tx_tso_next(q, &elem); 7780f3154e6SShannon Nelson start = false; 7790f3154e6SShannon Nelson seglen = mss; 7800f3154e6SShannon Nelson } 7810f3154e6SShannon Nelson 7820f3154e6SShannon Nelson /* Chop skb frags into desc segments */ 7830f3154e6SShannon Nelson 7840f3154e6SShannon Nelson for (frag = skb_shinfo(skb)->frags; len_left; frag++) { 7850f3154e6SShannon Nelson offset = 0; 7860f3154e6SShannon Nelson left = skb_frag_size(frag); 7870f3154e6SShannon Nelson len_left -= left; 7880f3154e6SShannon Nelson nfrags--; 7890f3154e6SShannon Nelson stats->frags++; 7900f3154e6SShannon Nelson 7910f3154e6SShannon Nelson while (left > 0) { 7920f3154e6SShannon Nelson if (frag_left > 0) { 7930f3154e6SShannon Nelson len = min(frag_left, left); 7940f3154e6SShannon Nelson frag_left -= len; 7950f3154e6SShannon Nelson elem->addr = 7960f3154e6SShannon Nelson cpu_to_le64(ionic_tx_map_frag(q, frag, 7970f3154e6SShannon Nelson offset, len)); 7980f3154e6SShannon Nelson if (dma_mapping_error(dev, elem->addr)) 7990f3154e6SShannon Nelson goto err_out_abort; 8000f3154e6SShannon Nelson elem->len = cpu_to_le16(len); 8010f3154e6SShannon Nelson elem++; 8020f3154e6SShannon Nelson desc_nsge++; 8030f3154e6SShannon Nelson left -= len; 8040f3154e6SShannon Nelson offset += len; 8050f3154e6SShannon Nelson if (nfrags > 0 && frag_left > 0) 8060f3154e6SShannon Nelson continue; 8070f3154e6SShannon Nelson done = (nfrags == 0 && left == 0); 8080f3154e6SShannon Nelson ionic_tx_tso_post(q, desc, skb, desc_addr, 8090f3154e6SShannon Nelson desc_nsge, desc_len, 8100f3154e6SShannon Nelson hdrlen, mss, outer_csum, 8110f3154e6SShannon Nelson vlan_tci, has_vlan, 8120f3154e6SShannon Nelson start, done); 8130f3154e6SShannon Nelson total_pkts++; 8140f3154e6SShannon Nelson total_bytes += start ? 
len : len + hdrlen; 8150f3154e6SShannon Nelson desc = ionic_tx_tso_next(q, &elem); 8160f3154e6SShannon Nelson start = false; 8170f3154e6SShannon Nelson } else { 8180f3154e6SShannon Nelson len = min(mss, left); 8190f3154e6SShannon Nelson frag_left = mss - len; 8200f3154e6SShannon Nelson desc_addr = ionic_tx_map_frag(q, frag, 8210f3154e6SShannon Nelson offset, len); 8220f3154e6SShannon Nelson if (dma_mapping_error(dev, desc_addr)) 8230f3154e6SShannon Nelson goto err_out_abort; 8240f3154e6SShannon Nelson desc_len = len; 8250f3154e6SShannon Nelson desc_nsge = 0; 8260f3154e6SShannon Nelson left -= len; 8270f3154e6SShannon Nelson offset += len; 8280f3154e6SShannon Nelson if (nfrags > 0 && frag_left > 0) 8290f3154e6SShannon Nelson continue; 8300f3154e6SShannon Nelson done = (nfrags == 0 && left == 0); 8310f3154e6SShannon Nelson ionic_tx_tso_post(q, desc, skb, desc_addr, 8320f3154e6SShannon Nelson desc_nsge, desc_len, 8330f3154e6SShannon Nelson hdrlen, mss, outer_csum, 8340f3154e6SShannon Nelson vlan_tci, has_vlan, 8350f3154e6SShannon Nelson start, done); 8360f3154e6SShannon Nelson total_pkts++; 8370f3154e6SShannon Nelson total_bytes += start ? 
len : len + hdrlen; 8380f3154e6SShannon Nelson desc = ionic_tx_tso_next(q, &elem); 8390f3154e6SShannon Nelson start = false; 8400f3154e6SShannon Nelson } 8410f3154e6SShannon Nelson } 8420f3154e6SShannon Nelson } 8430f3154e6SShannon Nelson 8440f3154e6SShannon Nelson stats->pkts += total_pkts; 8450f3154e6SShannon Nelson stats->bytes += total_bytes; 8460f3154e6SShannon Nelson stats->tso++; 847f64e0c56SShannon Nelson stats->tso_bytes += total_bytes; 8480f3154e6SShannon Nelson 8490f3154e6SShannon Nelson return 0; 8500f3154e6SShannon Nelson 8510f3154e6SShannon Nelson err_out_abort: 8520f3154e6SShannon Nelson while (rewind->desc != q->head->desc) { 8530f3154e6SShannon Nelson ionic_tx_clean(q, rewind, NULL, NULL); 8540f3154e6SShannon Nelson rewind = rewind->next; 8550f3154e6SShannon Nelson } 8560f3154e6SShannon Nelson q->head = abort; 8570f3154e6SShannon Nelson 8580f3154e6SShannon Nelson return -ENOMEM; 8590f3154e6SShannon Nelson } 8600f3154e6SShannon Nelson 8610f3154e6SShannon Nelson static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) 8620f3154e6SShannon Nelson { 8630f3154e6SShannon Nelson struct ionic_tx_stats *stats = q_to_tx_stats(q); 8640f3154e6SShannon Nelson struct ionic_txq_desc *desc = q->head->desc; 8650f3154e6SShannon Nelson struct device *dev = q->lif->ionic->dev; 8660f3154e6SShannon Nelson dma_addr_t dma_addr; 8670f3154e6SShannon Nelson bool has_vlan; 8680f3154e6SShannon Nelson u8 flags = 0; 8690f3154e6SShannon Nelson bool encap; 8700f3154e6SShannon Nelson u64 cmd; 8710f3154e6SShannon Nelson 8720f3154e6SShannon Nelson has_vlan = !!skb_vlan_tag_present(skb); 8730f3154e6SShannon Nelson encap = skb->encapsulation; 8740f3154e6SShannon Nelson 8750f3154e6SShannon Nelson dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); 8760f3154e6SShannon Nelson if (dma_mapping_error(dev, dma_addr)) 8770f3154e6SShannon Nelson return -ENOMEM; 8780f3154e6SShannon Nelson 8790f3154e6SShannon Nelson flags |= has_vlan ? 
IONIC_TXQ_DESC_FLAG_VLAN : 0; 8800f3154e6SShannon Nelson flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; 8810f3154e6SShannon Nelson 8820f3154e6SShannon Nelson cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL, 8830f3154e6SShannon Nelson flags, skb_shinfo(skb)->nr_frags, dma_addr); 8840f3154e6SShannon Nelson desc->cmd = cpu_to_le64(cmd); 8850f3154e6SShannon Nelson desc->len = cpu_to_le16(skb_headlen(skb)); 8860f3154e6SShannon Nelson desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb)); 8870f3154e6SShannon Nelson desc->csum_offset = cpu_to_le16(skb->csum_offset); 888f64e0c56SShannon Nelson if (has_vlan) { 889f64e0c56SShannon Nelson desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); 890f64e0c56SShannon Nelson stats->vlan_inserted++; 891f64e0c56SShannon Nelson } 8920f3154e6SShannon Nelson 8930f3154e6SShannon Nelson if (skb->csum_not_inet) 8940f3154e6SShannon Nelson stats->crc32_csum++; 8950f3154e6SShannon Nelson else 8960f3154e6SShannon Nelson stats->csum++; 8970f3154e6SShannon Nelson 8980f3154e6SShannon Nelson return 0; 8990f3154e6SShannon Nelson } 9000f3154e6SShannon Nelson 9010f3154e6SShannon Nelson static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb) 9020f3154e6SShannon Nelson { 9030f3154e6SShannon Nelson struct ionic_tx_stats *stats = q_to_tx_stats(q); 9040f3154e6SShannon Nelson struct ionic_txq_desc *desc = q->head->desc; 9050f3154e6SShannon Nelson struct device *dev = q->lif->ionic->dev; 9060f3154e6SShannon Nelson dma_addr_t dma_addr; 9070f3154e6SShannon Nelson bool has_vlan; 9080f3154e6SShannon Nelson u8 flags = 0; 9090f3154e6SShannon Nelson bool encap; 9100f3154e6SShannon Nelson u64 cmd; 9110f3154e6SShannon Nelson 9120f3154e6SShannon Nelson has_vlan = !!skb_vlan_tag_present(skb); 9130f3154e6SShannon Nelson encap = skb->encapsulation; 9140f3154e6SShannon Nelson 9150f3154e6SShannon Nelson dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); 9160f3154e6SShannon Nelson if (dma_mapping_error(dev, dma_addr)) 
9170f3154e6SShannon Nelson return -ENOMEM; 9180f3154e6SShannon Nelson 9190f3154e6SShannon Nelson flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; 9200f3154e6SShannon Nelson flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; 9210f3154e6SShannon Nelson 9220f3154e6SShannon Nelson cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE, 9230f3154e6SShannon Nelson flags, skb_shinfo(skb)->nr_frags, dma_addr); 9240f3154e6SShannon Nelson desc->cmd = cpu_to_le64(cmd); 9250f3154e6SShannon Nelson desc->len = cpu_to_le16(skb_headlen(skb)); 926f64e0c56SShannon Nelson if (has_vlan) { 9270f3154e6SShannon Nelson desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); 928f64e0c56SShannon Nelson stats->vlan_inserted++; 929f64e0c56SShannon Nelson } 9300f3154e6SShannon Nelson 931f64e0c56SShannon Nelson stats->csum_none++; 9320f3154e6SShannon Nelson 9330f3154e6SShannon Nelson return 0; 9340f3154e6SShannon Nelson } 9350f3154e6SShannon Nelson 9360f3154e6SShannon Nelson static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb) 9370f3154e6SShannon Nelson { 9380f3154e6SShannon Nelson struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; 9390f3154e6SShannon Nelson unsigned int len_left = skb->len - skb_headlen(skb); 9400f3154e6SShannon Nelson struct ionic_txq_sg_elem *elem = sg_desc->elems; 9410f3154e6SShannon Nelson struct ionic_tx_stats *stats = q_to_tx_stats(q); 9420f3154e6SShannon Nelson struct device *dev = q->lif->ionic->dev; 9430f3154e6SShannon Nelson dma_addr_t dma_addr; 9440f3154e6SShannon Nelson skb_frag_t *frag; 9450f3154e6SShannon Nelson u16 len; 9460f3154e6SShannon Nelson 9470f3154e6SShannon Nelson for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) { 9480f3154e6SShannon Nelson len = skb_frag_size(frag); 9490f3154e6SShannon Nelson elem->len = cpu_to_le16(len); 9500f3154e6SShannon Nelson dma_addr = ionic_tx_map_frag(q, frag, 0, len); 9510f3154e6SShannon Nelson if (dma_mapping_error(dev, dma_addr)) 9520f3154e6SShannon Nelson return -ENOMEM; 
9530f3154e6SShannon Nelson elem->addr = cpu_to_le64(dma_addr); 9540f3154e6SShannon Nelson len_left -= len; 9550f3154e6SShannon Nelson stats->frags++; 9560f3154e6SShannon Nelson } 9570f3154e6SShannon Nelson 9580f3154e6SShannon Nelson return 0; 9590f3154e6SShannon Nelson } 9600f3154e6SShannon Nelson 9610f3154e6SShannon Nelson static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) 9620f3154e6SShannon Nelson { 9630f3154e6SShannon Nelson struct ionic_tx_stats *stats = q_to_tx_stats(q); 9640f3154e6SShannon Nelson int err; 9650f3154e6SShannon Nelson 9660f3154e6SShannon Nelson /* set up the initial descriptor */ 9670f3154e6SShannon Nelson if (skb->ip_summed == CHECKSUM_PARTIAL) 9680f3154e6SShannon Nelson err = ionic_tx_calc_csum(q, skb); 9690f3154e6SShannon Nelson else 9700f3154e6SShannon Nelson err = ionic_tx_calc_no_csum(q, skb); 9710f3154e6SShannon Nelson if (err) 9720f3154e6SShannon Nelson return err; 9730f3154e6SShannon Nelson 9740f3154e6SShannon Nelson /* add frags */ 9750f3154e6SShannon Nelson err = ionic_tx_skb_frags(q, skb); 9760f3154e6SShannon Nelson if (err) 9770f3154e6SShannon Nelson return err; 9780f3154e6SShannon Nelson 9790f3154e6SShannon Nelson skb_tx_timestamp(skb); 9800f3154e6SShannon Nelson stats->pkts++; 9810f3154e6SShannon Nelson stats->bytes += skb->len; 9820f3154e6SShannon Nelson 9830f3154e6SShannon Nelson netdev_tx_sent_queue(q_to_ndq(q), skb->len); 9840f3154e6SShannon Nelson ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb); 9850f3154e6SShannon Nelson 9860f3154e6SShannon Nelson return 0; 9870f3154e6SShannon Nelson } 9880f3154e6SShannon Nelson 9890f3154e6SShannon Nelson static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) 9900f3154e6SShannon Nelson { 9915b3f3f2aSShannon Nelson int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems; 9920f3154e6SShannon Nelson struct ionic_tx_stats *stats = q_to_tx_stats(q); 9930f3154e6SShannon Nelson int err; 9940f3154e6SShannon Nelson 9950f3154e6SShannon Nelson /* 
If TSO, need roundup(skb->len/mss) descs */ 9960f3154e6SShannon Nelson if (skb_is_gso(skb)) 9970f3154e6SShannon Nelson return (skb->len / skb_shinfo(skb)->gso_size) + 1; 9980f3154e6SShannon Nelson 9990f3154e6SShannon Nelson /* If non-TSO, just need 1 desc and nr_frags sg elems */ 10005b3f3f2aSShannon Nelson if (skb_shinfo(skb)->nr_frags <= sg_elems) 10010f3154e6SShannon Nelson return 1; 10020f3154e6SShannon Nelson 10030f3154e6SShannon Nelson /* Too many frags, so linearize */ 10040f3154e6SShannon Nelson err = skb_linearize(skb); 10050f3154e6SShannon Nelson if (err) 10060f3154e6SShannon Nelson return err; 10070f3154e6SShannon Nelson 10080f3154e6SShannon Nelson stats->linearize++; 10090f3154e6SShannon Nelson 10100f3154e6SShannon Nelson /* Need 1 desc and zero sg elems */ 10110f3154e6SShannon Nelson return 1; 10120f3154e6SShannon Nelson } 10130f3154e6SShannon Nelson 10140f3154e6SShannon Nelson static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs) 10150f3154e6SShannon Nelson { 10160f3154e6SShannon Nelson int stopped = 0; 10170f3154e6SShannon Nelson 10180f3154e6SShannon Nelson if (unlikely(!ionic_q_has_space(q, ndescs))) { 10190f3154e6SShannon Nelson netif_stop_subqueue(q->lif->netdev, q->index); 10200f3154e6SShannon Nelson q->stop++; 10210f3154e6SShannon Nelson stopped = 1; 10220f3154e6SShannon Nelson 10230f3154e6SShannon Nelson /* Might race with ionic_tx_clean, check again */ 10240f3154e6SShannon Nelson smp_rmb(); 10250f3154e6SShannon Nelson if (ionic_q_has_space(q, ndescs)) { 10260f3154e6SShannon Nelson netif_wake_subqueue(q->lif->netdev, q->index); 10270f3154e6SShannon Nelson stopped = 0; 10280f3154e6SShannon Nelson } 10290f3154e6SShannon Nelson } 10300f3154e6SShannon Nelson 10310f3154e6SShannon Nelson return stopped; 10320f3154e6SShannon Nelson } 10330f3154e6SShannon Nelson 10340f3154e6SShannon Nelson netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev) 10350f3154e6SShannon Nelson { 10360f3154e6SShannon Nelson u16 queue_index = 
skb_get_queue_mapping(skb); 10370f3154e6SShannon Nelson struct ionic_lif *lif = netdev_priv(netdev); 10380f3154e6SShannon Nelson struct ionic_queue *q; 10390f3154e6SShannon Nelson int ndescs; 10400f3154e6SShannon Nelson int err; 10410f3154e6SShannon Nelson 1042c6d3d73aSShannon Nelson if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) { 10430f3154e6SShannon Nelson dev_kfree_skb(skb); 10440f3154e6SShannon Nelson return NETDEV_TX_OK; 10450f3154e6SShannon Nelson } 10460f3154e6SShannon Nelson 10470f3154e6SShannon Nelson if (unlikely(!lif_to_txqcq(lif, queue_index))) 10480f3154e6SShannon Nelson queue_index = 0; 10490f3154e6SShannon Nelson q = lif_to_txq(lif, queue_index); 10500f3154e6SShannon Nelson 10510f3154e6SShannon Nelson ndescs = ionic_tx_descs_needed(q, skb); 10520f3154e6SShannon Nelson if (ndescs < 0) 10530f3154e6SShannon Nelson goto err_out_drop; 10540f3154e6SShannon Nelson 10550f3154e6SShannon Nelson if (unlikely(ionic_maybe_stop_tx(q, ndescs))) 10560f3154e6SShannon Nelson return NETDEV_TX_BUSY; 10570f3154e6SShannon Nelson 10580f3154e6SShannon Nelson if (skb_is_gso(skb)) 10590f3154e6SShannon Nelson err = ionic_tx_tso(q, skb); 10600f3154e6SShannon Nelson else 10610f3154e6SShannon Nelson err = ionic_tx(q, skb); 10620f3154e6SShannon Nelson 10630f3154e6SShannon Nelson if (err) 10640f3154e6SShannon Nelson goto err_out_drop; 10650f3154e6SShannon Nelson 10660f3154e6SShannon Nelson /* Stop the queue if there aren't descriptors for the next packet. 10670f3154e6SShannon Nelson * Since our SG lists per descriptor take care of most of the possible 10680f3154e6SShannon Nelson * fragmentation, we don't need to have many descriptors available. 
10690f3154e6SShannon Nelson */ 10700f3154e6SShannon Nelson ionic_maybe_stop_tx(q, 4); 10710f3154e6SShannon Nelson 10720f3154e6SShannon Nelson return NETDEV_TX_OK; 10730f3154e6SShannon Nelson 10740f3154e6SShannon Nelson err_out_drop: 10750f3154e6SShannon Nelson q->stop++; 10760f3154e6SShannon Nelson q->drop++; 10770f3154e6SShannon Nelson dev_kfree_skb(skb); 10780f3154e6SShannon Nelson return NETDEV_TX_OK; 10790f3154e6SShannon Nelson } 1080