10f3154e6SShannon Nelson // SPDX-License-Identifier: GPL-2.0 20f3154e6SShannon Nelson /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ 30f3154e6SShannon Nelson 40f3154e6SShannon Nelson #include <linux/ip.h> 50f3154e6SShannon Nelson #include <linux/ipv6.h> 60f3154e6SShannon Nelson #include <linux/if_vlan.h> 70f3154e6SShannon Nelson #include <net/ip6_checksum.h> 80f3154e6SShannon Nelson 90f3154e6SShannon Nelson #include "ionic.h" 100f3154e6SShannon Nelson #include "ionic_lif.h" 110f3154e6SShannon Nelson #include "ionic_txrx.h" 120f3154e6SShannon Nelson 135b3f3f2aSShannon Nelson static void ionic_rx_clean(struct ionic_queue *q, 145b3f3f2aSShannon Nelson struct ionic_desc_info *desc_info, 155b3f3f2aSShannon Nelson struct ionic_cq_info *cq_info, 165b3f3f2aSShannon Nelson void *cb_arg); 170f3154e6SShannon Nelson 18b14e4e95SShannon Nelson static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); 19b14e4e95SShannon Nelson 20b14e4e95SShannon Nelson static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); 21b14e4e95SShannon Nelson 220f3154e6SShannon Nelson static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, 230f3154e6SShannon Nelson ionic_desc_cb cb_func, void *cb_arg) 240f3154e6SShannon Nelson { 25f1d2e894SShannon Nelson DEBUG_STATS_TXQ_POST(q, ring_dbell); 260f3154e6SShannon Nelson 270f3154e6SShannon Nelson ionic_q_post(q, ring_dbell, cb_func, cb_arg); 280f3154e6SShannon Nelson } 290f3154e6SShannon Nelson 300f3154e6SShannon Nelson static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell, 310f3154e6SShannon Nelson ionic_desc_cb cb_func, void *cb_arg) 320f3154e6SShannon Nelson { 330f3154e6SShannon Nelson ionic_q_post(q, ring_dbell, cb_func, cb_arg); 340f3154e6SShannon Nelson 3534dec947SShannon Nelson DEBUG_STATS_RX_BUFF_CNT(q); 360f3154e6SShannon Nelson } 370f3154e6SShannon Nelson 380f3154e6SShannon Nelson static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q) 
390f3154e6SShannon Nelson { 400f3154e6SShannon Nelson return netdev_get_tx_queue(q->lif->netdev, q->index); 410f3154e6SShannon Nelson } 420f3154e6SShannon Nelson 4308f2e4b2SShannon Nelson static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q, 4408f2e4b2SShannon Nelson unsigned int len, bool frags) 450f3154e6SShannon Nelson { 4608f2e4b2SShannon Nelson struct ionic_lif *lif = q->lif; 4708f2e4b2SShannon Nelson struct ionic_rx_stats *stats; 4808f2e4b2SShannon Nelson struct net_device *netdev; 4908f2e4b2SShannon Nelson struct sk_buff *skb; 500f3154e6SShannon Nelson 5108f2e4b2SShannon Nelson netdev = lif->netdev; 5234dec947SShannon Nelson stats = &q->lif->rxqstats[q->index]; 530f3154e6SShannon Nelson 5408f2e4b2SShannon Nelson if (frags) 5508f2e4b2SShannon Nelson skb = napi_get_frags(&q_to_qcq(q)->napi); 5608f2e4b2SShannon Nelson else 5708f2e4b2SShannon Nelson skb = netdev_alloc_skb_ip_align(netdev, len); 5808f2e4b2SShannon Nelson 5908f2e4b2SShannon Nelson if (unlikely(!skb)) { 6008f2e4b2SShannon Nelson net_warn_ratelimited("%s: SKB alloc failed on %s!\n", 6108f2e4b2SShannon Nelson netdev->name, q->name); 6208f2e4b2SShannon Nelson stats->alloc_err++; 6308f2e4b2SShannon Nelson return NULL; 640f3154e6SShannon Nelson } 650f3154e6SShannon Nelson 6608f2e4b2SShannon Nelson return skb; 6708f2e4b2SShannon Nelson } 6808f2e4b2SShannon Nelson 69*2b5720f2SShannon Nelson static int ionic_rx_page_alloc(struct ionic_queue *q, 70*2b5720f2SShannon Nelson struct ionic_page_info *page_info) 71*2b5720f2SShannon Nelson { 72*2b5720f2SShannon Nelson struct ionic_lif *lif = q->lif; 73*2b5720f2SShannon Nelson struct ionic_rx_stats *stats; 74*2b5720f2SShannon Nelson struct net_device *netdev; 75*2b5720f2SShannon Nelson struct device *dev; 76*2b5720f2SShannon Nelson 77*2b5720f2SShannon Nelson netdev = lif->netdev; 78*2b5720f2SShannon Nelson dev = lif->ionic->dev; 79*2b5720f2SShannon Nelson stats = q_to_rx_stats(q); 80*2b5720f2SShannon Nelson 81*2b5720f2SShannon Nelson if 
(unlikely(!page_info)) { 82*2b5720f2SShannon Nelson net_err_ratelimited("%s: %s invalid page_info in alloc\n", 83*2b5720f2SShannon Nelson netdev->name, q->name); 84*2b5720f2SShannon Nelson return -EINVAL; 85*2b5720f2SShannon Nelson } 86*2b5720f2SShannon Nelson 87*2b5720f2SShannon Nelson page_info->page = dev_alloc_page(); 88*2b5720f2SShannon Nelson if (unlikely(!page_info->page)) { 89*2b5720f2SShannon Nelson net_err_ratelimited("%s: %s page alloc failed\n", 90*2b5720f2SShannon Nelson netdev->name, q->name); 91*2b5720f2SShannon Nelson stats->alloc_err++; 92*2b5720f2SShannon Nelson return -ENOMEM; 93*2b5720f2SShannon Nelson } 94*2b5720f2SShannon Nelson 95*2b5720f2SShannon Nelson page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE, 96*2b5720f2SShannon Nelson DMA_FROM_DEVICE); 97*2b5720f2SShannon Nelson if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) { 98*2b5720f2SShannon Nelson put_page(page_info->page); 99*2b5720f2SShannon Nelson page_info->dma_addr = 0; 100*2b5720f2SShannon Nelson page_info->page = NULL; 101*2b5720f2SShannon Nelson net_err_ratelimited("%s: %s dma map failed\n", 102*2b5720f2SShannon Nelson netdev->name, q->name); 103*2b5720f2SShannon Nelson stats->dma_map_err++; 104*2b5720f2SShannon Nelson return -EIO; 105*2b5720f2SShannon Nelson } 106*2b5720f2SShannon Nelson 107*2b5720f2SShannon Nelson return 0; 108*2b5720f2SShannon Nelson } 109*2b5720f2SShannon Nelson 110*2b5720f2SShannon Nelson static void ionic_rx_page_free(struct ionic_queue *q, 111*2b5720f2SShannon Nelson struct ionic_page_info *page_info) 112*2b5720f2SShannon Nelson { 113*2b5720f2SShannon Nelson struct ionic_lif *lif = q->lif; 114*2b5720f2SShannon Nelson struct net_device *netdev; 115*2b5720f2SShannon Nelson struct device *dev; 116*2b5720f2SShannon Nelson 117*2b5720f2SShannon Nelson netdev = lif->netdev; 118*2b5720f2SShannon Nelson dev = lif->ionic->dev; 119*2b5720f2SShannon Nelson 120*2b5720f2SShannon Nelson if (unlikely(!page_info)) { 121*2b5720f2SShannon Nelson 
net_err_ratelimited("%s: %s invalid page_info in free\n", 122*2b5720f2SShannon Nelson netdev->name, q->name); 123*2b5720f2SShannon Nelson return; 124*2b5720f2SShannon Nelson } 125*2b5720f2SShannon Nelson 126*2b5720f2SShannon Nelson if (unlikely(!page_info->page)) { 127*2b5720f2SShannon Nelson net_err_ratelimited("%s: %s invalid page in free\n", 128*2b5720f2SShannon Nelson netdev->name, q->name); 129*2b5720f2SShannon Nelson return; 130*2b5720f2SShannon Nelson } 131*2b5720f2SShannon Nelson 132*2b5720f2SShannon Nelson dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); 133*2b5720f2SShannon Nelson 134*2b5720f2SShannon Nelson put_page(page_info->page); 135*2b5720f2SShannon Nelson page_info->dma_addr = 0; 136*2b5720f2SShannon Nelson page_info->page = NULL; 137*2b5720f2SShannon Nelson } 138*2b5720f2SShannon Nelson 13908f2e4b2SShannon Nelson static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, 14008f2e4b2SShannon Nelson struct ionic_desc_info *desc_info, 14108f2e4b2SShannon Nelson struct ionic_cq_info *cq_info) 1420f3154e6SShannon Nelson { 1430f3154e6SShannon Nelson struct ionic_rxq_comp *comp = cq_info->cq_desc; 1440f3154e6SShannon Nelson struct device *dev = q->lif->ionic->dev; 14508f2e4b2SShannon Nelson struct ionic_page_info *page_info; 14608f2e4b2SShannon Nelson struct sk_buff *skb; 14708f2e4b2SShannon Nelson unsigned int i; 14808f2e4b2SShannon Nelson u16 frag_len; 14908f2e4b2SShannon Nelson u16 len; 1500f3154e6SShannon Nelson 15108f2e4b2SShannon Nelson page_info = &desc_info->pages[0]; 15208f2e4b2SShannon Nelson len = le16_to_cpu(comp->len); 15308f2e4b2SShannon Nelson 15408f2e4b2SShannon Nelson prefetch(page_address(page_info->page) + NET_IP_ALIGN); 15508f2e4b2SShannon Nelson 15608f2e4b2SShannon Nelson skb = ionic_rx_skb_alloc(q, len, true); 15708f2e4b2SShannon Nelson if (unlikely(!skb)) 15808f2e4b2SShannon Nelson return NULL; 15908f2e4b2SShannon Nelson 16008f2e4b2SShannon Nelson i = comp->num_sg_elems + 1; 16108f2e4b2SShannon Nelson do { 
16208f2e4b2SShannon Nelson if (unlikely(!page_info->page)) { 16308f2e4b2SShannon Nelson struct napi_struct *napi = &q_to_qcq(q)->napi; 16408f2e4b2SShannon Nelson 16508f2e4b2SShannon Nelson napi->skb = NULL; 16608f2e4b2SShannon Nelson dev_kfree_skb(skb); 16708f2e4b2SShannon Nelson return NULL; 1680f3154e6SShannon Nelson } 1690f3154e6SShannon Nelson 17008f2e4b2SShannon Nelson frag_len = min(len, (u16)PAGE_SIZE); 17108f2e4b2SShannon Nelson len -= frag_len; 17208f2e4b2SShannon Nelson 17308f2e4b2SShannon Nelson dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr), 17408f2e4b2SShannon Nelson PAGE_SIZE, DMA_FROM_DEVICE); 17508f2e4b2SShannon Nelson skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 17608f2e4b2SShannon Nelson page_info->page, 0, frag_len, PAGE_SIZE); 17708f2e4b2SShannon Nelson page_info->page = NULL; 17808f2e4b2SShannon Nelson page_info++; 17908f2e4b2SShannon Nelson i--; 18008f2e4b2SShannon Nelson } while (i > 0); 18108f2e4b2SShannon Nelson 18208f2e4b2SShannon Nelson return skb; 1830f3154e6SShannon Nelson } 1840f3154e6SShannon Nelson 18508f2e4b2SShannon Nelson static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q, 18608f2e4b2SShannon Nelson struct ionic_desc_info *desc_info, 18708f2e4b2SShannon Nelson struct ionic_cq_info *cq_info) 18808f2e4b2SShannon Nelson { 18908f2e4b2SShannon Nelson struct ionic_rxq_comp *comp = cq_info->cq_desc; 19008f2e4b2SShannon Nelson struct device *dev = q->lif->ionic->dev; 19108f2e4b2SShannon Nelson struct ionic_page_info *page_info; 19208f2e4b2SShannon Nelson struct sk_buff *skb; 19308f2e4b2SShannon Nelson u16 len; 1940f3154e6SShannon Nelson 19508f2e4b2SShannon Nelson page_info = &desc_info->pages[0]; 19608f2e4b2SShannon Nelson len = le16_to_cpu(comp->len); 1970f3154e6SShannon Nelson 19808f2e4b2SShannon Nelson skb = ionic_rx_skb_alloc(q, len, false); 19908f2e4b2SShannon Nelson if (unlikely(!skb)) 20008f2e4b2SShannon Nelson return NULL; 2010f3154e6SShannon Nelson 20208f2e4b2SShannon Nelson if 
(unlikely(!page_info->page)) { 20308f2e4b2SShannon Nelson dev_kfree_skb(skb); 20408f2e4b2SShannon Nelson return NULL; 20508f2e4b2SShannon Nelson } 20608f2e4b2SShannon Nelson 20708f2e4b2SShannon Nelson dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr), 20808f2e4b2SShannon Nelson len, DMA_FROM_DEVICE); 20908f2e4b2SShannon Nelson skb_copy_to_linear_data(skb, page_address(page_info->page), len); 21008f2e4b2SShannon Nelson dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr), 21108f2e4b2SShannon Nelson len, DMA_FROM_DEVICE); 21208f2e4b2SShannon Nelson 21308f2e4b2SShannon Nelson skb_put(skb, len); 21408f2e4b2SShannon Nelson skb->protocol = eth_type_trans(skb, q->lif->netdev); 21508f2e4b2SShannon Nelson 21608f2e4b2SShannon Nelson return skb; 2170f3154e6SShannon Nelson } 2180f3154e6SShannon Nelson 2195b3f3f2aSShannon Nelson static void ionic_rx_clean(struct ionic_queue *q, 2205b3f3f2aSShannon Nelson struct ionic_desc_info *desc_info, 2215b3f3f2aSShannon Nelson struct ionic_cq_info *cq_info, 2225b3f3f2aSShannon Nelson void *cb_arg) 2230f3154e6SShannon Nelson { 2240f3154e6SShannon Nelson struct ionic_rxq_comp *comp = cq_info->cq_desc; 2250f3154e6SShannon Nelson struct ionic_qcq *qcq = q_to_qcq(q); 2260f3154e6SShannon Nelson struct ionic_rx_stats *stats; 2270f3154e6SShannon Nelson struct net_device *netdev; 22808f2e4b2SShannon Nelson struct sk_buff *skb; 2290f3154e6SShannon Nelson 2300f3154e6SShannon Nelson stats = q_to_rx_stats(q); 2310f3154e6SShannon Nelson netdev = q->lif->netdev; 2320f3154e6SShannon Nelson 23324cfa8c7SShannon Nelson if (comp->status) { 23424cfa8c7SShannon Nelson stats->dropped++; 2350f3154e6SShannon Nelson return; 23624cfa8c7SShannon Nelson } 2370f3154e6SShannon Nelson 2380f3154e6SShannon Nelson stats->pkts++; 2390f3154e6SShannon Nelson stats->bytes += le16_to_cpu(comp->len); 2400f3154e6SShannon Nelson 24108f2e4b2SShannon Nelson if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak) 24208f2e4b2SShannon Nelson skb = 
ionic_rx_copybreak(q, desc_info, cq_info); 24308f2e4b2SShannon Nelson else 24408f2e4b2SShannon Nelson skb = ionic_rx_frags(q, desc_info, cq_info); 2450f3154e6SShannon Nelson 24624cfa8c7SShannon Nelson if (unlikely(!skb)) { 24724cfa8c7SShannon Nelson stats->dropped++; 24808f2e4b2SShannon Nelson return; 24924cfa8c7SShannon Nelson } 2500f3154e6SShannon Nelson 2510f3154e6SShannon Nelson skb_record_rx_queue(skb, q->index); 2520f3154e6SShannon Nelson 25308f2e4b2SShannon Nelson if (likely(netdev->features & NETIF_F_RXHASH)) { 2540f3154e6SShannon Nelson switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) { 2550f3154e6SShannon Nelson case IONIC_PKT_TYPE_IPV4: 2560f3154e6SShannon Nelson case IONIC_PKT_TYPE_IPV6: 2570f3154e6SShannon Nelson skb_set_hash(skb, le32_to_cpu(comp->rss_hash), 2580f3154e6SShannon Nelson PKT_HASH_TYPE_L3); 2590f3154e6SShannon Nelson break; 2600f3154e6SShannon Nelson case IONIC_PKT_TYPE_IPV4_TCP: 2610f3154e6SShannon Nelson case IONIC_PKT_TYPE_IPV6_TCP: 2620f3154e6SShannon Nelson case IONIC_PKT_TYPE_IPV4_UDP: 2630f3154e6SShannon Nelson case IONIC_PKT_TYPE_IPV6_UDP: 2640f3154e6SShannon Nelson skb_set_hash(skb, le32_to_cpu(comp->rss_hash), 2650f3154e6SShannon Nelson PKT_HASH_TYPE_L4); 2660f3154e6SShannon Nelson break; 2670f3154e6SShannon Nelson } 2680f3154e6SShannon Nelson } 2690f3154e6SShannon Nelson 27008f2e4b2SShannon Nelson if (likely(netdev->features & NETIF_F_RXCSUM)) { 2710f3154e6SShannon Nelson if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) { 2720f3154e6SShannon Nelson skb->ip_summed = CHECKSUM_COMPLETE; 273d701ec32SShannon Nelson skb->csum = (__force __wsum)le16_to_cpu(comp->csum); 2740f3154e6SShannon Nelson stats->csum_complete++; 2750f3154e6SShannon Nelson } 2760f3154e6SShannon Nelson } else { 2770f3154e6SShannon Nelson stats->csum_none++; 2780f3154e6SShannon Nelson } 2790f3154e6SShannon Nelson 28008f2e4b2SShannon Nelson if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) || 2810f3154e6SShannon Nelson (comp->csum_flags 
& IONIC_RXQ_COMP_CSUM_F_UDP_BAD) || 28208f2e4b2SShannon Nelson (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD))) 2830f3154e6SShannon Nelson stats->csum_error++; 2840f3154e6SShannon Nelson 285f64e0c56SShannon Nelson if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 286f64e0c56SShannon Nelson (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) { 2870f3154e6SShannon Nelson __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 2880f3154e6SShannon Nelson le16_to_cpu(comp->vlan_tci)); 289f64e0c56SShannon Nelson stats->vlan_stripped++; 2900f3154e6SShannon Nelson } 2910f3154e6SShannon Nelson 29208f2e4b2SShannon Nelson if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak) 2930f3154e6SShannon Nelson napi_gro_receive(&qcq->napi, skb); 29408f2e4b2SShannon Nelson else 29508f2e4b2SShannon Nelson napi_gro_frags(&qcq->napi); 2960f3154e6SShannon Nelson } 2970f3154e6SShannon Nelson 2980f3154e6SShannon Nelson static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) 2990f3154e6SShannon Nelson { 3000f3154e6SShannon Nelson struct ionic_rxq_comp *comp = cq_info->cq_desc; 3010f3154e6SShannon Nelson struct ionic_queue *q = cq->bound_q; 3020f3154e6SShannon Nelson struct ionic_desc_info *desc_info; 3030f3154e6SShannon Nelson 3040f3154e6SShannon Nelson if (!color_match(comp->pkt_type_color, cq->done_color)) 3050f3154e6SShannon Nelson return false; 3060f3154e6SShannon Nelson 3070f3154e6SShannon Nelson /* check for empty queue */ 308f1d2e894SShannon Nelson if (q->tail_idx == q->head_idx) 3090f3154e6SShannon Nelson return false; 3100f3154e6SShannon Nelson 311339dcf7fSShannon Nelson if (q->tail_idx != le16_to_cpu(comp->comp_index)) 3120f3154e6SShannon Nelson return false; 3130f3154e6SShannon Nelson 314339dcf7fSShannon Nelson desc_info = &q->info[q->tail_idx]; 315f1d2e894SShannon Nelson q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); 3160f3154e6SShannon Nelson 3170f3154e6SShannon Nelson /* clean the related q entry, only one per qc completion */ 3180f3154e6SShannon 
Nelson ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg); 3190f3154e6SShannon Nelson 3200f3154e6SShannon Nelson desc_info->cb = NULL; 3210f3154e6SShannon Nelson desc_info->cb_arg = NULL; 3220f3154e6SShannon Nelson 3230f3154e6SShannon Nelson return true; 3240f3154e6SShannon Nelson } 3250f3154e6SShannon Nelson 3260f3154e6SShannon Nelson void ionic_rx_fill(struct ionic_queue *q) 3270f3154e6SShannon Nelson { 3280f3154e6SShannon Nelson struct net_device *netdev = q->lif->netdev; 32908f2e4b2SShannon Nelson struct ionic_desc_info *desc_info; 33008f2e4b2SShannon Nelson struct ionic_page_info *page_info; 33108f2e4b2SShannon Nelson struct ionic_rxq_sg_desc *sg_desc; 33208f2e4b2SShannon Nelson struct ionic_rxq_sg_elem *sg_elem; 3330f3154e6SShannon Nelson struct ionic_rxq_desc *desc; 334c37d6e3fSShannon Nelson unsigned int remain_len; 335c37d6e3fSShannon Nelson unsigned int seg_len; 33608f2e4b2SShannon Nelson unsigned int nfrags; 33708f2e4b2SShannon Nelson unsigned int i, j; 3380f3154e6SShannon Nelson unsigned int len; 3390f3154e6SShannon Nelson 34083469893SShannon Nelson len = netdev->mtu + ETH_HLEN + VLAN_HLEN; 34108f2e4b2SShannon Nelson nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE; 3420f3154e6SShannon Nelson 3430f3154e6SShannon Nelson for (i = ionic_q_space_avail(q); i; i--) { 344c37d6e3fSShannon Nelson remain_len = len; 345f1d2e894SShannon Nelson desc_info = &q->info[q->head_idx]; 34608f2e4b2SShannon Nelson desc = desc_info->desc; 34708f2e4b2SShannon Nelson sg_desc = desc_info->sg_desc; 34808f2e4b2SShannon Nelson page_info = &desc_info->pages[0]; 3490f3154e6SShannon Nelson 35008f2e4b2SShannon Nelson if (page_info->page) { /* recycle the buffer */ 351155f15adSShannon Nelson ionic_rxq_post(q, false, ionic_rx_clean, NULL); 35208f2e4b2SShannon Nelson continue; 35308f2e4b2SShannon Nelson } 35408f2e4b2SShannon Nelson 35508f2e4b2SShannon Nelson /* fill main descriptor - pages[0] */ 35608f2e4b2SShannon Nelson desc->opcode = (nfrags > 1) ? 
IONIC_RXQ_DESC_OPCODE_SG : 35708f2e4b2SShannon Nelson IONIC_RXQ_DESC_OPCODE_SIMPLE; 35808f2e4b2SShannon Nelson desc_info->npages = nfrags; 35963cd9083SShannon Nelson if (unlikely(ionic_rx_page_alloc(q, page_info))) { 36008f2e4b2SShannon Nelson desc->addr = 0; 36108f2e4b2SShannon Nelson desc->len = 0; 36208f2e4b2SShannon Nelson return; 36308f2e4b2SShannon Nelson } 36408f2e4b2SShannon Nelson desc->addr = cpu_to_le64(page_info->dma_addr); 365c37d6e3fSShannon Nelson seg_len = min_t(unsigned int, PAGE_SIZE, len); 366c37d6e3fSShannon Nelson desc->len = cpu_to_le16(seg_len); 367c37d6e3fSShannon Nelson remain_len -= seg_len; 36808f2e4b2SShannon Nelson page_info++; 36908f2e4b2SShannon Nelson 37008f2e4b2SShannon Nelson /* fill sg descriptors - pages[1..n] */ 37108f2e4b2SShannon Nelson for (j = 0; j < nfrags - 1; j++) { 37208f2e4b2SShannon Nelson if (page_info->page) /* recycle the sg buffer */ 37308f2e4b2SShannon Nelson continue; 37408f2e4b2SShannon Nelson 37508f2e4b2SShannon Nelson sg_elem = &sg_desc->elems[j]; 37663cd9083SShannon Nelson if (unlikely(ionic_rx_page_alloc(q, page_info))) { 37708f2e4b2SShannon Nelson sg_elem->addr = 0; 37808f2e4b2SShannon Nelson sg_elem->len = 0; 37908f2e4b2SShannon Nelson return; 38008f2e4b2SShannon Nelson } 38108f2e4b2SShannon Nelson sg_elem->addr = cpu_to_le64(page_info->dma_addr); 382c37d6e3fSShannon Nelson seg_len = min_t(unsigned int, PAGE_SIZE, remain_len); 383c37d6e3fSShannon Nelson sg_elem->len = cpu_to_le16(seg_len); 384c37d6e3fSShannon Nelson remain_len -= seg_len; 38508f2e4b2SShannon Nelson page_info++; 38608f2e4b2SShannon Nelson } 3870f3154e6SShannon Nelson 388155f15adSShannon Nelson ionic_rxq_post(q, false, ionic_rx_clean, NULL); 3890f3154e6SShannon Nelson } 390155f15adSShannon Nelson 391155f15adSShannon Nelson ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, 392f1d2e894SShannon Nelson q->dbval | q->head_idx); 3930f3154e6SShannon Nelson } 3940f3154e6SShannon Nelson 3950f3154e6SShannon Nelson void ionic_rx_empty(struct 
ionic_queue *q) 3960f3154e6SShannon Nelson { 397f1d2e894SShannon Nelson struct ionic_desc_info *desc_info; 3980c32a28eSShannon Nelson struct ionic_page_info *page_info; 3990c32a28eSShannon Nelson unsigned int i, j; 4000f3154e6SShannon Nelson 4010c32a28eSShannon Nelson for (i = 0; i < q->num_descs; i++) { 4020c32a28eSShannon Nelson desc_info = &q->info[i]; 4030c32a28eSShannon Nelson for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) { 4040c32a28eSShannon Nelson page_info = &desc_info->pages[j]; 4050c32a28eSShannon Nelson if (page_info->page) 4060c32a28eSShannon Nelson ionic_rx_page_free(q, page_info); 4070c32a28eSShannon Nelson } 40808f2e4b2SShannon Nelson 4090c32a28eSShannon Nelson desc_info->npages = 0; 4100c32a28eSShannon Nelson desc_info->cb = NULL; 411f1d2e894SShannon Nelson desc_info->cb_arg = NULL; 4120f3154e6SShannon Nelson } 4130f3154e6SShannon Nelson } 4140f3154e6SShannon Nelson 41504a83459SShannon Nelson static void ionic_dim_update(struct ionic_qcq *qcq) 41604a83459SShannon Nelson { 41704a83459SShannon Nelson struct dim_sample dim_sample; 41804a83459SShannon Nelson struct ionic_lif *lif; 41904a83459SShannon Nelson unsigned int qi; 42004a83459SShannon Nelson 42104a83459SShannon Nelson if (!qcq->intr.dim_coal_hw) 42204a83459SShannon Nelson return; 42304a83459SShannon Nelson 42404a83459SShannon Nelson lif = qcq->q.lif; 42504a83459SShannon Nelson qi = qcq->cq.bound_q->index; 42604a83459SShannon Nelson 42704a83459SShannon Nelson ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 42804a83459SShannon Nelson lif->rxqcqs[qi]->intr.index, 42904a83459SShannon Nelson qcq->intr.dim_coal_hw); 43004a83459SShannon Nelson 43104a83459SShannon Nelson dim_update_sample(qcq->cq.bound_intr->rearm_count, 43204a83459SShannon Nelson lif->txqstats[qi].pkts, 43304a83459SShannon Nelson lif->txqstats[qi].bytes, 43404a83459SShannon Nelson &dim_sample); 43504a83459SShannon Nelson 43604a83459SShannon Nelson net_dim(&qcq->dim, dim_sample); 43704a83459SShannon Nelson } 43804a83459SShannon 
Nelson 439fe8c30b5SShannon Nelson int ionic_tx_napi(struct napi_struct *napi, int budget) 440fe8c30b5SShannon Nelson { 441fe8c30b5SShannon Nelson struct ionic_qcq *qcq = napi_to_qcq(napi); 442fe8c30b5SShannon Nelson struct ionic_cq *cq = napi_to_cq(napi); 443fe8c30b5SShannon Nelson struct ionic_dev *idev; 444fe8c30b5SShannon Nelson struct ionic_lif *lif; 445fe8c30b5SShannon Nelson u32 work_done = 0; 446fe8c30b5SShannon Nelson u32 flags = 0; 447fe8c30b5SShannon Nelson 448fe8c30b5SShannon Nelson lif = cq->bound_q->lif; 449fe8c30b5SShannon Nelson idev = &lif->ionic->idev; 450fe8c30b5SShannon Nelson 451fe8c30b5SShannon Nelson work_done = ionic_cq_service(cq, budget, 452fe8c30b5SShannon Nelson ionic_tx_service, NULL, NULL); 453fe8c30b5SShannon Nelson 454fe8c30b5SShannon Nelson if (work_done < budget && napi_complete_done(napi, work_done)) { 45504a83459SShannon Nelson ionic_dim_update(qcq); 456fe8c30b5SShannon Nelson flags |= IONIC_INTR_CRED_UNMASK; 45704a83459SShannon Nelson cq->bound_intr->rearm_count++; 458fe8c30b5SShannon Nelson } 459fe8c30b5SShannon Nelson 460fe8c30b5SShannon Nelson if (work_done || flags) { 461fe8c30b5SShannon Nelson flags |= IONIC_INTR_CRED_RESET_COALESCE; 462fe8c30b5SShannon Nelson ionic_intr_credits(idev->intr_ctrl, 463fe8c30b5SShannon Nelson cq->bound_intr->index, 464fe8c30b5SShannon Nelson work_done, flags); 465fe8c30b5SShannon Nelson } 466fe8c30b5SShannon Nelson 467fe8c30b5SShannon Nelson DEBUG_STATS_NAPI_POLL(qcq, work_done); 468fe8c30b5SShannon Nelson 469fe8c30b5SShannon Nelson return work_done; 470fe8c30b5SShannon Nelson } 471fe8c30b5SShannon Nelson 4720f3154e6SShannon Nelson int ionic_rx_napi(struct napi_struct *napi, int budget) 4730f3154e6SShannon Nelson { 4740f3154e6SShannon Nelson struct ionic_qcq *qcq = napi_to_qcq(napi); 475fe8c30b5SShannon Nelson struct ionic_cq *cq = napi_to_cq(napi); 476fe8c30b5SShannon Nelson struct ionic_dev *idev; 477fe8c30b5SShannon Nelson struct ionic_lif *lif; 478a8205ab6SShannon Nelson u16 
rx_fill_threshold; 479fe8c30b5SShannon Nelson u32 work_done = 0; 480fe8c30b5SShannon Nelson u32 flags = 0; 481fe8c30b5SShannon Nelson 482fe8c30b5SShannon Nelson lif = cq->bound_q->lif; 483fe8c30b5SShannon Nelson idev = &lif->ionic->idev; 484fe8c30b5SShannon Nelson 485fe8c30b5SShannon Nelson work_done = ionic_cq_service(cq, budget, 486fe8c30b5SShannon Nelson ionic_rx_service, NULL, NULL); 487fe8c30b5SShannon Nelson 488a8205ab6SShannon Nelson rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD, 489a8205ab6SShannon Nelson cq->num_descs / IONIC_RX_FILL_DIV); 490a8205ab6SShannon Nelson if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold) 491fe8c30b5SShannon Nelson ionic_rx_fill(cq->bound_q); 492fe8c30b5SShannon Nelson 493fe8c30b5SShannon Nelson if (work_done < budget && napi_complete_done(napi, work_done)) { 49404a83459SShannon Nelson ionic_dim_update(qcq); 495fe8c30b5SShannon Nelson flags |= IONIC_INTR_CRED_UNMASK; 49604a83459SShannon Nelson cq->bound_intr->rearm_count++; 497fe8c30b5SShannon Nelson } 498fe8c30b5SShannon Nelson 499fe8c30b5SShannon Nelson if (work_done || flags) { 500fe8c30b5SShannon Nelson flags |= IONIC_INTR_CRED_RESET_COALESCE; 501fe8c30b5SShannon Nelson ionic_intr_credits(idev->intr_ctrl, 502fe8c30b5SShannon Nelson cq->bound_intr->index, 503fe8c30b5SShannon Nelson work_done, flags); 504fe8c30b5SShannon Nelson } 505fe8c30b5SShannon Nelson 506fe8c30b5SShannon Nelson DEBUG_STATS_NAPI_POLL(qcq, work_done); 507fe8c30b5SShannon Nelson 508fe8c30b5SShannon Nelson return work_done; 509fe8c30b5SShannon Nelson } 510fe8c30b5SShannon Nelson 511fe8c30b5SShannon Nelson int ionic_txrx_napi(struct napi_struct *napi, int budget) 512fe8c30b5SShannon Nelson { 513fe8c30b5SShannon Nelson struct ionic_qcq *qcq = napi_to_qcq(napi); 5140f3154e6SShannon Nelson struct ionic_cq *rxcq = napi_to_cq(napi); 5150f3154e6SShannon Nelson unsigned int qi = rxcq->bound_q->index; 5160f3154e6SShannon Nelson struct ionic_dev *idev; 5170f3154e6SShannon Nelson struct 
ionic_lif *lif;
	struct ionic_cq *txcq;
	u16 rx_fill_threshold;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi]->cq;

	/* Service Tx completions first so freed descriptors can be
	 * reused before we refill the Rx ring below.
	 */
	tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	/* Refill Rx descriptors only once enough have been consumed */
	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  rxcq->num_descs / IONIC_RX_FILL_DIV);
	if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(rxcq->bound_q);

	/* Budget not exhausted: NAPI is done, re-enable the interrupt */
	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(qcq);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	/* Return credits for both rings to the shared interrupt */
	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);

	return rx_work_done;
}

/* Map a linear buffer for Tx DMA.
 * Returns the bus address, or 0 on mapping failure (also bumps the
 * queue's dma_map_err counter and rate-limited-warns).
 */
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

/* Map a piece of an skb frag page for Tx DMA.
 * Unlike ionic_tx_map_single(), the raw (possibly error) address is
 * returned; the caller must check it with dma_mapping_error().
 */
static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

/* Tear down one completed (or aborted) Tx descriptor: unmap its DMA
 * buffers and, if an skb was attached as cb_arg, complete it - wake the
 * subqueue if it had been stopped, free the skb, and credit BQL.
 * Called with a NULL cb_arg from the TSO abort and queue-empty paths.
 */
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = desc_info->desc;
	struct device *dev = q->lif->ionic->dev;
	u8 opcode, flags, nsge;
	u16 queue_index;
	unsigned int i;
	u64 addr;

	decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
			    &opcode, &flags, &nsge, &addr);

	/* use unmap_single only if either this is not TSO,
	 * or this is first descriptor of a TSO
	 */
	if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
	    flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
		dma_unmap_single(dev, (dma_addr_t)addr,
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, (dma_addr_t)addr,
			       le16_to_cpu(desc->len), DMA_TO_DEVICE);

	/* SG elements always come from skb frag pages */
	for (i = 0; i < nsge; i++, elem++)
		dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
			       le16_to_cpu(elem->len), DMA_TO_DEVICE);

	if (cb_arg) {
		struct sk_buff *skb = cb_arg;
		u32 len = skb->len;

		queue_index = skb_get_queue_mapping(skb);
		if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
						      queue_index))) {
			netif_wake_subqueue(q->lif->netdev, queue_index);
			q->wake++;
		}
		dev_kfree_skb_any(skb);
		stats->clean++;
		netdev_tx_completed_queue(q_to_ndq(q), 1, len);
	}
}

/* Process one Tx completion from the CQ.
 * Returns false when the completion's color doesn't match (nothing new
 * to do), true after cleaning the queue entries it covers.
 */
static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_txq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	u16 index;

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		index = q->tail_idx;
		/* num_descs is a power of two, so this masks the wrap */
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	return true;
}

/* Drain all pending Tx completions on this CQ outside of NAPI,
 * returning any consumed credits to the interrupt.
 */
void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

/* Forcibly clean every outstanding (not yet completed) Tx descriptor,
 * e.g. when tearing down a queue; skbs are freed via ionic_tx_clean().
 */
void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}
}

/* Seed the inner TCP checksum with the pseudo-header sum (length 0)
 * for an encapsulated TSO skb, as required before handing off to the
 * device.  Returns skb_cow_head()'s error if the header can't be made
 * writable.
 */
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

/* Same as above for the non-encapsulated case: preload the outer TCP
 * checksum with the zero-length pseudo-header sum.
 */
static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

/* Fill in and post one TSO descriptor.  SOT/EOT flags mark the first
 * and last descriptor of the TSO train; only the final descriptor
 * (done) carries the skb for completion, timestamps it, and accounts
 * the packet with BQL.  Intermediate descriptors are posted without
 * ringing the doorbell.
 */
static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (done) {
		skb_tx_timestamp(skb);
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, false, ionic_tx_clean, NULL);
	}
}

/* Get the next free Tx descriptor at head_idx and hand back a cursor
 * to its SG element list via *elem.  Note this does not advance
 * head_idx itself; posting does that.
 */
static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
						struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;

	*elem = sg_desc->elems;
	return desc;
}

/* Segment a GSO skb into a chain of TSO descriptors.
 *
 * The linear head is chopped into (hdrlen + mss)-sized pieces first,
 * then each page frag is chopped into mss-sized pieces, carrying any
 * partial segment (frag_left) across frag boundaries as SG elements on
 * the current descriptor.  On a DMA mapping failure, every descriptor
 * posted so far is unwound via ionic_tx_clean() and head_idx is
 * restored, so the ring is left untouched; returns -ENOMEM in that
 * case, 0 on success.
 */
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *rewind_desc_info;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int frag_left = 0;
	unsigned int offset = 0;
	u16 abort = q->head_idx;
	unsigned int len_left;
	dma_addr_t desc_addr;
	unsigned int hdrlen;
	unsigned int nfrags;
	unsigned int seglen;
	u64 total_bytes = 0;
	u64 total_pkts = 0;
	u16 rewind = abort;
	unsigned int left;
	unsigned int len;
	unsigned int mss;
	skb_frag_t *frag;
	bool start, done;
	bool outer_csum;
	dma_addr_t addr;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	nfrags = skb_shinfo(skb)->nr_frags;
	len_left = skb->len - skb_headlen(skb);
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	/* hdrlen = L2..L4 headers replicated into every segment */
	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	/* first segment includes the headers */
	seglen = hdrlen + mss;
	left = skb_headlen(skb);

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop skb->data up into desc segments */

	while (left > 0) {
		len = min(seglen, left);
		frag_left = seglen - len;
		desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
		if (dma_mapping_error(dev, desc_addr))
			goto err_out_abort;
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		/* segment continues into the frags; finish it there */
		if (nfrags > 0 && frag_left > 0)
			continue;
		done = (nfrags == 0 && left == 0);
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss,
				  outer_csum,
				  vlan_tci, has_vlan,
				  start, done);
		total_pkts++;
		total_bytes += start ? len : len + hdrlen;
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop skb frags into desc segments */

	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		offset = 0;
		left = skb_frag_size(frag);
		len_left -= left;
		nfrags--;
		stats->frags++;

		while (left > 0) {
			if (frag_left > 0) {
				/* finish a segment begun on a previous
				 * buffer: attach as an SG element of the
				 * current descriptor
				 */
				len = min(frag_left, left);
				frag_left -= len;
				addr = ionic_tx_map_frag(q, frag, offset, len);
				if (dma_mapping_error(dev, addr))
					goto err_out_abort;
				elem->addr = cpu_to_le64(addr);
				elem->len = cpu_to_le16(len);
				elem++;
				desc_nsge++;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			} else {
				/* start a fresh mss-sized segment on a
				 * new descriptor
				 */
				len = min(mss, left);
				frag_left = mss - len;
				desc_addr = ionic_tx_map_frag(q, frag,
							      offset, len);
				if (dma_mapping_error(dev, desc_addr))
					goto err_out_abort;
				desc_len = len;
				desc_nsge = 0;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			}
		}
	}

	stats->pkts += total_pkts;
	stats->bytes += total_bytes;
	stats->tso++;
	stats->tso_bytes += total_bytes;

	return 0;

err_out_abort:
	/* unwind everything posted above and restore the ring head */
	while (rewind != q->head_idx) {
		rewind_desc_info = &q->info[rewind];
		ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
		rewind = (rewind + 1) & (q->num_descs - 1);
	}
	q->head_idx = abort;

	return -ENOMEM;
}

/* Build the initial Tx descriptor for a CHECKSUM_PARTIAL skb,
 * mapping the linear head and asking the device to insert the L4
 * checksum at csum_start/csum_offset.  Returns -ENOMEM on DMA map
 * failure.
 */
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	/* SCTP uses a CRC32c rather than the internet checksum */
	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

/* Build the initial Tx descriptor when no checksum offload is needed
 * (CSUM_NONE opcode).  Returns -ENOMEM on DMA map failure.
 */
static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	stats->csum_none++;

	return 0;
}

/* Map each skb page frag and fill the descriptor's SG element list.
 * NOTE(review): frags mapped before a failing one are not unmapped
 * here; presumably the caller's error path covers them - verify
 * against ionic_start_xmit's drop path.  Returns -ENOMEM on failure.
 */
static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
	unsigned int len_left = skb->len - skb_headlen(skb);
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	skb_frag_t *frag;
	u16 len;

	for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
		len = skb_frag_size(frag);
		elem->len = cpu_to_le16(len);
		dma_addr = ionic_tx_map_frag(q, frag, 0, len);
		if (dma_mapping_error(dev, dma_addr))
			return -ENOMEM;
		elem->addr = cpu_to_le64(dma_addr);
		len_left -= len;
		stats->frags++;
	}

	return 0;
}

/* Transmit one non-TSO skb: set up the first descriptor (with or
 * without csum offload), attach the frags, account it with BQL, and
 * post it with the skb as the completion cb_arg.
 */
static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb);
	else
		err = ionic_tx_calc_no_csum(q, skb);
	if (err)
		return err;

	/* add frags */
	err = ionic_tx_skb_frags(q, skb);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

/* Estimate how many ring descriptors this skb needs, linearizing the
 * skb if it has more frags than a descriptor's SG list can hold.
 * Returns the descriptor count, or a negative errno from
 * skb_linearize().
 */
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* If TSO, need roundup(skb->len/mss) descs */
	if (skb_is_gso(skb))
		return (skb->len / skb_shinfo(skb)->gso_size) + 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= sg_elems)
		return 1;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	/* Need 1 desc and zero sg elems */
	return 1;
}

/* Stop the subqueue if there isn't room for ndescs descriptors,
 * re-checking once after the stop to close the race with a concurrent
 * ionic_tx_clean() wake.  Returns 1 if the queue remains stopped.
 */
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

/* ndo_start_xmit entry point: pick the Tx queue, make sure there is
 * ring space, and dispatch to the TSO or plain Tx path.  Any skb that
 * cannot be sent is dropped (freed) and NETDEV_TX_OK returned, except
 * for a full ring which returns NETDEV_TX_BUSY for requeue.
 */
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}