// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

/* Post a filled tx descriptor to the device queue; ring_dbell controls
 * whether the doorbell is rung immediately or deferred (e.g. for batching).
 */
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

/* Post a filled rx descriptor to the device queue; same semantics as
 * ionic_txq_post() but kept separate for readability at the call sites.
 */
static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

/* Ring the tx doorbell if there is posted-but-unrung work and the
 * doorbell deadline has passed.
 *
 * Returns false when the queue is empty (no outstanding work, so the
 * caller's deadline timer need not be rearmed), true otherwise.
 *
 * The netdev tx queue lock is taken so we don't race with the xmit path,
 * which also writes q->head_idx and rings this doorbell.
 */
bool ionic_txq_poke_doorbell(struct ionic_queue *q)
{
	unsigned long now, then, dif;
	struct netdev_queue *netdev_txq;
	struct net_device *netdev;

	netdev = q->lif->netdev;
	netdev_txq = netdev_get_tx_queue(netdev, q->index);

	HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());

	if (q->tail_idx == q->head_idx) {
		/* nothing pending - tell the caller to stop poking */
		HARD_TX_UNLOCK(netdev, netdev_txq);
		return false;
	}

	now = READ_ONCE(jiffies);
	then = q->dbell_jiffies;
	dif = now - then;

	/* only ring if we haven't rung within the deadline window */
	if (dif > q->dbell_deadline) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = now;
	}

	HARD_TX_UNLOCK(netdev, netdev_txq);

	return true;
}

/* Ring the rx doorbell if there is posted-but-unrung work and the
 * doorbell deadline has passed.
 *
 * Returns false when the queue is empty, true otherwise.  On each ring
 * the deadline is doubled (capped at IONIC_RX_MAX_DOORBELL_DEADLINE) to
 * back off repeated pokes on a quiet queue.
 */
bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
{
	unsigned long now, then, dif;

	/* no lock, called from rx napi or txrx napi, nothing else can fill */

	if (q->tail_idx == q->head_idx)
		return false;

	now = READ_ONCE(jiffies);
	then = q->dbell_jiffies;
	dif = now - then;

	if (dif > q->dbell_deadline) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = now;

		/* exponential backoff of the deadline, bounded */
		dif = 2 * q->dbell_deadline;
		if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
			dif = IONIC_RX_MAX_DOORBELL_DEADLINE;

		q->dbell_deadline = dif;
	}

	return true;
}

/* Map an ionic queue to its corresponding netdev tx queue */
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}
900f3154e6SShannon Nelson 912b5720f2SShannon Nelson static int ionic_rx_page_alloc(struct ionic_queue *q, 924b0a7539SShannon Nelson struct ionic_buf_info *buf_info) 932b5720f2SShannon Nelson { 9489e572e7SShannon Nelson struct net_device *netdev = q->lif->netdev; 952b5720f2SShannon Nelson struct ionic_rx_stats *stats; 962b5720f2SShannon Nelson struct device *dev; 97e75ccac1SShannon Nelson struct page *page; 982b5720f2SShannon Nelson 99f37bc346SShannon Nelson dev = q->dev; 1002b5720f2SShannon Nelson stats = q_to_rx_stats(q); 1012b5720f2SShannon Nelson 1024b0a7539SShannon Nelson if (unlikely(!buf_info)) { 1034b0a7539SShannon Nelson net_err_ratelimited("%s: %s invalid buf_info in alloc\n", 1042b5720f2SShannon Nelson netdev->name, q->name); 1052b5720f2SShannon Nelson return -EINVAL; 1062b5720f2SShannon Nelson } 1072b5720f2SShannon Nelson 108e75ccac1SShannon Nelson page = alloc_pages(IONIC_PAGE_GFP_MASK, 0); 109e75ccac1SShannon Nelson if (unlikely(!page)) { 1102b5720f2SShannon Nelson net_err_ratelimited("%s: %s page alloc failed\n", 1112b5720f2SShannon Nelson netdev->name, q->name); 1122b5720f2SShannon Nelson stats->alloc_err++; 1132b5720f2SShannon Nelson return -ENOMEM; 1142b5720f2SShannon Nelson } 1152b5720f2SShannon Nelson 116e75ccac1SShannon Nelson buf_info->dma_addr = dma_map_page(dev, page, 0, 1174b0a7539SShannon Nelson IONIC_PAGE_SIZE, DMA_FROM_DEVICE); 1184b0a7539SShannon Nelson if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) { 119e75ccac1SShannon Nelson __free_pages(page, 0); 1202b5720f2SShannon Nelson net_err_ratelimited("%s: %s dma map failed\n", 1212b5720f2SShannon Nelson netdev->name, q->name); 1222b5720f2SShannon Nelson stats->dma_map_err++; 1232b5720f2SShannon Nelson return -EIO; 1242b5720f2SShannon Nelson } 1252b5720f2SShannon Nelson 126e75ccac1SShannon Nelson buf_info->page = page; 127e75ccac1SShannon Nelson buf_info->page_offset = 0; 128e75ccac1SShannon Nelson 1292b5720f2SShannon Nelson return 0; 1302b5720f2SShannon Nelson } 
1312b5720f2SShannon Nelson 1322b5720f2SShannon Nelson static void ionic_rx_page_free(struct ionic_queue *q, 1334b0a7539SShannon Nelson struct ionic_buf_info *buf_info) 1342b5720f2SShannon Nelson { 1354b0a7539SShannon Nelson struct net_device *netdev = q->lif->netdev; 136f37bc346SShannon Nelson struct device *dev = q->dev; 1372b5720f2SShannon Nelson 1384b0a7539SShannon Nelson if (unlikely(!buf_info)) { 1394b0a7539SShannon Nelson net_err_ratelimited("%s: %s invalid buf_info in free\n", 1402b5720f2SShannon Nelson netdev->name, q->name); 1412b5720f2SShannon Nelson return; 1422b5720f2SShannon Nelson } 1432b5720f2SShannon Nelson 1444b0a7539SShannon Nelson if (!buf_info->page) 1452b5720f2SShannon Nelson return; 1464b0a7539SShannon Nelson 1474b0a7539SShannon Nelson dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); 1484b0a7539SShannon Nelson __free_pages(buf_info->page, 0); 149e75ccac1SShannon Nelson buf_info->page = NULL; 1502b5720f2SShannon Nelson } 1512b5720f2SShannon Nelson 1524b0a7539SShannon Nelson static bool ionic_rx_buf_recycle(struct ionic_queue *q, 1534b0a7539SShannon Nelson struct ionic_buf_info *buf_info, u32 used) 1544b0a7539SShannon Nelson { 1554b0a7539SShannon Nelson u32 size; 1562b5720f2SShannon Nelson 1574b0a7539SShannon Nelson /* don't re-use pages allocated in low-mem condition */ 1584b0a7539SShannon Nelson if (page_is_pfmemalloc(buf_info->page)) 1594b0a7539SShannon Nelson return false; 1604b0a7539SShannon Nelson 1614b0a7539SShannon Nelson /* don't re-use buffers from non-local numa nodes */ 1624b0a7539SShannon Nelson if (page_to_nid(buf_info->page) != numa_mem_id()) 1634b0a7539SShannon Nelson return false; 1644b0a7539SShannon Nelson 1654b0a7539SShannon Nelson size = ALIGN(used, IONIC_PAGE_SPLIT_SZ); 1664b0a7539SShannon Nelson buf_info->page_offset += size; 1674b0a7539SShannon Nelson if (buf_info->page_offset >= IONIC_PAGE_SIZE) 1684b0a7539SShannon Nelson return false; 1694b0a7539SShannon Nelson 1704b0a7539SShannon Nelson 
get_page(buf_info->page); 1714b0a7539SShannon Nelson 1724b0a7539SShannon Nelson return true; 1732b5720f2SShannon Nelson } 1742b5720f2SShannon Nelson 17508f2e4b2SShannon Nelson static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, 17608f2e4b2SShannon Nelson struct ionic_desc_info *desc_info, 177a25edab9SShannon Nelson struct ionic_rxq_comp *comp) 1780f3154e6SShannon Nelson { 17989e572e7SShannon Nelson struct net_device *netdev = q->lif->netdev; 1804b0a7539SShannon Nelson struct ionic_buf_info *buf_info; 18189e572e7SShannon Nelson struct ionic_rx_stats *stats; 182f37bc346SShannon Nelson struct device *dev = q->dev; 18308f2e4b2SShannon Nelson struct sk_buff *skb; 18408f2e4b2SShannon Nelson unsigned int i; 18508f2e4b2SShannon Nelson u16 frag_len; 18608f2e4b2SShannon Nelson u16 len; 1870f3154e6SShannon Nelson 18889e572e7SShannon Nelson stats = q_to_rx_stats(q); 18989e572e7SShannon Nelson 1904b0a7539SShannon Nelson buf_info = &desc_info->bufs[0]; 19108f2e4b2SShannon Nelson len = le16_to_cpu(comp->len); 19208f2e4b2SShannon Nelson 193e75ccac1SShannon Nelson prefetchw(buf_info->page); 19408f2e4b2SShannon Nelson 19589e572e7SShannon Nelson skb = napi_get_frags(&q_to_qcq(q)->napi); 19689e572e7SShannon Nelson if (unlikely(!skb)) { 19789e572e7SShannon Nelson net_warn_ratelimited("%s: SKB alloc failed on %s!\n", 19889e572e7SShannon Nelson netdev->name, q->name); 19989e572e7SShannon Nelson stats->alloc_err++; 20008f2e4b2SShannon Nelson return NULL; 20189e572e7SShannon Nelson } 20208f2e4b2SShannon Nelson 20308f2e4b2SShannon Nelson i = comp->num_sg_elems + 1; 20408f2e4b2SShannon Nelson do { 2054b0a7539SShannon Nelson if (unlikely(!buf_info->page)) { 20608f2e4b2SShannon Nelson dev_kfree_skb(skb); 20708f2e4b2SShannon Nelson return NULL; 2080f3154e6SShannon Nelson } 2090f3154e6SShannon Nelson 2104b0a7539SShannon Nelson frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset); 21108f2e4b2SShannon Nelson len -= frag_len; 21208f2e4b2SShannon Nelson 2134b0a7539SShannon 
Nelson dma_sync_single_for_cpu(dev, 2144b0a7539SShannon Nelson buf_info->dma_addr + buf_info->page_offset, 2154b0a7539SShannon Nelson frag_len, DMA_FROM_DEVICE); 2164b0a7539SShannon Nelson 21708f2e4b2SShannon Nelson skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 2184b0a7539SShannon Nelson buf_info->page, buf_info->page_offset, frag_len, 2194b0a7539SShannon Nelson IONIC_PAGE_SIZE); 2204b0a7539SShannon Nelson 2214b0a7539SShannon Nelson if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) { 2224b0a7539SShannon Nelson dma_unmap_page(dev, buf_info->dma_addr, 2234b0a7539SShannon Nelson IONIC_PAGE_SIZE, DMA_FROM_DEVICE); 224e75ccac1SShannon Nelson buf_info->page = NULL; 2254b0a7539SShannon Nelson } 2264b0a7539SShannon Nelson 2274b0a7539SShannon Nelson buf_info++; 2284b0a7539SShannon Nelson 22908f2e4b2SShannon Nelson i--; 23008f2e4b2SShannon Nelson } while (i > 0); 23108f2e4b2SShannon Nelson 23208f2e4b2SShannon Nelson return skb; 2330f3154e6SShannon Nelson } 2340f3154e6SShannon Nelson 23508f2e4b2SShannon Nelson static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q, 23608f2e4b2SShannon Nelson struct ionic_desc_info *desc_info, 237a25edab9SShannon Nelson struct ionic_rxq_comp *comp) 23808f2e4b2SShannon Nelson { 23989e572e7SShannon Nelson struct net_device *netdev = q->lif->netdev; 2404b0a7539SShannon Nelson struct ionic_buf_info *buf_info; 24189e572e7SShannon Nelson struct ionic_rx_stats *stats; 242f37bc346SShannon Nelson struct device *dev = q->dev; 24308f2e4b2SShannon Nelson struct sk_buff *skb; 24408f2e4b2SShannon Nelson u16 len; 2450f3154e6SShannon Nelson 24689e572e7SShannon Nelson stats = q_to_rx_stats(q); 24789e572e7SShannon Nelson 2484b0a7539SShannon Nelson buf_info = &desc_info->bufs[0]; 24908f2e4b2SShannon Nelson len = le16_to_cpu(comp->len); 2500f3154e6SShannon Nelson 25189e572e7SShannon Nelson skb = napi_alloc_skb(&q_to_qcq(q)->napi, len); 25289e572e7SShannon Nelson if (unlikely(!skb)) { 25389e572e7SShannon Nelson net_warn_ratelimited("%s: SKB alloc failed 
on %s!\n", 25489e572e7SShannon Nelson netdev->name, q->name); 25589e572e7SShannon Nelson stats->alloc_err++; 25608f2e4b2SShannon Nelson return NULL; 25789e572e7SShannon Nelson } 2580f3154e6SShannon Nelson 2594b0a7539SShannon Nelson if (unlikely(!buf_info->page)) { 26008f2e4b2SShannon Nelson dev_kfree_skb(skb); 26108f2e4b2SShannon Nelson return NULL; 26208f2e4b2SShannon Nelson } 26308f2e4b2SShannon Nelson 2644b0a7539SShannon Nelson dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset, 26508f2e4b2SShannon Nelson len, DMA_FROM_DEVICE); 2664b0a7539SShannon Nelson skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len); 2674b0a7539SShannon Nelson dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset, 26808f2e4b2SShannon Nelson len, DMA_FROM_DEVICE); 26908f2e4b2SShannon Nelson 27008f2e4b2SShannon Nelson skb_put(skb, len); 27108f2e4b2SShannon Nelson skb->protocol = eth_type_trans(skb, q->lif->netdev); 27208f2e4b2SShannon Nelson 27308f2e4b2SShannon Nelson return skb; 2740f3154e6SShannon Nelson } 2750f3154e6SShannon Nelson 2765b3f3f2aSShannon Nelson static void ionic_rx_clean(struct ionic_queue *q, 2775b3f3f2aSShannon Nelson struct ionic_desc_info *desc_info, 2785b3f3f2aSShannon Nelson struct ionic_cq_info *cq_info, 2795b3f3f2aSShannon Nelson void *cb_arg) 2800f3154e6SShannon Nelson { 28189e572e7SShannon Nelson struct net_device *netdev = q->lif->netdev; 2820f3154e6SShannon Nelson struct ionic_qcq *qcq = q_to_qcq(q); 2830f3154e6SShannon Nelson struct ionic_rx_stats *stats; 2840ec9f666SShannon Nelson struct ionic_rxq_comp *comp; 28508f2e4b2SShannon Nelson struct sk_buff *skb; 2860f3154e6SShannon Nelson 2870ec9f666SShannon Nelson comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp); 2880ec9f666SShannon Nelson 2890f3154e6SShannon Nelson stats = q_to_rx_stats(q); 2900f3154e6SShannon Nelson 29124cfa8c7SShannon Nelson if (comp->status) { 29224cfa8c7SShannon Nelson stats->dropped++; 2930f3154e6SShannon 
Nelson return; 29424cfa8c7SShannon Nelson } 2950f3154e6SShannon Nelson 2960f3154e6SShannon Nelson stats->pkts++; 2970f3154e6SShannon Nelson stats->bytes += le16_to_cpu(comp->len); 2980f3154e6SShannon Nelson 29908f2e4b2SShannon Nelson if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak) 300a25edab9SShannon Nelson skb = ionic_rx_copybreak(q, desc_info, comp); 30108f2e4b2SShannon Nelson else 302a25edab9SShannon Nelson skb = ionic_rx_frags(q, desc_info, comp); 3030f3154e6SShannon Nelson 30424cfa8c7SShannon Nelson if (unlikely(!skb)) { 30524cfa8c7SShannon Nelson stats->dropped++; 30608f2e4b2SShannon Nelson return; 30724cfa8c7SShannon Nelson } 3080f3154e6SShannon Nelson 3090f3154e6SShannon Nelson skb_record_rx_queue(skb, q->index); 3100f3154e6SShannon Nelson 31108f2e4b2SShannon Nelson if (likely(netdev->features & NETIF_F_RXHASH)) { 3120f3154e6SShannon Nelson switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) { 3130f3154e6SShannon Nelson case IONIC_PKT_TYPE_IPV4: 3140f3154e6SShannon Nelson case IONIC_PKT_TYPE_IPV6: 3150f3154e6SShannon Nelson skb_set_hash(skb, le32_to_cpu(comp->rss_hash), 3160f3154e6SShannon Nelson PKT_HASH_TYPE_L3); 3170f3154e6SShannon Nelson break; 3180f3154e6SShannon Nelson case IONIC_PKT_TYPE_IPV4_TCP: 3190f3154e6SShannon Nelson case IONIC_PKT_TYPE_IPV6_TCP: 3200f3154e6SShannon Nelson case IONIC_PKT_TYPE_IPV4_UDP: 3210f3154e6SShannon Nelson case IONIC_PKT_TYPE_IPV6_UDP: 3220f3154e6SShannon Nelson skb_set_hash(skb, le32_to_cpu(comp->rss_hash), 3230f3154e6SShannon Nelson PKT_HASH_TYPE_L4); 3240f3154e6SShannon Nelson break; 3250f3154e6SShannon Nelson } 3260f3154e6SShannon Nelson } 3270f3154e6SShannon Nelson 328f07f9815SShannon Nelson if (likely(netdev->features & NETIF_F_RXCSUM) && 329f07f9815SShannon Nelson (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) { 3300f3154e6SShannon Nelson skb->ip_summed = CHECKSUM_COMPLETE; 331d701ec32SShannon Nelson skb->csum = (__force __wsum)le16_to_cpu(comp->csum); 3320f3154e6SShannon Nelson 
stats->csum_complete++; 3330f3154e6SShannon Nelson } else { 3340f3154e6SShannon Nelson stats->csum_none++; 3350f3154e6SShannon Nelson } 3360f3154e6SShannon Nelson 33708f2e4b2SShannon Nelson if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) || 3380f3154e6SShannon Nelson (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) || 33908f2e4b2SShannon Nelson (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD))) 3400f3154e6SShannon Nelson stats->csum_error++; 3410f3154e6SShannon Nelson 342f64e0c56SShannon Nelson if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 343f64e0c56SShannon Nelson (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) { 3440f3154e6SShannon Nelson __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3450f3154e6SShannon Nelson le16_to_cpu(comp->vlan_tci)); 346f64e0c56SShannon Nelson stats->vlan_stripped++; 3470f3154e6SShannon Nelson } 3480f3154e6SShannon Nelson 349a8771bfeSShannon Nelson if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) { 350a8771bfeSShannon Nelson __le64 *cq_desc_hwstamp; 351a8771bfeSShannon Nelson u64 hwstamp; 352a8771bfeSShannon Nelson 353a8771bfeSShannon Nelson cq_desc_hwstamp = 354a8771bfeSShannon Nelson cq_info->cq_desc + 355a8771bfeSShannon Nelson qcq->cq.desc_size - 356a8771bfeSShannon Nelson sizeof(struct ionic_rxq_comp) - 357a8771bfeSShannon Nelson IONIC_HWSTAMP_CQ_NEGOFFSET; 358a8771bfeSShannon Nelson 359a8771bfeSShannon Nelson hwstamp = le64_to_cpu(*cq_desc_hwstamp); 360a8771bfeSShannon Nelson 361a8771bfeSShannon Nelson if (hwstamp != IONIC_HWSTAMP_INVALID) { 362a8771bfeSShannon Nelson skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp); 363a8771bfeSShannon Nelson stats->hwstamp_valid++; 364a8771bfeSShannon Nelson } else { 365a8771bfeSShannon Nelson stats->hwstamp_invalid++; 366a8771bfeSShannon Nelson } 367a8771bfeSShannon Nelson } 368a8771bfeSShannon Nelson 36908f2e4b2SShannon Nelson if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak) 3700f3154e6SShannon Nelson napi_gro_receive(&qcq->napi, skb); 
37108f2e4b2SShannon Nelson else 37208f2e4b2SShannon Nelson napi_gro_frags(&qcq->napi); 3730f3154e6SShannon Nelson } 3740f3154e6SShannon Nelson 375a8771bfeSShannon Nelson bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) 3760f3154e6SShannon Nelson { 3770f3154e6SShannon Nelson struct ionic_queue *q = cq->bound_q; 3780f3154e6SShannon Nelson struct ionic_desc_info *desc_info; 3790ec9f666SShannon Nelson struct ionic_rxq_comp *comp; 3800ec9f666SShannon Nelson 3810ec9f666SShannon Nelson comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp); 3820f3154e6SShannon Nelson 3830f3154e6SShannon Nelson if (!color_match(comp->pkt_type_color, cq->done_color)) 3840f3154e6SShannon Nelson return false; 3850f3154e6SShannon Nelson 3860f3154e6SShannon Nelson /* check for empty queue */ 387f1d2e894SShannon Nelson if (q->tail_idx == q->head_idx) 3880f3154e6SShannon Nelson return false; 3890f3154e6SShannon Nelson 390339dcf7fSShannon Nelson if (q->tail_idx != le16_to_cpu(comp->comp_index)) 3910f3154e6SShannon Nelson return false; 3920f3154e6SShannon Nelson 393339dcf7fSShannon Nelson desc_info = &q->info[q->tail_idx]; 394f1d2e894SShannon Nelson q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); 3950f3154e6SShannon Nelson 3960f3154e6SShannon Nelson /* clean the related q entry, only one per qc completion */ 3970f3154e6SShannon Nelson ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg); 3980f3154e6SShannon Nelson 3990f3154e6SShannon Nelson desc_info->cb = NULL; 4000f3154e6SShannon Nelson desc_info->cb_arg = NULL; 4010f3154e6SShannon Nelson 4020f3154e6SShannon Nelson return true; 4030f3154e6SShannon Nelson } 4040f3154e6SShannon Nelson 405*40bc471dSShannon Nelson static inline void ionic_write_cmb_desc(struct ionic_queue *q, 406*40bc471dSShannon Nelson void __iomem *cmb_desc, 407*40bc471dSShannon Nelson void *desc) 408*40bc471dSShannon Nelson { 409*40bc471dSShannon Nelson if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS) 410*40bc471dSShannon Nelson 
memcpy_toio(cmb_desc, desc, q->desc_size); 411*40bc471dSShannon Nelson } 412*40bc471dSShannon Nelson 4130f3154e6SShannon Nelson void ionic_rx_fill(struct ionic_queue *q) 4140f3154e6SShannon Nelson { 4150f3154e6SShannon Nelson struct net_device *netdev = q->lif->netdev; 41608f2e4b2SShannon Nelson struct ionic_desc_info *desc_info; 41708f2e4b2SShannon Nelson struct ionic_rxq_sg_desc *sg_desc; 41808f2e4b2SShannon Nelson struct ionic_rxq_sg_elem *sg_elem; 4194b0a7539SShannon Nelson struct ionic_buf_info *buf_info; 420e55f0f5bSNeel Patel unsigned int fill_threshold; 4210f3154e6SShannon Nelson struct ionic_rxq_desc *desc; 422c37d6e3fSShannon Nelson unsigned int remain_len; 4234b0a7539SShannon Nelson unsigned int frag_len; 42408f2e4b2SShannon Nelson unsigned int nfrags; 425e55f0f5bSNeel Patel unsigned int n_fill; 42608f2e4b2SShannon Nelson unsigned int i, j; 4270f3154e6SShannon Nelson unsigned int len; 4280f3154e6SShannon Nelson 429e55f0f5bSNeel Patel n_fill = ionic_q_space_avail(q); 430e55f0f5bSNeel Patel 431e55f0f5bSNeel Patel fill_threshold = min_t(unsigned int, IONIC_RX_FILL_THRESHOLD, 432e55f0f5bSNeel Patel q->num_descs / IONIC_RX_FILL_DIV); 433e55f0f5bSNeel Patel if (n_fill < fill_threshold) 434e55f0f5bSNeel Patel return; 435e55f0f5bSNeel Patel 43683469893SShannon Nelson len = netdev->mtu + ETH_HLEN + VLAN_HLEN; 4370f3154e6SShannon Nelson 438e55f0f5bSNeel Patel for (i = n_fill; i; i--) { 4394b0a7539SShannon Nelson nfrags = 0; 440c37d6e3fSShannon Nelson remain_len = len; 441f1d2e894SShannon Nelson desc_info = &q->info[q->head_idx]; 44208f2e4b2SShannon Nelson desc = desc_info->desc; 4434b0a7539SShannon Nelson buf_info = &desc_info->bufs[0]; 4440f3154e6SShannon Nelson 4454b0a7539SShannon Nelson if (!buf_info->page) { /* alloc a new buffer? 
*/ 4464b0a7539SShannon Nelson if (unlikely(ionic_rx_page_alloc(q, buf_info))) { 44708f2e4b2SShannon Nelson desc->addr = 0; 44808f2e4b2SShannon Nelson desc->len = 0; 44908f2e4b2SShannon Nelson return; 45008f2e4b2SShannon Nelson } 4514b0a7539SShannon Nelson } 45208f2e4b2SShannon Nelson 4534b0a7539SShannon Nelson /* fill main descriptor - buf[0] */ 4544b0a7539SShannon Nelson desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset); 4554b0a7539SShannon Nelson frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset); 4564b0a7539SShannon Nelson desc->len = cpu_to_le16(frag_len); 4574b0a7539SShannon Nelson remain_len -= frag_len; 4584b0a7539SShannon Nelson buf_info++; 4594b0a7539SShannon Nelson nfrags++; 46008f2e4b2SShannon Nelson 4614b0a7539SShannon Nelson /* fill sg descriptors - buf[1..n] */ 4624b0a7539SShannon Nelson sg_desc = desc_info->sg_desc; 463f37bc346SShannon Nelson for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) { 46408f2e4b2SShannon Nelson sg_elem = &sg_desc->elems[j]; 4654b0a7539SShannon Nelson if (!buf_info->page) { /* alloc a new sg buffer? 
*/ 4664b0a7539SShannon Nelson if (unlikely(ionic_rx_page_alloc(q, buf_info))) { 46708f2e4b2SShannon Nelson sg_elem->addr = 0; 46808f2e4b2SShannon Nelson sg_elem->len = 0; 46908f2e4b2SShannon Nelson return; 47008f2e4b2SShannon Nelson } 47108f2e4b2SShannon Nelson } 4720f3154e6SShannon Nelson 4734b0a7539SShannon Nelson sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset); 4744b0a7539SShannon Nelson frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset); 4754b0a7539SShannon Nelson sg_elem->len = cpu_to_le16(frag_len); 4764b0a7539SShannon Nelson remain_len -= frag_len; 4774b0a7539SShannon Nelson buf_info++; 4784b0a7539SShannon Nelson nfrags++; 4794b0a7539SShannon Nelson } 4804b0a7539SShannon Nelson 4814b0a7539SShannon Nelson /* clear end sg element as a sentinel */ 482f37bc346SShannon Nelson if (j < q->max_sg_elems) { 4834b0a7539SShannon Nelson sg_elem = &sg_desc->elems[j]; 4844b0a7539SShannon Nelson memset(sg_elem, 0, sizeof(*sg_elem)); 4854b0a7539SShannon Nelson } 4864b0a7539SShannon Nelson 4874b0a7539SShannon Nelson desc->opcode = (nfrags > 1) ? 
IONIC_RXQ_DESC_OPCODE_SG : 4884b0a7539SShannon Nelson IONIC_RXQ_DESC_OPCODE_SIMPLE; 4894b0a7539SShannon Nelson desc_info->nbufs = nfrags; 4904b0a7539SShannon Nelson 491*40bc471dSShannon Nelson ionic_write_cmb_desc(q, desc_info->cmb_desc, desc); 492*40bc471dSShannon Nelson 493155f15adSShannon Nelson ionic_rxq_post(q, false, ionic_rx_clean, NULL); 4940f3154e6SShannon Nelson } 495155f15adSShannon Nelson 496155f15adSShannon Nelson ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, 497f1d2e894SShannon Nelson q->dbval | q->head_idx); 498b69585bfSAllen Hubbe 499b69585bfSAllen Hubbe q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE; 500b69585bfSAllen Hubbe q->dbell_jiffies = jiffies; 501b69585bfSAllen Hubbe 502b69585bfSAllen Hubbe mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline, 503b69585bfSAllen Hubbe jiffies + IONIC_NAPI_DEADLINE); 5040f3154e6SShannon Nelson } 5050f3154e6SShannon Nelson 5060f3154e6SShannon Nelson void ionic_rx_empty(struct ionic_queue *q) 5070f3154e6SShannon Nelson { 508f1d2e894SShannon Nelson struct ionic_desc_info *desc_info; 5094b0a7539SShannon Nelson struct ionic_buf_info *buf_info; 5100c32a28eSShannon Nelson unsigned int i, j; 5110f3154e6SShannon Nelson 5120c32a28eSShannon Nelson for (i = 0; i < q->num_descs; i++) { 5130c32a28eSShannon Nelson desc_info = &q->info[i]; 5140c32a28eSShannon Nelson for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) { 5154b0a7539SShannon Nelson buf_info = &desc_info->bufs[j]; 5164b0a7539SShannon Nelson if (buf_info->page) 5174b0a7539SShannon Nelson ionic_rx_page_free(q, buf_info); 5180c32a28eSShannon Nelson } 51908f2e4b2SShannon Nelson 5204b0a7539SShannon Nelson desc_info->nbufs = 0; 5210c32a28eSShannon Nelson desc_info->cb = NULL; 522f1d2e894SShannon Nelson desc_info->cb_arg = NULL; 5230f3154e6SShannon Nelson } 5244b0a7539SShannon Nelson 5254b0a7539SShannon Nelson q->head_idx = 0; 5264b0a7539SShannon Nelson q->tail_idx = 0; 5270f3154e6SShannon Nelson } 5280f3154e6SShannon Nelson 52976ed8a4aSShannon Nelson static void 
ionic_dim_update(struct ionic_qcq *qcq, int napi_mode) 53004a83459SShannon Nelson { 53104a83459SShannon Nelson struct dim_sample dim_sample; 53204a83459SShannon Nelson struct ionic_lif *lif; 53304a83459SShannon Nelson unsigned int qi; 53476ed8a4aSShannon Nelson u64 pkts, bytes; 53504a83459SShannon Nelson 53604a83459SShannon Nelson if (!qcq->intr.dim_coal_hw) 53704a83459SShannon Nelson return; 53804a83459SShannon Nelson 53904a83459SShannon Nelson lif = qcq->q.lif; 54004a83459SShannon Nelson qi = qcq->cq.bound_q->index; 54104a83459SShannon Nelson 54276ed8a4aSShannon Nelson switch (napi_mode) { 54376ed8a4aSShannon Nelson case IONIC_LIF_F_TX_DIM_INTR: 54476ed8a4aSShannon Nelson pkts = lif->txqstats[qi].pkts; 54576ed8a4aSShannon Nelson bytes = lif->txqstats[qi].bytes; 54676ed8a4aSShannon Nelson break; 54776ed8a4aSShannon Nelson case IONIC_LIF_F_RX_DIM_INTR: 54876ed8a4aSShannon Nelson pkts = lif->rxqstats[qi].pkts; 54976ed8a4aSShannon Nelson bytes = lif->rxqstats[qi].bytes; 55076ed8a4aSShannon Nelson break; 55176ed8a4aSShannon Nelson default: 55276ed8a4aSShannon Nelson pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts; 55376ed8a4aSShannon Nelson bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes; 55476ed8a4aSShannon Nelson break; 55576ed8a4aSShannon Nelson } 55604a83459SShannon Nelson 55704a83459SShannon Nelson dim_update_sample(qcq->cq.bound_intr->rearm_count, 55876ed8a4aSShannon Nelson pkts, bytes, &dim_sample); 55904a83459SShannon Nelson 56004a83459SShannon Nelson net_dim(&qcq->dim, dim_sample); 56104a83459SShannon Nelson } 56204a83459SShannon Nelson 563fe8c30b5SShannon Nelson int ionic_tx_napi(struct napi_struct *napi, int budget) 564fe8c30b5SShannon Nelson { 565fe8c30b5SShannon Nelson struct ionic_qcq *qcq = napi_to_qcq(napi); 566fe8c30b5SShannon Nelson struct ionic_cq *cq = napi_to_cq(napi); 567fe8c30b5SShannon Nelson struct ionic_dev *idev; 568fe8c30b5SShannon Nelson struct ionic_lif *lif; 569fe8c30b5SShannon Nelson u32 work_done = 0; 
570fe8c30b5SShannon Nelson u32 flags = 0; 571fe8c30b5SShannon Nelson 572fe8c30b5SShannon Nelson lif = cq->bound_q->lif; 573fe8c30b5SShannon Nelson idev = &lif->ionic->idev; 574fe8c30b5SShannon Nelson 575fe8c30b5SShannon Nelson work_done = ionic_cq_service(cq, budget, 576fe8c30b5SShannon Nelson ionic_tx_service, NULL, NULL); 577fe8c30b5SShannon Nelson 578fe8c30b5SShannon Nelson if (work_done < budget && napi_complete_done(napi, work_done)) { 57976ed8a4aSShannon Nelson ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR); 580fe8c30b5SShannon Nelson flags |= IONIC_INTR_CRED_UNMASK; 58104a83459SShannon Nelson cq->bound_intr->rearm_count++; 582fe8c30b5SShannon Nelson } 583fe8c30b5SShannon Nelson 584fe8c30b5SShannon Nelson if (work_done || flags) { 585fe8c30b5SShannon Nelson flags |= IONIC_INTR_CRED_RESET_COALESCE; 586fe8c30b5SShannon Nelson ionic_intr_credits(idev->intr_ctrl, 587fe8c30b5SShannon Nelson cq->bound_intr->index, 588fe8c30b5SShannon Nelson work_done, flags); 589fe8c30b5SShannon Nelson } 590fe8c30b5SShannon Nelson 591b69585bfSAllen Hubbe if (!work_done && ionic_txq_poke_doorbell(&qcq->q)) 592b69585bfSAllen Hubbe mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); 593b69585bfSAllen Hubbe 594fe8c30b5SShannon Nelson return work_done; 595fe8c30b5SShannon Nelson } 596fe8c30b5SShannon Nelson 5970f3154e6SShannon Nelson int ionic_rx_napi(struct napi_struct *napi, int budget) 5980f3154e6SShannon Nelson { 5990f3154e6SShannon Nelson struct ionic_qcq *qcq = napi_to_qcq(napi); 600fe8c30b5SShannon Nelson struct ionic_cq *cq = napi_to_cq(napi); 601fe8c30b5SShannon Nelson struct ionic_dev *idev; 602fe8c30b5SShannon Nelson struct ionic_lif *lif; 603fe8c30b5SShannon Nelson u32 work_done = 0; 604fe8c30b5SShannon Nelson u32 flags = 0; 605fe8c30b5SShannon Nelson 606fe8c30b5SShannon Nelson lif = cq->bound_q->lif; 607fe8c30b5SShannon Nelson idev = &lif->ionic->idev; 608fe8c30b5SShannon Nelson 609fe8c30b5SShannon Nelson work_done = ionic_cq_service(cq, budget, 
610fe8c30b5SShannon Nelson ionic_rx_service, NULL, NULL); 611fe8c30b5SShannon Nelson 612fe8c30b5SShannon Nelson ionic_rx_fill(cq->bound_q); 613fe8c30b5SShannon Nelson 614fe8c30b5SShannon Nelson if (work_done < budget && napi_complete_done(napi, work_done)) { 61576ed8a4aSShannon Nelson ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR); 616fe8c30b5SShannon Nelson flags |= IONIC_INTR_CRED_UNMASK; 61704a83459SShannon Nelson cq->bound_intr->rearm_count++; 618fe8c30b5SShannon Nelson } 619fe8c30b5SShannon Nelson 620fe8c30b5SShannon Nelson if (work_done || flags) { 621fe8c30b5SShannon Nelson flags |= IONIC_INTR_CRED_RESET_COALESCE; 622fe8c30b5SShannon Nelson ionic_intr_credits(idev->intr_ctrl, 623fe8c30b5SShannon Nelson cq->bound_intr->index, 624fe8c30b5SShannon Nelson work_done, flags); 625fe8c30b5SShannon Nelson } 626fe8c30b5SShannon Nelson 627b69585bfSAllen Hubbe if (!work_done && ionic_rxq_poke_doorbell(&qcq->q)) 628b69585bfSAllen Hubbe mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); 629b69585bfSAllen Hubbe 630fe8c30b5SShannon Nelson return work_done; 631fe8c30b5SShannon Nelson } 632fe8c30b5SShannon Nelson 633fe8c30b5SShannon Nelson int ionic_txrx_napi(struct napi_struct *napi, int budget) 634fe8c30b5SShannon Nelson { 635b69585bfSAllen Hubbe struct ionic_qcq *rxqcq = napi_to_qcq(napi); 6360f3154e6SShannon Nelson struct ionic_cq *rxcq = napi_to_cq(napi); 6370f3154e6SShannon Nelson unsigned int qi = rxcq->bound_q->index; 638b69585bfSAllen Hubbe struct ionic_qcq *txqcq; 6390f3154e6SShannon Nelson struct ionic_dev *idev; 6400f3154e6SShannon Nelson struct ionic_lif *lif; 6410f3154e6SShannon Nelson struct ionic_cq *txcq; 642b69585bfSAllen Hubbe bool resched = false; 643b14e4e95SShannon Nelson u32 rx_work_done = 0; 644b14e4e95SShannon Nelson u32 tx_work_done = 0; 6450f3154e6SShannon Nelson u32 flags = 0; 6460f3154e6SShannon Nelson 6470f3154e6SShannon Nelson lif = rxcq->bound_q->lif; 6480f3154e6SShannon Nelson idev = &lif->ionic->idev; 649b69585bfSAllen Hubbe txqcq 
= lif->txqcqs[qi]; 65034dec947SShannon Nelson txcq = &lif->txqcqs[qi]->cq; 6510f3154e6SShannon Nelson 652f37bc346SShannon Nelson tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, 653b14e4e95SShannon Nelson ionic_tx_service, NULL, NULL); 6540f3154e6SShannon Nelson 655b14e4e95SShannon Nelson rx_work_done = ionic_cq_service(rxcq, budget, 656b14e4e95SShannon Nelson ionic_rx_service, NULL, NULL); 657a8205ab6SShannon Nelson 658a8205ab6SShannon Nelson ionic_rx_fill(rxcq->bound_q); 6590f3154e6SShannon Nelson 6609dda5110SShannon Nelson if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) { 661b69585bfSAllen Hubbe ionic_dim_update(rxqcq, 0); 6620f3154e6SShannon Nelson flags |= IONIC_INTR_CRED_UNMASK; 66304a83459SShannon Nelson rxcq->bound_intr->rearm_count++; 6640f3154e6SShannon Nelson } 6650f3154e6SShannon Nelson 6669dda5110SShannon Nelson if (rx_work_done || flags) { 6670f3154e6SShannon Nelson flags |= IONIC_INTR_CRED_RESET_COALESCE; 6680f3154e6SShannon Nelson ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index, 669b14e4e95SShannon Nelson tx_work_done + rx_work_done, flags); 6700f3154e6SShannon Nelson } 6710f3154e6SShannon Nelson 672b69585bfSAllen Hubbe if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q)) 673b69585bfSAllen Hubbe resched = true; 674b69585bfSAllen Hubbe if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q)) 675b69585bfSAllen Hubbe resched = true; 676b69585bfSAllen Hubbe if (resched) 677b69585bfSAllen Hubbe mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); 678b69585bfSAllen Hubbe 6799dda5110SShannon Nelson return rx_work_done; 6800f3154e6SShannon Nelson } 6810f3154e6SShannon Nelson 6825b3f3f2aSShannon Nelson static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, 6835b3f3f2aSShannon Nelson void *data, size_t len) 6840f3154e6SShannon Nelson { 6850f3154e6SShannon Nelson struct ionic_tx_stats *stats = q_to_tx_stats(q); 686f37bc346SShannon Nelson struct device *dev = q->dev; 6870f3154e6SShannon Nelson 
dma_addr_t dma_addr; 6880f3154e6SShannon Nelson 6890f3154e6SShannon Nelson dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE); 6900f3154e6SShannon Nelson if (dma_mapping_error(dev, dma_addr)) { 6910f3154e6SShannon Nelson net_warn_ratelimited("%s: DMA single map failed on %s!\n", 6920f3154e6SShannon Nelson q->lif->netdev->name, q->name); 6930f3154e6SShannon Nelson stats->dma_map_err++; 6940f3154e6SShannon Nelson return 0; 6950f3154e6SShannon Nelson } 6960f3154e6SShannon Nelson return dma_addr; 6970f3154e6SShannon Nelson } 6980f3154e6SShannon Nelson 6995b3f3f2aSShannon Nelson static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, 7005b3f3f2aSShannon Nelson const skb_frag_t *frag, 7010f3154e6SShannon Nelson size_t offset, size_t len) 7020f3154e6SShannon Nelson { 7030f3154e6SShannon Nelson struct ionic_tx_stats *stats = q_to_tx_stats(q); 704f37bc346SShannon Nelson struct device *dev = q->dev; 7050f3154e6SShannon Nelson dma_addr_t dma_addr; 7060f3154e6SShannon Nelson 7070f3154e6SShannon Nelson dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE); 7080f3154e6SShannon Nelson if (dma_mapping_error(dev, dma_addr)) { 7090f3154e6SShannon Nelson net_warn_ratelimited("%s: DMA frag map failed on %s!\n", 7100f3154e6SShannon Nelson q->lif->netdev->name, q->name); 7110f3154e6SShannon Nelson stats->dma_map_err++; 7120f3154e6SShannon Nelson } 7130f3154e6SShannon Nelson return dma_addr; 7140f3154e6SShannon Nelson } 7150f3154e6SShannon Nelson 7162da479caSShannon Nelson static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb, 7172da479caSShannon Nelson struct ionic_desc_info *desc_info) 7185b039241SShannon Nelson { 7192da479caSShannon Nelson struct ionic_buf_info *buf_info = desc_info->bufs; 7200f4e7f4eSShannon Nelson struct ionic_tx_stats *stats = q_to_tx_stats(q); 7215b039241SShannon Nelson struct device *dev = q->dev; 7225b039241SShannon Nelson dma_addr_t dma_addr; 7232da479caSShannon Nelson unsigned int nfrags; 7245b039241SShannon Nelson 
skb_frag_t *frag; 7255b039241SShannon Nelson int frag_idx; 7265b039241SShannon Nelson 7275b039241SShannon Nelson dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); 7280f4e7f4eSShannon Nelson if (dma_mapping_error(dev, dma_addr)) { 7290f4e7f4eSShannon Nelson stats->dma_map_err++; 7305b039241SShannon Nelson return -EIO; 7310f4e7f4eSShannon Nelson } 7325b039241SShannon Nelson buf_info->dma_addr = dma_addr; 7335b039241SShannon Nelson buf_info->len = skb_headlen(skb); 7345b039241SShannon Nelson buf_info++; 7355b039241SShannon Nelson 7362da479caSShannon Nelson frag = skb_shinfo(skb)->frags; 7372da479caSShannon Nelson nfrags = skb_shinfo(skb)->nr_frags; 7382da479caSShannon Nelson for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) { 7395b039241SShannon Nelson dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag)); 7400f4e7f4eSShannon Nelson if (dma_mapping_error(dev, dma_addr)) { 7410f4e7f4eSShannon Nelson stats->dma_map_err++; 7425b039241SShannon Nelson goto dma_fail; 7430f4e7f4eSShannon Nelson } 7445b039241SShannon Nelson buf_info->dma_addr = dma_addr; 7455b039241SShannon Nelson buf_info->len = skb_frag_size(frag); 7462da479caSShannon Nelson buf_info++; 7475b039241SShannon Nelson } 7485b039241SShannon Nelson 7492da479caSShannon Nelson desc_info->nbufs = 1 + nfrags; 7502da479caSShannon Nelson 7515b039241SShannon Nelson return 0; 7525b039241SShannon Nelson 7535b039241SShannon Nelson dma_fail: 7545b039241SShannon Nelson /* unwind the frag mappings and the head mapping */ 7555b039241SShannon Nelson while (frag_idx > 0) { 7565b039241SShannon Nelson frag_idx--; 7575b039241SShannon Nelson buf_info--; 7585b039241SShannon Nelson dma_unmap_page(dev, buf_info->dma_addr, 7595b039241SShannon Nelson buf_info->len, DMA_TO_DEVICE); 7605b039241SShannon Nelson } 7615b039241SShannon Nelson dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE); 7625b039241SShannon Nelson return -EIO; 7635b039241SShannon Nelson } 7645b039241SShannon Nelson 
/* Unmap the head and frag buffers recorded in desc_info->bufs[]
 * (bufs[0] was dma_map_single'd, the rest were page/frag mappings),
 * then mark the descriptor as holding no buffers.
 */
static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
				     struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct device *dev = q->dev;
	unsigned int i;

	if (!desc_info->nbufs)
		return;

	dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
			 buf_info->len, DMA_TO_DEVICE);
	buf_info++;
	for (i = 1; i < desc_info->nbufs; i++, buf_info++)
		dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);

	desc_info->nbufs = 0;
}

/* Per-descriptor TX completion: unmap the DMA buffers, and if this was
 * a real packet (cb_arg is the skb) deliver a hardware timestamp on the
 * hwstamp queue or wake a stopped subqueue on a normal queue, record
 * skb->len in desc_info->bytes for the caller's BQL accounting, and
 * release the skb.
 *
 * cq_info may be NULL when called from ionic_tx_empty() — then no
 * timestamp is available to report.
 */
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;
	u16 qi;

	ionic_tx_desc_unmap_bufs(q, desc_info);

	if (!skb)
		return;

	qi = skb_get_queue_mapping(skb);

	if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
		if (cq_info) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			/* the raw timestamp sits just before the txq
			 * completion struct at the end of the cq descriptor
			 */
			cq_desc_hwstamp =
				cq_info->cq_desc +
				qcq->cq.desc_size -
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}

	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
		netif_wake_subqueue(q->lif->netdev, qi);
	}

	/* byte count consumed by the caller for BQL accounting */
	desc_info->bytes = skb->len;
	stats->clean++;

	dev_consume_skb_any(skb);
}

/* Service one TX completion: if its color matches the done color, clean
 * every queue entry up to and including the one named by comp_index,
 * then report the packet/byte totals to BQL (except on the hwstamp
 * queue, which is not BQL-managed).
 *
 * Returns true if a completion was processed, false if none was ready.
 */
bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_txq_comp *comp;
	int bytes = 0;
	int pkts = 0;
	u16 index;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		/* only entries carrying an skb (cb_arg) count as packets */
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);

	return true;
}

/* Drain all ready TX completions on this cq and return the interrupt
 * credits if any work was done.
 */
void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done,
				   IONIC_INTR_CRED_RESET_COALESCE);
}

/* Clean out any posted-but-uncompleted TX descriptors (used when no
 * more completions will arrive, e.g. at queue teardown), passing a NULL
 * cq_info so no timestamps are reported, and settle BQL accounting.
 */
void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int bytes = 0;
	int pkts = 0;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}

/* Preload the inner-most TCP checksum with the IP pseudo-header sum
 * (length 0) for encapsulated TSO; may need to un-share the header via
 * skb_cow_head().  Returns 0 or the skb_cow_head() error.
 */
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

/* Same as above for the outer (non-encapsulated) TCP header. */
static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

/* Fill and post one TSO descriptor: SOT/EOT flags mark the first and
 * last descriptor of the burst; only the first carries the skb and the
 * completion callback, and only the last rings the doorbell.
 */
static void ionic_tx_tso_post(struct ionic_queue *q,
			      struct ionic_desc_info *desc_info,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	struct ionic_txq_desc *desc =
		desc_info->desc;
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

	if (start) {
		skb_tx_timestamp(skb);
		/* hwstamp queue is not BQL-managed */
		if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
			netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, false, ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, done, NULL, NULL);
	}
}

/* Transmit a TSO skb: map the whole skb once, then walk the mapped
 * buffers carving them into per-segment descriptors (one descriptor per
 * hdrlen+mss, then mss, chunk), spilling extra chunks into SG elements.
 *
 * Returns 0 on success or a negative errno; on error all DMA mappings
 * are unwound and the caller still owns the skb.
 */
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->info[q->head_idx];
	buf_info = desc_info->bufs;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
						   SKB_GSO_GRE_CSUM |
						   SKB_GSO_IPXIP4 |
						   SKB_GSO_IPXIP6 |
						   SKB_GSO_UDP_TUNNEL |
						   SKB_GSO_UDP_TUNNEL_CSUM));
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err) {
		/* clean up mapping from ionic_tx_map_skb */
		ionic_tx_desc_unmap_bufs(q, desc_info);
		return err;
	}

	if (encap)
		hdrlen = skb_inner_tcp_all_headers(skb);
	else
		hdrlen = skb_tcp_all_headers(skb);

	/* first segment carries the headers, so it is hdrlen + mss long */
	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;
		/* use fragments until we have enough to post a single descriptor */
		while (seg_rem > 0) {
			/* if the fragment is exhausted then move to the next one */
			if (frag_rem == 0) {
				/* grab the next fragment */
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = desc_info->txq_desc;
				elem = desc_info->txq_sg_desc->elems;
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);
		/* post descriptor */
		ionic_tx_tso_post(q, desc_info, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;
		/* Buffer information is stored with the first tso descriptor */
		desc_info = &q->info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes = len;

	return 0;
}

/* Fill the initial descriptor for a CHECKSUM_PARTIAL skb, asking the
 * hardware to finish the checksum from csum_start/csum_offset.
 */
static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	/* bufs[0] (the mapped skb head) goes in the main descriptor */
	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;
}

/* Fill the initial descriptor for an skb that needs no checksum help
 * from the hardware (CSUM_NONE opcode).
 */
static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
				  struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = 0;
	desc->csum_offset = 0;

	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

	stats->csum_none++;
}

/* Copy the already-mapped frag buffers (bufs[1..]) into the SG list. */
static void
ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb, 11952da479caSShannon Nelson struct ionic_desc_info *desc_info) 11960f3154e6SShannon Nelson { 11972da479caSShannon Nelson struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc; 11982da479caSShannon Nelson struct ionic_buf_info *buf_info = &desc_info->bufs[1]; 11990f3154e6SShannon Nelson struct ionic_txq_sg_elem *elem = sg_desc->elems; 12000f3154e6SShannon Nelson struct ionic_tx_stats *stats = q_to_tx_stats(q); 12012da479caSShannon Nelson unsigned int i; 12020f3154e6SShannon Nelson 12032da479caSShannon Nelson for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) { 12042da479caSShannon Nelson elem->addr = cpu_to_le64(buf_info->dma_addr); 12052da479caSShannon Nelson elem->len = cpu_to_le16(buf_info->len); 12060f3154e6SShannon Nelson } 12070f3154e6SShannon Nelson 12082da479caSShannon Nelson stats->frags += skb_shinfo(skb)->nr_frags; 12090f3154e6SShannon Nelson } 12100f3154e6SShannon Nelson 12110f3154e6SShannon Nelson static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) 12120f3154e6SShannon Nelson { 12132da479caSShannon Nelson struct ionic_desc_info *desc_info = &q->info[q->head_idx]; 12140f3154e6SShannon Nelson struct ionic_tx_stats *stats = q_to_tx_stats(q); 12150f3154e6SShannon Nelson 12162da479caSShannon Nelson if (unlikely(ionic_tx_map_skb(q, skb, desc_info))) 12172da479caSShannon Nelson return -EIO; 12182da479caSShannon Nelson 12190f3154e6SShannon Nelson /* set up the initial descriptor */ 12200f3154e6SShannon Nelson if (skb->ip_summed == CHECKSUM_PARTIAL) 1221238a0f7cSBrett Creeley ionic_tx_calc_csum(q, skb, desc_info); 12220f3154e6SShannon Nelson else 1223238a0f7cSBrett Creeley ionic_tx_calc_no_csum(q, skb, desc_info); 12240f3154e6SShannon Nelson 12250f3154e6SShannon Nelson /* add frags */ 1226238a0f7cSBrett Creeley ionic_tx_skb_frags(q, skb, desc_info); 12270f3154e6SShannon Nelson 12280f3154e6SShannon Nelson skb_tx_timestamp(skb); 12290f3154e6SShannon Nelson 
stats->pkts++; 12300f3154e6SShannon Nelson stats->bytes += skb->len; 12310f3154e6SShannon Nelson 1232a8771bfeSShannon Nelson if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) 12330f3154e6SShannon Nelson netdev_tx_sent_queue(q_to_ndq(q), skb->len); 12340f3154e6SShannon Nelson ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb); 12350f3154e6SShannon Nelson 12360f3154e6SShannon Nelson return 0; 12370f3154e6SShannon Nelson } 12380f3154e6SShannon Nelson 12390f3154e6SShannon Nelson static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) 12400f3154e6SShannon Nelson { 12410f3154e6SShannon Nelson struct ionic_tx_stats *stats = q_to_tx_stats(q); 1242d2c21422SShannon Nelson int ndescs; 12430f3154e6SShannon Nelson int err; 12440f3154e6SShannon Nelson 1245d2c21422SShannon Nelson /* Each desc is mss long max, so a descriptor for each gso_seg */ 12460f3154e6SShannon Nelson if (skb_is_gso(skb)) 1247d2c21422SShannon Nelson ndescs = skb_shinfo(skb)->gso_segs; 1248d2c21422SShannon Nelson else 1249d2c21422SShannon Nelson ndescs = 1; 12500f3154e6SShannon Nelson 12510f3154e6SShannon Nelson /* If non-TSO, just need 1 desc and nr_frags sg elems */ 1252f37bc346SShannon Nelson if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems) 1253d2c21422SShannon Nelson return ndescs; 12540f3154e6SShannon Nelson 12550f3154e6SShannon Nelson /* Too many frags, so linearize */ 12560f3154e6SShannon Nelson err = skb_linearize(skb); 12570f3154e6SShannon Nelson if (err) 12580f3154e6SShannon Nelson return err; 12590f3154e6SShannon Nelson 12600f3154e6SShannon Nelson stats->linearize++; 12610f3154e6SShannon Nelson 1262d2c21422SShannon Nelson return ndescs; 12630f3154e6SShannon Nelson } 12640f3154e6SShannon Nelson 12650f3154e6SShannon Nelson static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs) 12660f3154e6SShannon Nelson { 12670f3154e6SShannon Nelson int stopped = 0; 12680f3154e6SShannon Nelson 12690f3154e6SShannon Nelson if (unlikely(!ionic_q_has_space(q, ndescs))) { 
		netif_stop_subqueue(q->lif->netdev, q->index);
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

/* Transmit on the dedicated hardware-timestamping TX queue.
 * Always returns NETDEV_TX_OK: packets that cannot be posted
 * immediately are dropped rather than the queue being stopped.
 */
static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q = &lif->hwstamp_txq->q;
	int err, ndescs;

	/* Does not stop/start txq, because we post to a separate tx queue
	 * for timestamping, and if a packet can't be posted immediately to
	 * the timestamping queue, it is dropped.
	 */

	ndescs = ionic_tx_descs_needed(q, skb);
	if (unlikely(ndescs < 0))
		goto err_out_drop;

	if (unlikely(!ionic_q_has_space(q, ndescs)))
		goto err_out_drop;

	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* ndo_start_xmit entry point: drop if the LIF is down, divert
 * timestamp-requesting skbs to the hwstamp queue when configured,
 * otherwise pick the mapped TX queue, send via the TSO or normal path,
 * and stop the subqueue when descriptors run low.
 */
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
			return ionic_start_hwstamp_xmit(skb, netdev);

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}