// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/cpumask.h>
#include <linux/if_vlan.h>
#include <asm/barrier.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define RX_IRQ_NO_PENDING		0
#define RX_IRQ_NO_COALESC		0
#define RX_IRQ_NO_LLI_TIMER		0
#define RX_IRQ_NO_CREDIT		0
#define RX_IRQ_NO_RESEND_TIMER		0
#define HINIC_RX_BUFFER_WRITE		16

#define HINIC_RX_IPV6_PKT		7
#define LRO_PKT_HDR_LEN_IPV4		66
#define LRO_PKT_HDR_LEN_IPV6		86
#define LRO_REPLENISH_THLD		256

#define LRO_PKT_HDR_LEN(cqe)		\
	(HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
	 HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)
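/*
 * A note on the LRO constants above: when hardware aggregates num_lro
 * segments into one packet, rxq_recv() adds back
 * (num_lro - 1) * LRO_PKT_HDR_LEN(cqe) bytes of stripped per-segment
 * headers.  The values 66 and 86 are consistent with Ethernet (14) +
 * IPv4 (20) or IPv6 (40) + TCP with timestamps (32); treat that
 * breakdown as an inference from the arithmetic, not a documented
 * hardware contract.
 */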
/**
 * hinic_rxq_clean_stats - Clean the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_update_begin(&rxq_stats->syncp);
	rxq_stats->pkts = 0;
	rxq_stats->bytes = 0;
	rxq_stats->errors = 0;
	rxq_stats->csum_errors = 0;
	rxq_stats->other_errors = 0;
	u64_stats_update_end(&rxq_stats->syncp);
}

/**
 * hinic_rxq_get_stats - get statistics of Rx Queue
 * @rxq: Logical Rx Queue
 * @stats: return updated stats here
 **/
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&rxq_stats->syncp);
		stats->pkts = rxq_stats->pkts;
		stats->bytes = rxq_stats->bytes;
		stats->errors = rxq_stats->csum_errors +
				rxq_stats->other_errors;
		stats->csum_errors = rxq_stats->csum_errors;
		stats->other_errors = rxq_stats->other_errors;
	} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}

/**
 * rxq_stats_init - Initialize the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
static void rxq_stats_init(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_init(&rxq_stats->syncp);
	hinic_rxq_clean_stats(rxq);
}

static void rx_csum(struct hinic_rxq *rxq, u32 status,
		    struct sk_buff *skb)
{
	struct net_device *netdev = rxq->netdev;
	u32 csum_err;

	csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (!csum_err) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
				  HINIC_RX_CSUM_IPSU_OTHER_ERR)))
			rxq->rxq_stats.csum_errors++;
		skb->ip_summed = CHECKSUM_NONE;
	}
}
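/*
 * rx_csum() semantics, for reference: a clear CSUM_ERR field means the
 * hardware verified the checksum, so the skb is marked
 * CHECKSUM_UNNECESSARY and the stack skips software verification.  Any
 * set bit falls back to CHECKSUM_NONE, but only genuine verification
 * failures bump csum_errors: the "hardware did not check" and "other
 * error" bits are masked out, presumably so the counter reflects real
 * on-the-wire corruption only.
 */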
/**
 * rx_alloc_skb - allocate skb and map it to dma address
 * @rxq: rx queue
 * @dma_addr: returned dma address for the skb
 *
 * Return skb
 **/
static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
				    dma_addr_t *dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct sk_buff *skb;
	dma_addr_t addr;
	int err;

	skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
	if (!skb) {
		netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n");
		return NULL;
	}

	addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
			      DMA_FROM_DEVICE);
	err = dma_mapping_error(&pdev->dev, addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
		goto err_rx_map;
	}

	*dma_addr = addr;
	return skb;

err_rx_map:
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * rx_unmap_skb - unmap the dma address of the skb
 * @rxq: rx queue
 * @dma_addr: dma address of the skb
 **/
static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;

	dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
			 DMA_FROM_DEVICE);
}

/**
 * rx_free_skb - unmap and free skb
 * @rxq: rx queue
 * @skb: skb to free
 * @dma_addr: dma address of the skb
 **/
static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
			dma_addr_t dma_addr)
{
	rx_unmap_skb(rxq, dma_addr);
	dev_kfree_skb_any(skb);
}
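/*
 * Buffer lifecycle: each receive buffer is a full skb whose data area
 * is mapped for the device with dma_map_single(DMA_FROM_DEVICE).  The
 * mapping must be released with rx_unmap_skb() before the CPU touches
 * skb->data, and skbs that were mapped but never handed to the stack
 * go through rx_free_skb() so each mapping is torn down exactly once.
 */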
/**
 * rx_alloc_pkts - allocate pkts in rx queue
 * @rxq: rx queue
 *
 * Return number of skbs allocated
 **/
static int rx_alloc_pkts(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq_wqe *rq_wqe;
	unsigned int free_wqebbs;
	struct hinic_sge sge;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u16 prod_idx;
	int i;

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);

	/* Limit the allocation chunks */
	if (free_wqebbs > nic_dev->rx_weight)
		free_wqebbs = nic_dev->rx_weight;

	for (i = 0; i < free_wqebbs; i++) {
		skb = rx_alloc_skb(rxq, &dma_addr);
		if (!skb) {
			netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
			goto skb_out;
		}

		hinic_set_sge(&sge, dma_addr, skb->len);

		rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
					  &prod_idx);
		if (!rq_wqe) {
			rx_free_skb(rxq, skb, dma_addr);
			goto skb_out;
		}

		hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);

		hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
	}

skb_out:
	if (i) {
		wmb();	/* write all the wqes before updating the PI */

		hinic_rq_update(rxq->rq, prod_idx);
	}

	return i;
}

/**
 * free_all_rx_skbs - free all skbs in rx queue
 * @rxq: rx queue
 **/
static void free_all_rx_skbs(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;
	struct hinic_hw_wqe *hw_wqe;
	struct hinic_sge sge;
	u16 ci;

	while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);

		hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);

		rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
	}
}
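/*
 * Publication ordering in rx_alloc_pkts(): all WQEs are written first,
 * then wmb() orders those stores ahead of the producer-index update in
 * hinic_rq_update(), so the device can never see a new PI that points
 * at a half-written descriptor.
 */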
/**
 * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
 * @rxq: rx queue
 * @head_skb: the first skb in the list
 * @left_pkt_len: remaining size of the pkt, excluding the head skb
 * @ci: consumer index
 *
 * Return number of wqes used for the rest of the pkt
 **/
static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
			     unsigned int left_pkt_len, u16 ci)
{
	struct sk_buff *skb, *curr_skb = head_skb;
	struct hinic_rq_wqe *rq_wqe;
	unsigned int curr_len;
	struct hinic_sge sge;
	int num_wqes = 0;

	while (left_pkt_len > 0) {
		rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
						&skb, &ci);

		num_wqes++;

		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		prefetch(skb->data);

		curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
			    left_pkt_len;

		left_pkt_len -= curr_len;

		__skb_put(skb, curr_len);

		if (curr_skb == head_skb)
			skb_shinfo(head_skb)->frag_list = skb;
		else
			curr_skb->next = skb;

		head_skb->len += skb->len;
		head_skb->data_len += skb->len;
		head_skb->truesize += skb->truesize;

		curr_skb = skb;
	}

	return num_wqes;
}

static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
			       struct sk_buff *skb)
{
	struct net_device *netdev = nic_dev->netdev;
	u8 *lb_buf = nic_dev->lb_test_rx_buf;
	int lb_len = nic_dev->lb_pkt_len;
	int pkt_offset, frag_len, i;
	void *frag_data = NULL;

	if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
		nic_dev->lb_test_rx_idx = 0;
		netif_warn(nic_dev, drv, netdev, "Loopback test warning, received too many test packets\n");
	}

	if (skb->len != nic_dev->lb_pkt_len) {
		netif_warn(nic_dev, drv, netdev, "Wrong packet length\n");
		nic_dev->lb_test_rx_idx++;
		return;
	}

	pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
	frag_len = (int)skb_headlen(skb);
	memcpy(lb_buf + pkt_offset, skb->data, frag_len);
	pkt_offset += frag_len;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
		frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
		memcpy(lb_buf + pkt_offset, frag_data, frag_len);
		pkt_offset += frag_len;
	}
	nic_dev->lb_test_rx_idx++;
}
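/*
 * Loopback-test layout, inferred from the indexing above:
 * lb_test_rx_buf acts as a flat array of LP_PKT_CNT slots of
 * lb_pkt_len bytes each, and every received test packet (linear head
 * plus page frags) is linearized into the slot picked by
 * lb_test_rx_idx, presumably so the self-test path can compare it
 * against the transmitted pattern.
 */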
/**
 * rxq_recv - Rx handler
 * @rxq: rx queue
 * @budget: maximum pkts to process
 *
 * Return number of pkts received
 **/
static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
	struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
	struct net_device *netdev = rxq->netdev;
	u64 pkt_len = 0, rx_bytes = 0;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_rq_wqe *rq_wqe;
	struct hinic_dev *nic_dev;
	unsigned int free_wqebbs;
	struct hinic_rq_cqe *cqe;
	int num_wqes, pkts = 0;
	struct hinic_sge sge;
	unsigned int status;
	struct sk_buff *skb;
	u32 offload_type;
	u16 ci, num_lro;
	u16 num_wqe = 0;
	u32 vlan_len;
	u16 vid;

	nic_dev = netdev_priv(netdev);

	while (pkts < budget) {
		num_wqes = 0;

		rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
					   &ci);
		if (!rq_wqe)
			break;

		/* make sure we read rx_done before packet length */
		dma_rmb();

		cqe = rq->cqe[ci];
		status = be32_to_cpu(cqe->status);
		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		rx_csum(rxq, status, skb);

		prefetch(skb->data);

		pkt_len = sge.len;

		if (pkt_len <= HINIC_RX_BUF_SZ) {
			__skb_put(skb, pkt_len);
		} else {
			__skb_put(skb, HINIC_RX_BUF_SZ);
			num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
						     HINIC_RX_BUF_SZ, ci);
		}

		hinic_rq_put_wqe(rq, ci,
				 (num_wqes + 1) * HINIC_RQ_WQE_SIZE);

		offload_type = be32_to_cpu(cqe->offload_type);
		vlan_len = be32_to_cpu(cqe->len);
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
			vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		}

		if (unlikely(nic_dev->flags & HINIC_LP_TEST))
			hinic_copy_lp_data(nic_dev, skb);

		skb_record_rx_queue(skb, qp->q_id);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		napi_gro_receive(&rxq->napi, skb);

		pkts++;
		rx_bytes += pkt_len;

		num_lro = HINIC_GET_RX_NUM_LRO(status);
		if (num_lro) {
			rx_bytes += ((num_lro - 1) *
				     LRO_PKT_HDR_LEN(cqe));

			num_wqe +=
			(u16)(pkt_len >> rxq->rx_buff_shift) +
			((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
		}

		cqe->status = 0;

		if (num_wqe >= LRO_REPLENISH_THLD)
			break;
	}

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
	if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
		rx_alloc_pkts(rxq);

	u64_stats_update_begin(&rxq->rxq_stats.syncp);
	rxq->rxq_stats.pkts += pkts;
	rxq->rxq_stats.bytes += rx_bytes;
	u64_stats_update_end(&rxq->rxq_stats.syncp);

	return pkts;
}
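/*
 * Per-completion flow in rxq_recv(), for orientation: read the WQE,
 * dma_rmb() so rx_done is observed before the CQE status and length,
 * unmap and fill the skb (chaining extra buffers for jumbo frames),
 * return the WQE(s) to the ring, then apply VLAN and checksum offload
 * results before handing the skb to GRO.  The LRO branch exits early
 * once roughly LRO_REPLENISH_THLD buffers have been consumed so the
 * ring gets refilled before it runs dry.
 */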
static int rx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq *rq = rxq->rq;
	int pkts;

	pkts = rxq_recv(rxq, budget);
	if (pkts >= budget)
		return budget;

	napi_complete(napi);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   rq->msix_entry,
					   HINIC_MSIX_ENABLE);

	return pkts;
}

static void rx_add_napi(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);

	netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
	napi_enable(&rxq->napi);
}

static void rx_del_napi(struct hinic_rxq *rxq)
{
	napi_disable(&rxq->napi);
	netif_napi_del(&rxq->napi);
}
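/*
 * Interrupt/NAPI handshake: rx_irq() below masks the queue's MSI-X
 * vector and schedules NAPI; rx_poll() above re-enables it only after
 * napi_complete(), so hardware cannot re-raise the interrupt while a
 * poll is in flight.  VFs skip the mask/unmask calls, presumably
 * because the MSI-X state registers are PF-owned on this device.
 */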
static irqreturn_t rx_irq(int irq, void *data)
{
	struct hinic_rxq *rxq = (struct hinic_rxq *)data;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_dev *nic_dev;

	/* Disable the interrupt until napi has completed */
	nic_dev = netdev_priv(rxq->netdev);
	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   rq->msix_entry,
					   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);

	napi_schedule(&rxq->napi);
	return IRQ_HANDLED;
}

static int rx_request_irq(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_msix_config interrupt_info = {0};
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_qp *qp;
	int err;

	qp = container_of(rq, struct hinic_qp, rq);

	rx_add_napi(rxq);

	hinic_hwdev_msix_set(hwdev, rq->msix_entry,
			     RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
			     RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
			     RX_IRQ_NO_RESEND_TIMER);

	intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];
	interrupt_info.msix_index = rq->msix_entry;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
	if (err) {
		netif_err(nic_dev, drv, rxq->netdev,
			  "Failed to set RX interrupt coalescing attribute\n");
		rx_del_napi(rxq);
		return err;
	}

	err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
	if (err) {
		rx_del_napi(rxq);
		return err;
	}

	cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
	return irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
}
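/*
 * IRQ affinity: qp->q_id % num_online_cpus() spreads the receive
 * queues round-robin over the CPUs online at setup time.  The hint is
 * cleared again in rx_free_irq() below before the IRQ is released,
 * mirroring the setup order.
 */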
static void rx_free_irq(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;

	irq_set_affinity_hint(rq->irq, NULL);
	free_irq(rq->irq, rxq);
	rx_del_napi(rxq);
}

/**
 * hinic_init_rxq - Initialize the Rx Queue
 * @rxq: Logical Rx Queue
 * @rq: Hardware Rx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
	int err, pkts;

	rxq->netdev = netdev;
	rxq->rq = rq;
	rxq->buf_len = HINIC_RX_BUF_SZ;
	rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);

	rxq_stats_init(rxq);

	rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
				       "hinic_rxq%d", qp->q_id);
	if (!rxq->irq_name)
		return -ENOMEM;

	pkts = rx_alloc_pkts(rxq);
	if (!pkts) {
		err = -ENOMEM;
		goto err_rx_pkts;
	}

	err = rx_request_irq(rxq);
	if (err) {
		netdev_err(netdev, "Failed to request Rx irq\n");
		goto err_req_rx_irq;
	}

	return 0;

err_req_rx_irq:
err_rx_pkts:
	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
	return err;
}

/**
 * hinic_clean_rxq - Clean the Rx Queue
 * @rxq: Logical Rx Queue
 **/
void hinic_clean_rxq(struct hinic_rxq *rxq)
{
	struct net_device *netdev = rxq->netdev;

	rx_free_irq(rxq);

	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
}