// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <uapi/linux/bpf.h>

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/mm.h>

#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include <net/mana/mana.h>
#include <net/mana/mana_auxiliary.h>

static DEFINE_IDA(mana_adev_ida);

static int mana_adev_idx_alloc(void)
{
	return ida_alloc(&mana_adev_ida, GFP_KERNEL);
}

static void mana_adev_idx_free(int idx)
{
	ida_free(&mana_adev_ida, idx);
}

/* Microsoft Azure Network Adapter (MANA) functions */

static int mana_open(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	err = mana_alloc_queues(ndev);
	if (err)
		return err;

	apc->port_is_up = true;

	/* Ensure port state updated before txq state */
	smp_wmb();

	netif_carrier_on(ndev);
	netif_tx_wake_all_queues(ndev);

	return 0;
}

static int mana_close(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	if (!apc->port_is_up)
		return 0;

	return mana_detach(ndev, true);
}

static bool mana_can_tx(struct gdma_queue *wq)
{
	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
}

static unsigned int mana_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip->protocol == IPPROTO_UDP)
			return IPPROTO_UDP;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip6->nexthdr == IPPROTO_UDP)
			return IPPROTO_UDP;
	}

	/* No csum offloading */
	return 0;
}

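/* DMA-map the skb head and all of its fragments, recording the mappings in
 * the mana_skb_head so they can be unmapped after TX completion, and fill
 * in the SGL entries of the work request as the mappings are created.
 */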
static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
			struct mana_tx_package *tp)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct gdma_context *gc;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t da;
	int i;

	gc = gd->gdma_context;
	dev = gc->dev;
	da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, da))
		return -ENOMEM;

	ash->dma_handle[0] = da;
	ash->size[0] = skb_headlen(skb);

	tp->wqe_req.sgl[0].address = ash->dma_handle[0];
	tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
	tp->wqe_req.sgl[0].size = ash->size[0];

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);

		if (dma_mapping_error(dev, da))
			goto frag_err;

		ash->dma_handle[i + 1] = da;
		ash->size[i + 1] = skb_frag_size(frag);

		tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
		tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
		tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
	}

	return 0;

frag_err:
	for (i = i - 1; i >= 0; i--)
		dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
			       DMA_TO_DEVICE);

	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);

	return -ENOMEM;
}

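/* Main transmit handler: build the TX OOB (short or long format), set up
 * GSO/checksum offload fields, DMA-map the skb, post the work request and
 * ring the doorbell. Returns NETDEV_TX_BUSY only if posting the WQE fails;
 * all other error paths drop the packet and return NETDEV_TX_OK.
 */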
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
	struct mana_port_context *apc = netdev_priv(ndev);
	u16 txq_idx = skb_get_queue_mapping(skb);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	bool ipv4 = false, ipv6 = false;
	struct mana_tx_package pkg = {};
	struct netdev_queue *net_txq;
	struct mana_stats_tx *tx_stats;
	struct gdma_queue *gdma_sq;
	unsigned int csum_type;
	struct mana_txq *txq;
	struct mana_cq *cq;
	int err, len;
	u16 ihs;

	if (unlikely(!apc->port_is_up))
		goto tx_drop;

	if (skb_cow_head(skb, MANA_HEADROOM))
		goto tx_drop_count;

	txq = &apc->tx_qp[txq_idx].txq;
	gdma_sq = txq->gdma_sq;
	cq = &apc->tx_qp[txq_idx].tx_cq;
	tx_stats = &txq->stats;

	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;

	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
		pkt_fmt = MANA_LONG_PKT_FMT;
	} else {
		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
	}

	if (skb_vlan_tag_present(skb)) {
		pkt_fmt = MANA_LONG_PKT_FMT;
		pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1;
		pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb);
		pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb);
		pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb);
	}

	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;

	if (pkt_fmt == MANA_SHORT_PKT_FMT) {
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->short_pkt_fmt++;
		u64_stats_update_end(&tx_stats->syncp);
	} else {
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->long_pkt_fmt++;
		u64_stats_update_end(&tx_stats->syncp);
	}

	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
	pkg.wqe_req.flags = 0;
	pkg.wqe_req.client_data_unit = 0;

	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);

	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
		pkg.wqe_req.sgl = pkg.sgl_array;
	} else {
		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
					    sizeof(struct gdma_sge),
					    GFP_ATOMIC);
		if (!pkg.sgl_ptr)
			goto tx_drop_count;

		pkg.wqe_req.sgl = pkg.sgl_ptr;
	}

	if (skb->protocol == htons(ETH_P_IP))
		ipv4 = true;
	else if (skb->protocol == htons(ETH_P_IPV6))
		ipv6 = true;

	if (skb_is_gso(skb)) {
		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
		if (ipv4) {
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0,
						   IPPROTO_TCP, 0);
		} else {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0,
						 IPPROTO_TCP, 0);
		}

		if (skb->encapsulation) {
			ihs = skb_inner_tcp_all_headers(skb);
			u64_stats_update_begin(&tx_stats->syncp);
			tx_stats->tso_inner_packets++;
			tx_stats->tso_inner_bytes += skb->len - ihs;
			u64_stats_update_end(&tx_stats->syncp);
		} else {
			if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
				ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
			} else {
				ihs = skb_tcp_all_headers(skb);
				if (ipv6_has_hopopt_jumbo(skb))
					ihs -= sizeof(struct hop_jumbo_hdr);
			}

			u64_stats_update_begin(&tx_stats->syncp);
			tx_stats->tso_packets++;
			tx_stats->tso_bytes += skb->len - ihs;
			u64_stats_update_end(&tx_stats->syncp);
		}

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		csum_type = mana_checksum_info(skb);

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->csum_partial++;
		u64_stats_update_end(&tx_stats->syncp);

		if (csum_type == IPPROTO_TCP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		} else if (csum_type == IPPROTO_UDP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_udp_csum = 1;
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto free_sgl_ptr;
		}
	}
	if (mana_map_skb(skb, apc, &pkg)) {
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->mana_map_err++;
		u64_stats_update_end(&tx_stats->syncp);
		goto free_sgl_ptr;
	}

	skb_queue_tail(&txq->pending_skbs, skb);

	len = skb->len;
	net_txq = netdev_get_tx_queue(ndev, txq_idx);

	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
					(struct gdma_posted_wqe_info *)skb->cb);
	if (!mana_can_tx(gdma_sq)) {
		netif_tx_stop_queue(net_txq);
		apc->eth_stats.stop_queue++;
	}

	if (err) {
		(void)skb_dequeue_tail(&txq->pending_skbs);
		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
		err = NETDEV_TX_BUSY;
		goto tx_busy;
	}

	err = NETDEV_TX_OK;
	atomic_inc(&txq->pending_sends);

	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);

	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
	skb = NULL;

	tx_stats = &txq->stats;
	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets++;
	tx_stats->bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

tx_busy:
	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	kfree(pkg.sgl_ptr);
	return err;

free_sgl_ptr:
	kfree(pkg.sgl_ptr);
tx_drop_count:
	ndev->stats.tx_dropped++;
tx_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

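/* Aggregate the per-queue RX/TX packet and byte counters into the standard
 * rtnl_link_stats64, using the u64_stats seqcount for consistent readings.
 */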
static void mana_get_stats64(struct net_device *ndev,
			     struct rtnl_link_stats64 *st)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	int q;

	if (!apc->port_is_up)
		return;

	netdev_stats_to_stats64(st, &ndev->stats);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		st->rx_packets += packets;
		st->rx_bytes += bytes;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		st->tx_packets += packets;
		st->tx_bytes += bytes;
	}
}

static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
			     int old_q)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 hash = skb_get_hash(skb);
	struct sock *sk = skb->sk;
	int txq;

	txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];

	if (txq != old_q && sk && sk_fullsock(sk) &&
	    rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, txq);

	return txq;
}

static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

	if (ndev->real_num_tx_queues == 1)
		return 0;

	txq = sk_tx_queue_get(skb->sk);

	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
		if (skb_rx_queue_recorded(skb))
			txq = skb_get_rx_queue(skb);
		else
			txq = mana_get_tx_queue(ndev, skb, txq);
	}

	return txq;
}
/* Release pre-allocated RX buffers */
static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
{
	struct device *dev;
	int i;

	dev = mpc->ac->gdma_dev->gdma_context->dev;

	if (!mpc->rxbufs_pre)
		goto out1;

	if (!mpc->das_pre)
		goto out2;

	while (mpc->rxbpre_total) {
		i = --mpc->rxbpre_total;
		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
	}

	kfree(mpc->das_pre);
	mpc->das_pre = NULL;

out2:
	kfree(mpc->rxbufs_pre);
	mpc->rxbufs_pre = NULL;

out1:
	mpc->rxbpre_datasize = 0;
	mpc->rxbpre_alloc_size = 0;
	mpc->rxbpre_headroom = 0;
}

/* Get a buffer from the pre-allocated RX buffers */
static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
{
	struct net_device *ndev = rxq->ndev;
	struct mana_port_context *mpc;
	void *va;

	mpc = netdev_priv(ndev);

	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
		netdev_err(ndev, "No RX pre-allocated bufs\n");
		return NULL;
	}

	/* Check sizes to catch unexpected coding error */
	if (mpc->rxbpre_datasize != rxq->datasize) {
		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
			   mpc->rxbpre_datasize, rxq->datasize);
		return NULL;
	}

	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
			   mpc->rxbpre_alloc_size, rxq->alloc_size);
		return NULL;
	}

	if (mpc->rxbpre_headroom != rxq->headroom) {
		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
			   mpc->rxbpre_headroom, rxq->headroom);
		return NULL;
	}

	mpc->rxbpre_total--;

	*da = mpc->das_pre[mpc->rxbpre_total];
	va = mpc->rxbufs_pre[mpc->rxbpre_total];
	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;

	/* Deallocate the array after all buffers are gone */
	if (!mpc->rxbpre_total)
		mana_pre_dealloc_rxbufs(mpc);

	return va;
}
/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
			       u32 *headroom)
{
	if (mtu > MANA_XDP_MTU_MAX)
		*headroom = 0; /* no support for XDP */
	else
		*headroom = XDP_PACKET_HEADROOM;

	*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;

	*datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
}

static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
{
	struct device *dev;
	struct page *page;
	dma_addr_t da;
	int num_rxb;
	void *va;
	int i;

	mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);

	dev = mpc->ac->gdma_dev->gdma_context->dev;

	num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;

	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
	if (!mpc->rxbufs_pre)
		goto error;

	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
	if (!mpc->das_pre)
		goto error;

	mpc->rxbpre_total = 0;

	for (i = 0; i < num_rxb; i++) {
		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
			if (!va)
				goto error;

			page = virt_to_head_page(va);
			/* Check if the frag falls back to single page */
			if (compound_order(page) <
			    get_order(mpc->rxbpre_alloc_size)) {
				put_page(page);
				goto error;
			}
		} else {
			page = dev_alloc_page();
			if (!page)
				goto error;

			va = page_to_virt(page);
		}

		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, da)) {
			put_page(virt_to_head_page(va));
			goto error;
		}

		mpc->rxbufs_pre[i] = va;
		mpc->das_pre[i] = da;
		mpc->rxbpre_total = i + 1;
	}

	return 0;

error:
	mana_pre_dealloc_rxbufs(mpc);
	return -ENOMEM;
}
static int mana_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	unsigned int old_mtu = ndev->mtu;
	int err;

	/* Pre-allocate buffers to prevent failure in mana_attach later */
	err = mana_pre_alloc_rxbufs(mpc, new_mtu);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new MTU\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	ndev->mtu = new_mtu;

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		ndev->mtu = old_mtu;
	}

out:
	mana_pre_dealloc_rxbufs(mpc);
	return err;
}

static const struct net_device_ops mana_devops = {
	.ndo_open		= mana_open,
	.ndo_stop		= mana_close,
	.ndo_select_queue	= mana_select_queue,
	.ndo_start_xmit		= mana_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats64	= mana_get_stats64,
	.ndo_bpf		= mana_bpf,
	.ndo_xdp_xmit		= mana_xdp_xmit,
	.ndo_change_mtu		= mana_change_mtu,
};

static void mana_cleanup_port_context(struct mana_port_context *apc)
{
	kfree(apc->rxqs);
	apc->rxqs = NULL;
}

static int mana_init_port_context(struct mana_port_context *apc)
{
	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
			    GFP_KERNEL);

	return !apc->rxqs ? -ENOMEM : 0;
}

static int mana_send_request(struct mana_context *ac, void *in_buf,
			     u32 in_len, void *out_buf, u32 out_len)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_resp_hdr *resp = out_buf;
	struct gdma_req_hdr *req = in_buf;
	struct device *dev = gc->dev;
	static atomic_t activity_id;
	int err;

	req->dev_id = gc->mana.dev_id;
	req->activity_id = atomic_inc_return(&activity_id);

	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
				   out_buf);
	if (err || resp->status) {
		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
			err, resp->status);
		return err ? err : -EPROTO;
	}

	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
	    req->activity_id != resp->activity_id) {
		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
			req->dev_id.as_uint32, resp->dev_id.as_uint32,
			req->activity_id, resp->activity_id);
		return -EPROTO;
	}

	return 0;
}

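/* Sanity-check a response header: the message type, version and size must
 * match what the caller expects before any response fields are trusted.
 */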
static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
				const enum mana_command_code expected_code,
				const u32 min_size)
{
	if (resp_hdr->response.msg_type != expected_code)
		return -EPROTO;

	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
		return -EPROTO;

	if (resp_hdr->response.msg_size < min_size)
		return -EPROTO;

	return 0;
}

static int mana_pf_register_hw_vport(struct mana_port_context *apc)
{
	struct mana_register_hw_vport_resp resp = {};
	struct mana_register_hw_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
			     sizeof(req), sizeof(resp));
	req.attached_gfid = 1;
	req.is_pf_default_vport = 1;
	req.allow_all_ether_types = 1;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	apc->port_handle = resp.hw_vport_handle;
	return 0;
}
static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
{
	struct mana_deregister_hw_vport_resp resp = {};
	struct mana_deregister_hw_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
			     sizeof(req), sizeof(resp));
	req.hw_vport_handle = apc->port_handle;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
			   err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(apc->ndev,
			   "Failed to deregister hw vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
}

static int mana_pf_register_filter(struct mana_port_context *apc)
{
	struct mana_register_filter_resp resp = {};
	struct mana_register_filter_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
			   err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	apc->pf_filter_handle = resp.filter_handle;
	return 0;
}
static void mana_pf_deregister_filter(struct mana_port_context *apc)
{
	struct mana_deregister_filter_resp resp = {};
	struct mana_deregister_filter_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
			     sizeof(req), sizeof(resp));
	req.filter_handle = apc->pf_filter_handle;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
			   err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(apc->ndev,
			   "Failed to deregister filter: %d, 0x%x\n",
			   err, resp.hdr.status);
}

static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
				 u32 proto_minor_ver, u32 proto_micro_ver,
				 u16 *max_num_vports)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_query_device_cfg_resp resp = {};
	struct mana_query_device_cfg_req req = {};
	struct device *dev = gc->dev;
	int err = 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
			     sizeof(req), sizeof(resp));

	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;

	req.proto_major_ver = proto_major_ver;
	req.proto_minor_ver = proto_minor_ver;
	req.proto_micro_ver = proto_micro_ver;

	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
	if (err) {
		dev_err(dev, "Failed to query config: %d", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	*max_num_vports = resp.max_num_vports;

	if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
		gc->adapter_mtu = resp.adapter_mtu;
	else
		gc->adapter_mtu = ETH_FRAME_LEN;

	return 0;
}

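/* Query the per-vPort configuration: the SQ/RQ limits and indirection table
 * size, plus the vPort handle and MAC address reported by the device.
 */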
static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
{
	struct mana_query_vport_cfg_resp resp = {};
	struct mana_query_vport_cfg_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
			     sizeof(req), sizeof(resp));

	req.vport_index = vport_index;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err)
		return err;

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
				   sizeof(resp));
	if (err)
		return err;

	if (resp.hdr.status)
		return -EPROTO;

	*max_sq = resp.max_num_sq;
	*max_rq = resp.max_num_rq;
	*num_indir_entry = resp.num_indirection_ent;

	apc->port_handle = resp.vport;
	ether_addr_copy(apc->mac_addr, resp.mac_addr);

	return 0;
}

void mana_uncfg_vport(struct mana_port_context *apc)
{
	mutex_lock(&apc->vport_mutex);
	apc->vport_use_count--;
	WARN_ON(apc->vport_use_count < 0);
	mutex_unlock(&apc->vport_mutex);
}
EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id)
{
	struct mana_config_vport_resp resp = {};
	struct mana_config_vport_req req = {};
	int err;

	/* This function is used to program the Ethernet port in the hardware
	 * table. It can be called from the Ethernet driver or the RDMA driver.
	 *
	 * For Ethernet usage, the hardware supports only one active user on a
	 * physical port. The driver checks on the port usage before programming
	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
	 * device to kernel NET layer (Ethernet driver).
	 *
	 * Because the RDMA driver doesn't know in advance which QP type the
	 * user will create, it exposes the device with all its ports. The user
	 * may not be able to create RAW QP on a port if this port is already
	 * in use by the Ethernet driver from the kernel.
	 *
	 * This physical port limitation only applies to the RAW QP. For RC QP,
	 * the hardware doesn't have this limitation. The user can create RC
	 * QPs on a physical port up to the hardware limits independent of the
	 * Ethernet usage on the same port.
	 */
	mutex_lock(&apc->vport_mutex);
	if (apc->vport_use_count > 0) {
		mutex_unlock(&apc->vport_mutex);
		return -EBUSY;
	}
	apc->vport_use_count++;
	mutex_unlock(&apc->vport_mutex);

	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	req.pdid = protection_dom_id;
	req.doorbell_pageid = doorbell_pg_id;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		goto out;
	}

	apc->tx_shortform_allowed = resp.short_form_allowed;
	apc->tx_vp_offset = resp.tx_vport_offset;

	netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
		    apc->port_handle, protection_dom_id, doorbell_pg_id);
out:
	if (err)
		mana_uncfg_vport(apc);

	return err;
}
EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);

static int mana_cfg_vport_steering(struct mana_port_context *apc,
				   enum TRI_STATE rx,
				   bool update_default_rxobj, bool update_key,
				   bool update_tab)
{
	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	struct net_device *ndev = apc->ndev;
	mana_handle_t *req_indir_tab;
	u32 req_buf_size;
	int err;

	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = apc->port_handle;
	req->num_indir_entries = num_entries;
	req->indir_tab_offset = sizeof(*req);
	req->rx_enable = rx;
	req->rss_enable = apc->rss_state;
	req->update_default_rxobj = update_default_rxobj;
	req->update_hashkey = update_key;
	req->update_indir_tab = update_tab;
	req->default_rxobj = apc->default_rxobj;
	req->cqe_coalescing_enable = 0;

	if (update_key)
		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);

	if (update_tab) {
		req_indir_tab = (mana_handle_t *)(req + 1);
		memcpy(req_indir_tab, apc->rxobj_table,
		       req->num_indir_entries * sizeof(mana_handle_t));
	}

	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
				   sizeof(resp));
	if (err) {
		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
	}

	netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
		    apc->port_handle, num_entries);
out:
	kfree(req);
	return err;
}

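/* Create a WQ/CQ pair on the given vPort. This is exported (in the NET_MANA
 * namespace) so the RDMA driver can create queues on the same device.
 */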
int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj)
{
	struct mana_create_wqobj_resp resp = {};
	struct mana_create_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.vport = vport;
	req.wq_type = wq_type;
	req.wq_gdma_region = wq_spec->gdma_region;
	req.cq_gdma_region = cq_spec->gdma_region;
	req.wq_size = wq_spec->queue_size;
	req.cq_size = cq_spec->queue_size;
	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
	req.cq_parent_qid = cq_spec->attached_eq;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
		if (!err)
			err = -EPROTO;
		goto out;
	}

	if (resp.wq_obj == INVALID_MANA_HANDLE) {
		netdev_err(ndev, "Got an invalid WQ object handle\n");
		err = -EPROTO;
		goto out;
	}

	*wq_obj = resp.wq_obj;
	wq_spec->queue_index = resp.wq_id;
	cq_spec->queue_index = resp.cq_id;

	return 0;
out:
	return err;
}
EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj)
{
	struct mana_destroy_wqobj_resp resp = {};
	struct mana_destroy_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.wq_type = wq_type;
	req.wq_obj_handle = wq_obj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
}
EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);

static void mana_destroy_eq(struct mana_context *ac)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_queue *eq;
	int i;

	if (!ac->eqs)
		return;

	for (i = 0; i < gc->max_num_queues; i++) {
		eq = ac->eqs[i].eq;
		if (!eq)
			continue;

		mana_gd_destroy_queue(gc, eq);
	}

	kfree(ac->eqs);
	ac->eqs = NULL;
}

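/* Create the adapter-level event queues, one per hardware queue; they live
 * in the mana_context and are shared by the ports of this adapter.
 */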
static int mana_create_eq(struct mana_context *ac)
{
	struct gdma_dev *gd = ac->gdma_dev;
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_queue_spec spec = {};
	int err;
	int i;

	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
			  GFP_KERNEL);
	if (!ac->eqs)
		return -ENOMEM;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = NULL;
	spec.eq.context = ac->eqs;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;

	for (i = 0; i < gc->max_num_queues; i++) {
		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
		if (err)
			goto out;
	}

	return 0;
out:
	mana_destroy_eq(ac);
	return err;
}

static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
{
	struct mana_fence_rq_resp resp = {};
	struct mana_fence_rq_req req = {};
	int err;

	init_completion(&rxq->fence_event);

	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
			     sizeof(req), sizeof(resp));
	req.wq_obj_handle = rxq->rxobj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
			   rxq->rxq_idx, err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
			   rxq->rxq_idx, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
			   rxq->rxq_idx);
		return -ETIMEDOUT;
	}

	return 0;
}

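/* Fence every RX queue of the port; if fencing one fails, sleep briefly as
 * a fallback to let outstanding work drain before continuing.
 */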
12326cc74443SDexuan Cui err = mana_fence_rq(apc, rxq); 12336cc74443SDexuan Cui 12346cc74443SDexuan Cui /* If the fence request failed or timed out, fall back to a short sleep before fencing the next RQ. */ 12356cc74443SDexuan Cui if (err) 12366cc74443SDexuan Cui msleep(100); 12376cc74443SDexuan Cui } 12386cc74443SDexuan Cui } 12396cc74443SDexuan Cui 1240ca9c54d2SDexuan Cui static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units) 1241ca9c54d2SDexuan Cui { 1242ca9c54d2SDexuan Cui u32 used_space_old; 1243ca9c54d2SDexuan Cui u32 used_space_new; 1244ca9c54d2SDexuan Cui 1245ca9c54d2SDexuan Cui used_space_old = wq->head - wq->tail; 1246ca9c54d2SDexuan Cui used_space_new = wq->head - (wq->tail + num_units); 1247ca9c54d2SDexuan Cui 1248ca9c54d2SDexuan Cui if (WARN_ON_ONCE(used_space_new > used_space_old)) 1249ca9c54d2SDexuan Cui return -ERANGE; 1250ca9c54d2SDexuan Cui 1251ca9c54d2SDexuan Cui wq->tail += num_units; 1252ca9c54d2SDexuan Cui return 0; 1253ca9c54d2SDexuan Cui } 1254ca9c54d2SDexuan Cui 1255ca9c54d2SDexuan Cui static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) 1256ca9c54d2SDexuan Cui { 1257ca9c54d2SDexuan Cui struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; 1258ca9c54d2SDexuan Cui struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; 1259ca9c54d2SDexuan Cui struct device *dev = gc->dev; 1260ca9c54d2SDexuan Cui int i; 1261ca9c54d2SDexuan Cui 1262ca9c54d2SDexuan Cui dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); 1263ca9c54d2SDexuan Cui 1264ca9c54d2SDexuan Cui for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) 1265ca9c54d2SDexuan Cui dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], 1266ca9c54d2SDexuan Cui DMA_TO_DEVICE); 1267ca9c54d2SDexuan Cui } 1268ca9c54d2SDexuan Cui 1269ca9c54d2SDexuan Cui static void mana_poll_tx_cq(struct mana_cq *cq) 1270ca9c54d2SDexuan Cui { 1271ca9c54d2SDexuan Cui struct gdma_comp *completions = cq->gdma_comp_buf; 1272ca9c54d2SDexuan Cui struct gdma_posted_wqe_info *wqe_info; 1273ca9c54d2SDexuan Cui unsigned int pkt_transmitted = 0; 1274ca9c54d2SDexuan Cui unsigned int wqe_unit_cnt = 0; 1275ca9c54d2SDexuan Cui struct mana_txq *txq = cq->txq; 1276ca9c54d2SDexuan Cui struct mana_port_context *apc; 1277ca9c54d2SDexuan Cui struct netdev_queue *net_txq; 1278ca9c54d2SDexuan Cui struct gdma_queue *gdma_wq; 1279ca9c54d2SDexuan Cui unsigned int avail_space; 1280ca9c54d2SDexuan Cui struct net_device *ndev; 1281ca9c54d2SDexuan Cui struct sk_buff *skb; 1282ca9c54d2SDexuan Cui bool txq_stopped; 1283ca9c54d2SDexuan Cui int comp_read; 1284ca9c54d2SDexuan Cui int i; 1285ca9c54d2SDexuan Cui 1286ca9c54d2SDexuan Cui ndev = txq->ndev; 1287ca9c54d2SDexuan Cui apc = netdev_priv(ndev); 1288ca9c54d2SDexuan Cui 1289ca9c54d2SDexuan Cui comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, 1290ca9c54d2SDexuan Cui CQE_POLLING_BUFFER); 1291ca9c54d2SDexuan Cui 1292e1b5683fSHaiyang Zhang if (comp_read < 1) 1293e1b5683fSHaiyang Zhang return; 1294e1b5683fSHaiyang Zhang 1295ca9c54d2SDexuan Cui for (i = 0; i < comp_read; i++) { 1296ca9c54d2SDexuan Cui struct mana_tx_comp_oob *cqe_oob; 1297ca9c54d2SDexuan Cui 1298ca9c54d2SDexuan Cui if (WARN_ON_ONCE(!completions[i].is_sq)) 1299ca9c54d2SDexuan Cui return; 1300ca9c54d2SDexuan Cui 1301ca9c54d2SDexuan Cui cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data; 1302ca9c54d2SDexuan Cui if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type != 1303ca9c54d2SDexuan Cui MANA_CQE_COMPLETION)) 1304ca9c54d2SDexuan Cui return; 1305ca9c54d2SDexuan Cui 1306ca9c54d2SDexuan Cui switch (cqe_oob->cqe_hdr.cqe_type) { 1307ca9c54d2SDexuan Cui
case CQE_TX_OKAY: 1308ca9c54d2SDexuan Cui break; 1309ca9c54d2SDexuan Cui 1310ca9c54d2SDexuan Cui case CQE_TX_SA_DROP: 1311ca9c54d2SDexuan Cui case CQE_TX_MTU_DROP: 1312ca9c54d2SDexuan Cui case CQE_TX_INVALID_OOB: 1313ca9c54d2SDexuan Cui case CQE_TX_INVALID_ETH_TYPE: 1314ca9c54d2SDexuan Cui case CQE_TX_HDR_PROCESSING_ERROR: 1315ca9c54d2SDexuan Cui case CQE_TX_VF_DISABLED: 1316ca9c54d2SDexuan Cui case CQE_TX_VPORT_IDX_OUT_OF_RANGE: 1317ca9c54d2SDexuan Cui case CQE_TX_VPORT_DISABLED: 1318ca9c54d2SDexuan Cui case CQE_TX_VLAN_TAGGING_VIOLATION: 1319ca9c54d2SDexuan Cui WARN_ONCE(1, "TX: CQE error %d: ignored.\n", 1320ca9c54d2SDexuan Cui cqe_oob->cqe_hdr.cqe_type); 1321bd7fc6e1SShradha Gupta apc->eth_stats.tx_cqe_err++; 1322ca9c54d2SDexuan Cui break; 1323ca9c54d2SDexuan Cui 1324ca9c54d2SDexuan Cui default: 1325ca9c54d2SDexuan Cui /* If the CQE type is unexpected, log an error, assert, 1326ca9c54d2SDexuan Cui * and go through the error path. 1327ca9c54d2SDexuan Cui */ 1328ca9c54d2SDexuan Cui WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n", 1329ca9c54d2SDexuan Cui cqe_oob->cqe_hdr.cqe_type); 1330bd7fc6e1SShradha Gupta apc->eth_stats.tx_cqe_unknown_type++; 1331ca9c54d2SDexuan Cui return; 1332ca9c54d2SDexuan Cui } 1333ca9c54d2SDexuan Cui 1334ca9c54d2SDexuan Cui if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) 1335ca9c54d2SDexuan Cui return; 1336ca9c54d2SDexuan Cui 1337ca9c54d2SDexuan Cui skb = skb_dequeue(&txq->pending_skbs); 1338ca9c54d2SDexuan Cui if (WARN_ON_ONCE(!skb)) 1339ca9c54d2SDexuan Cui return; 1340ca9c54d2SDexuan Cui 1341ca9c54d2SDexuan Cui wqe_info = (struct gdma_posted_wqe_info *)skb->cb; 1342ca9c54d2SDexuan Cui wqe_unit_cnt += wqe_info->wqe_size_in_bu; 1343ca9c54d2SDexuan Cui 1344ca9c54d2SDexuan Cui mana_unmap_skb(skb, apc); 1345ca9c54d2SDexuan Cui 1346e1b5683fSHaiyang Zhang napi_consume_skb(skb, cq->budget); 1347ca9c54d2SDexuan Cui 1348ca9c54d2SDexuan Cui pkt_transmitted++; 1349ca9c54d2SDexuan Cui } 1350ca9c54d2SDexuan Cui 1351ca9c54d2SDexuan Cui if (WARN_ON_ONCE(wqe_unit_cnt == 0)) 1352ca9c54d2SDexuan Cui return; 1353ca9c54d2SDexuan Cui 1354ca9c54d2SDexuan Cui mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt); 1355ca9c54d2SDexuan Cui 1356ca9c54d2SDexuan Cui gdma_wq = txq->gdma_sq; 1357ca9c54d2SDexuan Cui avail_space = mana_gd_wq_avail_space(gdma_wq); 1358ca9c54d2SDexuan Cui 1359ca9c54d2SDexuan Cui /* Ensure tail updated before checking q stop */ 1360ca9c54d2SDexuan Cui smp_mb(); 1361ca9c54d2SDexuan Cui 1362ca9c54d2SDexuan Cui net_txq = txq->net_txq; 1363ca9c54d2SDexuan Cui txq_stopped = netif_tx_queue_stopped(net_txq); 1364ca9c54d2SDexuan Cui 1365ca9c54d2SDexuan Cui /* Ensure checking txq_stopped before apc->port_is_up. 
*/ 1366ca9c54d2SDexuan Cui smp_rmb(); 1367ca9c54d2SDexuan Cui 1368ca9c54d2SDexuan Cui if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { 1369ca9c54d2SDexuan Cui netif_tx_wake_queue(net_txq); 1370ca9c54d2SDexuan Cui apc->eth_stats.wake_queue++; 1371ca9c54d2SDexuan Cui } 1372ca9c54d2SDexuan Cui 1373ca9c54d2SDexuan Cui if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) 1374ca9c54d2SDexuan Cui WARN_ON_ONCE(1); 1375e1b5683fSHaiyang Zhang 1376e1b5683fSHaiyang Zhang cq->work_done = pkt_transmitted; 1377ca9c54d2SDexuan Cui } 1378ca9c54d2SDexuan Cui 1379ca9c54d2SDexuan Cui static void mana_post_pkt_rxq(struct mana_rxq *rxq) 1380ca9c54d2SDexuan Cui { 1381ca9c54d2SDexuan Cui struct mana_recv_buf_oob *recv_buf_oob; 1382ca9c54d2SDexuan Cui u32 curr_index; 1383ca9c54d2SDexuan Cui int err; 1384ca9c54d2SDexuan Cui 1385ca9c54d2SDexuan Cui curr_index = rxq->buf_index++; 1386ca9c54d2SDexuan Cui if (rxq->buf_index == rxq->num_rx_buf) 1387ca9c54d2SDexuan Cui rxq->buf_index = 0; 1388ca9c54d2SDexuan Cui 1389ca9c54d2SDexuan Cui recv_buf_oob = &rxq->rx_oobs[curr_index]; 1390ca9c54d2SDexuan Cui 1391da4e8648SLong Li err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req, 1392ca9c54d2SDexuan Cui &recv_buf_oob->wqe_inf); 1393ca9c54d2SDexuan Cui if (WARN_ON_ONCE(err)) 1394ca9c54d2SDexuan Cui return; 1395ca9c54d2SDexuan Cui 1396ca9c54d2SDexuan Cui WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1); 1397ca9c54d2SDexuan Cui } 1398ca9c54d2SDexuan Cui 13992fbbd712SHaiyang Zhang static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va, 14002fbbd712SHaiyang Zhang uint pkt_len, struct xdp_buff *xdp) 1401ed5356b5SHaiyang Zhang { 14022fbbd712SHaiyang Zhang struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size); 1403ed5356b5SHaiyang Zhang 1404ed5356b5SHaiyang Zhang if (!skb) 1405ed5356b5SHaiyang Zhang return NULL; 1406ed5356b5SHaiyang Zhang 1407ed5356b5SHaiyang Zhang if (xdp->data_hard_start) { 1408ed5356b5SHaiyang Zhang skb_reserve(skb, xdp->data - xdp->data_hard_start); 1409ed5356b5SHaiyang Zhang skb_put(skb, xdp->data_end - xdp->data); 14102fbbd712SHaiyang Zhang return skb; 1411ed5356b5SHaiyang Zhang } 1412ed5356b5SHaiyang Zhang 14132fbbd712SHaiyang Zhang skb_reserve(skb, rxq->headroom); 14142fbbd712SHaiyang Zhang skb_put(skb, pkt_len); 14152fbbd712SHaiyang Zhang 1416ed5356b5SHaiyang Zhang return skb; 1417ed5356b5SHaiyang Zhang } 1418ed5356b5SHaiyang Zhang 1419b1d13f7aSHaiyang Zhang static void mana_rx_skb(void *buf_va, bool from_pool, 1420b1d13f7aSHaiyang Zhang struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq) 1421ca9c54d2SDexuan Cui { 1422f90f8420SHaiyang Zhang struct mana_stats_rx *rx_stats = &rxq->stats; 1423ca9c54d2SDexuan Cui struct net_device *ndev = rxq->ndev; 1424ca9c54d2SDexuan Cui uint pkt_len = cqe->ppi[0].pkt_len; 1425ca9c54d2SDexuan Cui u16 rxq_idx = rxq->rxq_idx; 1426ca9c54d2SDexuan Cui struct napi_struct *napi; 1427ed5356b5SHaiyang Zhang struct xdp_buff xdp = {}; 1428ca9c54d2SDexuan Cui struct sk_buff *skb; 1429ca9c54d2SDexuan Cui u32 hash_value; 1430ed5356b5SHaiyang Zhang u32 act; 1431ca9c54d2SDexuan Cui 1432e1b5683fSHaiyang Zhang rxq->rx_cq.work_done++; 1433e1b5683fSHaiyang Zhang napi = &rxq->rx_cq.napi; 1434ca9c54d2SDexuan Cui 1435ca9c54d2SDexuan Cui if (!buf_va) { 1436ca9c54d2SDexuan Cui ++ndev->stats.rx_dropped; 1437ca9c54d2SDexuan Cui return; 1438ca9c54d2SDexuan Cui } 1439ca9c54d2SDexuan Cui 1440ed5356b5SHaiyang Zhang act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len); 1441ca9c54d2SDexuan Cui 14427a8938cdSHaiyang Zhang if (act 
== XDP_REDIRECT && !rxq->xdp_rc) 14437a8938cdSHaiyang Zhang return; 14447a8938cdSHaiyang Zhang 1445ed5356b5SHaiyang Zhang if (act != XDP_PASS && act != XDP_TX) 1446f90f8420SHaiyang Zhang goto drop_xdp; 1447ca9c54d2SDexuan Cui 14482fbbd712SHaiyang Zhang skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp); 1449ed5356b5SHaiyang Zhang 1450ed5356b5SHaiyang Zhang if (!skb) 1451ed5356b5SHaiyang Zhang goto drop; 1452ed5356b5SHaiyang Zhang 1453b1d13f7aSHaiyang Zhang if (from_pool) 1454b1d13f7aSHaiyang Zhang skb_mark_for_recycle(skb); 1455b1d13f7aSHaiyang Zhang 1456ca9c54d2SDexuan Cui skb->dev = napi->dev; 1457ca9c54d2SDexuan Cui 1458ca9c54d2SDexuan Cui skb->protocol = eth_type_trans(skb, ndev); 1459ca9c54d2SDexuan Cui skb_checksum_none_assert(skb); 1460ca9c54d2SDexuan Cui skb_record_rx_queue(skb, rxq_idx); 1461ca9c54d2SDexuan Cui 1462ca9c54d2SDexuan Cui if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) { 1463ca9c54d2SDexuan Cui if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) 1464ca9c54d2SDexuan Cui skb->ip_summed = CHECKSUM_UNNECESSARY; 1465ca9c54d2SDexuan Cui } 1466ca9c54d2SDexuan Cui 1467ca9c54d2SDexuan Cui if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) { 1468ca9c54d2SDexuan Cui hash_value = cqe->ppi[0].pkt_hash; 1469ca9c54d2SDexuan Cui 1470ca9c54d2SDexuan Cui if (cqe->rx_hashtype & MANA_HASH_L4) 1471ca9c54d2SDexuan Cui skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4); 1472ca9c54d2SDexuan Cui else 1473ca9c54d2SDexuan Cui skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3); 1474ca9c54d2SDexuan Cui } 1475ca9c54d2SDexuan Cui 1476b803d1fdSHaiyang Zhang if (cqe->rx_vlantag_present) { 1477b803d1fdSHaiyang Zhang u16 vlan_tci = cqe->rx_vlan_id; 1478b803d1fdSHaiyang Zhang 1479b803d1fdSHaiyang Zhang __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); 1480b803d1fdSHaiyang Zhang } 1481b803d1fdSHaiyang Zhang 1482d356abb9SHaiyang Zhang u64_stats_update_begin(&rx_stats->syncp); 1483d356abb9SHaiyang Zhang rx_stats->packets++; 1484d356abb9SHaiyang Zhang rx_stats->bytes += pkt_len; 1485d356abb9SHaiyang Zhang 1486d356abb9SHaiyang Zhang if (act == XDP_TX) 1487d356abb9SHaiyang Zhang rx_stats->xdp_tx++; 1488d356abb9SHaiyang Zhang u64_stats_update_end(&rx_stats->syncp); 1489d356abb9SHaiyang Zhang 1490ed5356b5SHaiyang Zhang if (act == XDP_TX) { 1491ed5356b5SHaiyang Zhang skb_set_queue_mapping(skb, rxq_idx); 1492ed5356b5SHaiyang Zhang mana_xdp_tx(skb, ndev); 1493ed5356b5SHaiyang Zhang return; 1494ed5356b5SHaiyang Zhang } 1495ed5356b5SHaiyang Zhang 1496ca9c54d2SDexuan Cui napi_gro_receive(napi, skb); 1497ca9c54d2SDexuan Cui 1498ed5356b5SHaiyang Zhang return; 1499ed5356b5SHaiyang Zhang 1500f90f8420SHaiyang Zhang drop_xdp: 1501f90f8420SHaiyang Zhang u64_stats_update_begin(&rx_stats->syncp); 1502f90f8420SHaiyang Zhang rx_stats->xdp_drop++; 1503f90f8420SHaiyang Zhang u64_stats_update_end(&rx_stats->syncp); 1504f90f8420SHaiyang Zhang 1505ed5356b5SHaiyang Zhang drop: 1506b1d13f7aSHaiyang Zhang if (from_pool) { 1507b1d13f7aSHaiyang Zhang page_pool_recycle_direct(rxq->page_pool, 1508b1d13f7aSHaiyang Zhang virt_to_head_page(buf_va)); 1509b1d13f7aSHaiyang Zhang } else { 1510a2917b23SHaiyang Zhang WARN_ON_ONCE(rxq->xdp_save_va); 1511a2917b23SHaiyang Zhang /* Save for reuse */ 1512a2917b23SHaiyang Zhang rxq->xdp_save_va = buf_va; 1513b1d13f7aSHaiyang Zhang } 1514a6bf5703SHaiyang Zhang 1515ed5356b5SHaiyang Zhang ++ndev->stats.rx_dropped; 1516f90f8420SHaiyang Zhang 1517ed5356b5SHaiyang Zhang return; 1518ca9c54d2SDexuan Cui } 1519ca9c54d2SDexuan Cui 1520a2917b23SHaiyang Zhang static void 
*mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, 1521b1d13f7aSHaiyang Zhang dma_addr_t *da, bool *from_pool, bool is_napi) 1522a2917b23SHaiyang Zhang { 1523a2917b23SHaiyang Zhang struct page *page; 1524a2917b23SHaiyang Zhang void *va; 1525a2917b23SHaiyang Zhang 1526b1d13f7aSHaiyang Zhang *from_pool = false; 1527b1d13f7aSHaiyang Zhang 1528a2917b23SHaiyang Zhang /* Reuse XDP dropped page if available */ 1529a2917b23SHaiyang Zhang if (rxq->xdp_save_va) { 1530a2917b23SHaiyang Zhang va = rxq->xdp_save_va; 1531a2917b23SHaiyang Zhang rxq->xdp_save_va = NULL; 15322fbbd712SHaiyang Zhang } else if (rxq->alloc_size > PAGE_SIZE) { 15332fbbd712SHaiyang Zhang if (is_napi) 15342fbbd712SHaiyang Zhang va = napi_alloc_frag(rxq->alloc_size); 15352fbbd712SHaiyang Zhang else 15362fbbd712SHaiyang Zhang va = netdev_alloc_frag(rxq->alloc_size); 15372fbbd712SHaiyang Zhang 15382fbbd712SHaiyang Zhang if (!va) 15392fbbd712SHaiyang Zhang return NULL; 1540df18f2daSHaiyang Zhang 1541df18f2daSHaiyang Zhang page = virt_to_head_page(va); 1542df18f2daSHaiyang Zhang /* Check if the frag falls back to single page */ 1543df18f2daSHaiyang Zhang if (compound_order(page) < get_order(rxq->alloc_size)) { 1544df18f2daSHaiyang Zhang put_page(page); 1545df18f2daSHaiyang Zhang return NULL; 1546df18f2daSHaiyang Zhang } 1547a2917b23SHaiyang Zhang } else { 1548b1d13f7aSHaiyang Zhang page = page_pool_dev_alloc_pages(rxq->page_pool); 1549a2917b23SHaiyang Zhang if (!page) 1550a2917b23SHaiyang Zhang return NULL; 1551a2917b23SHaiyang Zhang 1552b1d13f7aSHaiyang Zhang *from_pool = true; 1553a2917b23SHaiyang Zhang va = page_to_virt(page); 1554a2917b23SHaiyang Zhang } 1555a2917b23SHaiyang Zhang 15562fbbd712SHaiyang Zhang *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize, 1557a2917b23SHaiyang Zhang DMA_FROM_DEVICE); 1558a2917b23SHaiyang Zhang if (dma_mapping_error(dev, *da)) { 1559b1d13f7aSHaiyang Zhang if (*from_pool) 1560b1d13f7aSHaiyang Zhang page_pool_put_full_page(rxq->page_pool, page, false); 1561b1d13f7aSHaiyang Zhang else 1562a2917b23SHaiyang Zhang put_page(virt_to_head_page(va)); 1563b1d13f7aSHaiyang Zhang 1564a2917b23SHaiyang Zhang return NULL; 1565a2917b23SHaiyang Zhang } 1566a2917b23SHaiyang Zhang 1567a2917b23SHaiyang Zhang return va; 1568a2917b23SHaiyang Zhang } 1569a2917b23SHaiyang Zhang 1570a2917b23SHaiyang Zhang /* Allocate frag for rx buffer, and save the old buf */ 15715c74064fSHaiyang Zhang static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq, 1572b1d13f7aSHaiyang Zhang struct mana_recv_buf_oob *rxoob, void **old_buf, 1573b1d13f7aSHaiyang Zhang bool *old_fp) 1574a2917b23SHaiyang Zhang { 1575b1d13f7aSHaiyang Zhang bool from_pool; 1576a2917b23SHaiyang Zhang dma_addr_t da; 1577a2917b23SHaiyang Zhang void *va; 1578a2917b23SHaiyang Zhang 1579b1d13f7aSHaiyang Zhang va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true); 1580a2917b23SHaiyang Zhang if (!va) 1581a2917b23SHaiyang Zhang return; 1582a2917b23SHaiyang Zhang 1583a2917b23SHaiyang Zhang dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize, 1584a2917b23SHaiyang Zhang DMA_FROM_DEVICE); 1585a2917b23SHaiyang Zhang *old_buf = rxoob->buf_va; 1586b1d13f7aSHaiyang Zhang *old_fp = rxoob->from_pool; 1587a2917b23SHaiyang Zhang 1588a2917b23SHaiyang Zhang rxoob->buf_va = va; 1589a2917b23SHaiyang Zhang rxoob->sgl[0].address = da; 1590b1d13f7aSHaiyang Zhang rxoob->from_pool = from_pool; 1591a2917b23SHaiyang Zhang } 1592a2917b23SHaiyang Zhang 1593ca9c54d2SDexuan Cui static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, 
1594ca9c54d2SDexuan Cui struct gdma_comp *cqe) 1595ca9c54d2SDexuan Cui { 1596ca9c54d2SDexuan Cui struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data; 1597ca9c54d2SDexuan Cui struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; 1598ca9c54d2SDexuan Cui struct net_device *ndev = rxq->ndev; 1599ca9c54d2SDexuan Cui struct mana_recv_buf_oob *rxbuf_oob; 1600bd7fc6e1SShradha Gupta struct mana_port_context *apc; 1601ca9c54d2SDexuan Cui struct device *dev = gc->dev; 1602a2917b23SHaiyang Zhang void *old_buf = NULL; 1603ca9c54d2SDexuan Cui u32 curr, pktlen; 1604b1d13f7aSHaiyang Zhang bool old_fp; 1605ca9c54d2SDexuan Cui 1606bd7fc6e1SShradha Gupta apc = netdev_priv(ndev); 1607bd7fc6e1SShradha Gupta 1608ca9c54d2SDexuan Cui switch (oob->cqe_hdr.cqe_type) { 1609ca9c54d2SDexuan Cui case CQE_RX_OKAY: 1610ca9c54d2SDexuan Cui break; 1611ca9c54d2SDexuan Cui 1612ca9c54d2SDexuan Cui case CQE_RX_TRUNCATED: 1613e4b76219SHaiyang Zhang ++ndev->stats.rx_dropped; 1614e4b76219SHaiyang Zhang rxbuf_oob = &rxq->rx_oobs[rxq->buf_index]; 1615e4b76219SHaiyang Zhang netdev_warn_once(ndev, "Dropped a truncated packet\n"); 1616e4b76219SHaiyang Zhang goto drop; 1617ca9c54d2SDexuan Cui 1618ca9c54d2SDexuan Cui case CQE_RX_COALESCED_4: 1619ca9c54d2SDexuan Cui netdev_err(ndev, "RX coalescing is unsupported\n"); 1620bd7fc6e1SShradha Gupta apc->eth_stats.rx_coalesced_err++; 1621ca9c54d2SDexuan Cui return; 1622ca9c54d2SDexuan Cui 1623ca9c54d2SDexuan Cui case CQE_RX_OBJECT_FENCE: 16246cc74443SDexuan Cui complete(&rxq->fence_event); 1625ca9c54d2SDexuan Cui return; 1626ca9c54d2SDexuan Cui 1627ca9c54d2SDexuan Cui default: 1628ca9c54d2SDexuan Cui netdev_err(ndev, "Unknown RX CQE type = %d\n", 1629ca9c54d2SDexuan Cui oob->cqe_hdr.cqe_type); 1630bd7fc6e1SShradha Gupta apc->eth_stats.rx_cqe_unknown_type++; 1631ca9c54d2SDexuan Cui return; 1632ca9c54d2SDexuan Cui } 1633ca9c54d2SDexuan Cui 1634ca9c54d2SDexuan Cui pktlen = oob->ppi[0].pkt_len; 1635ca9c54d2SDexuan Cui 1636ca9c54d2SDexuan Cui if (pktlen == 0) { 1637ca9c54d2SDexuan Cui /* data packets should never have packetlength of zero */ 1638ca9c54d2SDexuan Cui netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n", 1639ca9c54d2SDexuan Cui rxq->gdma_id, cq->gdma_id, rxq->rxobj); 1640ca9c54d2SDexuan Cui return; 1641ca9c54d2SDexuan Cui } 1642ca9c54d2SDexuan Cui 1643ca9c54d2SDexuan Cui curr = rxq->buf_index; 1644ca9c54d2SDexuan Cui rxbuf_oob = &rxq->rx_oobs[curr]; 1645ca9c54d2SDexuan Cui WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); 1646ca9c54d2SDexuan Cui 1647b1d13f7aSHaiyang Zhang mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp); 1648ca9c54d2SDexuan Cui 1649a2917b23SHaiyang Zhang /* Unsuccessful refill will have old_buf == NULL. 1650a2917b23SHaiyang Zhang * In this case, mana_rx_skb() will drop the packet. 
1651a2917b23SHaiyang Zhang */ 1652b1d13f7aSHaiyang Zhang mana_rx_skb(old_buf, old_fp, oob, rxq); 1653ca9c54d2SDexuan Cui 1654e4b76219SHaiyang Zhang drop: 1655ca9c54d2SDexuan Cui mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); 1656ca9c54d2SDexuan Cui 1657ca9c54d2SDexuan Cui mana_post_pkt_rxq(rxq); 1658ca9c54d2SDexuan Cui } 1659ca9c54d2SDexuan Cui 1660ca9c54d2SDexuan Cui static void mana_poll_rx_cq(struct mana_cq *cq) 1661ca9c54d2SDexuan Cui { 1662ca9c54d2SDexuan Cui struct gdma_comp *comp = cq->gdma_comp_buf; 16637a8938cdSHaiyang Zhang struct mana_rxq *rxq = cq->rxq; 1664d90a9468SDexuan Cui int comp_read, i; 1665ca9c54d2SDexuan Cui 1666ca9c54d2SDexuan Cui comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); 1667ca9c54d2SDexuan Cui WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER); 1668ca9c54d2SDexuan Cui 16697a8938cdSHaiyang Zhang rxq->xdp_flush = false; 16707a8938cdSHaiyang Zhang 1671ca9c54d2SDexuan Cui for (i = 0; i < comp_read; i++) { 1672ca9c54d2SDexuan Cui if (WARN_ON_ONCE(comp[i].is_sq)) 1673ca9c54d2SDexuan Cui return; 1674ca9c54d2SDexuan Cui 1675ca9c54d2SDexuan Cui /* verify recv cqe references the right rxq */ 1676ca9c54d2SDexuan Cui if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id)) 1677ca9c54d2SDexuan Cui return; 1678ca9c54d2SDexuan Cui 16797a8938cdSHaiyang Zhang mana_process_rx_cqe(rxq, cq, &comp[i]); 1680ca9c54d2SDexuan Cui } 16817a8938cdSHaiyang Zhang 1682da4e8648SLong Li if (comp_read > 0) { 1683da4e8648SLong Li struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; 1684da4e8648SLong Li 1685da4e8648SLong Li mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq); 1686da4e8648SLong Li } 1687da4e8648SLong Li 16887a8938cdSHaiyang Zhang if (rxq->xdp_flush) 16897a8938cdSHaiyang Zhang xdp_do_flush(); 1690ca9c54d2SDexuan Cui } 1691ca9c54d2SDexuan Cui 169218010ff7SHaiyang Zhang static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue) 1693ca9c54d2SDexuan Cui { 1694ca9c54d2SDexuan Cui struct mana_cq *cq = context; 1695e1b5683fSHaiyang Zhang u8 arm_bit; 169618010ff7SHaiyang Zhang int w; 1697ca9c54d2SDexuan Cui 1698ca9c54d2SDexuan Cui WARN_ON_ONCE(cq->gdma_cq != gdma_queue); 1699ca9c54d2SDexuan Cui 1700ca9c54d2SDexuan Cui if (cq->type == MANA_CQ_TYPE_RX) 1701ca9c54d2SDexuan Cui mana_poll_rx_cq(cq); 1702ca9c54d2SDexuan Cui else 1703ca9c54d2SDexuan Cui mana_poll_tx_cq(cq); 1704ca9c54d2SDexuan Cui 170518010ff7SHaiyang Zhang w = cq->work_done; 170618010ff7SHaiyang Zhang 170718010ff7SHaiyang Zhang if (w < cq->budget && 170818010ff7SHaiyang Zhang napi_complete_done(&cq->napi, w)) { 1709e1b5683fSHaiyang Zhang arm_bit = SET_ARM_BIT; 1710e1b5683fSHaiyang Zhang } else { 1711e1b5683fSHaiyang Zhang arm_bit = 0; 1712e1b5683fSHaiyang Zhang } 1713e1b5683fSHaiyang Zhang 1714e1b5683fSHaiyang Zhang mana_gd_ring_cq(gdma_queue, arm_bit); 171518010ff7SHaiyang Zhang 171618010ff7SHaiyang Zhang return w; 1717e1b5683fSHaiyang Zhang } 1718e1b5683fSHaiyang Zhang 1719e1b5683fSHaiyang Zhang static int mana_poll(struct napi_struct *napi, int budget) 1720e1b5683fSHaiyang Zhang { 1721e1b5683fSHaiyang Zhang struct mana_cq *cq = container_of(napi, struct mana_cq, napi); 172218010ff7SHaiyang Zhang int w; 1723e1b5683fSHaiyang Zhang 1724e1b5683fSHaiyang Zhang cq->work_done = 0; 1725e1b5683fSHaiyang Zhang cq->budget = budget; 1726e1b5683fSHaiyang Zhang 172718010ff7SHaiyang Zhang w = mana_cq_handler(cq, cq->gdma_cq); 1728e1b5683fSHaiyang Zhang 172918010ff7SHaiyang Zhang return min(w, budget); 1730e1b5683fSHaiyang Zhang } 1731e1b5683fSHaiyang Zhang 1732e1b5683fSHaiyang Zhang static 
void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue) 1733e1b5683fSHaiyang Zhang { 1734e1b5683fSHaiyang Zhang struct mana_cq *cq = context; 1735e1b5683fSHaiyang Zhang 1736e1b5683fSHaiyang Zhang napi_schedule_irqoff(&cq->napi); 1737ca9c54d2SDexuan Cui } 1738ca9c54d2SDexuan Cui 1739ca9c54d2SDexuan Cui static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq) 1740ca9c54d2SDexuan Cui { 1741ca9c54d2SDexuan Cui struct gdma_dev *gd = apc->ac->gdma_dev; 1742ca9c54d2SDexuan Cui 1743ca9c54d2SDexuan Cui if (!cq->gdma_cq) 1744ca9c54d2SDexuan Cui return; 1745ca9c54d2SDexuan Cui 1746ca9c54d2SDexuan Cui mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq); 1747ca9c54d2SDexuan Cui } 1748ca9c54d2SDexuan Cui 1749ca9c54d2SDexuan Cui static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq) 1750ca9c54d2SDexuan Cui { 1751ca9c54d2SDexuan Cui struct gdma_dev *gd = apc->ac->gdma_dev; 1752ca9c54d2SDexuan Cui 1753ca9c54d2SDexuan Cui if (!txq->gdma_sq) 1754ca9c54d2SDexuan Cui return; 1755ca9c54d2SDexuan Cui 1756ca9c54d2SDexuan Cui mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq); 1757ca9c54d2SDexuan Cui } 1758ca9c54d2SDexuan Cui 1759ca9c54d2SDexuan Cui static void mana_destroy_txq(struct mana_port_context *apc) 1760ca9c54d2SDexuan Cui { 1761e1b5683fSHaiyang Zhang struct napi_struct *napi; 1762ca9c54d2SDexuan Cui int i; 1763ca9c54d2SDexuan Cui 1764ca9c54d2SDexuan Cui if (!apc->tx_qp) 1765ca9c54d2SDexuan Cui return; 1766ca9c54d2SDexuan Cui 1767ca9c54d2SDexuan Cui for (i = 0; i < apc->num_queues; i++) { 1768e1b5683fSHaiyang Zhang napi = &apc->tx_qp[i].tx_cq.napi; 1769e1b5683fSHaiyang Zhang napi_synchronize(napi); 1770e1b5683fSHaiyang Zhang napi_disable(napi); 1771e1b5683fSHaiyang Zhang netif_napi_del(napi); 1772e1b5683fSHaiyang Zhang 1773ca9c54d2SDexuan Cui mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); 1774ca9c54d2SDexuan Cui 1775ca9c54d2SDexuan Cui mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); 1776ca9c54d2SDexuan Cui 1777ca9c54d2SDexuan Cui mana_deinit_txq(apc, &apc->tx_qp[i].txq); 1778ca9c54d2SDexuan Cui } 1779ca9c54d2SDexuan Cui 1780ca9c54d2SDexuan Cui kfree(apc->tx_qp); 1781ca9c54d2SDexuan Cui apc->tx_qp = NULL; 1782ca9c54d2SDexuan Cui } 1783ca9c54d2SDexuan Cui 1784ca9c54d2SDexuan Cui static int mana_create_txq(struct mana_port_context *apc, 1785ca9c54d2SDexuan Cui struct net_device *net) 1786ca9c54d2SDexuan Cui { 17871e2d0824SHaiyang Zhang struct mana_context *ac = apc->ac; 17881e2d0824SHaiyang Zhang struct gdma_dev *gd = ac->gdma_dev; 1789ca9c54d2SDexuan Cui struct mana_obj_spec wq_spec; 1790ca9c54d2SDexuan Cui struct mana_obj_spec cq_spec; 1791ca9c54d2SDexuan Cui struct gdma_queue_spec spec; 1792ca9c54d2SDexuan Cui struct gdma_context *gc; 1793ca9c54d2SDexuan Cui struct mana_txq *txq; 1794ca9c54d2SDexuan Cui struct mana_cq *cq; 1795ca9c54d2SDexuan Cui u32 txq_size; 1796ca9c54d2SDexuan Cui u32 cq_size; 1797ca9c54d2SDexuan Cui int err; 1798ca9c54d2SDexuan Cui int i; 1799ca9c54d2SDexuan Cui 1800ca9c54d2SDexuan Cui apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp), 1801ca9c54d2SDexuan Cui GFP_KERNEL); 1802ca9c54d2SDexuan Cui if (!apc->tx_qp) 1803ca9c54d2SDexuan Cui return -ENOMEM; 1804ca9c54d2SDexuan Cui 1805ca9c54d2SDexuan Cui /* The minimum size of the WQE is 32 bytes, hence 1806ca9c54d2SDexuan Cui * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs 1807ca9c54d2SDexuan Cui * the SQ can store. This value is then used to size other queues 1808ca9c54d2SDexuan Cui * to prevent overflow. 
1809ca9c54d2SDexuan Cui */ 1810ca9c54d2SDexuan Cui txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32; 1811ca9c54d2SDexuan Cui BUILD_BUG_ON(!PAGE_ALIGNED(txq_size)); 1812ca9c54d2SDexuan Cui 1813ca9c54d2SDexuan Cui cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE; 1814ca9c54d2SDexuan Cui cq_size = PAGE_ALIGN(cq_size); 1815ca9c54d2SDexuan Cui 1816ca9c54d2SDexuan Cui gc = gd->gdma_context; 1817ca9c54d2SDexuan Cui 1818ca9c54d2SDexuan Cui for (i = 0; i < apc->num_queues; i++) { 1819ca9c54d2SDexuan Cui apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE; 1820ca9c54d2SDexuan Cui 1821ca9c54d2SDexuan Cui /* Create SQ */ 1822ca9c54d2SDexuan Cui txq = &apc->tx_qp[i].txq; 1823ca9c54d2SDexuan Cui 1824ca9c54d2SDexuan Cui u64_stats_init(&txq->stats.syncp); 1825ca9c54d2SDexuan Cui txq->ndev = net; 1826ca9c54d2SDexuan Cui txq->net_txq = netdev_get_tx_queue(net, i); 1827ca9c54d2SDexuan Cui txq->vp_offset = apc->tx_vp_offset; 1828ca9c54d2SDexuan Cui skb_queue_head_init(&txq->pending_skbs); 1829ca9c54d2SDexuan Cui 1830ca9c54d2SDexuan Cui memset(&spec, 0, sizeof(spec)); 1831ca9c54d2SDexuan Cui spec.type = GDMA_SQ; 1832ca9c54d2SDexuan Cui spec.monitor_avl_buf = true; 1833ca9c54d2SDexuan Cui spec.queue_size = txq_size; 1834ca9c54d2SDexuan Cui err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq); 1835ca9c54d2SDexuan Cui if (err) 1836ca9c54d2SDexuan Cui goto out; 1837ca9c54d2SDexuan Cui 1838ca9c54d2SDexuan Cui /* Create SQ's CQ */ 1839ca9c54d2SDexuan Cui cq = &apc->tx_qp[i].tx_cq; 1840ca9c54d2SDexuan Cui cq->type = MANA_CQ_TYPE_TX; 1841ca9c54d2SDexuan Cui 1842ca9c54d2SDexuan Cui cq->txq = txq; 1843ca9c54d2SDexuan Cui 1844ca9c54d2SDexuan Cui memset(&spec, 0, sizeof(spec)); 1845ca9c54d2SDexuan Cui spec.type = GDMA_CQ; 1846ca9c54d2SDexuan Cui spec.monitor_avl_buf = false; 1847ca9c54d2SDexuan Cui spec.queue_size = cq_size; 1848e1b5683fSHaiyang Zhang spec.cq.callback = mana_schedule_napi; 18491e2d0824SHaiyang Zhang spec.cq.parent_eq = ac->eqs[i].eq; 1850ca9c54d2SDexuan Cui spec.cq.context = cq; 1851ca9c54d2SDexuan Cui err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); 1852ca9c54d2SDexuan Cui if (err) 1853ca9c54d2SDexuan Cui goto out; 1854ca9c54d2SDexuan Cui 1855ca9c54d2SDexuan Cui memset(&wq_spec, 0, sizeof(wq_spec)); 1856ca9c54d2SDexuan Cui memset(&cq_spec, 0, sizeof(cq_spec)); 1857ca9c54d2SDexuan Cui 185828c66cfaSAjay Sharma wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle; 1859ca9c54d2SDexuan Cui wq_spec.queue_size = txq->gdma_sq->queue_size; 1860ca9c54d2SDexuan Cui 186128c66cfaSAjay Sharma cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; 1862ca9c54d2SDexuan Cui cq_spec.queue_size = cq->gdma_cq->queue_size; 1863ca9c54d2SDexuan Cui cq_spec.modr_ctx_id = 0; 1864ca9c54d2SDexuan Cui cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; 1865ca9c54d2SDexuan Cui 1866ca9c54d2SDexuan Cui err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ, 1867ca9c54d2SDexuan Cui &wq_spec, &cq_spec, 1868ca9c54d2SDexuan Cui &apc->tx_qp[i].tx_object); 1869ca9c54d2SDexuan Cui 1870ca9c54d2SDexuan Cui if (err) 1871ca9c54d2SDexuan Cui goto out; 1872ca9c54d2SDexuan Cui 1873ca9c54d2SDexuan Cui txq->gdma_sq->id = wq_spec.queue_index; 1874ca9c54d2SDexuan Cui cq->gdma_cq->id = cq_spec.queue_index; 1875ca9c54d2SDexuan Cui 187628c66cfaSAjay Sharma txq->gdma_sq->mem_info.dma_region_handle = 187728c66cfaSAjay Sharma GDMA_INVALID_DMA_REGION; 187828c66cfaSAjay Sharma cq->gdma_cq->mem_info.dma_region_handle = 187928c66cfaSAjay Sharma GDMA_INVALID_DMA_REGION; 1880ca9c54d2SDexuan Cui 1881ca9c54d2SDexuan Cui txq->gdma_txq_id = 
txq->gdma_sq->id; 1882ca9c54d2SDexuan Cui 1883ca9c54d2SDexuan Cui cq->gdma_id = cq->gdma_cq->id; 1884ca9c54d2SDexuan Cui 1885b9078845SChristophe JAILLET if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { 1886b9078845SChristophe JAILLET err = -EINVAL; 1887b9078845SChristophe JAILLET goto out; 1888b9078845SChristophe JAILLET } 1889ca9c54d2SDexuan Cui 1890ca9c54d2SDexuan Cui gc->cq_table[cq->gdma_id] = cq->gdma_cq; 1891ca9c54d2SDexuan Cui 189216d083e2SJakub Kicinski netif_napi_add_tx(net, &cq->napi, mana_poll); 1893e1b5683fSHaiyang Zhang napi_enable(&cq->napi); 1894e1b5683fSHaiyang Zhang 1895e1b5683fSHaiyang Zhang mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); 1896ca9c54d2SDexuan Cui } 1897ca9c54d2SDexuan Cui 1898ca9c54d2SDexuan Cui return 0; 1899ca9c54d2SDexuan Cui out: 1900ca9c54d2SDexuan Cui mana_destroy_txq(apc); 1901ca9c54d2SDexuan Cui return err; 1902ca9c54d2SDexuan Cui } 1903ca9c54d2SDexuan Cui 1904ca9c54d2SDexuan Cui static void mana_destroy_rxq(struct mana_port_context *apc, 1905ca9c54d2SDexuan Cui struct mana_rxq *rxq, bool validate_state) 1906ca9c54d2SDexuan Cui 1907ca9c54d2SDexuan Cui { 1908ca9c54d2SDexuan Cui struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; 1909ca9c54d2SDexuan Cui struct mana_recv_buf_oob *rx_oob; 1910ca9c54d2SDexuan Cui struct device *dev = gc->dev; 1911e1b5683fSHaiyang Zhang struct napi_struct *napi; 1912b1d13f7aSHaiyang Zhang struct page *page; 1913ca9c54d2SDexuan Cui int i; 1914ca9c54d2SDexuan Cui 1915ca9c54d2SDexuan Cui if (!rxq) 1916ca9c54d2SDexuan Cui return; 1917ca9c54d2SDexuan Cui 1918e1b5683fSHaiyang Zhang napi = &rxq->rx_cq.napi; 1919e1b5683fSHaiyang Zhang 1920ca9c54d2SDexuan Cui if (validate_state) 1921e1b5683fSHaiyang Zhang napi_synchronize(napi); 1922e1b5683fSHaiyang Zhang 1923e1b5683fSHaiyang Zhang napi_disable(napi); 1924ed5356b5SHaiyang Zhang 1925ed5356b5SHaiyang Zhang xdp_rxq_info_unreg(&rxq->xdp_rxq); 1926ed5356b5SHaiyang Zhang 1927e1b5683fSHaiyang Zhang netif_napi_del(napi); 1928ca9c54d2SDexuan Cui 1929ca9c54d2SDexuan Cui mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); 1930ca9c54d2SDexuan Cui 1931ca9c54d2SDexuan Cui mana_deinit_cq(apc, &rxq->rx_cq); 1932ca9c54d2SDexuan Cui 1933a2917b23SHaiyang Zhang if (rxq->xdp_save_va) 1934a2917b23SHaiyang Zhang put_page(virt_to_head_page(rxq->xdp_save_va)); 1935a6bf5703SHaiyang Zhang 1936ca9c54d2SDexuan Cui for (i = 0; i < rxq->num_rx_buf; i++) { 1937ca9c54d2SDexuan Cui rx_oob = &rxq->rx_oobs[i]; 1938ca9c54d2SDexuan Cui 1939ca9c54d2SDexuan Cui if (!rx_oob->buf_va) 1940ca9c54d2SDexuan Cui continue; 1941ca9c54d2SDexuan Cui 1942a2917b23SHaiyang Zhang dma_unmap_single(dev, rx_oob->sgl[0].address, 1943a2917b23SHaiyang Zhang rx_oob->sgl[0].size, DMA_FROM_DEVICE); 1944ca9c54d2SDexuan Cui 1945b1d13f7aSHaiyang Zhang page = virt_to_head_page(rx_oob->buf_va); 1946b1d13f7aSHaiyang Zhang 1947b1d13f7aSHaiyang Zhang if (rx_oob->from_pool) 1948b1d13f7aSHaiyang Zhang page_pool_put_full_page(rxq->page_pool, page, false); 1949b1d13f7aSHaiyang Zhang else 1950b1d13f7aSHaiyang Zhang put_page(page); 1951b1d13f7aSHaiyang Zhang 1952ca9c54d2SDexuan Cui rx_oob->buf_va = NULL; 1953ca9c54d2SDexuan Cui } 1954ca9c54d2SDexuan Cui 1955b1d13f7aSHaiyang Zhang page_pool_destroy(rxq->page_pool); 1956b1d13f7aSHaiyang Zhang 1957ca9c54d2SDexuan Cui if (rxq->gdma_rq) 1958ca9c54d2SDexuan Cui mana_gd_destroy_queue(gc, rxq->gdma_rq); 1959ca9c54d2SDexuan Cui 1960ca9c54d2SDexuan Cui kfree(rxq); 1961ca9c54d2SDexuan Cui } 1962ca9c54d2SDexuan Cui 1963a2917b23SHaiyang Zhang static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key, 
1964a2917b23SHaiyang Zhang struct mana_rxq *rxq, struct device *dev) 1965a2917b23SHaiyang Zhang { 196680f6215bSHaiyang Zhang struct mana_port_context *mpc = netdev_priv(rxq->ndev); 1967b1d13f7aSHaiyang Zhang bool from_pool = false; 1968a2917b23SHaiyang Zhang dma_addr_t da; 1969a2917b23SHaiyang Zhang void *va; 1970a2917b23SHaiyang Zhang 197180f6215bSHaiyang Zhang if (mpc->rxbufs_pre) 197280f6215bSHaiyang Zhang va = mana_get_rxbuf_pre(rxq, &da); 197380f6215bSHaiyang Zhang else 1974b1d13f7aSHaiyang Zhang va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false); 1975a2917b23SHaiyang Zhang 1976a2917b23SHaiyang Zhang if (!va) 1977a2917b23SHaiyang Zhang return -ENOMEM; 1978a2917b23SHaiyang Zhang 1979a2917b23SHaiyang Zhang rx_oob->buf_va = va; 1980b1d13f7aSHaiyang Zhang rx_oob->from_pool = from_pool; 1981a2917b23SHaiyang Zhang 1982a2917b23SHaiyang Zhang rx_oob->sgl[0].address = da; 1983a2917b23SHaiyang Zhang rx_oob->sgl[0].size = rxq->datasize; 1984a2917b23SHaiyang Zhang rx_oob->sgl[0].mem_key = mem_key; 1985a2917b23SHaiyang Zhang 1986a2917b23SHaiyang Zhang return 0; 1987a2917b23SHaiyang Zhang } 1988a2917b23SHaiyang Zhang 1989ca9c54d2SDexuan Cui #define MANA_WQE_HEADER_SIZE 16 1990ca9c54d2SDexuan Cui #define MANA_WQE_SGE_SIZE 16 1991ca9c54d2SDexuan Cui 1992ca9c54d2SDexuan Cui static int mana_alloc_rx_wqe(struct mana_port_context *apc, 1993ca9c54d2SDexuan Cui struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size) 1994ca9c54d2SDexuan Cui { 1995ca9c54d2SDexuan Cui struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; 1996ca9c54d2SDexuan Cui struct mana_recv_buf_oob *rx_oob; 1997ca9c54d2SDexuan Cui struct device *dev = gc->dev; 1998ca9c54d2SDexuan Cui u32 buf_idx; 1999a2917b23SHaiyang Zhang int ret; 2000ca9c54d2SDexuan Cui 20012fbbd712SHaiyang Zhang WARN_ON(rxq->datasize == 0); 2002ca9c54d2SDexuan Cui 2003ca9c54d2SDexuan Cui *rxq_size = 0; 2004ca9c54d2SDexuan Cui *cq_size = 0; 2005ca9c54d2SDexuan Cui 2006ca9c54d2SDexuan Cui for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { 2007ca9c54d2SDexuan Cui rx_oob = &rxq->rx_oobs[buf_idx]; 2008ca9c54d2SDexuan Cui memset(rx_oob, 0, sizeof(*rx_oob)); 2009ca9c54d2SDexuan Cui 2010ca9c54d2SDexuan Cui rx_oob->num_sge = 1; 2011a2917b23SHaiyang Zhang 2012a2917b23SHaiyang Zhang ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq, 2013a2917b23SHaiyang Zhang dev); 2014a2917b23SHaiyang Zhang if (ret) 2015a2917b23SHaiyang Zhang return ret; 2016ca9c54d2SDexuan Cui 2017ca9c54d2SDexuan Cui rx_oob->wqe_req.sgl = rx_oob->sgl; 2018ca9c54d2SDexuan Cui rx_oob->wqe_req.num_sge = rx_oob->num_sge; 2019ca9c54d2SDexuan Cui rx_oob->wqe_req.inline_oob_size = 0; 2020ca9c54d2SDexuan Cui rx_oob->wqe_req.inline_oob_data = NULL; 2021ca9c54d2SDexuan Cui rx_oob->wqe_req.flags = 0; 2022ca9c54d2SDexuan Cui rx_oob->wqe_req.client_data_unit = 0; 2023ca9c54d2SDexuan Cui 2024ca9c54d2SDexuan Cui *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE + 2025ca9c54d2SDexuan Cui MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32); 2026ca9c54d2SDexuan Cui *cq_size += COMP_ENTRY_SIZE; 2027ca9c54d2SDexuan Cui } 2028ca9c54d2SDexuan Cui 2029ca9c54d2SDexuan Cui return 0; 2030ca9c54d2SDexuan Cui } 2031ca9c54d2SDexuan Cui 2032ca9c54d2SDexuan Cui static int mana_push_wqe(struct mana_rxq *rxq) 2033ca9c54d2SDexuan Cui { 2034ca9c54d2SDexuan Cui struct mana_recv_buf_oob *rx_oob; 2035ca9c54d2SDexuan Cui u32 buf_idx; 2036ca9c54d2SDexuan Cui int err; 2037ca9c54d2SDexuan Cui 2038ca9c54d2SDexuan Cui for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { 2039ca9c54d2SDexuan Cui rx_oob = &rxq->rx_oobs[buf_idx]; 2040ca9c54d2SDexuan 
Cui 2041ca9c54d2SDexuan Cui err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req, 2042ca9c54d2SDexuan Cui &rx_oob->wqe_inf); 2043ca9c54d2SDexuan Cui if (err) 2044ca9c54d2SDexuan Cui return -ENOSPC; 2045ca9c54d2SDexuan Cui } 2046ca9c54d2SDexuan Cui 2047ca9c54d2SDexuan Cui return 0; 2048ca9c54d2SDexuan Cui } 2049ca9c54d2SDexuan Cui 2050b1d13f7aSHaiyang Zhang static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc) 2051b1d13f7aSHaiyang Zhang { 2052b1d13f7aSHaiyang Zhang struct page_pool_params pprm = {}; 2053b1d13f7aSHaiyang Zhang int ret; 2054b1d13f7aSHaiyang Zhang 2055b1d13f7aSHaiyang Zhang pprm.pool_size = RX_BUFFERS_PER_QUEUE; 2056b1d13f7aSHaiyang Zhang pprm.nid = gc->numa_node; 2057b1d13f7aSHaiyang Zhang pprm.napi = &rxq->rx_cq.napi; 2058b1d13f7aSHaiyang Zhang 2059b1d13f7aSHaiyang Zhang rxq->page_pool = page_pool_create(&pprm); 2060b1d13f7aSHaiyang Zhang 2061b1d13f7aSHaiyang Zhang if (IS_ERR(rxq->page_pool)) { 2062b1d13f7aSHaiyang Zhang ret = PTR_ERR(rxq->page_pool); 2063b1d13f7aSHaiyang Zhang rxq->page_pool = NULL; 2064b1d13f7aSHaiyang Zhang return ret; 2065b1d13f7aSHaiyang Zhang } 2066b1d13f7aSHaiyang Zhang 2067b1d13f7aSHaiyang Zhang return 0; 2068b1d13f7aSHaiyang Zhang } 2069b1d13f7aSHaiyang Zhang 2070ca9c54d2SDexuan Cui static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, 2071ca9c54d2SDexuan Cui u32 rxq_idx, struct mana_eq *eq, 2072ca9c54d2SDexuan Cui struct net_device *ndev) 2073ca9c54d2SDexuan Cui { 2074ca9c54d2SDexuan Cui struct gdma_dev *gd = apc->ac->gdma_dev; 2075ca9c54d2SDexuan Cui struct mana_obj_spec wq_spec; 2076ca9c54d2SDexuan Cui struct mana_obj_spec cq_spec; 2077ca9c54d2SDexuan Cui struct gdma_queue_spec spec; 2078ca9c54d2SDexuan Cui struct mana_cq *cq = NULL; 2079ca9c54d2SDexuan Cui struct gdma_context *gc; 2080ca9c54d2SDexuan Cui u32 cq_size, rq_size; 2081ca9c54d2SDexuan Cui struct mana_rxq *rxq; 2082ca9c54d2SDexuan Cui int err; 2083ca9c54d2SDexuan Cui 2084ca9c54d2SDexuan Cui gc = gd->gdma_context; 2085ca9c54d2SDexuan Cui 2086ea89c862SGustavo A. R. 
Silva rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE), 2087ca9c54d2SDexuan Cui GFP_KERNEL); 2088ca9c54d2SDexuan Cui if (!rxq) 2089ca9c54d2SDexuan Cui return NULL; 2090ca9c54d2SDexuan Cui 2091ca9c54d2SDexuan Cui rxq->ndev = ndev; 2092ca9c54d2SDexuan Cui rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE; 2093ca9c54d2SDexuan Cui rxq->rxq_idx = rxq_idx; 2094ca9c54d2SDexuan Cui rxq->rxobj = INVALID_MANA_HANDLE; 2095ca9c54d2SDexuan Cui 209680f6215bSHaiyang Zhang mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size, 209780f6215bSHaiyang Zhang &rxq->headroom); 2098a2917b23SHaiyang Zhang 2099b1d13f7aSHaiyang Zhang /* Create page pool for RX queue */ 2100b1d13f7aSHaiyang Zhang err = mana_create_page_pool(rxq, gc); 2101b1d13f7aSHaiyang Zhang if (err) { 2102b1d13f7aSHaiyang Zhang netdev_err(ndev, "Create page pool err:%d\n", err); 2103b1d13f7aSHaiyang Zhang goto out; 2104b1d13f7aSHaiyang Zhang } 2105b1d13f7aSHaiyang Zhang 2106ca9c54d2SDexuan Cui err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); 2107ca9c54d2SDexuan Cui if (err) 2108ca9c54d2SDexuan Cui goto out; 2109ca9c54d2SDexuan Cui 2110ca9c54d2SDexuan Cui rq_size = PAGE_ALIGN(rq_size); 2111ca9c54d2SDexuan Cui cq_size = PAGE_ALIGN(cq_size); 2112ca9c54d2SDexuan Cui 2113ca9c54d2SDexuan Cui /* Create RQ */ 2114ca9c54d2SDexuan Cui memset(&spec, 0, sizeof(spec)); 2115ca9c54d2SDexuan Cui spec.type = GDMA_RQ; 2116ca9c54d2SDexuan Cui spec.monitor_avl_buf = true; 2117ca9c54d2SDexuan Cui spec.queue_size = rq_size; 2118ca9c54d2SDexuan Cui err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq); 2119ca9c54d2SDexuan Cui if (err) 2120ca9c54d2SDexuan Cui goto out; 2121ca9c54d2SDexuan Cui 2122ca9c54d2SDexuan Cui /* Create RQ's CQ */ 2123ca9c54d2SDexuan Cui cq = &rxq->rx_cq; 2124ca9c54d2SDexuan Cui cq->type = MANA_CQ_TYPE_RX; 2125ca9c54d2SDexuan Cui cq->rxq = rxq; 2126ca9c54d2SDexuan Cui 2127ca9c54d2SDexuan Cui memset(&spec, 0, sizeof(spec)); 2128ca9c54d2SDexuan Cui spec.type = GDMA_CQ; 2129ca9c54d2SDexuan Cui spec.monitor_avl_buf = false; 2130ca9c54d2SDexuan Cui spec.queue_size = cq_size; 2131e1b5683fSHaiyang Zhang spec.cq.callback = mana_schedule_napi; 2132ca9c54d2SDexuan Cui spec.cq.parent_eq = eq->eq; 2133ca9c54d2SDexuan Cui spec.cq.context = cq; 2134ca9c54d2SDexuan Cui err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); 2135ca9c54d2SDexuan Cui if (err) 2136ca9c54d2SDexuan Cui goto out; 2137ca9c54d2SDexuan Cui 2138ca9c54d2SDexuan Cui memset(&wq_spec, 0, sizeof(wq_spec)); 2139ca9c54d2SDexuan Cui memset(&cq_spec, 0, sizeof(cq_spec)); 214028c66cfaSAjay Sharma wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle; 2141ca9c54d2SDexuan Cui wq_spec.queue_size = rxq->gdma_rq->queue_size; 2142ca9c54d2SDexuan Cui 214328c66cfaSAjay Sharma cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; 2144ca9c54d2SDexuan Cui cq_spec.queue_size = cq->gdma_cq->queue_size; 2145ca9c54d2SDexuan Cui cq_spec.modr_ctx_id = 0; 2146ca9c54d2SDexuan Cui cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; 2147ca9c54d2SDexuan Cui 2148ca9c54d2SDexuan Cui err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ, 2149ca9c54d2SDexuan Cui &wq_spec, &cq_spec, &rxq->rxobj); 2150ca9c54d2SDexuan Cui if (err) 2151ca9c54d2SDexuan Cui goto out; 2152ca9c54d2SDexuan Cui 2153ca9c54d2SDexuan Cui rxq->gdma_rq->id = wq_spec.queue_index; 2154ca9c54d2SDexuan Cui cq->gdma_cq->id = cq_spec.queue_index; 2155ca9c54d2SDexuan Cui 215628c66cfaSAjay Sharma rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; 215728c66cfaSAjay Sharma 
cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; 2158ca9c54d2SDexuan Cui 2159ca9c54d2SDexuan Cui rxq->gdma_id = rxq->gdma_rq->id; 2160ca9c54d2SDexuan Cui cq->gdma_id = cq->gdma_cq->id; 2161ca9c54d2SDexuan Cui 2162ca9c54d2SDexuan Cui err = mana_push_wqe(rxq); 2163ca9c54d2SDexuan Cui if (err) 2164ca9c54d2SDexuan Cui goto out; 2165ca9c54d2SDexuan Cui 2166be049936SHaiyang Zhang if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { 2167be049936SHaiyang Zhang err = -EINVAL; 2168ca9c54d2SDexuan Cui goto out; 2169be049936SHaiyang Zhang } 2170ca9c54d2SDexuan Cui 2171ca9c54d2SDexuan Cui gc->cq_table[cq->gdma_id] = cq->gdma_cq; 2172ca9c54d2SDexuan Cui 2173b707b89fSJakub Kicinski netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1); 2174ed5356b5SHaiyang Zhang 2175ed5356b5SHaiyang Zhang WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, 2176ed5356b5SHaiyang Zhang cq->napi.napi_id)); 2177b1d13f7aSHaiyang Zhang WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, 2178b1d13f7aSHaiyang Zhang rxq->page_pool)); 2179ed5356b5SHaiyang Zhang 2180e1b5683fSHaiyang Zhang napi_enable(&cq->napi); 2181e1b5683fSHaiyang Zhang 2182e1b5683fSHaiyang Zhang mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); 2183ca9c54d2SDexuan Cui out: 2184ca9c54d2SDexuan Cui if (!err) 2185ca9c54d2SDexuan Cui return rxq; 2186ca9c54d2SDexuan Cui 2187ca9c54d2SDexuan Cui netdev_err(ndev, "Failed to create RXQ: err = %d\n", err); 2188ca9c54d2SDexuan Cui 2189ca9c54d2SDexuan Cui mana_destroy_rxq(apc, rxq, false); 2190ca9c54d2SDexuan Cui 2191ca9c54d2SDexuan Cui if (cq) 2192ca9c54d2SDexuan Cui mana_deinit_cq(apc, cq); 2193ca9c54d2SDexuan Cui 2194ca9c54d2SDexuan Cui return NULL; 2195ca9c54d2SDexuan Cui } 2196ca9c54d2SDexuan Cui 2197ca9c54d2SDexuan Cui static int mana_add_rx_queues(struct mana_port_context *apc, 2198ca9c54d2SDexuan Cui struct net_device *ndev) 2199ca9c54d2SDexuan Cui { 22001e2d0824SHaiyang Zhang struct mana_context *ac = apc->ac; 2201ca9c54d2SDexuan Cui struct mana_rxq *rxq; 2202ca9c54d2SDexuan Cui int err = 0; 2203ca9c54d2SDexuan Cui int i; 2204ca9c54d2SDexuan Cui 2205ca9c54d2SDexuan Cui for (i = 0; i < apc->num_queues; i++) { 22061e2d0824SHaiyang Zhang rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); 2207ca9c54d2SDexuan Cui if (!rxq) { 2208ca9c54d2SDexuan Cui err = -ENOMEM; 2209ca9c54d2SDexuan Cui goto out; 2210ca9c54d2SDexuan Cui } 2211ca9c54d2SDexuan Cui 2212ca9c54d2SDexuan Cui u64_stats_init(&rxq->stats.syncp); 2213ca9c54d2SDexuan Cui 2214ca9c54d2SDexuan Cui apc->rxqs[i] = rxq; 2215ca9c54d2SDexuan Cui } 2216ca9c54d2SDexuan Cui 2217ca9c54d2SDexuan Cui apc->default_rxobj = apc->rxqs[0]->rxobj; 2218ca9c54d2SDexuan Cui out: 2219ca9c54d2SDexuan Cui return err; 2220ca9c54d2SDexuan Cui } 2221ca9c54d2SDexuan Cui 2222ca9c54d2SDexuan Cui static void mana_destroy_vport(struct mana_port_context *apc) 2223ca9c54d2SDexuan Cui { 22241566e7d6SDexuan Cui struct gdma_dev *gd = apc->ac->gdma_dev; 2225ca9c54d2SDexuan Cui struct mana_rxq *rxq; 2226ca9c54d2SDexuan Cui u32 rxq_idx; 2227ca9c54d2SDexuan Cui 2228ca9c54d2SDexuan Cui for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { 2229ca9c54d2SDexuan Cui rxq = apc->rxqs[rxq_idx]; 2230ca9c54d2SDexuan Cui if (!rxq) 2231ca9c54d2SDexuan Cui continue; 2232ca9c54d2SDexuan Cui 2233ca9c54d2SDexuan Cui mana_destroy_rxq(apc, rxq, true); 2234ca9c54d2SDexuan Cui apc->rxqs[rxq_idx] = NULL; 2235ca9c54d2SDexuan Cui } 2236ca9c54d2SDexuan Cui 2237ca9c54d2SDexuan Cui mana_destroy_txq(apc); 2238b5c1c985SLong Li mana_uncfg_vport(apc); 22391566e7d6SDexuan Cui 22401566e7d6SDexuan Cui if 
(gd->gdma_context->is_pf) 22411566e7d6SDexuan Cui mana_pf_deregister_hw_vport(apc); 2242ca9c54d2SDexuan Cui } 2243ca9c54d2SDexuan Cui 2244ca9c54d2SDexuan Cui static int mana_create_vport(struct mana_port_context *apc, 2245ca9c54d2SDexuan Cui struct net_device *net) 2246ca9c54d2SDexuan Cui { 2247ca9c54d2SDexuan Cui struct gdma_dev *gd = apc->ac->gdma_dev; 2248ca9c54d2SDexuan Cui int err; 2249ca9c54d2SDexuan Cui 2250ca9c54d2SDexuan Cui apc->default_rxobj = INVALID_MANA_HANDLE; 2251ca9c54d2SDexuan Cui 22521566e7d6SDexuan Cui if (gd->gdma_context->is_pf) { 22531566e7d6SDexuan Cui err = mana_pf_register_hw_vport(apc); 22541566e7d6SDexuan Cui if (err) 22551566e7d6SDexuan Cui return err; 22561566e7d6SDexuan Cui } 22571566e7d6SDexuan Cui 2258ca9c54d2SDexuan Cui err = mana_cfg_vport(apc, gd->pdid, gd->doorbell); 2259ca9c54d2SDexuan Cui if (err) 2260ca9c54d2SDexuan Cui return err; 2261ca9c54d2SDexuan Cui 2262ca9c54d2SDexuan Cui return mana_create_txq(apc, net); 2263ca9c54d2SDexuan Cui } 2264ca9c54d2SDexuan Cui 2265ca9c54d2SDexuan Cui static void mana_rss_table_init(struct mana_port_context *apc) 2266ca9c54d2SDexuan Cui { 2267ca9c54d2SDexuan Cui int i; 2268ca9c54d2SDexuan Cui 2269ca9c54d2SDexuan Cui for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) 2270ca9c54d2SDexuan Cui apc->indir_table[i] = 2271ca9c54d2SDexuan Cui ethtool_rxfh_indir_default(i, apc->num_queues); 2272ca9c54d2SDexuan Cui } 2273ca9c54d2SDexuan Cui 2274ca9c54d2SDexuan Cui int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, 2275ca9c54d2SDexuan Cui bool update_hash, bool update_tab) 2276ca9c54d2SDexuan Cui { 2277ca9c54d2SDexuan Cui u32 queue_idx; 22786cc74443SDexuan Cui int err; 2279ca9c54d2SDexuan Cui int i; 2280ca9c54d2SDexuan Cui 2281ca9c54d2SDexuan Cui if (update_tab) { 2282ca9c54d2SDexuan Cui for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) { 2283ca9c54d2SDexuan Cui queue_idx = apc->indir_table[i]; 2284ca9c54d2SDexuan Cui apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; 2285ca9c54d2SDexuan Cui } 2286ca9c54d2SDexuan Cui } 2287ca9c54d2SDexuan Cui 22886cc74443SDexuan Cui err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab); 22896cc74443SDexuan Cui if (err) 22906cc74443SDexuan Cui return err; 22916cc74443SDexuan Cui 22926cc74443SDexuan Cui mana_fence_rqs(apc); 22936cc74443SDexuan Cui 22946cc74443SDexuan Cui return 0; 2295ca9c54d2SDexuan Cui } 2296ca9c54d2SDexuan Cui 2297ca9c54d2SDexuan Cui static int mana_init_port(struct net_device *ndev) 2298ca9c54d2SDexuan Cui { 2299ca9c54d2SDexuan Cui struct mana_port_context *apc = netdev_priv(ndev); 2300ca9c54d2SDexuan Cui u32 max_txq, max_rxq, max_queues; 2301ca9c54d2SDexuan Cui int port_idx = apc->port_idx; 2302ca9c54d2SDexuan Cui u32 num_indirect_entries; 2303ca9c54d2SDexuan Cui int err; 2304ca9c54d2SDexuan Cui 2305ca9c54d2SDexuan Cui err = mana_init_port_context(apc); 2306ca9c54d2SDexuan Cui if (err) 2307ca9c54d2SDexuan Cui return err; 2308ca9c54d2SDexuan Cui 2309ca9c54d2SDexuan Cui err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq, 2310ca9c54d2SDexuan Cui &num_indirect_entries); 2311ca9c54d2SDexuan Cui if (err) { 23126c7ea696SDexuan Cui netdev_err(ndev, "Failed to query info for vPort %d\n", 23136c7ea696SDexuan Cui port_idx); 2314ca9c54d2SDexuan Cui goto reset_apc; 2315ca9c54d2SDexuan Cui } 2316ca9c54d2SDexuan Cui 2317ca9c54d2SDexuan Cui max_queues = min_t(u32, max_txq, max_rxq); 2318ca9c54d2SDexuan Cui if (apc->max_queues > max_queues) 2319ca9c54d2SDexuan Cui apc->max_queues = max_queues; 2320ca9c54d2SDexuan Cui 2321ca9c54d2SDexuan Cui if 
(apc->num_queues > apc->max_queues) 2322ca9c54d2SDexuan Cui apc->num_queues = apc->max_queues; 2323ca9c54d2SDexuan Cui 2324f3956ebbSJakub Kicinski eth_hw_addr_set(ndev, apc->mac_addr); 2325ca9c54d2SDexuan Cui 2326ca9c54d2SDexuan Cui return 0; 2327ca9c54d2SDexuan Cui 2328ca9c54d2SDexuan Cui reset_apc: 2329ca9c54d2SDexuan Cui kfree(apc->rxqs); 2330ca9c54d2SDexuan Cui apc->rxqs = NULL; 2331ca9c54d2SDexuan Cui return err; 2332ca9c54d2SDexuan Cui } 2333ca9c54d2SDexuan Cui 2334ca9c54d2SDexuan Cui int mana_alloc_queues(struct net_device *ndev) 2335ca9c54d2SDexuan Cui { 2336ca9c54d2SDexuan Cui struct mana_port_context *apc = netdev_priv(ndev); 23371566e7d6SDexuan Cui struct gdma_dev *gd = apc->ac->gdma_dev; 2338ca9c54d2SDexuan Cui int err; 2339ca9c54d2SDexuan Cui 2340ca9c54d2SDexuan Cui err = mana_create_vport(apc, ndev); 2341ca9c54d2SDexuan Cui if (err) 23421e2d0824SHaiyang Zhang return err; 2343ca9c54d2SDexuan Cui 2344ca9c54d2SDexuan Cui err = netif_set_real_num_tx_queues(ndev, apc->num_queues); 2345ca9c54d2SDexuan Cui if (err) 2346ca9c54d2SDexuan Cui goto destroy_vport; 2347ca9c54d2SDexuan Cui 2348ca9c54d2SDexuan Cui err = mana_add_rx_queues(apc, ndev); 2349ca9c54d2SDexuan Cui if (err) 2350ca9c54d2SDexuan Cui goto destroy_vport; 2351ca9c54d2SDexuan Cui 2352ca9c54d2SDexuan Cui apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE; 2353ca9c54d2SDexuan Cui 2354ca9c54d2SDexuan Cui err = netif_set_real_num_rx_queues(ndev, apc->num_queues); 2355ca9c54d2SDexuan Cui if (err) 2356ca9c54d2SDexuan Cui goto destroy_vport; 2357ca9c54d2SDexuan Cui 2358ca9c54d2SDexuan Cui mana_rss_table_init(apc); 2359ca9c54d2SDexuan Cui 2360ca9c54d2SDexuan Cui err = mana_config_rss(apc, TRI_STATE_TRUE, true, true); 2361ca9c54d2SDexuan Cui if (err) 2362ca9c54d2SDexuan Cui goto destroy_vport; 2363ca9c54d2SDexuan Cui 23641566e7d6SDexuan Cui if (gd->gdma_context->is_pf) { 23651566e7d6SDexuan Cui err = mana_pf_register_filter(apc); 23661566e7d6SDexuan Cui if (err) 23671566e7d6SDexuan Cui goto destroy_vport; 23681566e7d6SDexuan Cui } 23691566e7d6SDexuan Cui 2370ed5356b5SHaiyang Zhang mana_chn_setxdp(apc, mana_xdp_get(apc)); 2371ed5356b5SHaiyang Zhang 2372ca9c54d2SDexuan Cui return 0; 2373ca9c54d2SDexuan Cui 2374ca9c54d2SDexuan Cui destroy_vport: 2375ca9c54d2SDexuan Cui mana_destroy_vport(apc); 2376ca9c54d2SDexuan Cui return err; 2377ca9c54d2SDexuan Cui } 2378ca9c54d2SDexuan Cui 2379ca9c54d2SDexuan Cui int mana_attach(struct net_device *ndev) 2380ca9c54d2SDexuan Cui { 2381ca9c54d2SDexuan Cui struct mana_port_context *apc = netdev_priv(ndev); 2382ca9c54d2SDexuan Cui int err; 2383ca9c54d2SDexuan Cui 2384ca9c54d2SDexuan Cui ASSERT_RTNL(); 2385ca9c54d2SDexuan Cui 2386ca9c54d2SDexuan Cui err = mana_init_port(ndev); 2387ca9c54d2SDexuan Cui if (err) 2388ca9c54d2SDexuan Cui return err; 2389ca9c54d2SDexuan Cui 2390a137c069SHaiyang Zhang if (apc->port_st_save) { 2391ca9c54d2SDexuan Cui err = mana_alloc_queues(ndev); 2392ca9c54d2SDexuan Cui if (err) { 2393a137c069SHaiyang Zhang mana_cleanup_port_context(apc); 2394ca9c54d2SDexuan Cui return err; 2395ca9c54d2SDexuan Cui } 2396a137c069SHaiyang Zhang } 2397ca9c54d2SDexuan Cui 2398ca9c54d2SDexuan Cui apc->port_is_up = apc->port_st_save; 2399ca9c54d2SDexuan Cui 2400ca9c54d2SDexuan Cui /* Ensure port state updated before txq state */ 2401ca9c54d2SDexuan Cui smp_wmb(); 2402ca9c54d2SDexuan Cui 2403a137c069SHaiyang Zhang if (apc->port_is_up) 2404ca9c54d2SDexuan Cui netif_carrier_on(ndev); 2405a137c069SHaiyang Zhang 2406a137c069SHaiyang Zhang netif_device_attach(ndev); 
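/* The smp_wmb() above ensures apc->port_is_up is visible before the carrier/txq state is restored; mana_start_xmit() drops packets while port_is_up is false, as described in the comment in mana_dealloc_queues() below. */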
2407ca9c54d2SDexuan Cui 2408ca9c54d2SDexuan Cui return 0; 2409ca9c54d2SDexuan Cui } 2410ca9c54d2SDexuan Cui 2411ca9c54d2SDexuan Cui static int mana_dealloc_queues(struct net_device *ndev) 2412ca9c54d2SDexuan Cui { 2413ca9c54d2SDexuan Cui struct mana_port_context *apc = netdev_priv(ndev); 24141566e7d6SDexuan Cui struct gdma_dev *gd = apc->ac->gdma_dev; 2415ca9c54d2SDexuan Cui struct mana_txq *txq; 2416ca9c54d2SDexuan Cui int i, err; 2417ca9c54d2SDexuan Cui 2418ca9c54d2SDexuan Cui if (apc->port_is_up) 2419ca9c54d2SDexuan Cui return -EINVAL; 2420ca9c54d2SDexuan Cui 2421ed5356b5SHaiyang Zhang mana_chn_setxdp(apc, NULL); 2422ed5356b5SHaiyang Zhang 24231566e7d6SDexuan Cui if (gd->gdma_context->is_pf) 24241566e7d6SDexuan Cui mana_pf_deregister_filter(apc); 24251566e7d6SDexuan Cui 2426ca9c54d2SDexuan Cui /* No packet can be transmitted now since apc->port_is_up is false. 2427ca9c54d2SDexuan Cui * There is still a tiny chance that mana_poll_tx_cq() can re-enable 2428ca9c54d2SDexuan Cui * a txq because it may not timely see apc->port_is_up being cleared 2429ca9c54d2SDexuan Cui * to false, but it doesn't matter since mana_start_xmit() drops any 2430ca9c54d2SDexuan Cui * new packets due to apc->port_is_up being false. 2431ca9c54d2SDexuan Cui * 2432ca9c54d2SDexuan Cui * Drain all the in-flight TX packets 2433ca9c54d2SDexuan Cui */ 2434ca9c54d2SDexuan Cui for (i = 0; i < apc->num_queues; i++) { 2435ca9c54d2SDexuan Cui txq = &apc->tx_qp[i].txq; 2436ca9c54d2SDexuan Cui 2437ca9c54d2SDexuan Cui while (atomic_read(&txq->pending_sends) > 0) 2438ca9c54d2SDexuan Cui usleep_range(1000, 2000); 2439ca9c54d2SDexuan Cui } 2440ca9c54d2SDexuan Cui 2441ca9c54d2SDexuan Cui /* We're 100% sure the queues can no longer be woken up, because 2442ca9c54d2SDexuan Cui * we're sure now mana_poll_tx_cq() can't be running. 
2443ca9c54d2SDexuan Cui */ 2444ca9c54d2SDexuan Cui 2445ca9c54d2SDexuan Cui apc->rss_state = TRI_STATE_FALSE; 2446ca9c54d2SDexuan Cui err = mana_config_rss(apc, TRI_STATE_FALSE, false, false); 2447ca9c54d2SDexuan Cui if (err) { 2448ca9c54d2SDexuan Cui netdev_err(ndev, "Failed to disable vPort: %d\n", err); 2449ca9c54d2SDexuan Cui return err; 2450ca9c54d2SDexuan Cui } 2451ca9c54d2SDexuan Cui 2452ca9c54d2SDexuan Cui mana_destroy_vport(apc); 2453ca9c54d2SDexuan Cui 2454ca9c54d2SDexuan Cui return 0; 2455ca9c54d2SDexuan Cui } 2456ca9c54d2SDexuan Cui 2457ca9c54d2SDexuan Cui int mana_detach(struct net_device *ndev, bool from_close) 2458ca9c54d2SDexuan Cui { 2459ca9c54d2SDexuan Cui struct mana_port_context *apc = netdev_priv(ndev); 2460ca9c54d2SDexuan Cui int err; 2461ca9c54d2SDexuan Cui 2462ca9c54d2SDexuan Cui ASSERT_RTNL(); 2463ca9c54d2SDexuan Cui 2464ca9c54d2SDexuan Cui apc->port_st_save = apc->port_is_up; 2465ca9c54d2SDexuan Cui apc->port_is_up = false; 2466ca9c54d2SDexuan Cui 2467ca9c54d2SDexuan Cui /* Ensure port state updated before txq state */ 2468ca9c54d2SDexuan Cui smp_wmb(); 2469ca9c54d2SDexuan Cui 2470ca9c54d2SDexuan Cui netif_tx_disable(ndev); 2471ca9c54d2SDexuan Cui netif_carrier_off(ndev); 2472ca9c54d2SDexuan Cui 2473ca9c54d2SDexuan Cui if (apc->port_st_save) { 2474ca9c54d2SDexuan Cui err = mana_dealloc_queues(ndev); 2475ca9c54d2SDexuan Cui if (err) 2476ca9c54d2SDexuan Cui return err; 2477ca9c54d2SDexuan Cui } 2478ca9c54d2SDexuan Cui 2479ca9c54d2SDexuan Cui if (!from_close) { 2480ca9c54d2SDexuan Cui netif_device_detach(ndev); 2481ca9c54d2SDexuan Cui mana_cleanup_port_context(apc); 2482ca9c54d2SDexuan Cui } 2483ca9c54d2SDexuan Cui 2484ca9c54d2SDexuan Cui return 0; 2485ca9c54d2SDexuan Cui } 2486ca9c54d2SDexuan Cui 2487ca9c54d2SDexuan Cui static int mana_probe_port(struct mana_context *ac, int port_idx, 2488ca9c54d2SDexuan Cui struct net_device **ndev_storage) 2489ca9c54d2SDexuan Cui { 2490ca9c54d2SDexuan Cui struct gdma_context *gc = ac->gdma_dev->gdma_context; 2491ca9c54d2SDexuan Cui struct mana_port_context *apc; 2492ca9c54d2SDexuan Cui struct net_device *ndev; 2493ca9c54d2SDexuan Cui int err; 2494ca9c54d2SDexuan Cui 2495ca9c54d2SDexuan Cui ndev = alloc_etherdev_mq(sizeof(struct mana_port_context), 2496ca9c54d2SDexuan Cui gc->max_num_queues); 2497ca9c54d2SDexuan Cui if (!ndev) 2498ca9c54d2SDexuan Cui return -ENOMEM; 2499ca9c54d2SDexuan Cui 2500ca9c54d2SDexuan Cui *ndev_storage = ndev; 2501ca9c54d2SDexuan Cui 2502ca9c54d2SDexuan Cui apc = netdev_priv(ndev); 2503ca9c54d2SDexuan Cui apc->ac = ac; 2504ca9c54d2SDexuan Cui apc->ndev = ndev; 2505ca9c54d2SDexuan Cui apc->max_queues = gc->max_num_queues; 25061e2d0824SHaiyang Zhang apc->num_queues = gc->max_num_queues; 2507ca9c54d2SDexuan Cui apc->port_handle = INVALID_MANA_HANDLE; 25081566e7d6SDexuan Cui apc->pf_filter_handle = INVALID_MANA_HANDLE; 2509ca9c54d2SDexuan Cui apc->port_idx = port_idx; 2510ca9c54d2SDexuan Cui 2511b5c1c985SLong Li mutex_init(&apc->vport_mutex); 2512b5c1c985SLong Li apc->vport_use_count = 0; 2513b5c1c985SLong Li 2514ca9c54d2SDexuan Cui ndev->netdev_ops = &mana_devops; 2515ca9c54d2SDexuan Cui ndev->ethtool_ops = &mana_ethtool_ops; 2516ca9c54d2SDexuan Cui ndev->mtu = ETH_DATA_LEN; 251780f6215bSHaiyang Zhang ndev->max_mtu = gc->adapter_mtu - ETH_HLEN; 251880f6215bSHaiyang Zhang ndev->min_mtu = ETH_MIN_MTU; 2519ca9c54d2SDexuan Cui ndev->needed_headroom = MANA_HEADROOM; 2520d44089e5SLong Li ndev->dev_port = port_idx; 2521ca9c54d2SDexuan Cui SET_NETDEV_DEV(ndev, gc->dev); 2522ca9c54d2SDexuan Cui 2523ca9c54d2SDexuan Cui 

static int mana_probe_port(struct mana_context *ac, int port_idx,
			   struct net_device **ndev_storage)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_port_context *apc;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
				 gc->max_num_queues);
	if (!ndev)
		return -ENOMEM;

	*ndev_storage = ndev;

	apc = netdev_priv(ndev);
	apc->ac = ac;
	apc->ndev = ndev;
	apc->max_queues = gc->max_num_queues;
	apc->num_queues = gc->max_num_queues;
	apc->port_handle = INVALID_MANA_HANDLE;
	apc->pf_filter_handle = INVALID_MANA_HANDLE;
	apc->port_idx = port_idx;

	mutex_init(&apc->vport_mutex);
	apc->vport_use_count = 0;

	ndev->netdev_ops = &mana_devops;
	ndev->ethtool_ops = &mana_ethtool_ops;
	ndev->mtu = ETH_DATA_LEN;
	ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->needed_headroom = MANA_HEADROOM;
	ndev->dev_port = port_idx;
	SET_NETDEV_DEV(ndev, gc->dev);

	netif_carrier_off(ndev);

	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);

	err = mana_init_port(ndev);
	if (err)
		goto free_net;

	netdev_lockdep_set_classes(ndev);

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features |= NETIF_F_RXCSUM;
	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	ndev->hw_features |= NETIF_F_RXHASH;
	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	ndev->vlan_features = ndev->features;
	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_NDO_XMIT;

	err = register_netdev(ndev);
	if (err) {
		netdev_err(ndev, "Unable to register netdev.\n");
		goto reset_apc;
	}

	return 0;

reset_apc:
	kfree(apc->rxqs);
	apc->rxqs = NULL;
free_net:
	*ndev_storage = NULL;
	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
	free_netdev(ndev);
	return err;
}

static void adev_release(struct device *dev)
{
	struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);

	kfree(madev);
}

static void remove_adev(struct gdma_dev *gd)
{
	struct auxiliary_device *adev = gd->adev;
	int id = adev->id;

	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);

	mana_adev_idx_free(id);
	gd->adev = NULL;
}
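
/* Note on add_adev() below: once auxiliary_device_init() succeeds, the
 * device core owns @madev and frees it through adev_release(), so the
 * add_fail path must drop that reference with auxiliary_device_uninit()
 * rather than calling kfree() directly; only the pre-init failure paths
 * (init_fail, idx_fail) may free @madev by hand.
 */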
static int add_adev(struct gdma_dev *gd)
{
	struct auxiliary_device *adev;
	struct mana_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return -ENOMEM;

	adev = &madev->adev;
	ret = mana_adev_idx_alloc();
	if (ret < 0)
		goto idx_fail;
	adev->id = ret;

	adev->name = "rdma";
	adev->dev.parent = gd->gdma_context->dev;
	adev->dev.release = adev_release;
	madev->mdev = gd;

	ret = auxiliary_device_init(adev);
	if (ret)
		goto init_fail;

	ret = auxiliary_device_add(adev);
	if (ret)
		goto add_fail;

	gd->adev = adev;
	return 0;

add_fail:
	auxiliary_device_uninit(adev);

init_fail:
	mana_adev_idx_free(adev->id);

idx_fail:
	kfree(madev);

	return ret;
}

int mana_probe(struct gdma_dev *gd, bool resuming)
{
	struct gdma_context *gc = gd->gdma_context;
	struct mana_context *ac = gd->driver_data;
	struct device *dev = gc->dev;
	u16 num_ports = 0;
	int err;
	int i;

	dev_info(dev,
		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);

	err = mana_gd_register_device(gd);
	if (err)
		return err;

	if (!resuming) {
		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
		if (!ac)
			return -ENOMEM;

		ac->gdma_dev = gd;
		gd->driver_data = ac;
	}

	err = mana_create_eq(ac);
	if (err)
		goto out;

	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
				    MANA_MICRO_VERSION, &num_ports);
	if (err)
		goto out;

	if (!resuming) {
		ac->num_ports = num_ports;
	} else {
		if (ac->num_ports != num_ports) {
			dev_err(dev, "The number of vPorts changed: %d->%d\n",
				ac->num_ports, num_ports);
			err = -EPROTO;
			goto out;
		}
	}

	if (ac->num_ports == 0)
		dev_err(dev, "Failed to detect any vPort\n");

	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
		ac->num_ports = MAX_PORTS_IN_MANA_DEV;

	if (!resuming) {
		for (i = 0; i < ac->num_ports; i++) {
			err = mana_probe_port(ac, i, &ac->ports[i]);
			if (err)
				break;
		}
	} else {
		for (i = 0; i < ac->num_ports; i++) {
			rtnl_lock();
			err = mana_attach(ac->ports[i]);
			rtnl_unlock();
			if (err)
				break;
		}
	}

	/* Don't let a successful add_adev() mask a port probe/attach error */
	if (err)
		goto out;

	err = add_adev(gd);
out:
	if (err)
		mana_remove(gd, false);

	return err;
}
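
/* Minimal consumer sketch (assumed, not part of this file): an RDMA driver
 * binds to the auxiliary device published by add_adev() by matching
 * "<module>.<name>" in its id_table, assumed to be "mana.rdma" here. The
 * mana_r_* identifiers are hypothetical.
 *
 *	static int mana_r_probe(struct auxiliary_device *adev,
 *				const struct auxiliary_device_id *id)
 *	{
 *		struct mana_adev *madev = container_of(adev, struct mana_adev,
 *						       adev);
 *		struct gdma_dev *gd = madev->mdev;
 *
 *		// take over gd for RDMA use
 *		return 0;
 *	}
 *
 *	static void mana_r_remove(struct auxiliary_device *adev)
 *	{
 *		// undo mana_r_probe()
 *	}
 *
 *	static const struct auxiliary_device_id mana_r_id_table[] = {
 *		{ .name = "mana.rdma" },
 *		{}
 *	};
 *
 *	static struct auxiliary_driver mana_r_driver = {
 *		.probe	  = mana_r_probe,
 *		.remove	  = mana_r_remove,
 *		.id_table = mana_r_id_table,
 *	};
 *	module_auxiliary_driver(mana_r_driver);
 */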

void mana_remove(struct gdma_dev *gd, bool suspending)
{
	struct gdma_context *gc = gd->gdma_context;
	struct mana_context *ac = gd->driver_data;
	struct device *dev = gc->dev;
	struct net_device *ndev;
	int err;
	int i;

	/* adev currently doesn't support suspending, always remove it */
	if (gd->adev)
		remove_adev(gd);

	for (i = 0; i < ac->num_ports; i++) {
		ndev = ac->ports[i];
		if (!ndev) {
			if (i == 0)
				dev_err(dev, "No net device to remove\n");
			goto out;
		}

		/* All cleanup actions should stay after rtnl_lock(), otherwise
		 * other functions may access partially cleaned up data.
		 */
		rtnl_lock();

		err = mana_detach(ndev, false);
		if (err)
			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
				   i, err);

		if (suspending) {
			/* No need to unregister the ndev. */
			rtnl_unlock();
			continue;
		}

		unregister_netdevice(ndev);

		rtnl_unlock();

		free_netdev(ndev);
	}

	mana_destroy_eq(ac);
out:
	mana_gd_deregister_device(gd);

	if (suspending)
		return;

	gd->driver_data = NULL;
	gd->gdma_context = NULL;
	kfree(ac);
}
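
/* Usage sketch for the resuming/suspending flags (assumed caller shape; the
 * PCI-level power-management hooks live outside this file): a hibernation
 * path tears down without unregistering the netdevs, then re-attaches them:
 *
 *	// suspend: keep the ndevs registered, release queues and HW state
 *	mana_remove(gd, true);
 *
 *	// resume: re-register with the HW and re-attach each vPort
 *	err = mana_probe(gd, true);
 */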