Lines Matching +full:num +full:- +full:rxq
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
44 apc->port_is_up = true; in mana_open()
59 if (!apc->port_is_up) in mana_close()
72 if (skb->protocol == htons(ETH_P_IP)) { in mana_checksum_info()
75 if (ip->protocol == IPPROTO_TCP) in mana_checksum_info()
78 if (ip->protocol == IPPROTO_UDP) in mana_checksum_info()
80 } else if (skb->protocol == htons(ETH_P_IPV6)) { in mana_checksum_info()
83 if (ip6->nexthdr == IPPROTO_TCP) in mana_checksum_info()
86 if (ip6->nexthdr == IPPROTO_UDP) in mana_checksum_info()
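
The mana_checksum_info() lines above inspect skb->protocol and the L4 header to decide whether TCP or UDP checksum offload applies. A minimal standalone sketch of the same classification, assuming only core networking headers (example_offload_csum is an illustrative name, not the driver's):

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>

/* Return true when the packet is TCP or UDP over IPv4/IPv6 and so can
 * use L4 checksum offload; illustrative helper, not part of the driver. */
static bool example_offload_csum(const struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                const struct iphdr *ip = ip_hdr(skb);

                return ip->protocol == IPPROTO_TCP ||
                       ip->protocol == IPPROTO_UDP;
        }

        if (skb->protocol == htons(ETH_P_IPV6)) {
                const struct ipv6hdr *ip6 = ipv6_hdr(skb);

                return ip6->nexthdr == IPPROTO_TCP ||
                       ip6->nexthdr == IPPROTO_UDP;
        }

        return false;
}
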
97 ash->dma_handle[sg_i] = da; in mana_add_sge()
98 ash->size[sg_i] = sge_len; in mana_add_sge()
100 tp->wqe_req.sgl[sg_i].address = da; in mana_add_sge()
101 tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey; in mana_add_sge()
102 tp->wqe_req.sgl[sg_i].size = sge_len; in mana_add_sge()
108 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; in mana_map_skb()
109 int hsg = 1; /* num of SGEs of linear part */ in mana_map_skb()
110 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_map_skb()
120 gc = gd->gdma_context; in mana_map_skb()
121 dev = gc->dev; in mana_map_skb()
125 sge1_len = skb_hlen - gso_hs; in mana_map_skb()
130 da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE); in mana_map_skb()
132 return -ENOMEM; in mana_map_skb()
134 mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey); in mana_map_skb()
138 da = dma_map_single(dev, skb->data + sge0_len, sge1_len, in mana_map_skb()
143 mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey); in mana_map_skb()
147 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mana_map_skb()
150 frag = &skb_shinfo(skb)->frags[i]; in mana_map_skb()
157 gd->gpa_mkey); in mana_map_skb()
163 for (i = sg_i - 1; i >= hsg; i--) in mana_map_skb()
164 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], in mana_map_skb()
167 for (i = hsg - 1; i >= 0; i--) in mana_map_skb()
168 dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], in mana_map_skb()
171 return -ENOMEM; in mana_map_skb()
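
mana_map_skb() above maps the skb's linear part with dma_map_single() and each page fragment separately, recording every handle so it can unwind on a mapping failure. A condensed sketch of that map-then-unwind pattern, assuming caller-supplied bookkeeping arrays (dma[]/len[] stand in for the driver's mana_skb_head fields):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map the linear part at index 0 and frag f at index f + 1; on failure,
 * unmap everything mapped so far in reverse order. Illustrative only. */
static int example_map_skb(struct device *dev, struct sk_buff *skb,
                           dma_addr_t *dma, u32 *len)
{
        int i, f;

        len[0] = skb_headlen(skb);
        dma[0] = dma_map_single(dev, skb->data, len[0], DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma[0]))
                return -ENOMEM;

        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

                len[f + 1] = skb_frag_size(frag);
                dma[f + 1] = skb_frag_dma_map(dev, frag, 0, len[f + 1],
                                              DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma[f + 1]))
                        goto unwind;
        }
        return 0;

unwind:
        for (i = f; i >= 1; i--)
                dma_unmap_page(dev, dma[i], len[i], DMA_TO_DEVICE);
        dma_unmap_single(dev, dma[0], len[0], DMA_TO_DEVICE);
        return -ENOMEM;
}
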
184 int num_sge = 1 + skb_shinfo(skb)->nr_frags; in mana_fix_skb_head()
195 return -EINVAL; in mana_fix_skb_head()
206 if (skb->encapsulation) { in mana_get_gso_hs()
209 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in mana_get_gso_hs()
224 int gso_hs = 0; /* zero for non-GSO pkts */ in mana_start_xmit()
226 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_start_xmit()
237 if (unlikely(!apc->port_is_up)) in mana_start_xmit()
243 txq = &apc->tx_qp[txq_idx].txq; in mana_start_xmit()
244 gdma_sq = txq->gdma_sq; in mana_start_xmit()
245 cq = &apc->tx_qp[txq_idx].tx_cq; in mana_start_xmit()
246 tx_stats = &txq->stats; in mana_start_xmit()
248 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id; in mana_start_xmit()
249 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; in mana_start_xmit()
251 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) { in mana_start_xmit()
252 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset; in mana_start_xmit()
255 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset; in mana_start_xmit()
270 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
271 tx_stats->short_pkt_fmt++; in mana_start_xmit()
272 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
275 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
276 tx_stats->long_pkt_fmt++; in mana_start_xmit()
277 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
284 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; in mana_start_xmit()
286 if (skb->protocol == htons(ETH_P_IP)) in mana_start_xmit()
288 else if (skb->protocol == htons(ETH_P_IPV6)) in mana_start_xmit()
302 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
303 if (skb->encapsulation) { in mana_start_xmit()
304 tx_stats->tso_inner_packets++; in mana_start_xmit()
305 tx_stats->tso_inner_bytes += skb->len - gso_hs; in mana_start_xmit()
307 tx_stats->tso_packets++; in mana_start_xmit()
308 tx_stats->tso_bytes += skb->len - gso_hs; in mana_start_xmit()
310 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
319 pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size; in mana_start_xmit()
322 ip_hdr(skb)->tot_len = 0; in mana_start_xmit()
323 ip_hdr(skb)->check = 0; in mana_start_xmit()
324 tcp_hdr(skb)->check = in mana_start_xmit()
325 ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in mana_start_xmit()
326 ip_hdr(skb)->daddr, 0, in mana_start_xmit()
329 ipv6_hdr(skb)->payload_len = 0; in mana_start_xmit()
330 tcp_hdr(skb)->check = in mana_start_xmit()
331 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in mana_start_xmit()
332 &ipv6_hdr(skb)->daddr, 0, in mana_start_xmit()
335 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in mana_start_xmit()
338 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
339 tx_stats->csum_partial++; in mana_start_xmit()
340 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
376 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
377 tx_stats->mana_map_err++; in mana_start_xmit()
378 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
382 skb_queue_tail(&txq->pending_skbs, skb); in mana_start_xmit()
384 len = skb->len; in mana_start_xmit()
388 (struct gdma_posted_wqe_info *)skb->cb); in mana_start_xmit()
391 apc->eth_stats.stop_queue++; in mana_start_xmit()
395 (void)skb_dequeue_tail(&txq->pending_skbs); in mana_start_xmit()
402 atomic_inc(&txq->pending_sends); in mana_start_xmit()
404 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq); in mana_start_xmit()
409 tx_stats = &txq->stats; in mana_start_xmit()
410 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
411 tx_stats->packets++; in mana_start_xmit()
412 tx_stats->bytes += len; in mana_start_xmit()
413 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
418 apc->eth_stats.wake_queue++; in mana_start_xmit()
427 ndev->stats.tx_dropped++; in mana_start_xmit()
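
For TSO packets, the mana_start_xmit() branch above zeroes the IP length fields and seeds tcp_hdr(skb)->check with the pseudo-header checksum so the hardware can complete the per-segment checksums. The same fixup, pulled out as a standalone sketch (example_tso_csum_fixup is an illustrative name):

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

/* Seed tcp->check with the pseudo-header checksum; the NIC fills in the
 * rest for each generated segment. Mirrors the driver lines above. */
static void example_tso_csum_fixup(struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                ip_hdr(skb)->tot_len = 0;
                ip_hdr(skb)->check = 0;
                tcp_hdr(skb)->check =
                        ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                           ip_hdr(skb)->daddr,
                                           0, IPPROTO_TCP, 0);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check =
                        ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                         &ipv6_hdr(skb)->daddr,
                                         0, IPPROTO_TCP, 0);
        }
}
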
437 unsigned int num_queues = apc->num_queues; in mana_get_stats64()
444 if (!apc->port_is_up) in mana_get_stats64()
447 netdev_stats_to_stats64(st, &ndev->stats); in mana_get_stats64()
450 rx_stats = &apc->rxqs[q]->stats; in mana_get_stats64()
453 start = u64_stats_fetch_begin(&rx_stats->syncp); in mana_get_stats64()
454 packets = rx_stats->packets; in mana_get_stats64()
455 bytes = rx_stats->bytes; in mana_get_stats64()
456 } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); in mana_get_stats64()
458 st->rx_packets += packets; in mana_get_stats64()
459 st->rx_bytes += bytes; in mana_get_stats64()
463 tx_stats = &apc->tx_qp[q].txq.stats; in mana_get_stats64()
466 start = u64_stats_fetch_begin(&tx_stats->syncp); in mana_get_stats64()
467 packets = tx_stats->packets; in mana_get_stats64()
468 bytes = tx_stats->bytes; in mana_get_stats64()
469 } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); in mana_get_stats64()
471 st->tx_packets += packets; in mana_get_stats64()
472 st->tx_bytes += bytes; in mana_get_stats64()
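
mana_get_stats64() above reads each queue's 64-bit counters inside a u64_stats fetch/retry loop so the values stay consistent on 32-bit kernels while the datapath keeps updating them. A generic sketch of the reader side, assuming a stats struct shaped like the driver's (example_stats is illustrative):

#include <linux/u64_stats_sync.h>

struct example_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
};

/* Snapshot packets/bytes consistently; retry if a writer raced with us. */
static void example_read_stats(const struct example_stats *s,
                               u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                *packets = s->packets;
                *bytes = s->bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));
}

The writer side is the matching u64_stats_update_begin()/u64_stats_update_end() pairs visible in mana_start_xmit() and mana_rx_skb() above.
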
481 struct sock *sk = skb->sk; in mana_get_tx_queue()
484 txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK]; in mana_get_tx_queue()
487 rcu_access_pointer(sk->sk_dst_cache)) in mana_get_tx_queue()
498 if (ndev->real_num_tx_queues == 1) in mana_select_queue()
501 txq = sk_tx_queue_get(skb->sk); in mana_select_queue()
503 if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) { in mana_select_queue()
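
mana_get_tx_queue()/mana_select_queue() above hash the flow into the RSS indirection table and cache the chosen queue in the socket so established flows skip the hash on later packets. A simplified sketch of that caching idea (tbl/mask and example_pick_txq are illustrative stand-ins for the driver's indirection table and mask):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Reuse the socket's cached queue if still valid; otherwise hash into
 * the table and cache the result for sockets with a destination route. */
static u16 example_pick_txq(struct net_device *ndev, struct sk_buff *skb,
                            const u32 *tbl, u32 mask)
{
        struct sock *sk = skb->sk;
        int txq = sk_tx_queue_get(sk);

        if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
                txq = tbl[skb_get_hash(skb) & mask];
                if (sk && sk_fullsock(sk) &&
                    rcu_access_pointer(sk->sk_dst_cache))
                        sk_tx_queue_set(sk, txq);
        }

        return txq;
}
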
513 /* Release pre-allocated RX buffers */
519 dev = mpc->ac->gdma_dev->gdma_context->dev; in mana_pre_dealloc_rxbufs()
521 if (!mpc->rxbufs_pre) in mana_pre_dealloc_rxbufs()
524 if (!mpc->das_pre) in mana_pre_dealloc_rxbufs()
527 while (mpc->rxbpre_total) { in mana_pre_dealloc_rxbufs()
528 i = --mpc->rxbpre_total; in mana_pre_dealloc_rxbufs()
529 dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize, in mana_pre_dealloc_rxbufs()
531 put_page(virt_to_head_page(mpc->rxbufs_pre[i])); in mana_pre_dealloc_rxbufs()
534 kfree(mpc->das_pre); in mana_pre_dealloc_rxbufs()
535 mpc->das_pre = NULL; in mana_pre_dealloc_rxbufs()
538 kfree(mpc->rxbufs_pre); in mana_pre_dealloc_rxbufs()
539 mpc->rxbufs_pre = NULL; in mana_pre_dealloc_rxbufs()
542 mpc->rxbpre_datasize = 0; in mana_pre_dealloc_rxbufs()
543 mpc->rxbpre_alloc_size = 0; in mana_pre_dealloc_rxbufs()
544 mpc->rxbpre_headroom = 0; in mana_pre_dealloc_rxbufs()
547 /* Get a buffer from the pre-allocated RX buffers */
548 static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da) in mana_get_rxbuf_pre() argument
550 struct net_device *ndev = rxq->ndev; in mana_get_rxbuf_pre()
556 if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) { in mana_get_rxbuf_pre()
557 netdev_err(ndev, "No RX pre-allocated bufs\n"); in mana_get_rxbuf_pre()
562 if (mpc->rxbpre_datasize != rxq->datasize) { in mana_get_rxbuf_pre()
564 mpc->rxbpre_datasize, rxq->datasize); in mana_get_rxbuf_pre()
568 if (mpc->rxbpre_alloc_size != rxq->alloc_size) { in mana_get_rxbuf_pre()
570 mpc->rxbpre_alloc_size, rxq->alloc_size); in mana_get_rxbuf_pre()
574 if (mpc->rxbpre_headroom != rxq->headroom) { in mana_get_rxbuf_pre()
576 mpc->rxbpre_headroom, rxq->headroom); in mana_get_rxbuf_pre()
580 mpc->rxbpre_total--; in mana_get_rxbuf_pre()
582 *da = mpc->das_pre[mpc->rxbpre_total]; in mana_get_rxbuf_pre()
583 va = mpc->rxbufs_pre[mpc->rxbpre_total]; in mana_get_rxbuf_pre()
584 mpc->rxbufs_pre[mpc->rxbpre_total] = NULL; in mana_get_rxbuf_pre()
587 if (!mpc->rxbpre_total) in mana_get_rxbuf_pre()
620 mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize, in mana_pre_alloc_rxbufs()
621 &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom); in mana_pre_alloc_rxbufs()
623 dev = mpc->ac->gdma_dev->gdma_context->dev; in mana_pre_alloc_rxbufs()
625 num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE; in mana_pre_alloc_rxbufs()
627 WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n"); in mana_pre_alloc_rxbufs()
628 mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL); in mana_pre_alloc_rxbufs()
629 if (!mpc->rxbufs_pre) in mana_pre_alloc_rxbufs()
632 mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL); in mana_pre_alloc_rxbufs()
633 if (!mpc->das_pre) in mana_pre_alloc_rxbufs()
636 mpc->rxbpre_total = 0; in mana_pre_alloc_rxbufs()
639 if (mpc->rxbpre_alloc_size > PAGE_SIZE) { in mana_pre_alloc_rxbufs()
640 va = netdev_alloc_frag(mpc->rxbpre_alloc_size); in mana_pre_alloc_rxbufs()
647 get_order(mpc->rxbpre_alloc_size)) { in mana_pre_alloc_rxbufs()
659 da = dma_map_single(dev, va + mpc->rxbpre_headroom, in mana_pre_alloc_rxbufs()
660 mpc->rxbpre_datasize, DMA_FROM_DEVICE); in mana_pre_alloc_rxbufs()
666 mpc->rxbufs_pre[i] = va; in mana_pre_alloc_rxbufs()
667 mpc->das_pre[i] = da; in mana_pre_alloc_rxbufs()
668 mpc->rxbpre_total = i + 1; in mana_pre_alloc_rxbufs()
675 return -ENOMEM; in mana_pre_alloc_rxbufs()
681 unsigned int old_mtu = ndev->mtu; in mana_change_mtu()
684 /* Pre-allocate buffers to prevent failure in mana_attach later */ in mana_change_mtu()
697 ndev->mtu = new_mtu; in mana_change_mtu()
702 ndev->mtu = old_mtu; in mana_change_mtu()
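
mana_change_mtu() above pre-allocates RX buffers for the new size, detaches the port, updates ndev->mtu, and re-attaches, restoring the old MTU if the re-attach fails. A hedged sketch of that shape with the driver-specific steps passed in as callbacks (prealloc/detach/attach are placeholders for mana_pre_alloc_rxbufs(), mana_detach() and mana_attach(); not the driver's signatures):

#include <linux/netdevice.h>

/* Generic change-MTU flow: make the risky allocation up front, then
 * swap the MTU between detach and attach, rolling back on failure. */
static int example_change_mtu(struct net_device *ndev, int new_mtu,
                              int (*prealloc)(struct net_device *, int),
                              int (*detach)(struct net_device *),
                              int (*attach)(struct net_device *))
{
        unsigned int old_mtu = ndev->mtu;
        int err;

        err = prealloc(ndev, new_mtu);  /* so attach cannot fail on memory */
        if (err)
                return err;

        err = detach(ndev);
        if (err)
                return err;

        ndev->mtu = new_mtu;

        err = attach(ndev);
        if (err)
                ndev->mtu = old_mtu;    /* roll back on failure */

        return err;
}
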
724 kfree(apc->rxqs); in mana_cleanup_port_context()
725 apc->rxqs = NULL; in mana_cleanup_port_context()
730 apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), in mana_init_port_context()
733 return !apc->rxqs ? -ENOMEM : 0; in mana_init_port_context()
739 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_send_request()
742 struct device *dev = gc->dev; in mana_send_request()
746 req->dev_id = gc->mana.dev_id; in mana_send_request()
747 req->activity_id = atomic_inc_return(&activity_id); in mana_send_request()
751 if (err || resp->status) { in mana_send_request()
753 err, resp->status); in mana_send_request()
754 return err ? err : -EPROTO; in mana_send_request()
757 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 || in mana_send_request()
758 req->activity_id != resp->activity_id) { in mana_send_request()
760 req->dev_id.as_uint32, resp->dev_id.as_uint32, in mana_send_request()
761 req->activity_id, resp->activity_id); in mana_send_request()
762 return -EPROTO; in mana_send_request()
772 if (resp_hdr->response.msg_type != expected_code) in mana_verify_resp_hdr()
773 return -EPROTO; in mana_verify_resp_hdr()
775 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1) in mana_verify_resp_hdr()
776 return -EPROTO; in mana_verify_resp_hdr()
778 if (resp_hdr->response.msg_size < min_size) in mana_verify_resp_hdr()
779 return -EPROTO; in mana_verify_resp_hdr()
796 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_register_hw_vport()
799 netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err); in mana_pf_register_hw_vport()
806 netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n", in mana_pf_register_hw_vport()
808 return err ? err : -EPROTO; in mana_pf_register_hw_vport()
811 apc->port_handle = resp.hw_vport_handle; in mana_pf_register_hw_vport()
823 req.hw_vport_handle = apc->port_handle; in mana_pf_deregister_hw_vport()
825 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_deregister_hw_vport()
828 netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n", in mana_pf_deregister_hw_vport()
836 netdev_err(apc->ndev, in mana_pf_deregister_hw_vport()
849 req.vport = apc->port_handle; in mana_pf_register_filter()
850 memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN); in mana_pf_register_filter()
852 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_register_filter()
855 netdev_err(apc->ndev, "Failed to register filter: %d\n", err); in mana_pf_register_filter()
862 netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n", in mana_pf_register_filter()
864 return err ? err : -EPROTO; in mana_pf_register_filter()
867 apc->pf_filter_handle = resp.filter_handle; in mana_pf_register_filter()
879 req.filter_handle = apc->pf_filter_handle; in mana_pf_deregister_filter()
881 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_deregister_filter()
884 netdev_err(apc->ndev, "Failed to unregister filter: %d\n", in mana_pf_deregister_filter()
892 netdev_err(apc->ndev, in mana_pf_deregister_filter()
901 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_query_device_cfg()
904 struct device *dev = gc->dev; in mana_query_device_cfg()
928 err = -EPROTO; in mana_query_device_cfg()
935 gc->adapter_mtu = resp.adapter_mtu; in mana_query_device_cfg()
937 gc->adapter_mtu = ETH_FRAME_LEN; in mana_query_device_cfg()
954 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_vport_cfg()
965 return -EPROTO; in mana_query_vport_cfg()
971 apc->port_handle = resp.vport; in mana_query_vport_cfg()
972 ether_addr_copy(apc->mac_addr, resp.mac_addr); in mana_query_vport_cfg()
979 mutex_lock(&apc->vport_mutex); in mana_uncfg_vport()
980 apc->vport_use_count--; in mana_uncfg_vport()
981 WARN_ON(apc->vport_use_count < 0); in mana_uncfg_vport()
982 mutex_unlock(&apc->vport_mutex); in mana_uncfg_vport()
1011 mutex_lock(&apc->vport_mutex); in mana_cfg_vport()
1012 if (apc->vport_use_count > 0) { in mana_cfg_vport()
1013 mutex_unlock(&apc->vport_mutex); in mana_cfg_vport()
1014 return -EBUSY; in mana_cfg_vport()
1016 apc->vport_use_count++; in mana_cfg_vport()
1017 mutex_unlock(&apc->vport_mutex); in mana_cfg_vport()
1021 req.vport = apc->port_handle; in mana_cfg_vport()
1025 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_cfg_vport()
1028 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err); in mana_cfg_vport()
1035 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n", in mana_cfg_vport()
1038 err = -EPROTO; in mana_cfg_vport()
1043 apc->tx_shortform_allowed = resp.short_form_allowed; in mana_cfg_vport()
1044 apc->tx_vp_offset = resp.tx_vport_offset; in mana_cfg_vport()
1046 netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n", in mana_cfg_vport()
1047 apc->port_handle, protection_dom_id, doorbell_pg_id); in mana_cfg_vport()
1064 struct net_device *ndev = apc->ndev; in mana_cfg_vport_steering()
1072 return -ENOMEM; in mana_cfg_vport_steering()
1074 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, in mana_cfg_vport_steering()
1077 req->hdr.req.msg_version = GDMA_MESSAGE_V2; in mana_cfg_vport_steering()
1079 req->vport = apc->port_handle; in mana_cfg_vport_steering()
1080 req->num_indir_entries = num_entries; in mana_cfg_vport_steering()
1081 req->indir_tab_offset = sizeof(*req); in mana_cfg_vport_steering()
1082 req->rx_enable = rx; in mana_cfg_vport_steering()
1083 req->rss_enable = apc->rss_state; in mana_cfg_vport_steering()
1084 req->update_default_rxobj = update_default_rxobj; in mana_cfg_vport_steering()
1085 req->update_hashkey = update_key; in mana_cfg_vport_steering()
1086 req->update_indir_tab = update_tab; in mana_cfg_vport_steering()
1087 req->default_rxobj = apc->default_rxobj; in mana_cfg_vport_steering()
1088 req->cqe_coalescing_enable = 0; in mana_cfg_vport_steering()
1091 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); in mana_cfg_vport_steering()
1095 memcpy(req_indir_tab, apc->rxobj_table, in mana_cfg_vport_steering()
1096 req->num_indir_entries * sizeof(mana_handle_t)); in mana_cfg_vport_steering()
1099 err = mana_send_request(apc->ac, req, req_buf_size, &resp, in mana_cfg_vport_steering()
1116 err = -EPROTO; in mana_cfg_vport_steering()
1120 apc->port_handle, num_entries); in mana_cfg_vport_steering()
1134 struct net_device *ndev = apc->ndev; in mana_create_wq_obj()
1141 req.wq_gdma_region = wq_spec->gdma_region; in mana_create_wq_obj()
1142 req.cq_gdma_region = cq_spec->gdma_region; in mana_create_wq_obj()
1143 req.wq_size = wq_spec->queue_size; in mana_create_wq_obj()
1144 req.cq_size = cq_spec->queue_size; in mana_create_wq_obj()
1145 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id; in mana_create_wq_obj()
1146 req.cq_parent_qid = cq_spec->attached_eq; in mana_create_wq_obj()
1148 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_create_wq_obj()
1161 err = -EPROTO; in mana_create_wq_obj()
1167 err = -EPROTO; in mana_create_wq_obj()
1172 wq_spec->queue_index = resp.wq_id; in mana_create_wq_obj()
1173 cq_spec->queue_index = resp.cq_id; in mana_create_wq_obj()
1186 struct net_device *ndev = apc->ndev; in mana_destroy_wq_obj()
1194 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_destroy_wq_obj()
1211 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_destroy_eq()
1215 if (!ac->eqs) in mana_destroy_eq()
1218 for (i = 0; i < gc->max_num_queues; i++) { in mana_destroy_eq()
1219 eq = ac->eqs[i].eq; in mana_destroy_eq()
1226 kfree(ac->eqs); in mana_destroy_eq()
1227 ac->eqs = NULL; in mana_destroy_eq()
1232 struct gdma_dev *gd = ac->gdma_dev; in mana_create_eq()
1233 struct gdma_context *gc = gd->gdma_context; in mana_create_eq()
1238 ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq), in mana_create_eq()
1240 if (!ac->eqs) in mana_create_eq()
1241 return -ENOMEM; in mana_create_eq()
1247 spec.eq.context = ac->eqs; in mana_create_eq()
1250 for (i = 0; i < gc->max_num_queues; i++) { in mana_create_eq()
1251 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); in mana_create_eq()
1262 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq) in mana_fence_rq() argument
1268 init_completion(&rxq->fence_event); in mana_fence_rq()
1272 req.wq_obj_handle = rxq->rxobj; in mana_fence_rq()
1274 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_fence_rq()
1277 netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n", in mana_fence_rq()
1278 rxq->rxq_idx, err); in mana_fence_rq()
1284 netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n", in mana_fence_rq()
1285 rxq->rxq_idx, err, resp.hdr.status); in mana_fence_rq()
1287 err = -EPROTO; in mana_fence_rq()
1292 if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) { in mana_fence_rq()
1293 netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n", in mana_fence_rq()
1294 rxq->rxq_idx); in mana_fence_rq()
1295 return -ETIMEDOUT; in mana_fence_rq()
1304 struct mana_rxq *rxq; in mana_fence_rqs() local
1307 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_fence_rqs()
1308 rxq = apc->rxqs[rxq_idx]; in mana_fence_rqs()
1309 err = mana_fence_rq(apc, rxq); in mana_fence_rqs()
1322 used_space_old = wq->head - wq->tail; in mana_move_wq_tail()
1323 used_space_new = wq->head - (wq->tail + num_units); in mana_move_wq_tail()
1326 return -ERANGE; in mana_move_wq_tail()
1328 wq->tail += num_units; in mana_move_wq_tail()
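
mana_move_wq_tail() above advances a work queue's tail by the consumed units and returns -ERANGE if that would move the tail past the head. Because head and tail are free-running unsigned counters, the check compares the "used space" before and after the move; a wrap-safe sketch of that accounting (example_wq is an illustrative stand-in for the GDMA queue):

#include <linux/errno.h>
#include <linux/types.h>

struct example_wq {
        u32 head;       /* producer counter, free-running */
        u32 tail;       /* consumer counter, free-running */
};

/* Advance tail by num_units unless that would exceed what the producer
 * has posted; unsigned subtraction keeps this correct across wrap. */
static int example_move_tail(struct example_wq *wq, u32 num_units)
{
        u32 used_old = wq->head - wq->tail;
        u32 used_new = wq->head - (wq->tail + num_units);

        if (used_new > used_old)
                return -ERANGE;

        wq->tail += num_units;
        return 0;
}
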
1334 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; in mana_unmap_skb()
1335 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_unmap_skb()
1336 struct device *dev = gc->dev; in mana_unmap_skb()
1340 hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1; in mana_unmap_skb()
1343 dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], in mana_unmap_skb()
1346 for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++) in mana_unmap_skb()
1347 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], in mana_unmap_skb()
1353 struct gdma_comp *completions = cq->gdma_comp_buf; in mana_poll_tx_cq()
1357 struct mana_txq *txq = cq->txq; in mana_poll_tx_cq()
1368 ndev = txq->ndev; in mana_poll_tx_cq()
1371 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, in mana_poll_tx_cq()
1384 if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type != in mana_poll_tx_cq()
1388 switch (cqe_oob->cqe_hdr.cqe_type) { in mana_poll_tx_cq()
1403 cqe_oob->cqe_hdr.cqe_type); in mana_poll_tx_cq()
1405 apc->eth_stats.tx_cqe_err++; in mana_poll_tx_cq()
1414 cqe_oob->cqe_hdr.cqe_type); in mana_poll_tx_cq()
1416 apc->eth_stats.tx_cqe_unknown_type++; in mana_poll_tx_cq()
1420 if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) in mana_poll_tx_cq()
1423 skb = skb_dequeue(&txq->pending_skbs); in mana_poll_tx_cq()
1427 wqe_info = (struct gdma_posted_wqe_info *)skb->cb; in mana_poll_tx_cq()
1428 wqe_unit_cnt += wqe_info->wqe_size_in_bu; in mana_poll_tx_cq()
1432 napi_consume_skb(skb, cq->budget); in mana_poll_tx_cq()
1440 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt); in mana_poll_tx_cq()
1442 gdma_wq = txq->gdma_sq; in mana_poll_tx_cq()
1448 net_txq = txq->net_txq; in mana_poll_tx_cq()
1451 /* Ensure checking txq_stopped before apc->port_is_up. */ in mana_poll_tx_cq()
1454 if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { in mana_poll_tx_cq()
1456 apc->eth_stats.wake_queue++; in mana_poll_tx_cq()
1459 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) in mana_poll_tx_cq()
1462 cq->work_done = pkt_transmitted; in mana_poll_tx_cq()
1465 static void mana_post_pkt_rxq(struct mana_rxq *rxq) in mana_post_pkt_rxq() argument
1471 curr_index = rxq->buf_index++; in mana_post_pkt_rxq()
1472 if (rxq->buf_index == rxq->num_rx_buf) in mana_post_pkt_rxq()
1473 rxq->buf_index = 0; in mana_post_pkt_rxq()
1475 recv_buf_oob = &rxq->rx_oobs[curr_index]; in mana_post_pkt_rxq()
1477 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req, in mana_post_pkt_rxq()
1478 &recv_buf_oob->wqe_inf); in mana_post_pkt_rxq()
1482 WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1); in mana_post_pkt_rxq()
1485 static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va, in mana_build_skb() argument
1488 struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size); in mana_build_skb()
1493 if (xdp->data_hard_start) { in mana_build_skb()
1494 skb_reserve(skb, xdp->data - xdp->data_hard_start); in mana_build_skb()
1495 skb_put(skb, xdp->data_end - xdp->data); in mana_build_skb()
1499 skb_reserve(skb, rxq->headroom); in mana_build_skb()
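
mana_build_skb() above wraps an already-DMA-mapped receive buffer in an skb with napi_build_skb(), then reserves the configured headroom (or the XDP-adjusted offset) and exposes the received length. A minimal sketch of that zero-copy wrap with illustrative parameters:

#include <linux/skbuff.h>

/* Build an skb around a driver-owned RX buffer without copying.
 * buf_va/alloc_size/headroom/pkt_len are illustrative arguments. */
static struct sk_buff *example_wrap_rx_buf(void *buf_va, u32 alloc_size,
                                           u32 headroom, u32 pkt_len)
{
        struct sk_buff *skb = napi_build_skb(buf_va, alloc_size);

        if (!skb)
                return NULL;

        skb_reserve(skb, headroom);     /* room kept for headers/XDP */
        skb_put(skb, pkt_len);          /* expose the received payload */
        return skb;
}
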
1506 struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq) in mana_rx_skb() argument
1508 struct mana_stats_rx *rx_stats = &rxq->stats; in mana_rx_skb()
1509 struct net_device *ndev = rxq->ndev; in mana_rx_skb()
1510 uint pkt_len = cqe->ppi[0].pkt_len; in mana_rx_skb()
1511 u16 rxq_idx = rxq->rxq_idx; in mana_rx_skb()
1518 rxq->rx_cq.work_done++; in mana_rx_skb()
1519 napi = &rxq->rx_cq.napi; in mana_rx_skb()
1522 ++ndev->stats.rx_dropped; in mana_rx_skb()
1526 act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len); in mana_rx_skb()
1528 if (act == XDP_REDIRECT && !rxq->xdp_rc) in mana_rx_skb()
1534 skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp); in mana_rx_skb()
1542 skb->dev = napi->dev; in mana_rx_skb()
1544 skb->protocol = eth_type_trans(skb, ndev); in mana_rx_skb()
1548 if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) { in mana_rx_skb()
1549 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) in mana_rx_skb()
1550 skb->ip_summed = CHECKSUM_UNNECESSARY; in mana_rx_skb()
1553 if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) { in mana_rx_skb()
1554 hash_value = cqe->ppi[0].pkt_hash; in mana_rx_skb()
1556 if (cqe->rx_hashtype & MANA_HASH_L4) in mana_rx_skb()
1562 if (cqe->rx_vlantag_present) { in mana_rx_skb()
1563 u16 vlan_tci = cqe->rx_vlan_id; in mana_rx_skb()
1568 u64_stats_update_begin(&rx_stats->syncp); in mana_rx_skb()
1569 rx_stats->packets++; in mana_rx_skb()
1570 rx_stats->bytes += pkt_len; in mana_rx_skb()
1573 rx_stats->xdp_tx++; in mana_rx_skb()
1574 u64_stats_update_end(&rx_stats->syncp); in mana_rx_skb()
1587 u64_stats_update_begin(&rx_stats->syncp); in mana_rx_skb()
1588 rx_stats->xdp_drop++; in mana_rx_skb()
1589 u64_stats_update_end(&rx_stats->syncp); in mana_rx_skb()
1593 page_pool_recycle_direct(rxq->page_pool, in mana_rx_skb()
1596 WARN_ON_ONCE(rxq->xdp_save_va); in mana_rx_skb()
1598 rxq->xdp_save_va = buf_va; in mana_rx_skb()
1601 ++ndev->stats.rx_dropped; in mana_rx_skb()
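
Before handing the skb to GRO, mana_rx_skb() above copies metadata from the completion OOB: checksum status, the RSS hash (L4 vs L3), and any VLAN tag. A condensed sketch of that metadata handoff (the boolean/hash parameters stand in for the cqe fields shown above; example_deliver_rx is illustrative):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/gro.h>

/* Apply completion metadata to the skb and pass it up through GRO. */
static void example_deliver_rx(struct napi_struct *napi, struct sk_buff *skb,
                               bool csum_ok, u32 hash, bool l4_hash,
                               bool has_vlan, u16 vlan_tci)
{
        if (csum_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb_set_hash(skb, hash,
                     l4_hash ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);

        if (has_vlan)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

        napi_gro_receive(napi, skb);
}
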
1606 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, in mana_get_rxfrag() argument
1615 if (rxq->xdp_save_va) { in mana_get_rxfrag()
1616 va = rxq->xdp_save_va; in mana_get_rxfrag()
1617 rxq->xdp_save_va = NULL; in mana_get_rxfrag()
1618 } else if (rxq->alloc_size > PAGE_SIZE) { in mana_get_rxfrag()
1620 va = napi_alloc_frag(rxq->alloc_size); in mana_get_rxfrag()
1622 va = netdev_alloc_frag(rxq->alloc_size); in mana_get_rxfrag()
1629 if (compound_order(page) < get_order(rxq->alloc_size)) { in mana_get_rxfrag()
1634 page = page_pool_dev_alloc_pages(rxq->page_pool); in mana_get_rxfrag()
1642 *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize, in mana_get_rxfrag()
1646 page_pool_put_full_page(rxq->page_pool, page, false); in mana_get_rxfrag()
1657 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq, in mana_refill_rx_oob() argument
1665 va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true); in mana_refill_rx_oob()
1669 dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize, in mana_refill_rx_oob()
1671 *old_buf = rxoob->buf_va; in mana_refill_rx_oob()
1672 *old_fp = rxoob->from_pool; in mana_refill_rx_oob()
1674 rxoob->buf_va = va; in mana_refill_rx_oob()
1675 rxoob->sgl[0].address = da; in mana_refill_rx_oob()
1676 rxoob->from_pool = from_pool; in mana_refill_rx_oob()
1679 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, in mana_process_rx_cqe() argument
1682 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data; in mana_process_rx_cqe()
1683 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; in mana_process_rx_cqe()
1684 struct net_device *ndev = rxq->ndev; in mana_process_rx_cqe()
1687 struct device *dev = gc->dev; in mana_process_rx_cqe()
1694 switch (oob->cqe_hdr.cqe_type) { in mana_process_rx_cqe()
1699 ++ndev->stats.rx_dropped; in mana_process_rx_cqe()
1700 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index]; in mana_process_rx_cqe()
1706 apc->eth_stats.rx_coalesced_err++; in mana_process_rx_cqe()
1710 complete(&rxq->fence_event); in mana_process_rx_cqe()
1715 oob->cqe_hdr.cqe_type); in mana_process_rx_cqe()
1716 apc->eth_stats.rx_cqe_unknown_type++; in mana_process_rx_cqe()
1720 pktlen = oob->ppi[0].pkt_len; in mana_process_rx_cqe()
1725 rxq->gdma_id, cq->gdma_id, rxq->rxobj); in mana_process_rx_cqe()
1729 curr = rxq->buf_index; in mana_process_rx_cqe()
1730 rxbuf_oob = &rxq->rx_oobs[curr]; in mana_process_rx_cqe()
1731 WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); in mana_process_rx_cqe()
1733 mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp); in mana_process_rx_cqe()
1738 mana_rx_skb(old_buf, old_fp, oob, rxq); in mana_process_rx_cqe()
1741 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); in mana_process_rx_cqe()
1743 mana_post_pkt_rxq(rxq); in mana_process_rx_cqe()
1748 struct gdma_comp *comp = cq->gdma_comp_buf; in mana_poll_rx_cq()
1749 struct mana_rxq *rxq = cq->rxq; in mana_poll_rx_cq() local
1752 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); in mana_poll_rx_cq()
1755 rxq->xdp_flush = false; in mana_poll_rx_cq()
1761 /* verify recv cqe references the right rxq */ in mana_poll_rx_cq()
1762 if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id)) in mana_poll_rx_cq()
1765 mana_process_rx_cqe(rxq, cq, &comp[i]); in mana_poll_rx_cq()
1769 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; in mana_poll_rx_cq()
1771 mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq); in mana_poll_rx_cq()
1774 if (rxq->xdp_flush) in mana_poll_rx_cq()
1783 WARN_ON_ONCE(cq->gdma_cq != gdma_queue); in mana_cq_handler()
1785 if (cq->type == MANA_CQ_TYPE_RX) in mana_cq_handler()
1790 w = cq->work_done; in mana_cq_handler()
1791 cq->work_done_since_doorbell += w; in mana_cq_handler()
1793 if (w < cq->budget) { in mana_cq_handler()
1795 cq->work_done_since_doorbell = 0; in mana_cq_handler()
1796 napi_complete_done(&cq->napi, w); in mana_cq_handler()
1797 } else if (cq->work_done_since_doorbell > in mana_cq_handler()
1798 cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) { in mana_cq_handler()
1805 cq->work_done_since_doorbell = 0; in mana_cq_handler()
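
mana_cq_handler()/mana_poll() above follow the standard NAPI contract: process up to the budget, and only when less work than the budget was done call napi_complete_done() and re-arm the completion interrupt (here, by ringing the CQ with the ARM bit). A generic sketch of that contract with placeholder CQ helpers:

#include <linux/netdevice.h>

/* Placeholder: a real driver drains its completion queue here. */
static int example_process_cq(struct napi_struct *napi, int budget)
{
        return 0;
}

/* Placeholder: a real driver rings the CQ doorbell with the ARM bit. */
static void example_arm_cq(struct napi_struct *napi)
{
}

static int example_napi_poll(struct napi_struct *napi, int budget)
{
        int work_done = example_process_cq(napi, budget);

        /* Re-arm the interrupt only once NAPI is truly complete. */
        if (work_done < budget && napi_complete_done(napi, work_done))
                example_arm_cq(napi);

        return min(work_done, budget);
}
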
1816 cq->work_done = 0; in mana_poll()
1817 cq->budget = budget; in mana_poll()
1819 w = mana_cq_handler(cq, cq->gdma_cq); in mana_poll()
1828 napi_schedule_irqoff(&cq->napi); in mana_schedule_napi()
1833 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_cq()
1835 if (!cq->gdma_cq) in mana_deinit_cq()
1838 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq); in mana_deinit_cq()
1843 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_txq()
1845 if (!txq->gdma_sq) in mana_deinit_txq()
1848 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq); in mana_deinit_txq()
1856 if (!apc->tx_qp) in mana_destroy_txq()
1859 for (i = 0; i < apc->num_queues; i++) { in mana_destroy_txq()
1860 napi = &apc->tx_qp[i].tx_cq.napi; in mana_destroy_txq()
1861 if (apc->tx_qp[i].txq.napi_initialized) { in mana_destroy_txq()
1865 apc->tx_qp[i].txq.napi_initialized = false; in mana_destroy_txq()
1867 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); in mana_destroy_txq()
1869 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); in mana_destroy_txq()
1871 mana_deinit_txq(apc, &apc->tx_qp[i].txq); in mana_destroy_txq()
1874 kfree(apc->tx_qp); in mana_destroy_txq()
1875 apc->tx_qp = NULL; in mana_destroy_txq()
1881 struct mana_context *ac = apc->ac; in mana_create_txq()
1882 struct gdma_dev *gd = ac->gdma_dev; in mana_create_txq()
1894 apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp), in mana_create_txq()
1896 if (!apc->tx_qp) in mana_create_txq()
1897 return -ENOMEM; in mana_create_txq()
1910 gc = gd->gdma_context; in mana_create_txq()
1912 for (i = 0; i < apc->num_queues; i++) { in mana_create_txq()
1913 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE; in mana_create_txq()
1916 txq = &apc->tx_qp[i].txq; in mana_create_txq()
1918 u64_stats_init(&txq->stats.syncp); in mana_create_txq()
1919 txq->ndev = net; in mana_create_txq()
1920 txq->net_txq = netdev_get_tx_queue(net, i); in mana_create_txq()
1921 txq->vp_offset = apc->tx_vp_offset; in mana_create_txq()
1922 txq->napi_initialized = false; in mana_create_txq()
1923 skb_queue_head_init(&txq->pending_skbs); in mana_create_txq()
1929 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq); in mana_create_txq()
1934 cq = &apc->tx_qp[i].tx_cq; in mana_create_txq()
1935 cq->type = MANA_CQ_TYPE_TX; in mana_create_txq()
1937 cq->txq = txq; in mana_create_txq()
1944 spec.cq.parent_eq = ac->eqs[i].eq; in mana_create_txq()
1946 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); in mana_create_txq()
1953 wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle; in mana_create_txq()
1954 wq_spec.queue_size = txq->gdma_sq->queue_size; in mana_create_txq()
1956 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; in mana_create_txq()
1957 cq_spec.queue_size = cq->gdma_cq->queue_size; in mana_create_txq()
1959 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; in mana_create_txq()
1961 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ, in mana_create_txq()
1963 &apc->tx_qp[i].tx_object); in mana_create_txq()
1968 txq->gdma_sq->id = wq_spec.queue_index; in mana_create_txq()
1969 cq->gdma_cq->id = cq_spec.queue_index; in mana_create_txq()
1971 txq->gdma_sq->mem_info.dma_region_handle = in mana_create_txq()
1973 cq->gdma_cq->mem_info.dma_region_handle = in mana_create_txq()
1976 txq->gdma_txq_id = txq->gdma_sq->id; in mana_create_txq()
1978 cq->gdma_id = cq->gdma_cq->id; in mana_create_txq()
1980 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { in mana_create_txq()
1981 err = -EINVAL; in mana_create_txq()
1985 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_txq()
1987 netif_napi_add_tx(net, &cq->napi, mana_poll); in mana_create_txq()
1988 napi_enable(&cq->napi); in mana_create_txq()
1989 txq->napi_initialized = true; in mana_create_txq()
1991 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); in mana_create_txq()
2001 struct mana_rxq *rxq, bool napi_initialized) in mana_destroy_rxq() argument
2004 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_destroy_rxq()
2006 struct device *dev = gc->dev; in mana_destroy_rxq()
2011 if (!rxq) in mana_destroy_rxq()
2014 napi = &rxq->rx_cq.napi; in mana_destroy_rxq()
2023 xdp_rxq_info_unreg(&rxq->xdp_rxq); in mana_destroy_rxq()
2025 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); in mana_destroy_rxq()
2027 mana_deinit_cq(apc, &rxq->rx_cq); in mana_destroy_rxq()
2029 if (rxq->xdp_save_va) in mana_destroy_rxq()
2030 put_page(virt_to_head_page(rxq->xdp_save_va)); in mana_destroy_rxq()
2032 for (i = 0; i < rxq->num_rx_buf; i++) { in mana_destroy_rxq()
2033 rx_oob = &rxq->rx_oobs[i]; in mana_destroy_rxq()
2035 if (!rx_oob->buf_va) in mana_destroy_rxq()
2038 dma_unmap_single(dev, rx_oob->sgl[0].address, in mana_destroy_rxq()
2039 rx_oob->sgl[0].size, DMA_FROM_DEVICE); in mana_destroy_rxq()
2041 page = virt_to_head_page(rx_oob->buf_va); in mana_destroy_rxq()
2043 if (rx_oob->from_pool) in mana_destroy_rxq()
2044 page_pool_put_full_page(rxq->page_pool, page, false); in mana_destroy_rxq()
2048 rx_oob->buf_va = NULL; in mana_destroy_rxq()
2051 page_pool_destroy(rxq->page_pool); in mana_destroy_rxq()
2053 if (rxq->gdma_rq) in mana_destroy_rxq()
2054 mana_gd_destroy_queue(gc, rxq->gdma_rq); in mana_destroy_rxq()
2056 kfree(rxq); in mana_destroy_rxq()
2060 struct mana_rxq *rxq, struct device *dev) in mana_fill_rx_oob() argument
2062 struct mana_port_context *mpc = netdev_priv(rxq->ndev); in mana_fill_rx_oob()
2067 if (mpc->rxbufs_pre) in mana_fill_rx_oob()
2068 va = mana_get_rxbuf_pre(rxq, &da); in mana_fill_rx_oob()
2070 va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false); in mana_fill_rx_oob()
2073 return -ENOMEM; in mana_fill_rx_oob()
2075 rx_oob->buf_va = va; in mana_fill_rx_oob()
2076 rx_oob->from_pool = from_pool; in mana_fill_rx_oob()
2078 rx_oob->sgl[0].address = da; in mana_fill_rx_oob()
2079 rx_oob->sgl[0].size = rxq->datasize; in mana_fill_rx_oob()
2080 rx_oob->sgl[0].mem_key = mem_key; in mana_fill_rx_oob()
2089 struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size) in mana_alloc_rx_wqe() argument
2091 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_alloc_rx_wqe()
2093 struct device *dev = gc->dev; in mana_alloc_rx_wqe()
2097 WARN_ON(rxq->datasize == 0); in mana_alloc_rx_wqe()
2102 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_alloc_rx_wqe()
2103 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_alloc_rx_wqe()
2106 rx_oob->num_sge = 1; in mana_alloc_rx_wqe()
2108 ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq, in mana_alloc_rx_wqe()
2113 rx_oob->wqe_req.sgl = rx_oob->sgl; in mana_alloc_rx_wqe()
2114 rx_oob->wqe_req.num_sge = rx_oob->num_sge; in mana_alloc_rx_wqe()
2115 rx_oob->wqe_req.inline_oob_size = 0; in mana_alloc_rx_wqe()
2116 rx_oob->wqe_req.inline_oob_data = NULL; in mana_alloc_rx_wqe()
2117 rx_oob->wqe_req.flags = 0; in mana_alloc_rx_wqe()
2118 rx_oob->wqe_req.client_data_unit = 0; in mana_alloc_rx_wqe()
2121 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32); in mana_alloc_rx_wqe()
2128 static int mana_push_wqe(struct mana_rxq *rxq) in mana_push_wqe() argument
2134 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_push_wqe()
2135 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_push_wqe()
2137 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req, in mana_push_wqe()
2138 &rx_oob->wqe_inf); in mana_push_wqe()
2140 return -ENOSPC; in mana_push_wqe()
2146 static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc) in mana_create_page_pool() argument
2152 pprm.nid = gc->numa_node; in mana_create_page_pool()
2153 pprm.napi = &rxq->rx_cq.napi; in mana_create_page_pool()
2155 rxq->page_pool = page_pool_create(&pprm); in mana_create_page_pool()
2157 if (IS_ERR(rxq->page_pool)) { in mana_create_page_pool()
2158 ret = PTR_ERR(rxq->page_pool); in mana_create_page_pool()
2159 rxq->page_pool = NULL; in mana_create_page_pool()
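
mana_create_page_pool() above creates a per-RXQ page pool tied to the queue's NUMA node and NAPI context, which allows direct page recycling from softirq. A minimal sketch of creating such a pool; the pool_size choice and the header path are assumptions for a self-contained example (recent kernels use net/page_pool/helpers.h, older ones net/page_pool.h):

#include <net/page_pool/helpers.h>

/* Create a page pool for one RX queue; fields beyond nid/napi (shown in
 * the lines above) are illustrative. Returns ERR_PTR() on failure. */
static struct page_pool *example_create_pool(struct napi_struct *napi,
                                             int nid, u32 pool_size)
{
        struct page_pool_params pprm = {};

        pprm.pool_size = pool_size;
        pprm.nid = nid;
        pprm.napi = napi;       /* enables lockless direct recycling */

        return page_pool_create(&pprm);
}
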
2170 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_rxq()
2177 struct mana_rxq *rxq; in mana_create_rxq() local
2180 gc = gd->gdma_context; in mana_create_rxq()
2182 rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE), in mana_create_rxq()
2184 if (!rxq) in mana_create_rxq()
2187 rxq->ndev = ndev; in mana_create_rxq()
2188 rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE; in mana_create_rxq()
2189 rxq->rxq_idx = rxq_idx; in mana_create_rxq()
2190 rxq->rxobj = INVALID_MANA_HANDLE; in mana_create_rxq()
2192 mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size, in mana_create_rxq()
2193 &rxq->headroom); in mana_create_rxq()
2196 err = mana_create_page_pool(rxq, gc); in mana_create_rxq()
2202 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); in mana_create_rxq()
2214 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq); in mana_create_rxq()
2219 cq = &rxq->rx_cq; in mana_create_rxq()
2220 cq->type = MANA_CQ_TYPE_RX; in mana_create_rxq()
2221 cq->rxq = rxq; in mana_create_rxq()
2228 spec.cq.parent_eq = eq->eq; in mana_create_rxq()
2230 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); in mana_create_rxq()
2236 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle; in mana_create_rxq()
2237 wq_spec.queue_size = rxq->gdma_rq->queue_size; in mana_create_rxq()
2239 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; in mana_create_rxq()
2240 cq_spec.queue_size = cq->gdma_cq->queue_size; in mana_create_rxq()
2242 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; in mana_create_rxq()
2244 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ, in mana_create_rxq()
2245 &wq_spec, &cq_spec, &rxq->rxobj); in mana_create_rxq()
2249 rxq->gdma_rq->id = wq_spec.queue_index; in mana_create_rxq()
2250 cq->gdma_cq->id = cq_spec.queue_index; in mana_create_rxq()
2252 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
2253 cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
2255 rxq->gdma_id = rxq->gdma_rq->id; in mana_create_rxq()
2256 cq->gdma_id = cq->gdma_cq->id; in mana_create_rxq()
2258 err = mana_push_wqe(rxq); in mana_create_rxq()
2262 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { in mana_create_rxq()
2263 err = -EINVAL; in mana_create_rxq()
2267 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_rxq()
2269 netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1); in mana_create_rxq()
2271 WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, in mana_create_rxq()
2272 cq->napi.napi_id)); in mana_create_rxq()
2273 WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, in mana_create_rxq()
2274 rxq->page_pool)); in mana_create_rxq()
2276 napi_enable(&cq->napi); in mana_create_rxq()
2278 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); in mana_create_rxq()
2281 return rxq; in mana_create_rxq()
2283 netdev_err(ndev, "Failed to create RXQ: err = %d\n", err); in mana_create_rxq()
2285 mana_destroy_rxq(apc, rxq, false); in mana_create_rxq()
2296 struct mana_context *ac = apc->ac; in mana_add_rx_queues()
2297 struct mana_rxq *rxq; in mana_add_rx_queues() local
2301 for (i = 0; i < apc->num_queues; i++) { in mana_add_rx_queues()
2302 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); in mana_add_rx_queues()
2303 if (!rxq) { in mana_add_rx_queues()
2304 err = -ENOMEM; in mana_add_rx_queues()
2308 u64_stats_init(&rxq->stats.syncp); in mana_add_rx_queues()
2310 apc->rxqs[i] = rxq; in mana_add_rx_queues()
2313 apc->default_rxobj = apc->rxqs[0]->rxobj; in mana_add_rx_queues()
2320 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_destroy_vport()
2321 struct mana_rxq *rxq; in mana_destroy_vport() local
2324 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_destroy_vport()
2325 rxq = apc->rxqs[rxq_idx]; in mana_destroy_vport()
2326 if (!rxq) in mana_destroy_vport()
2329 mana_destroy_rxq(apc, rxq, true); in mana_destroy_vport()
2330 apc->rxqs[rxq_idx] = NULL; in mana_destroy_vport()
2336 if (gd->gdma_context->is_pf) in mana_destroy_vport()
2343 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_vport()
2346 apc->default_rxobj = INVALID_MANA_HANDLE; in mana_create_vport()
2348 if (gd->gdma_context->is_pf) { in mana_create_vport()
2354 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell); in mana_create_vport()
2366 apc->indir_table[i] = in mana_rss_table_init()
2367 ethtool_rxfh_indir_default(i, apc->num_queues); in mana_rss_table_init()
2379 queue_idx = apc->indir_table[i]; in mana_config_rss()
2380 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; in mana_config_rss()
2397 struct net_device *ndev = apc->ndev; in mana_query_gf_stats()
2410 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_gf_stats()
2424 apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes; in mana_query_gf_stats()
2425 apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts; in mana_query_gf_stats()
2426 apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes; in mana_query_gf_stats()
2427 apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts; in mana_query_gf_stats()
2428 apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes; in mana_query_gf_stats()
2429 apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts; in mana_query_gf_stats()
2430 apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes; in mana_query_gf_stats()
2437 int port_idx = apc->port_idx; in mana_init_port()
2454 if (apc->max_queues > max_queues) in mana_init_port()
2455 apc->max_queues = max_queues; in mana_init_port()
2457 if (apc->num_queues > apc->max_queues) in mana_init_port()
2458 apc->num_queues = apc->max_queues; in mana_init_port()
2460 eth_hw_addr_set(ndev, apc->mac_addr); in mana_init_port()
2465 kfree(apc->rxqs); in mana_init_port()
2466 apc->rxqs = NULL; in mana_init_port()
2473 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_alloc_queues()
2480 err = netif_set_real_num_tx_queues(ndev, apc->num_queues); in mana_alloc_queues()
2488 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE; in mana_alloc_queues()
2490 err = netif_set_real_num_rx_queues(ndev, apc->num_queues); in mana_alloc_queues()
2500 if (gd->gdma_context->is_pf) { in mana_alloc_queues()
2526 if (apc->port_st_save) { in mana_attach()
2534 apc->port_is_up = apc->port_st_save; in mana_attach()
2539 if (apc->port_is_up) in mana_attach()
2551 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_dealloc_queues()
2557 if (apc->port_is_up) in mana_dealloc_queues()
2558 return -EINVAL; in mana_dealloc_queues()
2562 if (gd->gdma_context->is_pf) in mana_dealloc_queues()
2565 /* No packet can be transmitted now since apc->port_is_up is false. in mana_dealloc_queues()
2566 * There is still a tiny chance that mana_poll_tx_cq() can re-enable in mana_dealloc_queues()
2567 * a txq because it may not timely see apc->port_is_up being cleared in mana_dealloc_queues()
2569 * new packets due to apc->port_is_up being false. in mana_dealloc_queues()
2571 * Drain all the in-flight TX packets. in mana_dealloc_queues()
2578 for (i = 0; i < apc->num_queues; i++) { in mana_dealloc_queues()
2579 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
2581 while (atomic_read(&txq->pending_sends) > 0 && in mana_dealloc_queues()
2586 if (atomic_read(&txq->pending_sends)) { in mana_dealloc_queues()
2587 err = pcie_flr(to_pci_dev(gd->gdma_context->dev)); in mana_dealloc_queues()
2590 err, atomic_read(&txq->pending_sends), in mana_dealloc_queues()
2591 txq->gdma_txq_id); in mana_dealloc_queues()
2597 for (i = 0; i < apc->num_queues; i++) { in mana_dealloc_queues()
2598 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
2599 while ((skb = skb_dequeue(&txq->pending_skbs))) { in mana_dealloc_queues()
2603 atomic_set(&txq->pending_sends, 0); in mana_dealloc_queues()
2609 apc->rss_state = TRI_STATE_FALSE; in mana_dealloc_queues()
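
During teardown, mana_dealloc_queues() above waits (bounded by a timeout) for each txq's pending_sends to drop to zero before freeing the queued skbs, since apc->port_is_up is already false and no new packets can be queued. A hedged sketch of that drain-with-timeout loop (the timeout value and helper name are illustrative):

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

/* Poll an in-flight counter until it drains or the deadline passes;
 * returns true if the queue fully drained. Sleepable context assumed. */
static bool example_drain_txq(atomic_t *pending_sends,
                              unsigned int timeout_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

        while (atomic_read(pending_sends) > 0 &&
               time_before(jiffies, deadline))
                usleep_range(1000, 2000);

        return atomic_read(pending_sends) == 0;
}
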
2628 apc->port_st_save = apc->port_is_up; in mana_detach()
2629 apc->port_is_up = false; in mana_detach()
2637 if (apc->port_st_save) { in mana_detach()
2654 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_probe_port()
2660 gc->max_num_queues); in mana_probe_port()
2662 return -ENOMEM; in mana_probe_port()
2667 apc->ac = ac; in mana_probe_port()
2668 apc->ndev = ndev; in mana_probe_port()
2669 apc->max_queues = gc->max_num_queues; in mana_probe_port()
2670 apc->num_queues = gc->max_num_queues; in mana_probe_port()
2671 apc->port_handle = INVALID_MANA_HANDLE; in mana_probe_port()
2672 apc->pf_filter_handle = INVALID_MANA_HANDLE; in mana_probe_port()
2673 apc->port_idx = port_idx; in mana_probe_port()
2675 mutex_init(&apc->vport_mutex); in mana_probe_port()
2676 apc->vport_use_count = 0; in mana_probe_port()
2678 ndev->netdev_ops = &mana_devops; in mana_probe_port()
2679 ndev->ethtool_ops = &mana_ethtool_ops; in mana_probe_port()
2680 ndev->mtu = ETH_DATA_LEN; in mana_probe_port()
2681 ndev->max_mtu = gc->adapter_mtu - ETH_HLEN; in mana_probe_port()
2682 ndev->min_mtu = ETH_MIN_MTU; in mana_probe_port()
2683 ndev->needed_headroom = MANA_HEADROOM; in mana_probe_port()
2684 ndev->dev_port = port_idx; in mana_probe_port()
2685 SET_NETDEV_DEV(ndev, gc->dev); in mana_probe_port()
2689 netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE); in mana_probe_port()
2697 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; in mana_probe_port()
2698 ndev->hw_features |= NETIF_F_RXCSUM; in mana_probe_port()
2699 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; in mana_probe_port()
2700 ndev->hw_features |= NETIF_F_RXHASH; in mana_probe_port()
2701 ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | in mana_probe_port()
2703 ndev->vlan_features = ndev->features; in mana_probe_port()
2704 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in mana_probe_port()
2716 kfree(apc->rxqs); in mana_probe_port()
2717 apc->rxqs = NULL; in mana_probe_port()
2734 struct auxiliary_device *adev = gd->adev; in remove_adev()
2735 int id = adev->id; in remove_adev()
2741 gd->adev = NULL; in remove_adev()
2752 return -ENOMEM; in add_adev()
2754 adev = &madev->adev; in add_adev()
2758 adev->id = ret; in add_adev()
2760 adev->name = "rdma"; in add_adev()
2761 adev->dev.parent = gd->gdma_context->dev; in add_adev()
2762 adev->dev.release = adev_release; in add_adev()
2763 madev->mdev = gd; in add_adev()
2775 gd->adev = adev; in add_adev()
2782 mana_adev_idx_free(adev->id); in add_adev()
2792 struct gdma_context *gc = gd->gdma_context; in mana_probe()
2793 struct mana_context *ac = gd->driver_data; in mana_probe()
2794 struct device *dev = gc->dev; in mana_probe()
2810 return -ENOMEM; in mana_probe()
2812 ac->gdma_dev = gd; in mana_probe()
2813 gd->driver_data = ac; in mana_probe()
2826 ac->num_ports = num_ports; in mana_probe()
2828 if (ac->num_ports != num_ports) { in mana_probe()
2829 dev_err(dev, "The number of vPorts changed: %d->%d\n", in mana_probe()
2830 ac->num_ports, num_ports); in mana_probe()
2831 err = -EPROTO; in mana_probe()
2836 if (ac->num_ports == 0) in mana_probe()
2839 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV) in mana_probe()
2840 ac->num_ports = MAX_PORTS_IN_MANA_DEV; in mana_probe()
2843 for (i = 0; i < ac->num_ports; i++) { in mana_probe()
2844 err = mana_probe_port(ac, i, &ac->ports[i]); in mana_probe()
2849 for (i = 0; i < ac->num_ports; i++) { in mana_probe()
2851 err = mana_attach(ac->ports[i]); in mana_probe()
2868 struct gdma_context *gc = gd->gdma_context; in mana_remove()
2869 struct mana_context *ac = gd->driver_data; in mana_remove()
2870 struct device *dev = gc->dev; in mana_remove()
2876 if (gd->adev) in mana_remove()
2879 for (i = 0; i < ac->num_ports; i++) { in mana_remove()
2880 ndev = ac->ports[i]; in mana_remove()
2917 gd->driver_data = NULL; in mana_remove()
2918 gd->gdma_context = NULL; in mana_remove()