// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */

#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <net/xfrm.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "../nfp_net_dp.h"
#include "../nfp_net_xsk.h"
#include "../crypto/crypto.h"
#include "../crypto/fw.h"
#include "nfd3.h"

/* Transmit processing
 *
 * One queue controller peripheral queue is used for transmit. The
 * driver enqueues packets for transmission by advancing the write
 * pointer. The device indicates that packets have been transmitted by
 * advancing the read pointer. The driver maintains a local copy of
 * the read and write pointer in @struct nfp_net_tx_ring. The driver
 * keeps @wr_p in sync with the queue controller write pointer and can
 * determine how many packets have been transmitted by comparing its
 * copy of the read pointer @rd_p with the read pointer maintained by
 * the queue controller peripheral.
 */

/* Wrappers for deciding when to stop and restart TX queues */
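/* The wake threshold (MAX_SKB_FRAGS * 4) is larger than the stop threshold
 * (MAX_SKB_FRAGS + 1), so a stopped queue is only restarted once a good
 * amount of ring space has been reclaimed, avoiding rapid stop/start cycles.
 */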
static int nfp_nfd3_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
{
	return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
}

static int nfp_nfd3_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
{
	return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
}

/**
 * nfp_nfd3_tx_ring_stop() - stop tx ring
 * @nd_q: netdev queue
 * @tx_ring: driver tx queue structure
 *
 * Safely stop TX ring. Remember that while we are running .start_xmit()
 * someone else may be cleaning the TX ring completions so we need to be
 * extra careful here.
 */
static void
nfp_nfd3_tx_ring_stop(struct netdev_queue *nd_q,
		      struct nfp_net_tx_ring *tx_ring)
{
	netif_tx_stop_queue(nd_q);

	/* We can race with the TX completion out of NAPI so recheck */
	smp_mb();
	if (unlikely(nfp_nfd3_tx_ring_should_wake(tx_ring)))
		netif_tx_start_queue(nd_q);
}

/**
 * nfp_nfd3_tx_tso() - Set up Tx descriptor for LSO
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd: Pointer to HW TX descriptor
 * @skb: Pointer to SKB
 * @md_bytes: Prepend length
 *
 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
 */
static void
nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
		struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb, u32 md_bytes)
{
	u32 l3_offset, l4_offset, hdrlen;
	u16 mss;

	if (!skb_is_gso(skb))
		return;

	if (!skb->encapsulation) {
		l3_offset = skb_network_offset(skb);
		l4_offset = skb_transport_offset(skb);
		hdrlen = skb_tcp_all_headers(skb);
	} else {
		l3_offset = skb_inner_network_offset(skb);
		l4_offset = skb_inner_transport_offset(skb);
		hdrlen = skb_inner_tcp_all_headers(skb);
	}

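	/* LSO replicates the headers in front of every segment, so the extra
	 * header bytes are added to real_len to keep the byte count reported
	 * at completion time accurate.
	 */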
	txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
	txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);

	mss = skb_shinfo(skb)->gso_size & NFD3_DESC_TX_MSS_MASK;
	txd->l3_offset = l3_offset - md_bytes;
	txd->l4_offset = l4_offset - md_bytes;
	txd->lso_hdrlen = hdrlen - md_bytes;
	txd->mss = cpu_to_le16(mss);
	txd->flags |= NFD3_DESC_TX_LSO;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_lso++;
	u64_stats_update_end(&r_vec->tx_sync);
}

/**
 * nfp_nfd3_tx_csum() - Set TX CSUM offload flags in TX descriptor
 * @dp: NFP Net data path struct
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd: Pointer to TX descriptor
 * @skb: Pointer to SKB
 *
 * This function sets the TX checksum flags in the TX descriptor based
 * on the configuration and the protocol of the packet to be transmitted.
 */
static void
nfp_nfd3_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 struct nfp_nfd3_tx_buf *txbuf, struct nfp_nfd3_tx_desc *txd,
		 struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct iphdr *iph;
	u8 l4_hdr;

	if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
		return;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return;

	txd->flags |= NFD3_DESC_TX_CSUM;
	if (skb->encapsulation)
		txd->flags |= NFD3_DESC_TX_ENCAP;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		txd->flags |= NFD3_DESC_TX_IP4_CSUM;
		l4_hdr = iph->protocol;
	} else if (ipv6h->version == 6) {
		l4_hdr = ipv6h->nexthdr;
	} else {
		nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
		return;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
		txd->flags |= NFD3_DESC_TX_TCP_CSUM;
		break;
	case IPPROTO_UDP:
		txd->flags |= NFD3_DESC_TX_UDP_CSUM;
		break;
	default:
		nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
		return;
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	if (skb->encapsulation)
		r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
	else
		r_vec->hw_csum_tx += txbuf->pkt_cnt;
	u64_stats_update_end(&r_vec->tx_sync);
}

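/* The TX metadata prepend is built back to front, directly in front of the
 * packet data. Each field is pushed in turn and a matching 4-bit type code
 * is shifted into meta_id, which is written last so that it ends up at the
 * very start of the prepend and describes the fields that follow it.
 */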
static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb,
				 u64 tls_handle, bool *ipsec)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	struct nfp_ipsec_offload offload_info;
	unsigned char *data;
	bool vlan_insert;
	u32 meta_id = 0;
	int md_bytes;

#ifdef CONFIG_NFP_NET_IPSEC
	if (xfrm_offload(skb))
		*ipsec = nfp_net_ipsec_tx_prep(dp, skb, &offload_info);
#endif

	if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
		md_dst = NULL;

	vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);

	if (!(md_dst || tls_handle || vlan_insert || *ipsec))
		return 0;

	md_bytes = sizeof(meta_id) +
		   (!!md_dst ? NFP_NET_META_PORTID_SIZE : 0) +
		   (!!tls_handle ? NFP_NET_META_CONN_HANDLE_SIZE : 0) +
		   (vlan_insert ? NFP_NET_META_VLAN_SIZE : 0) +
		   (*ipsec ? NFP_NET_META_IPSEC_FIELD_SIZE : 0);

	if (unlikely(skb_cow_head(skb, md_bytes)))
		return -ENOMEM;

	data = skb_push(skb, md_bytes) + md_bytes;
	if (md_dst) {
		data -= NFP_NET_META_PORTID_SIZE;
		put_unaligned_be32(md_dst->u.port_info.port_id, data);
		meta_id = NFP_NET_META_PORTID;
	}
	if (tls_handle) {
		/* conn handle is opaque, we just use u64 to be able to quickly
		 * compare it to zero
		 */
		data -= NFP_NET_META_CONN_HANDLE_SIZE;
		memcpy(data, &tls_handle, sizeof(tls_handle));
		meta_id <<= NFP_NET_META_FIELD_SIZE;
		meta_id |= NFP_NET_META_CONN_HANDLE;
	}
	if (vlan_insert) {
		data -= NFP_NET_META_VLAN_SIZE;
		/* skb->vlan_proto is already __be16, so it can be copied
		 * into the metadata without calling put_unaligned_be16()
		 */
		memcpy(data, &skb->vlan_proto, sizeof(skb->vlan_proto));
		put_unaligned_be16(skb_vlan_tag_get(skb), data + sizeof(skb->vlan_proto));
		meta_id <<= NFP_NET_META_FIELD_SIZE;
		meta_id |= NFP_NET_META_VLAN;
	}
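	/* IPsec uses three 32-bit metadata words (the sequence number high
	 * and low halves and the SA handle), so three NFP_NET_META_IPSEC
	 * type codes are pushed into meta_id below.
	 */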
	if (*ipsec) {
		data -= NFP_NET_META_IPSEC_SIZE;
		put_unaligned_be32(offload_info.seq_hi, data);
		data -= NFP_NET_META_IPSEC_SIZE;
		put_unaligned_be32(offload_info.seq_low, data);
		data -= NFP_NET_META_IPSEC_SIZE;
		put_unaligned_be32(offload_info.handle - 1, data);
		meta_id <<= NFP_NET_META_IPSEC_FIELD_SIZE;
		meta_id |= NFP_NET_META_IPSEC << 8 | NFP_NET_META_IPSEC << 4 | NFP_NET_META_IPSEC;
	}

	data -= sizeof(meta_id);
	put_unaligned_be32(meta_id, data);

	return md_bytes;
}

/**
 * nfp_nfd3_tx() - Main transmit entry point
 * @skb: SKB to transmit
 * @netdev: netdev structure
 *
 * Return: NETDEV_TX_OK on success.
 */
netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int f, nr_frags, wr_idx, md_bytes;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_r_vector *r_vec;
	struct nfp_nfd3_tx_buf *txbuf;
	struct nfp_nfd3_tx_desc *txd;
	struct netdev_queue *nd_q;
	const skb_frag_t *frag;
	struct nfp_net_dp *dp;
	dma_addr_t dma_addr;
	unsigned int fsize;
	u64 tls_handle = 0;
	bool ipsec = false;
	u16 qidx;

	dp = &nn->dp;
	qidx = skb_get_queue_mapping(skb);
	tx_ring = &dp->tx_rings[qidx];
	r_vec = tx_ring->r_vec;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
		nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
			   qidx, tx_ring->wr_p, tx_ring->rd_p);
		nd_q = netdev_get_tx_queue(dp->netdev, qidx);
		netif_tx_stop_queue(nd_q);
		nfp_net_tx_xmit_more_flush(tx_ring);
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		return NETDEV_TX_BUSY;
	}

	skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
	if (unlikely(!skb)) {
		nfp_net_tx_xmit_more_flush(tx_ring);
		return NETDEV_TX_OK;
	}

	md_bytes = nfp_nfd3_prep_tx_meta(dp, skb, tls_handle, &ipsec);
	if (unlikely(md_bytes < 0))
		goto err_flush;

	/* Start with the head skbuf */
	dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))
		goto err_dma_err;

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];
	txbuf->skb = skb;
	txbuf->dma_addr = dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = skb->len;

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
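	/* The offset bits of offset_eop hold the metadata prepend length
	 * (md_bytes); EOP is only set on the head descriptor when there are
	 * no gather fragments to follow.
	 */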
	txd->offset_eop = (nr_frags ? 0 : NFD3_DESC_TX_EOP) | md_bytes;
	txd->dma_len = cpu_to_le16(skb_headlen(skb));
	nfp_desc_set_dma_addr_40b(txd, dma_addr);
	txd->data_len = cpu_to_le16(skb->len);

	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	/* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
	nfp_nfd3_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
	if (ipsec)
		nfp_nfd3_ipsec_tx(txd, skb);
	else
		nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
	if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
		txd->flags |= NFD3_DESC_TX_VLAN;
		txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	}

	/* Gather DMA */
	if (nr_frags > 0) {
		__le64 second_half;

		/* all descs must match except for in addr, length and eop */
		second_half = txd->vals8[1];

		for (f = 0; f < nr_frags; f++) {
			frag = &skb_shinfo(skb)->frags[f];
			fsize = skb_frag_size(frag);

			dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
						    fsize, DMA_TO_DEVICE);
			if (dma_mapping_error(dp->dev, dma_addr))
				goto err_unmap;

			wr_idx = D_IDX(tx_ring, wr_idx + 1);
			tx_ring->txbufs[wr_idx].skb = skb;
			tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
			tx_ring->txbufs[wr_idx].fidx = f;

			txd = &tx_ring->txds[wr_idx];
			txd->dma_len = cpu_to_le16(fsize);
			nfp_desc_set_dma_addr_40b(txd, dma_addr);
			txd->offset_eop = md_bytes |
				((f == nr_frags - 1) ? NFD3_DESC_TX_EOP : 0);
			txd->vals8[1] = second_half;
		}

		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_gather++;
		u64_stats_update_end(&r_vec->tx_sync);
	}

	skb_tx_timestamp(skb);

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);

	tx_ring->wr_p += nr_frags + 1;
	if (nfp_nfd3_tx_ring_should_stop(tx_ring))
		nfp_nfd3_tx_ring_stop(nd_q, tx_ring);

	tx_ring->wr_ptr_add += nr_frags + 1;
	if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
		nfp_net_tx_xmit_more_flush(tx_ring);

	return NETDEV_TX_OK;

err_unmap:
	while (--f >= 0) {
		frag = &skb_shinfo(skb)->frags[f];
		dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
			       skb_frag_size(frag), DMA_TO_DEVICE);
		tx_ring->txbufs[wr_idx].skb = NULL;
		tx_ring->txbufs[wr_idx].dma_addr = 0;
		tx_ring->txbufs[wr_idx].fidx = -2;
		wr_idx = wr_idx - 1;
		if (wr_idx < 0)
			wr_idx += tx_ring->cnt;
	}
	dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);
	tx_ring->txbufs[wr_idx].skb = NULL;
	tx_ring->txbufs[wr_idx].dma_addr = 0;
	tx_ring->txbufs[wr_idx].fidx = -2;
err_dma_err:
	nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
err_flush:
	nfp_net_tx_xmit_more_flush(tx_ring);
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	nfp_net_tls_tx_undo(skb, tls_handle);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * nfp_nfd3_tx_complete() - Handle completed TX packets
 * @tx_ring: TX ring structure
 * @budget: NAPI budget (only used as bool to determine if in NAPI context)
 */
void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	u32 done_pkts = 0, done_bytes = 0;
	struct netdev_queue *nd_q;
	u32 qcp_rd_p;
	int todo;

	if (tx_ring->wr_p == tx_ring->rd_p)
		return;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	while (todo--) {
		const skb_frag_t *frag;
		struct nfp_nfd3_tx_buf *tx_buf;
		struct sk_buff *skb;
		int fidx, nr_frags;
		int idx;

		idx = D_IDX(tx_ring, tx_ring->rd_p++);
		tx_buf = &tx_ring->txbufs[idx];

		skb = tx_buf->skb;
		if (!skb)
			continue;

		nr_frags = skb_shinfo(skb)->nr_frags;
		fidx = tx_buf->fidx;

		if (fidx == -1) {
			/* unmap head */
			dma_unmap_single(dp->dev, tx_buf->dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);

			done_pkts += tx_buf->pkt_cnt;
			done_bytes += tx_buf->real_len;
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[fidx];
			dma_unmap_page(dp->dev, tx_buf->dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (fidx == nr_frags - 1)
			napi_consume_skb(skb, budget);

		tx_buf->dma_addr = 0;
		tx_buf->skb = NULL;
		tx_buf->fidx = -2;
	}

	tx_ring->qcp_rd_p = qcp_rd_p;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	if (!dp->netdev)
		return;

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
	if (nfp_nfd3_tx_ring_should_wake(tx_ring)) {
		/* Make sure TX thread will see updated tx_ring->rd_p */
		smp_mb();

		if (unlikely(netif_tx_queue_stopped(nd_q)))
			netif_tx_wake_queue(nd_q);
	}

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}

static bool nfp_nfd3_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	u32 done_pkts = 0, done_bytes = 0;
	bool done_all;
	int idx, todo;
	u32 qcp_rd_p;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return true;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
	todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);

	tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);

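	/* XDP TX buffers always use a single descriptor each, so the number
	 * of completed descriptors equals the number of completed packets.
	 */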
	done_pkts = todo;
	while (todo--) {
		idx = D_IDX(tx_ring, tx_ring->rd_p);
		tx_ring->rd_p++;

		done_bytes += tx_ring->txbufs[idx].real_len;
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);

	return done_all;
}

/* Receive processing
 */

static void *
nfp_nfd3_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

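	/* With an XDP program attached each RX buffer is a separately
	 * allocated page rather than a page fragment, so the buffer can be
	 * handed directly to the XDP TX ring on XDP_TX.
	 */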
	if (!dp->xdp_prog) {
		frag = napi_alloc_frag(dp->fl_bufsz);
		if (unlikely(!frag))
			return NULL;
	} else {
		struct page *page;

		page = dev_alloc_page();
		if (unlikely(!page))
			return NULL;
		frag = page_address(page);
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}

/**
 * nfp_nfd3_rx_give_one() - Put mapped skb on the software and hardware rings
 * @dp: NFP Net data path struct
 * @rx_ring: RX ring structure
 * @frag: page fragment buffer
 * @dma_addr: DMA address of skb mapping
 */
static void
nfp_nfd3_rx_give_one(const struct nfp_net_dp *dp,
		     struct nfp_net_rx_ring *rx_ring,
		     void *frag, dma_addr_t dma_addr)
{
	unsigned int wr_idx;

	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

	nfp_net_dma_sync_dev_rx(dp, dma_addr);

	/* Stash SKB and DMA address away */
	rx_ring->rxbufs[wr_idx].frag = frag;
	rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;

	/* Fill freelist descriptor */
	rx_ring->rxds[wr_idx].fld.reserved = 0;
	rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
	/* The DMA address is expanded to 48-bit width in the freelist for
	 * NFP3800, so the *_48b macro is used accordingly; it is also OK to
	 * fill in a 40-bit address since the top 8 bits get set to 0.
	 */
	nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
				  dma_addr + dp->rx_dma_off);

	rx_ring->wr_p++;
	if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
		/* Update write pointer of the freelist queue. Make
		 * sure all writes are flushed before telling the hardware.
		 */
		wmb();
		nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
	}
}

/**
 * nfp_nfd3_rx_ring_fill_freelist() - Give buffers from the ring to FW
 * @dp: NFP Net data path struct
 * @rx_ring: RX ring to fill
 */
void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
				    struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return nfp_net_xsk_rx_ring_fill_freelist(rx_ring);

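	/* One ring entry is left unfilled; this keeps a completely full
	 * freelist distinguishable from an empty one.
	 */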
	for (i = 0; i < rx_ring->cnt - 1; i++)
		nfp_nfd3_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
				     rx_ring->rxbufs[i].dma_addr);
}

/**
 * nfp_nfd3_rx_csum_has_errors() - group check if rxd has any csum errors
 * @flags: RX descriptor flags field in CPU byte order
 */
static int nfp_nfd3_rx_csum_has_errors(u16 flags)
{
	u16 csum_all_checked, csum_all_ok;

	csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
	csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;

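	/* Align the OK bits with the corresponding "checked" bits; any layer
	 * that the hardware checked but did not report as OK makes the two
	 * masks differ, i.e. a checksum error.
	 */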
	return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
}

/**
 * nfp_nfd3_rx_csum() - set SKB checksum field based on RX descriptor flags
 * @dp: NFP Net data path struct
 * @r_vec: per-ring structure
 * @rxd: Pointer to RX descriptor
 * @meta: Parsed metadata prepend
 * @skb: Pointer to SKB
 */
void
nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 const struct nfp_net_rx_desc *rxd,
		 const struct nfp_meta_parsed *meta, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	if (!(dp->netdev->features & NETIF_F_RXCSUM))
		return;

	if (meta->csum_type) {
		skb->ip_summed = meta->csum_type;
		skb->csum = meta->csum;
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_complete++;
		u64_stats_update_end(&r_vec->rx_sync);
		return;
	}

	if (nfp_nfd3_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_error++;
		u64_stats_update_end(&r_vec->rx_sync);
		return;
	}

	/* Assume that the firmware will never report inner CSUM_OK unless outer
	 * L4 headers were successfully parsed. FW will always report zero UDP
	 * checksum as CSUM_OK.
	 */
	if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}

	if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_inner_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}
}

static void
nfp_nfd3_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
		  unsigned int type, __be32 *hash)
{
	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (type) {
	case NFP_NET_RSS_IPV4:
	case NFP_NET_RSS_IPV6:
	case NFP_NET_RSS_IPV6_EX:
		meta->hash_type = PKT_HASH_TYPE_L3;
		break;
	default:
		meta->hash_type = PKT_HASH_TYPE_L4;
		break;
	}

	meta->hash = get_unaligned_be32(hash);
}

static void
nfp_nfd3_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
		       void *data, struct nfp_net_rx_desc *rxd)
{
	struct nfp_net_rx_hash *rx_hash = data;

	if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
		return;

	nfp_nfd3_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
			  &rx_hash->hash);
}

bool
nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
		    void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
	u32 meta_info, vlan_info;

	meta_info = get_unaligned_be32(data);
	data += 4;

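	/* The first 32-bit word of chained metadata holds up to eight 4-bit
	 * field type codes, consumed least-significant nibble first; each
	 * code is followed by that field's data in the buffer.
	 */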
	while (meta_info) {
		switch (meta_info & NFP_NET_META_FIELD_MASK) {
		case NFP_NET_META_HASH:
			meta_info >>= NFP_NET_META_FIELD_SIZE;
			nfp_nfd3_set_hash(netdev, meta,
					  meta_info & NFP_NET_META_FIELD_MASK,
					  (__be32 *)data);
			data += 4;
			break;
		case NFP_NET_META_MARK:
			meta->mark = get_unaligned_be32(data);
			data += 4;
			break;
		case NFP_NET_META_VLAN:
			vlan_info = get_unaligned_be32(data);
			if (FIELD_GET(NFP_NET_META_VLAN_STRIP, vlan_info)) {
				meta->vlan.stripped = true;
				meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK,
							    vlan_info);
				meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK,
							   vlan_info);
			}
			data += 4;
			break;
		case NFP_NET_META_PORTID:
			meta->portid = get_unaligned_be32(data);
			data += 4;
			break;
		case NFP_NET_META_CSUM:
			meta->csum_type = CHECKSUM_COMPLETE;
			meta->csum =
				(__force __wsum)__get_unaligned_cpu32(data);
			data += 4;
			break;
		case NFP_NET_META_RESYNC_INFO:
			if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
						      pkt_len))
				return false;
			data += sizeof(struct nfp_net_tls_resync_req);
			break;
#ifdef CONFIG_NFP_NET_IPSEC
		case NFP_NET_META_IPSEC:
			/* Note: an IPsec packet can have a zero saidx, so add 1
			 * here to mark the packet as an IPsec packet within the
			 * driver.
			 */
			meta->ipsec_saidx = get_unaligned_be32(data) + 1;
			data += 4;
			break;
#endif
		default:
			return true;
		}

		meta_info >>= NFP_NET_META_FIELD_SIZE;
	}

	return data != pkt;
}

static void
nfp_nfd3_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
		 struct sk_buff *skb)
{
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_drops++;
	/* If we have both skb and rxbuf the replacement buffer allocation
	 * must have failed, count this as an alloc failure.
	 */
	if (skb && rxbuf)
		r_vec->rx_replace_buf_alloc_fail++;
	u64_stats_update_end(&r_vec->rx_sync);

	/* The skb is built around the frag, so freeing the skb would also free
	 * the frag; take an extra reference to be able to reuse it.
	 */
	if (skb && rxbuf && skb->head == rxbuf->frag)
		page_ref_inc(virt_to_head_page(rxbuf->frag));
	if (rxbuf)
		nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
	if (skb)
		dev_kfree_skb_any(skb);
}

static bool
nfp_nfd3_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
		    struct nfp_net_tx_ring *tx_ring,
		    struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
		    unsigned int pkt_len, bool *completed)
{
	unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
	struct nfp_nfd3_tx_buf *txbuf;
	struct nfp_nfd3_tx_desc *txd;
	int wr_idx;

	/* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
	if (pkt_len + dma_off > dma_map_sz)
		return false;

	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
		if (!*completed) {
			nfp_nfd3_xdp_complete(tx_ring);
			*completed = true;
		}

		if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
			nfp_nfd3_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
					 NULL);
			return false;
		}
	}

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];

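	/* Swap buffers: the frag previously parked in this TX slot goes back
	 * onto the RX freelist and the received buffer takes its place, so
	 * XDP_TX never copies packet data.
	 */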
	nfp_nfd3_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);

	txbuf->frag = rxbuf->frag;
	txbuf->dma_addr = rxbuf->dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = pkt_len;

	dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
				   pkt_len, DMA_BIDIRECTIONAL);

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = NFD3_DESC_TX_EOP;
	txd->dma_len = cpu_to_le16(pkt_len);
	nfp_desc_set_dma_addr_40b(txd, rxbuf->dma_addr + dma_off);
	txd->data_len = cpu_to_le16(pkt_len);

	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	tx_ring->wr_p++;
	tx_ring->wr_ptr_add++;
	return true;
}

/**
 * nfp_nfd3_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring: RX ring to receive from
 * @budget: NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * Return: Number of packets received.
 */
static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_tx_ring *tx_ring;
	struct bpf_prog *xdp_prog;
	int idx, pkts_polled = 0;
	bool xdp_tx_cmpl = false;
	unsigned int true_bufsz;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp_prog = READ_ONCE(dp->xdp_prog);
	true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
	xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
		      &rx_ring->xdp_rxq);
	tx_ring = r_vec->xdp_ring;

	while (pkts_polled < budget) {
		unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
		struct nfp_net_rx_buf *rxbuf;
		struct nfp_net_rx_desc *rxd;
		struct nfp_meta_parsed meta;
		bool redir_egress = false;
		struct net_device *netdev;
		dma_addr_t new_dma_addr;
		u32 meta_len_xdp = 0;
		void *new_frag;

		idx = D_IDX(rx_ring, rx_ring->rd_p);

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
			break;

		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */
		dma_rmb();

		memset(&meta, 0, sizeof(meta));

		rx_ring->rd_p++;
		pkts_polled++;

		rxbuf = &rx_ring->rxbufs[idx];
		/*            < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can vary
		 * on a packet by packet basis. If rx_offset is set to zero
		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
		 * buffer and is immediately followed by the packet (no [XX]).
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);
		pkt_len = data_len - meta_len;

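		/* pkt_off is where packet data starts within the buffer: past
		 * the headroom, the DMA offset and either the fixed RX offset
		 * or the dynamic metadata prepend. The metadata sits
		 * immediately in front of the packet, at meta_off.
		 */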
		pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
		if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			pkt_off += meta_len;
		else
			pkt_off += dp->rx_offset;
		meta_off = pkt_off - meta_len;

		/* Stats update */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += pkt_len;
		u64_stats_update_end(&r_vec->rx_sync);

		if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
			     (dp->rx_offset && meta_len > dp->rx_offset))) {
			nn_dp_warn(dp, "oversized RX packet metadata %u\n",
				   meta_len);
			nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}

		nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
					data_len);

		if (!dp->chained_metadata_format) {
			nfp_nfd3_set_hash_desc(dp->netdev, &meta,
					       rxbuf->frag + meta_off, rxd);
		} else if (meta_len) {
			if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
							 rxbuf->frag + meta_off,
							 rxbuf->frag + pkt_off,
							 pkt_len, meta_len))) {
				nn_dp_warn(dp, "invalid RX packet metadata\n");
				nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
						 NULL);
				continue;
			}
		}

		if (xdp_prog && !meta.portid) {
			void *orig_data = rxbuf->frag + pkt_off;
			unsigned int dma_off;
			int act;

			xdp_prepare_buff(&xdp,
					 rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
					 pkt_off - NFP_NET_RX_BUF_HEADROOM,
					 pkt_len, true);

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

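			/* The program may have moved the packet start or end
			 * (bpf_xdp_adjust_head/tail), so recompute the length
			 * and offset from the xdp_buff before acting on the
			 * verdict.
			 */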
101662d03330SJakub Kicinski pkt_len = xdp.data_end - xdp.data;
101762d03330SJakub Kicinski pkt_off += xdp.data - orig_data;
101862d03330SJakub Kicinski
101962d03330SJakub Kicinski switch (act) {
102062d03330SJakub Kicinski case XDP_PASS:
102162d03330SJakub Kicinski meta_len_xdp = xdp.data - xdp.data_meta;
102262d03330SJakub Kicinski break;
102362d03330SJakub Kicinski case XDP_TX:
102462d03330SJakub Kicinski dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
102562d03330SJakub Kicinski if (unlikely(!nfp_nfd3_tx_xdp_buf(dp, rx_ring,
102662d03330SJakub Kicinski tx_ring,
102762d03330SJakub Kicinski rxbuf,
102862d03330SJakub Kicinski dma_off,
102962d03330SJakub Kicinski pkt_len,
103062d03330SJakub Kicinski &xdp_tx_cmpl)))
103162d03330SJakub Kicinski trace_xdp_exception(dp->netdev,
103262d03330SJakub Kicinski xdp_prog, act);
103362d03330SJakub Kicinski continue;
103462d03330SJakub Kicinski default:
103562d03330SJakub Kicinski bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
103662d03330SJakub Kicinski fallthrough;
103762d03330SJakub Kicinski case XDP_ABORTED:
103862d03330SJakub Kicinski trace_xdp_exception(dp->netdev, xdp_prog, act);
103962d03330SJakub Kicinski fallthrough;
104062d03330SJakub Kicinski case XDP_DROP:
104162d03330SJakub Kicinski nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
104262d03330SJakub Kicinski rxbuf->dma_addr);
104362d03330SJakub Kicinski continue;
104462d03330SJakub Kicinski }
104562d03330SJakub Kicinski }
104662d03330SJakub Kicinski
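/* Route the frame by the port id found in the metadata: no port id means
 * the local netdev, the special CTRL id hands the raw frame to the app's
 * control message handler, and anything else resolves to a representor
 * netdev (possibly marked for egress redirect).
 */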
104762d03330SJakub Kicinski if (likely(!meta.portid)) {
104862d03330SJakub Kicinski netdev = dp->netdev;
104962d03330SJakub Kicinski } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
105062d03330SJakub Kicinski struct nfp_net *nn = netdev_priv(dp->netdev);
105162d03330SJakub Kicinski
105262d03330SJakub Kicinski nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
105362d03330SJakub Kicinski pkt_len);
105462d03330SJakub Kicinski nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
105562d03330SJakub Kicinski rxbuf->dma_addr);
105662d03330SJakub Kicinski continue;
105762d03330SJakub Kicinski } else {
105862d03330SJakub Kicinski struct nfp_net *nn;
105962d03330SJakub Kicinski
106062d03330SJakub Kicinski nn = netdev_priv(dp->netdev);
106162d03330SJakub Kicinski netdev = nfp_app_dev_get(nn->app, meta.portid,
106262d03330SJakub Kicinski &redir_egress);
106362d03330SJakub Kicinski if (unlikely(!netdev)) {
106462d03330SJakub Kicinski nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
106562d03330SJakub Kicinski NULL);
106662d03330SJakub Kicinski continue;
106762d03330SJakub Kicinski }
106862d03330SJakub Kicinski
106962d03330SJakub Kicinski if (nfp_netdev_is_nfp_repr(netdev))
107062d03330SJakub Kicinski nfp_repr_inc_rx_stats(netdev, pkt_len);
107162d03330SJakub Kicinski }
107262d03330SJakub Kicinski
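/* Hand the received fragment to a freshly built skb and replace it on the
 * ring with a newly allocated fragment so the RX ring stays full.  If
 * either allocation fails the original buffer is recycled and the frame is
 * accounted as a drop.
 */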
107362d03330SJakub Kicinski skb = build_skb(rxbuf->frag, true_bufsz);
107462d03330SJakub Kicinski if (unlikely(!skb)) {
107562d03330SJakub Kicinski nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
107662d03330SJakub Kicinski continue;
107762d03330SJakub Kicinski }
107862d03330SJakub Kicinski new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
107962d03330SJakub Kicinski if (unlikely(!new_frag)) {
108062d03330SJakub Kicinski nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
108162d03330SJakub Kicinski continue;
108262d03330SJakub Kicinski }
108362d03330SJakub Kicinski
108462d03330SJakub Kicinski nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
108562d03330SJakub Kicinski
108662d03330SJakub Kicinski nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
108762d03330SJakub Kicinski
108862d03330SJakub Kicinski skb_reserve(skb, pkt_off);
108962d03330SJakub Kicinski skb_put(skb, pkt_len);
109062d03330SJakub Kicinski
109162d03330SJakub Kicinski skb->mark = meta.mark;
109262d03330SJakub Kicinski skb_set_hash(skb, meta.hash, meta.hash_type);
109362d03330SJakub Kicinski
109462d03330SJakub Kicinski skb_record_rx_queue(skb, rx_ring->idx);
109562d03330SJakub Kicinski skb->protocol = eth_type_trans(skb, netdev);
109662d03330SJakub Kicinski
109762d03330SJakub Kicinski nfp_nfd3_rx_csum(dp, r_vec, rxd, &meta, skb);
109862d03330SJakub Kicinski
109962d03330SJakub Kicinski #ifdef CONFIG_TLS_DEVICE
110062d03330SJakub Kicinski if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
110162d03330SJakub Kicinski skb->decrypted = true;
110262d03330SJakub Kicinski u64_stats_update_begin(&r_vec->rx_sync);
110362d03330SJakub Kicinski r_vec->hw_tls_rx++;
110462d03330SJakub Kicinski u64_stats_update_end(&r_vec->rx_sync);
110562d03330SJakub Kicinski }
110662d03330SJakub Kicinski #endif
110762d03330SJakub Kicinski
110867d2656bSDiana Wang if (unlikely(!nfp_net_vlan_strip(skb, rxd, &meta))) {
110967d2656bSDiana Wang nfp_nfd3_rx_drop(dp, r_vec, rx_ring, NULL, skb);
111067d2656bSDiana Wang continue;
111167d2656bSDiana Wang }
111267d2656bSDiana Wang
111357f273adSHuanhuan Wang #ifdef CONFIG_NFP_NET_IPSEC
111457f273adSHuanhuan Wang if (meta.ipsec_saidx != 0 && unlikely(nfp_net_ipsec_rx(&meta, skb))) {
111557f273adSHuanhuan Wang nfp_nfd3_rx_drop(dp, r_vec, rx_ring, NULL, skb);
111657f273adSHuanhuan Wang continue;
111757f273adSHuanhuan Wang }
111857f273adSHuanhuan Wang #endif
111957f273adSHuanhuan Wang
112062d03330SJakub Kicinski if (meta_len_xdp)
112162d03330SJakub Kicinski skb_metadata_set(skb, meta_len_xdp);
112262d03330SJakub Kicinski
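/* Frames flagged for egress redirect were received on behalf of a
 * representor's egress path: restore the Ethernet header which
 * eth_type_trans() pulled and transmit them out of that netdev instead of
 * delivering them up the stack.
 */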
112362d03330SJakub Kicinski if (likely(!redir_egress)) {
112462d03330SJakub Kicinski napi_gro_receive(&rx_ring->r_vec->napi, skb);
112562d03330SJakub Kicinski } else {
112662d03330SJakub Kicinski skb->dev = netdev;
112762d03330SJakub Kicinski skb_reset_network_header(skb);
112862d03330SJakub Kicinski __skb_push(skb, ETH_HLEN);
112962d03330SJakub Kicinski dev_queue_xmit(skb);
113062d03330SJakub Kicinski }
113162d03330SJakub Kicinski }
113262d03330SJakub Kicinski
113362d03330SJakub Kicinski if (xdp_prog) {
113462d03330SJakub Kicinski if (tx_ring->wr_ptr_add)
113562d03330SJakub Kicinski nfp_net_tx_xmit_more_flush(tx_ring);
113662d03330SJakub Kicinski else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
113762d03330SJakub Kicinski !xdp_tx_cmpl)
113862d03330SJakub Kicinski if (!nfp_nfd3_xdp_complete(tx_ring))
113962d03330SJakub Kicinski pkts_polled = budget;
114062d03330SJakub Kicinski }
114162d03330SJakub Kicinski
114262d03330SJakub Kicinski return pkts_polled;
114362d03330SJakub Kicinski }
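/* Illustrative sketch, not part of the datapath: the ring read/write
 * pointers used above are free-running counters, so occupancy and the slot
 * index come from plain unsigned arithmetic.  Assuming a power-of-two ring
 * size in ->cnt (which is what lets D_IDX() mask), the idea is roughly:
 *
 *	static u32 ring_used(u32 wr_p, u32 rd_p)
 *	{
 *		return wr_p - rd_p;	// wraps correctly on u32 overflow
 *	}
 *
 *	static u32 ring_slot(u32 ptr, u32 cnt)
 *	{
 *		return ptr & (cnt - 1);	// same spirit as D_IDX()
 *	}
 */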
114462d03330SJakub Kicinski
114562d03330SJakub Kicinski /**
114662d03330SJakub Kicinski * nfp_nfd3_poll() - napi poll function
114762d03330SJakub Kicinski * @napi: NAPI structure
114862d03330SJakub Kicinski * @budget: NAPI budget
114962d03330SJakub Kicinski *
115062d03330SJakub Kicinski * Return: number of packets polled.
115162d03330SJakub Kicinski */
115262d03330SJakub Kicinski int nfp_nfd3_poll(struct napi_struct *napi, int budget)
115362d03330SJakub Kicinski {
115462d03330SJakub Kicinski struct nfp_net_r_vector *r_vec =
115562d03330SJakub Kicinski container_of(napi, struct nfp_net_r_vector, napi);
115662d03330SJakub Kicinski unsigned int pkts_polled = 0;
115762d03330SJakub Kicinski
115862d03330SJakub Kicinski if (r_vec->tx_ring)
115962d03330SJakub Kicinski nfp_nfd3_tx_complete(r_vec->tx_ring, budget);
116062d03330SJakub Kicinski if (r_vec->rx_ring)
116162d03330SJakub Kicinski pkts_polled = nfp_nfd3_rx(r_vec->rx_ring, budget);
116262d03330SJakub Kicinski
116362d03330SJakub Kicinski if (pkts_polled < budget)
116462d03330SJakub Kicinski if (napi_complete_done(napi, pkts_polled))
116562d03330SJakub Kicinski nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
116662d03330SJakub Kicinski
116762d03330SJakub Kicinski if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
116862d03330SJakub Kicinski struct dim_sample dim_sample = {};
116962d03330SJakub Kicinski unsigned int start;
117062d03330SJakub Kicinski u64 pkts, bytes;
117162d03330SJakub Kicinski
117262d03330SJakub Kicinski do {
117362d03330SJakub Kicinski start = u64_stats_fetch_begin(&r_vec->rx_sync);
117462d03330SJakub Kicinski pkts = r_vec->rx_pkts;
117562d03330SJakub Kicinski bytes = r_vec->rx_bytes;
117662d03330SJakub Kicinski } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
117762d03330SJakub Kicinski
117862d03330SJakub Kicinski dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
117962d03330SJakub Kicinski net_dim(&r_vec->rx_dim, dim_sample);
118062d03330SJakub Kicinski }
118162d03330SJakub Kicinski
118262d03330SJakub Kicinski if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
118362d03330SJakub Kicinski struct dim_sample dim_sample = {};
118462d03330SJakub Kicinski unsigned int start;
118562d03330SJakub Kicinski u64 pkts, bytes;
118662d03330SJakub Kicinski
118762d03330SJakub Kicinski do {
118862d03330SJakub Kicinski start = u64_stats_fetch_begin(&r_vec->tx_sync);
118962d03330SJakub Kicinski pkts = r_vec->tx_pkts;
119062d03330SJakub Kicinski bytes = r_vec->tx_bytes;
119162d03330SJakub Kicinski } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
119262d03330SJakub Kicinski
119362d03330SJakub Kicinski dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
119462d03330SJakub Kicinski net_dim(&r_vec->tx_dim, dim_sample);
119562d03330SJakub Kicinski }
119662d03330SJakub Kicinski
119762d03330SJakub Kicinski return pkts_polled;
119862d03330SJakub Kicinski }
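/* Both adaptive-coalescing blocks above follow the same pattern: read the
 * per-vector packet and byte counters under the u64_stats seqcount,
 * retrying if a writer raced with us, fold them into a dim_sample together
 * with the interrupt event counter, and let net_dim() decide whether its
 * worker should reprogram the ring's interrupt moderation.
 */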
119962d03330SJakub Kicinski
120062d03330SJakub Kicinski /* Control device data path
120162d03330SJakub Kicinski */
120262d03330SJakub Kicinski
12036fd86efaSJakub Kicinski bool
12046fd86efaSJakub Kicinski nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
120562d03330SJakub Kicinski struct sk_buff *skb, bool old)
120662d03330SJakub Kicinski {
120762d03330SJakub Kicinski unsigned int real_len = skb->len, meta_len = 0;
120862d03330SJakub Kicinski struct nfp_net_tx_ring *tx_ring;
120962d03330SJakub Kicinski struct nfp_nfd3_tx_buf *txbuf;
121062d03330SJakub Kicinski struct nfp_nfd3_tx_desc *txd;
121162d03330SJakub Kicinski struct nfp_net_dp *dp;
121262d03330SJakub Kicinski dma_addr_t dma_addr;
121362d03330SJakub Kicinski int wr_idx;
121462d03330SJakub Kicinski
121562d03330SJakub Kicinski dp = &r_vec->nfp_net->dp;
121662d03330SJakub Kicinski tx_ring = r_vec->tx_ring;
121762d03330SJakub Kicinski
121862d03330SJakub Kicinski if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
121962d03330SJakub Kicinski nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
122062d03330SJakub Kicinski goto err_free;
122162d03330SJakub Kicinski }
122262d03330SJakub Kicinski
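/* If the control TX ring is full, keep the message on the r_vec queue and
 * let the tasklet resend it once completions free up a slot; @old requeues
 * at the head so retried messages keep their original order.
 */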
122362d03330SJakub Kicinski if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
122462d03330SJakub Kicinski u64_stats_update_begin(&r_vec->tx_sync);
122562d03330SJakub Kicinski r_vec->tx_busy++;
122662d03330SJakub Kicinski u64_stats_update_end(&r_vec->tx_sync);
122762d03330SJakub Kicinski if (!old)
122862d03330SJakub Kicinski __skb_queue_tail(&r_vec->queue, skb);
122962d03330SJakub Kicinski else
123062d03330SJakub Kicinski __skb_queue_head(&r_vec->queue, skb);
123162d03330SJakub Kicinski return true;
123262d03330SJakub Kicinski }
123362d03330SJakub Kicinski
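/* When the app expects metadata, prepend the 8 byte control header: a
 * NFP_NET_META_PORTID field whose value is NFP_META_PORT_ID_CTRL, marking
 * the frame as belonging to the control channel:
 *
 *	  0               4               8
 *	  +---------------+---------------+------------------
 *	  | META_PORTID   | PORT_ID_CTRL  | control message...
 *	  +---------------+---------------+------------------
 */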
123462d03330SJakub Kicinski if (nfp_app_ctrl_has_meta(nn->app)) {
123562d03330SJakub Kicinski if (unlikely(skb_headroom(skb) < 8)) {
123662d03330SJakub Kicinski nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
123762d03330SJakub Kicinski goto err_free;
123862d03330SJakub Kicinski }
123962d03330SJakub Kicinski meta_len = 8;
124062d03330SJakub Kicinski put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
124162d03330SJakub Kicinski put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
124262d03330SJakub Kicinski }
124362d03330SJakub Kicinski
124462d03330SJakub Kicinski /* DMA map the linear skb data; the control path does not support gather */
124562d03330SJakub Kicinski dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
124662d03330SJakub Kicinski DMA_TO_DEVICE);
124762d03330SJakub Kicinski if (dma_mapping_error(dp->dev, dma_addr))
124862d03330SJakub Kicinski goto err_dma_warn;
124962d03330SJakub Kicinski
125062d03330SJakub Kicinski wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
125162d03330SJakub Kicinski
125262d03330SJakub Kicinski /* Stash the soft descriptor of the head then initialize it */
125362d03330SJakub Kicinski txbuf = &tx_ring->txbufs[wr_idx];
125462d03330SJakub Kicinski txbuf->skb = skb;
125562d03330SJakub Kicinski txbuf->dma_addr = dma_addr;
125662d03330SJakub Kicinski txbuf->fidx = -1;
125762d03330SJakub Kicinski txbuf->pkt_cnt = 1;
125862d03330SJakub Kicinski txbuf->real_len = real_len;
125962d03330SJakub Kicinski
126062d03330SJakub Kicinski /* Build TX descriptor */
126162d03330SJakub Kicinski txd = &tx_ring->txds[wr_idx];
126262d03330SJakub Kicinski txd->offset_eop = meta_len | NFD3_DESC_TX_EOP;
126362d03330SJakub Kicinski txd->dma_len = cpu_to_le16(skb_headlen(skb));
12645f30671dSYinjun Zhang nfp_desc_set_dma_addr_40b(txd, dma_addr);
126562d03330SJakub Kicinski txd->data_len = cpu_to_le16(skb->len);
126662d03330SJakub Kicinski
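/* Control messages are a single linear buffer with no offloads, so the
 * checksum, LSO and VLAN related descriptor fields stay clear.
 */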
126762d03330SJakub Kicinski txd->flags = 0;
126862d03330SJakub Kicinski txd->mss = 0;
126962d03330SJakub Kicinski txd->lso_hdrlen = 0;
127062d03330SJakub Kicinski
127162d03330SJakub Kicinski tx_ring->wr_p++;
127262d03330SJakub Kicinski tx_ring->wr_ptr_add++;
127362d03330SJakub Kicinski nfp_net_tx_xmit_more_flush(tx_ring);
127462d03330SJakub Kicinski
127562d03330SJakub Kicinski return false;
127662d03330SJakub Kicinski
127762d03330SJakub Kicinski err_dma_warn:
127862d03330SJakub Kicinski nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
127962d03330SJakub Kicinski err_free:
128062d03330SJakub Kicinski u64_stats_update_begin(&r_vec->tx_sync);
128162d03330SJakub Kicinski r_vec->tx_errors++;
128262d03330SJakub Kicinski u64_stats_update_end(&r_vec->tx_sync);
128362d03330SJakub Kicinski dev_kfree_skb_any(skb);
128462d03330SJakub Kicinski return false;
128562d03330SJakub Kicinski }
128662d03330SJakub Kicinski
128762d03330SJakub Kicinski static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
128862d03330SJakub Kicinski {
128962d03330SJakub Kicinski struct sk_buff *skb;
129062d03330SJakub Kicinski
129162d03330SJakub Kicinski while ((skb = __skb_dequeue(&r_vec->queue)))
12926fd86efaSJakub Kicinski if (nfp_nfd3_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
129362d03330SJakub Kicinski return;
129462d03330SJakub Kicinski }
129562d03330SJakub Kicinski
129662d03330SJakub Kicinski static bool
129762d03330SJakub Kicinski nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
129862d03330SJakub Kicinski {
129962d03330SJakub Kicinski u32 meta_type, meta_tag;
130062d03330SJakub Kicinski
130162d03330SJakub Kicinski if (!nfp_app_ctrl_has_meta(nn->app))
130262d03330SJakub Kicinski return !meta_len;
130362d03330SJakub Kicinski
130462d03330SJakub Kicinski if (meta_len != 8)
130562d03330SJakub Kicinski return false;
130662d03330SJakub Kicinski
130762d03330SJakub Kicinski meta_type = get_unaligned_be32(data);
130862d03330SJakub Kicinski meta_tag = get_unaligned_be32(data + 4);
130962d03330SJakub Kicinski
131062d03330SJakub Kicinski return (meta_type == NFP_NET_META_PORTID &&
131162d03330SJakub Kicinski meta_tag == NFP_META_PORT_ID_CTRL);
131262d03330SJakub Kicinski }
131362d03330SJakub Kicinski
131462d03330SJakub Kicinski static bool
131562d03330SJakub Kicinski nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
131662d03330SJakub Kicinski struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
131762d03330SJakub Kicinski {
131862d03330SJakub Kicinski unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
131962d03330SJakub Kicinski struct nfp_net_rx_buf *rxbuf;
132062d03330SJakub Kicinski struct nfp_net_rx_desc *rxd;
132162d03330SJakub Kicinski dma_addr_t new_dma_addr;
132262d03330SJakub Kicinski struct sk_buff *skb;
132362d03330SJakub Kicinski void *new_frag;
132462d03330SJakub Kicinski int idx;
132562d03330SJakub Kicinski
132662d03330SJakub Kicinski idx = D_IDX(rx_ring, rx_ring->rd_p);
132762d03330SJakub Kicinski
132862d03330SJakub Kicinski rxd = &rx_ring->rxds[idx];
132962d03330SJakub Kicinski if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
133062d03330SJakub Kicinski return false;
133162d03330SJakub Kicinski
133262d03330SJakub Kicinski /* Memory barrier to ensure that the rest of the descriptor
133362d03330SJakub Kicinski * is not read before the DD bit has been checked.
133462d03330SJakub Kicinski */
133562d03330SJakub Kicinski dma_rmb();
133662d03330SJakub Kicinski
133762d03330SJakub Kicinski rx_ring->rd_p++;
133862d03330SJakub Kicinski
133962d03330SJakub Kicinski rxbuf = &rx_ring->rxbufs[idx];
134062d03330SJakub Kicinski meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
134162d03330SJakub Kicinski data_len = le16_to_cpu(rxd->rxd.data_len);
134262d03330SJakub Kicinski pkt_len = data_len - meta_len;
134362d03330SJakub Kicinski
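/* The metadata prepend sits immediately before the packet: with the
 * dynamic RX offset the packet starts right after the prepend, otherwise
 * it starts at the fixed configured offset.
 */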
134462d03330SJakub Kicinski pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
134562d03330SJakub Kicinski if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
134662d03330SJakub Kicinski pkt_off += meta_len;
134762d03330SJakub Kicinski else
134862d03330SJakub Kicinski pkt_off += dp->rx_offset;
134962d03330SJakub Kicinski meta_off = pkt_off - meta_len;
135062d03330SJakub Kicinski
135162d03330SJakub Kicinski /* Stats update */
135262d03330SJakub Kicinski u64_stats_update_begin(&r_vec->rx_sync);
135362d03330SJakub Kicinski r_vec->rx_pkts++;
135462d03330SJakub Kicinski r_vec->rx_bytes += pkt_len;
135562d03330SJakub Kicinski u64_stats_update_end(&r_vec->rx_sync);
135662d03330SJakub Kicinski
135762d03330SJakub Kicinski nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
135862d03330SJakub Kicinski
135962d03330SJakub Kicinski if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
136062d03330SJakub Kicinski nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
136162d03330SJakub Kicinski meta_len);
136262d03330SJakub Kicinski nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
136362d03330SJakub Kicinski return true;
136462d03330SJakub Kicinski }
136562d03330SJakub Kicinski
136662d03330SJakub Kicinski skb = build_skb(rxbuf->frag, dp->fl_bufsz);
136762d03330SJakub Kicinski if (unlikely(!skb)) {
136862d03330SJakub Kicinski nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
136962d03330SJakub Kicinski return true;
137062d03330SJakub Kicinski }
137162d03330SJakub Kicinski new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
137262d03330SJakub Kicinski if (unlikely(!new_frag)) {
137362d03330SJakub Kicinski nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
137462d03330SJakub Kicinski return true;
137562d03330SJakub Kicinski }
137662d03330SJakub Kicinski
137762d03330SJakub Kicinski nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
137862d03330SJakub Kicinski
137962d03330SJakub Kicinski nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
138062d03330SJakub Kicinski
138162d03330SJakub Kicinski skb_reserve(skb, pkt_off);
138262d03330SJakub Kicinski skb_put(skb, pkt_len);
138362d03330SJakub Kicinski
138462d03330SJakub Kicinski nfp_app_ctrl_rx(nn->app, skb);
138562d03330SJakub Kicinski
138662d03330SJakub Kicinski return true;
138762d03330SJakub Kicinski }
138862d03330SJakub Kicinski
138962d03330SJakub Kicinski static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
139062d03330SJakub Kicinski {
139162d03330SJakub Kicinski struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
139262d03330SJakub Kicinski struct nfp_net *nn = r_vec->nfp_net;
139362d03330SJakub Kicinski struct nfp_net_dp *dp = &nn->dp;
139462d03330SJakub Kicinski unsigned int budget = 512;
139562d03330SJakub Kicinski
139662d03330SJakub Kicinski while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
139762d03330SJakub Kicinski continue;
139862d03330SJakub Kicinski
139962d03330SJakub Kicinski return budget;
140062d03330SJakub Kicinski }
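/* nfp_ctrl_rx() returns the budget left over: non-zero means the control
 * RX ring was drained within the 512 message budget and the caller may
 * re-enable the interrupt, zero means more messages may be pending and the
 * tasklet has to run again.
 */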
140162d03330SJakub Kicinski
140262d03330SJakub Kicinski void nfp_nfd3_ctrl_poll(struct tasklet_struct *t)
140362d03330SJakub Kicinski {
140462d03330SJakub Kicinski struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
140562d03330SJakub Kicinski
140662d03330SJakub Kicinski spin_lock(&r_vec->lock);
140762d03330SJakub Kicinski nfp_nfd3_tx_complete(r_vec->tx_ring, 0);
140862d03330SJakub Kicinski __nfp_ctrl_tx_queued(r_vec);
140962d03330SJakub Kicinski spin_unlock(&r_vec->lock);
141062d03330SJakub Kicinski
141162d03330SJakub Kicinski if (nfp_ctrl_rx(r_vec)) {
141262d03330SJakub Kicinski nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
141362d03330SJakub Kicinski } else {
141462d03330SJakub Kicinski tasklet_schedule(&r_vec->tasklet);
141562d03330SJakub Kicinski nn_dp_warn(&r_vec->nfp_net->dp,
141662d03330SJakub Kicinski "control message budget exceeded!\n");
141762d03330SJakub Kicinski }
141862d03330SJakub Kicinski }