// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
#define PTP_PORT	0x13F
/* The PTPv2 header's Original Timestamp starts at byte offset 34 and
 * contains a 6-byte seconds field and a 4-byte nanoseconds field.
 */
#define PTP_SYNC_SEC_OFFSET	34

static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
				     struct bpf_prog *prog,
				     struct nix_cqe_rx_s *cqe,
				     struct otx2_cq_queue *cq,
				     bool *need_xdp_flush);

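/* Read the CQ status via an atomic fetch-and-add on the LF's CQ
 * operation address and derive head, tail and the number of pending
 * CQEs from it. Returns -EINVAL if the hardware reports a CQ error.
 */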
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq)
{
	u64 incr = (u64)(cq->cq_idx) << 32;
	u64 status;

	status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);

	if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
		     status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
		dev_err(pfvf->dev, "CQ stopped due to error");
		return -EINVAL;
	}

	cq->cq_tail = status & 0xFFFFF;
	cq->cq_head = (status >> 20) & 0xFFFFF;
	if (cq->cq_tail < cq->cq_head)
		cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
				cq->cq_tail;
	else
		cq->pend_cqe = cq->cq_tail - cq->cq_head;

	return 0;
}

static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
	struct nix_cqe_hdr_s *cqe_hdr;

	cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
	if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
		return NULL;

	cq->cq_head++;
	cq->cq_head &= (cq->cqe_cnt - 1);

	return cqe_hdr;
}

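/* Index helper for the 16-bit segment-size slots of an SG
 * subdescriptor: on big-endian hosts the index is mirrored within each
 * group of four, which matches the sizes being packed into a 64-bit
 * word that the hardware interprets as little-endian.
 */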
static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
					struct sk_buff *skb, int seg, int *len)
{
	const skb_frag_t *frag;
	struct page *page;
	int offset;

	/* First segment is always skb->data */
	if (!seg) {
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
		*len = skb_headlen(skb);
	} else {
		frag = &skb_shinfo(skb)->frags[seg - 1];
		page = skb_frag_page(frag);
		offset = skb_frag_off(frag);
		*len = skb_frag_size(frag);
	}
	return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
}

static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
	int seg;

	for (seg = 0; seg < sg->num_segs; seg++) {
		otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
				    sg->size[seg], DMA_TO_DEVICE);
	}
	sg->num_segs = 0;
}

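/* Completion handler for XDP transmit completions: convert the queued
 * IOVA back to its page via the IOMMU mapping, unmap the buffer and
 * release the page reference that backed the frame.
 */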
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
				     struct otx2_snd_queue *sq,
				     struct nix_cqe_tx_s *cqe)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct sg_list *sg;
	struct page *page;
	u64 pa;

	sg = &sq->sg[snd_comp->sqe_id];

	pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
	otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
			    sg->size[0], DMA_TO_DEVICE);
	page = virt_to_page(phys_to_virt(pa));
	put_page(page);
}

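/* Regular TX completion: report the hardware TX timestamp if one was
 * requested and captured, update packet/byte counters, unmap the SKB's
 * fragments and free the SKB.
 */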
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq,
				 struct otx2_snd_queue *sq,
				 struct nix_cqe_tx_s *cqe,
				 int budget, int *tx_pkts, int *tx_bytes)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct skb_shared_hwtstamps ts;
	struct sk_buff *skb = NULL;
	u64 timestamp, tsns;
	struct sg_list *sg;
	int err;

	if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
		net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
				    pfvf->netdev->name, cq->cint_idx,
				    snd_comp->status);

	sg = &sq->sg[snd_comp->sqe_id];
	skb = (struct sk_buff *)sg->skb;
	if (unlikely(!skb))
		return;

	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
		timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
		if (timestamp != 1) {
			timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
			err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
			if (!err) {
				memset(&ts, 0, sizeof(ts));
				ts.hwtstamp = ns_to_ktime(tsns);
				skb_tstamp_tx(skb, &ts);
			}
		}
	}

	*tx_bytes += skb->len;
	(*tx_pkts)++;
	otx2_dma_unmap_skb_frags(pfvf, sg);
	napi_consume_skb(skb, budget);
	sg->skb = (u64)NULL;
}

static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
			      struct sk_buff *skb, void *data)
{
	u64 timestamp, tsns;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
		return;

	timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
	/* The first 8 bytes of the buffer hold the timestamp */
	err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
	if (err)
		return;

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}

static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
			      u64 iova, int len, struct nix_rx_parse_s *parse,
			      int qidx)
{
	struct page *page;
	int off = 0;
	void *va;

	va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));

	if (likely(!skb_shinfo(skb)->nr_frags)) {
		/* Check if data starts at some nonzero offset
		 * from the start of the buffer. For now the
		 * only possible offset is 8 bytes in the case
		 * where the packet is prepended by a timestamp.
		 */
		if (parse->laptr) {
			otx2_set_rxtstamp(pfvf, skb, va);
			off = OTX2_HW_TIMESTAMP_LEN;
		}
	}

	page = virt_to_page(va);
	if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				va - page_address(page) + off,
				len - off, pfvf->rbsize);
		return true;
	}

	/* If more than MAX_SKB_FRAGS fragments are received then
	 * give back those buffer pointers to hardware for reuse.
	 */
	pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);

	return false;
}

static void otx2_set_rxhash(struct otx2_nic *pfvf,
			    struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	struct otx2_rss_info *rss;
	u32 hash = 0;

	if (!(pfvf->netdev->features & NETIF_F_RXHASH))
		return;

	rss = &pfvf->hw.rss_info;
	if (rss->flowkey_cfg) {
		if (rss->flowkey_cfg &
		    ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_L3;
		hash = cqe->hdr.flow_tag;
	}
	skb_set_hash(skb, hash, hash_type);
}

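/* Walk every SG subdescriptor of the CQE and return the buffer
 * pointers it references to the aura (buffer pool) so the hardware
 * can reuse them.
 */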
static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
			      int qidx)
{
	struct nix_rx_sg_s *sg = &cqe->sg;
	void *end, *start;
	u64 *seg_addr;
	int seg;

	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++)
			pfvf->hw_ops->aura_freeptr(pfvf, qidx,
						   *seg_addr & ~0x07ULL);
		start += sizeof(*sg);
	}
}

static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
				  struct nix_cqe_rx_s *cqe, int qidx)
{
	struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
	struct nix_rx_parse_s *parse = &cqe->parse;

	if (netif_msg_rx_err(pfvf))
		netdev_err(pfvf->netdev,
			   "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
			   qidx, parse->errlev, parse->errcode);

	if (parse->errlev == NPC_ERRLVL_RE) {
		switch (parse->errcode) {
		case ERRCODE_FCS:
		case ERRCODE_FCS_RCV:
			atomic_inc(&stats->rx_fcs_errs);
			break;
		case ERRCODE_UNDERSIZE:
			atomic_inc(&stats->rx_undersize_errs);
			break;
		case ERRCODE_OVERSIZE:
			atomic_inc(&stats->rx_oversize_errs);
			break;
		case ERRCODE_OL2_LEN_MISMATCH:
			atomic_inc(&stats->rx_len_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else if (parse->errlev == NPC_ERRLVL_NIX) {
		switch (parse->errcode) {
		case ERRCODE_OL3_LEN:
		case ERRCODE_OL4_LEN:
		case ERRCODE_IL3_LEN:
		case ERRCODE_IL4_LEN:
			atomic_inc(&stats->rx_len_errs);
			break;
		case ERRCODE_OL4_CSUM:
		case ERRCODE_IL4_CSUM:
			atomic_inc(&stats->rx_csum_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else {
		atomic_inc(&stats->rx_other_errs);
		/* For now ignore all the NPC parser errors and
		 * pass the packets to the stack.
		 */
		return false;
	}

	/* If RXALL is enabled, pass packets on to the stack. */
	if (pfvf->netdev->features & NETIF_F_RXALL)
		return false;

	/* Free buffer back to pool */
	if (cqe->sg.segs)
		otx2_free_rcv_seg(pfvf, cqe, qidx);
	return true;
}

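/* Per-packet RX handler: account/drop error packets, run the attached
 * XDP program if one is loaded, otherwise assemble an skb from every
 * SG subdescriptor of the CQE, fill in hash/checksum/queue metadata
 * and hand the result to GRO.
 */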
static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
				 struct napi_struct *napi,
				 struct otx2_cq_queue *cq,
				 struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
{
	struct nix_rx_parse_s *parse = &cqe->parse;
	struct nix_rx_sg_s *sg = &cqe->sg;
	struct sk_buff *skb = NULL;
	void *end, *start;
	u64 *seg_addr;
	u16 *seg_size;
	int seg;

	if (unlikely(parse->errlev || parse->errcode)) {
		if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
			return;
	}

	if (pfvf->xdp_prog)
		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
			return;

	skb = napi_get_frags(napi);
	if (unlikely(!skb))
		return;

	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		seg_size = (void *)sg;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
			if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
					      seg_size[seg], parse, cq->cq_idx))
				cq->pool_ptrs++;
		}
		start += sizeof(*sg);
	}
	otx2_set_rxhash(pfvf, cqe, skb);

	skb_record_rx_queue(skb, cq->cq_idx);
	if (pfvf->netdev->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_mark_for_recycle(skb);

	napi_gro_frags(napi);
}

static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
				struct napi_struct *napi,
				struct otx2_cq_queue *cq, int budget)
{
	bool need_xdp_flush = false;
	struct nix_cqe_rx_s *cqe;
	int processed_cqe = 0;

	if (cq->pend_cqe >= budget)
		goto process_cqe;

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return 0;

process_cqe:
	while (likely(processed_cqe < budget) && cq->pend_cqe) {
		cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
		if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
		    !cqe->sg.seg_addr) {
			if (!processed_cqe)
				return 0;
			break;
		}
		cq->cq_head++;
		cq->cq_head &= (cq->cqe_cnt - 1);

		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);

		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		cqe->sg.seg_addr = 0x00;
		processed_cqe++;
		cq->pend_cqe--;
	}
	if (need_xdp_flush)
		xdp_do_flush();

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	return processed_cqe;
}

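/* Refill the RX buffer pool with as many pointers as this CQ consumed.
 * Returns the number of buffers actually refilled; the NAPI handler
 * falls back to the delayed refill worker when this returns zero.
 */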
int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
	struct otx2_nic *pfvf = dev;
	int cnt = cq->pool_ptrs;
	dma_addr_t bufptr;

	while (cq->pool_ptrs) {
		if (otx2_alloc_buffer(pfvf, cq, &bufptr))
			break;
		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}

	return cnt - cq->pool_ptrs;
}

static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
				struct otx2_cq_queue *cq, int budget)
{
	int tx_pkts = 0, tx_bytes = 0, qidx;
	struct otx2_snd_queue *sq;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;

	if (cq->pend_cqe >= budget)
		goto process_cqe;

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return 0;

process_cqe:
	qidx = cq->cq_idx - pfvf->hw.rx_queues;
	sq = &pfvf->qset.sq[qidx];

	while (likely(processed_cqe < budget) && cq->pend_cqe) {
		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
		if (unlikely(!cqe)) {
			if (!processed_cqe)
				return 0;
			break;
		}

		qidx = cq->cq_idx - pfvf->hw.rx_queues;

		if (cq->cq_type == CQ_XDP)
			otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
		else
			otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
					     cqe, budget, &tx_pkts, &tx_bytes);

		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		processed_cqe++;
		cq->pend_cqe--;

		sq->cons_head++;
		sq->cons_head &= (sq->sqe_cnt - 1);
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	if (likely(tx_pkts)) {
		struct netdev_queue *txq;

		qidx = cq->cq_idx - pfvf->hw.rx_queues;

		if (qidx >= pfvf->hw.tx_queues)
			qidx -= pfvf->hw.xdp_queues;
		txq = netdev_get_tx_queue(pfvf->netdev, qidx);
		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
		/* Check if queue was stopped earlier due to ring full */
		smp_mb();
		if (netif_tx_queue_stopped(txq) &&
		    netif_carrier_ok(pfvf->netdev))
			netif_tx_wake_queue(txq);
	}
	return 0;
}

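/* Feed a DIM sample built from the aggregate RX and TX frame/byte
 * counters into net_dim so it can adapt the interrupt coalescing
 * parameters for this completion interrupt.
 */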
static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
{
	struct dim_sample dim_sample = { 0 };
	u64 rx_frames, rx_bytes;
	u64 tx_frames, tx_bytes;

	rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
		OTX2_GET_RX_STATS(RX_UCAST);
	rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
	tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
	tx_frames = OTX2_GET_TX_STATS(TX_UCAST);

	dim_update_sample(pfvf->napi_events,
			  rx_frames + tx_frames,
			  rx_bytes + tx_bytes,
			  &dim_sample);
	net_dim(&cq_poll->dim, dim_sample);
}

int otx2_napi_handler(struct napi_struct *napi, int budget)
{
	struct otx2_cq_queue *rx_cq = NULL;
	struct otx2_cq_poll *cq_poll;
	int workdone = 0, cq_idx, i;
	struct otx2_cq_queue *cq;
	struct otx2_qset *qset;
	struct otx2_nic *pfvf;
	int filled_cnt = -1;

	cq_poll = container_of(napi, struct otx2_cq_poll, napi);
	pfvf = (struct otx2_nic *)cq_poll->dev;
	qset = &pfvf->qset;

	for (i = 0; i < CQS_PER_CINT; i++) {
		cq_idx = cq_poll->cq_ids[i];
		if (unlikely(cq_idx == CINT_INVALID_CQ))
			continue;
		cq = &qset->cq[cq_idx];
		if (cq->cq_type == CQ_RX) {
			rx_cq = cq;
			workdone += otx2_rx_napi_handler(pfvf, napi,
							 cq, budget);
		} else {
			workdone += otx2_tx_napi_handler(pfvf, cq, budget);
		}
	}

	if (rx_cq && rx_cq->pool_ptrs)
		filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
	/* Clear the IRQ */
	otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));

	if (workdone < budget && napi_complete_done(napi, workdone)) {
		/* If interface is going down, don't re-enable IRQ */
		if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
			return workdone;

		/* Adjust irq coalescing using net_dim */
		if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
			otx2_adjust_adaptive_coalese(pfvf, cq_poll);

		if (unlikely(!filled_cnt)) {
			struct refill_work *work;
			struct delayed_work *dwork;

			work = &pfvf->refill_wrk[cq->cq_idx];
			dwork = &work->pool_refill_work;
			/* Schedule a task if no other task is running */
			if (!cq->refill_task_sched) {
				work->napi = napi;
				cq->refill_task_sched = true;
				schedule_delayed_work(dwork,
						      msecs_to_jiffies(100));
			}
		} else {
			/* Re-enable interrupts */
			otx2_write64(pfvf,
				     NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
				     BIT_ULL(0));
		}
	}
	return workdone;
}

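/* Flush a prepared SQE to hardware: copy it into the LMT region and
 * issue an LMTST, retrying until the store is accepted, then advance
 * the SQ head.
 */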
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
		    int size, int qidx)
{
	u64 status;

	/* Packet data stores should finish before SQE is flushed to HW */
	dma_wmb();

	do {
		memcpy(sq->lmt_addr, sq->sqe_base, size);
		status = otx2_lmt_flush(sq->io_addr);
	} while (status == 0);

	sq->head++;
	sq->head &= (sq->sqe_cnt - 1);
}

#define MAX_SEGS_PER_SG	3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			    struct sk_buff *skb, int num_segs, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u64 dma_addr, *iova = NULL;
	u16 *sg_lens = NULL;
	int seg, len;

	sq->sg[sq->head].num_segs = 0;

	for (seg = 0; seg < num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* The next subdc always starts at a 16-byte boundary,
			 * so if sg->segs is 2 or 3, offset += 16 bytes.
			 */
			if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			return false;

		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
		sg->segs++;
		*iova++ = dma_addr;

		/* Save DMA mapping info for later unmapping */
		sq->sg[sq->head].dma_addr[seg] = dma_addr;
		sq->sg[sq->head].size[seg] = len;
		sq->sg[sq->head].num_segs++;
	}

	sq->sg[sq->head].skb = (u64)skb;
	return true;
}

/* Add SQE extended header subdescriptor */
static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct sk_buff *skb, int *offset)
{
	struct nix_sqe_ext_s *ext;

	ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
	ext->subdc = NIX_SUBDC_EXT;
	if (skb_shinfo(skb)->gso_size) {
		ext->lso = 1;
		ext->lso_sb = skb_tcp_all_headers(skb);
		ext->lso_mps = skb_shinfo(skb)->gso_size;

		/* Only TSOv4, TSOv6 and UDP (USO) GSO offloads are supported */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			ext->lso_format = pfvf->hw.lso_tsov4_idx;

			/* HW adds payload size to 'ip_hdr->tot_len' while
			 * sending TSO segment, hence set payload length
			 * in IP header of the packet to just header length.
			 */
			ip_hdr(skb)->tot_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			ext->lso_format = pfvf->hw.lso_tsov6_idx;
			ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb));
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			__be16 l3_proto = vlan_get_protocol(skb);
			struct udphdr *udph = udp_hdr(skb);
			u16 iplen;

			ext->lso_sb = skb_transport_offset(skb) +
					sizeof(struct udphdr);

			/* HW adds payload size to length fields in IP and
			 * UDP headers during segmentation, hence adjust the
			 * lengths to just header sizes.
			 */
			iplen = htons(ext->lso_sb - skb_network_offset(skb));
			if (l3_proto == htons(ETH_P_IP)) {
				ip_hdr(skb)->tot_len = iplen;
				ext->lso_format = pfvf->hw.lso_udpv4_idx;
			} else {
				ipv6_hdr(skb)->payload_len = iplen;
				ext->lso_format = pfvf->hw.lso_udpv6_idx;
			}

			udph->len = htons(sizeof(struct udphdr));
		}
	} else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		ext->tstmp = 1;
	}

#define OTX2_VLAN_PTR_OFFSET	(ETH_HLEN - ETH_TLEN)
	if (skb_vlan_tag_present(skb)) {
		if (skb->vlan_proto == htons(ETH_P_8021Q)) {
			ext->vlan1_ins_ena = 1;
			ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
			ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
		} else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
			ext->vlan0_ins_ena = 1;
			ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
			ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
		}
	}

	*offset += sizeof(*ext);
}

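/* Add a MEM subdescriptor that makes the hardware write the TX
 * timestamp to 'iova' using algorithm 'alg'. For one-step PTP Sync
 * packets a nonzero ptp_offset points the hardware at the packet's
 * correction field so it can be patched in-line, optionally together
 * with the UDP checksum.
 */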
static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
			     int alg, u64 iova, int ptp_offset,
			     u64 base_ns, bool udp_csum_crt)
{
	struct nix_sqe_mem_s *mem;

	mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
	mem->subdc = NIX_SUBDC_MEM;
	mem->alg = alg;
	mem->wmem = 1; /* wait for the memory operation */
	mem->addr = iova;

	if (ptp_offset) {
		mem->start_offset = ptp_offset;
		mem->udp_csum_crt = !!udp_csum_crt;
		mem->base_ns = base_ns;
		mem->step_type = 1;
	}

	*offset += sizeof(*mem);
}

/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct nix_sqe_hdr_s *sqe_hdr,
			     struct sk_buff *skb, u16 qidx)
{
	int proto = 0;

	/* If the SQE was framed before, there is no need to set
	 * these constants again.
	 */
	if (!sqe_hdr->total) {
		/* Don't free Tx buffers to Aura */
		sqe_hdr->df = 1;
		sqe_hdr->aura = sq->aura_id;
		/* Post a CQE Tx after pkt transmission */
		sqe_hdr->pnc = 1;
		sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ?
			       qidx + pfvf->hw.xdp_queues : qidx;
	}
	sqe_hdr->total = skb->len;
	/* Set SQE identifier which will be used later for freeing SKB */
	sqe_hdr->sqe_id = sq->head;

	/* Offload TCP/UDP checksum to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		sqe_hdr->ol3ptr = skb_network_offset(skb);
		sqe_hdr->ol4ptr = skb_transport_offset(skb);
		/* get vlan protocol Ethertype */
		if (eth_type_vlan(skb->protocol))
			skb->protocol = vlan_get_protocol(skb);

		if (skb->protocol == htons(ETH_P_IP)) {
			proto = ip_hdr(skb)->protocol;
			/* In case of TSO, HW needs this to be explicitly set.
			 * So set this always, instead of adding a check.
			 */
			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			proto = ipv6_hdr(skb)->nexthdr;
			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6;
		}

		if (proto == IPPROTO_TCP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
		else if (proto == IPPROTO_UDP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
	}
}

static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
				struct otx2_snd_queue *sq,
				struct sk_buff *skb, int sqe, int hdr_len)
{
	int num_segs = skb_shinfo(skb)->nr_frags + 1;
	struct sg_list *sg = &sq->sg[sqe];
	u64 dma_addr;
	int seg, len;

	sg->num_segs = 0;

	/* Get payload length at skb->data */
	len = skb_headlen(skb) - hdr_len;

	for (seg = 0; seg < num_segs; seg++) {
		/* Skip skb->data, if there is no payload */
		if (!seg && !len)
			continue;
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			goto unmap;

		/* Save DMA mapping info for later unmapping */
		sg->dma_addr[sg->num_segs] = dma_addr;
		sg->size[sg->num_segs] = len;
		sg->num_segs++;
	}
	return 0;
unmap:
	otx2_dma_unmap_skb_frags(pfvf, sg);
	return -EINVAL;
}

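/* Translate an address inside the original skb (the linear area when
 * seg < 0, otherwise fragment 'seg') into the DMA address of the
 * mapping created by otx2_dma_map_tso_skb() for this SQE.
 */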
static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
				  struct sk_buff *skb, int seg,
				  u64 seg_addr, int hdr_len, int sqe)
{
	struct sg_list *sg = &sq->sg[sqe];
	const skb_frag_t *frag;
	int offset;

	if (seg < 0)
		return sg->dma_addr[0] + (seg_addr - (u64)skb->data);

	frag = &skb_shinfo(skb)->frags[seg];
	offset = seg_addr - (u64)skb_frag_address(frag);
	if (skb_headlen(skb) - hdr_len)
		seg++;
	return sg->dma_addr[seg] + offset;
}

static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
				struct sg_list *list, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u16 *sg_lens = NULL;
	u64 *iova = NULL;
	int seg;

	/* Add SG descriptors with buffer addresses */
	for (seg = 0; seg < list->num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* The next subdc always starts at a 16-byte boundary,
			 * so if sg->segs is 2 or 3, offset += 16 bytes.
			 */
			if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
		*iova++ = list->dma_addr[seg];
		sg->segs++;
	}
}

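/* Software TSO fallback: split the skb into MSS-sized segments and
 * build one SQE per segment, reusing the DMA mappings created once for
 * the whole skb. Only the last segment posts a completion and carries
 * the skb/DMA bookkeeping (via first_sqe).
 */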
static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			       struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
	int hdr_len, tcp_data, seg_len, pkt_len, offset;
	struct nix_sqe_hdr_s *sqe_hdr;
	int first_sqe = sq->head;
	struct sg_list list;
	struct tso_t tso;

	hdr_len = tso_start(skb, &tso);

	/* Map SKB's fragments to DMA.
	 * It's done here to avoid mapping for every TSO segment's packet.
	 */
	if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
		dev_kfree_skb_any(skb);
		return;
	}

	netdev_tx_sent_queue(txq, skb->len);

	tcp_data = skb->len - hdr_len;
	while (tcp_data > 0) {
		char *hdr;

		seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
		tcp_data -= seg_len;

		/* Set SQE's SEND_HDR */
		memset(sq->sqe_base, 0, sq->sqe_size);
		sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
		otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
		offset = sizeof(*sqe_hdr);

		/* Add TSO segment's pkt header */
		hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
		tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
		list.dma_addr[0] =
			sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
		list.size[0] = hdr_len;
		list.num_segs = 1;

		/* Add TSO segment's payload data fragments */
		pkt_len = hdr_len;
		while (seg_len > 0) {
			int size;

			size = min_t(int, tso.size, seg_len);

			list.size[list.num_segs] = size;
			list.dma_addr[list.num_segs] =
				otx2_tso_frag_dma_addr(sq, skb,
						       tso.next_frag_idx - 1,
						       (u64)tso.data, hdr_len,
						       first_sqe);
			list.num_segs++;
			pkt_len += size;
			seg_len -= size;
			tso_build_data(skb, &tso, size);
		}
		sqe_hdr->total = pkt_len;
		otx2_sqe_tso_add_sg(sq, &list, &offset);

		/* DMA mappings and the skb need to be freed only after the
		 * last TSO segment is transmitted, so set 'PNC' only for the
		 * last segment. Also point the last segment's sqe_id to the
		 * first segment's SQE index, where the skb address and DMA
		 * mappings are saved.
		 */
		if (!tcp_data) {
			sqe_hdr->pnc = 1;
			sqe_hdr->sqe_id = first_sqe;
			sq->sg[first_sqe].skb = (u64)skb;
		} else {
			sqe_hdr->pnc = 0;
		}

		sqe_hdr->sizem1 = (offset / 16) - 1;

		/* Flush SQE to HW */
		pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
	}
}

static bool is_hw_tso_supported(struct otx2_nic *pfvf,
				struct sk_buff *skb)
{
	int payload_len, last_seg_size;

	if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
		return true;

	/* On 96xx A0, HW TSO not supported */
	if (!is_96xx_B0(pfvf->pdev))
		return false;

	/* HW has an issue due to which when the payload of the last LSO
	 * segment is shorter than 16 bytes, some header fields may not
	 * be correctly modified, hence don't offload such TSO segments.
	 */

	payload_len = skb->len - skb_tcp_all_headers(skb);
	last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
	if (last_seg_size && last_seg_size < 16)
		return false;

	return true;
}

static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
{
	if (!skb_shinfo(skb)->gso_size)
		return 1;

	/* HW TSO */
	if (is_hw_tso_supported(pfvf, skb))
		return 1;

	/* SW TSO */
	return skb_shinfo(skb)->gso_segs;
}

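/* One-step PTP helper: return true only if the packet is carried over
 * UDP and both source and destination ports are the PTP event port
 * (0x13F, i.e. 319).
 */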
static bool otx2_validate_network_transport(struct sk_buff *skb)
{
	if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
	    (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
		struct udphdr *udph = udp_hdr(skb);

		if (udph->source == htons(PTP_PORT) &&
		    udph->dest == htons(PTP_PORT))
			return true;
	}

	return false;
}

otx2_ptp_is_sync(struct sk_buff * skb,int * offset,bool * udp_csum_crt)1017edea0c5aSGeetha sowjanya static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, bool *udp_csum_crt)
10182958d17aSHariprasad Kelam {
10192958d17aSHariprasad Kelam struct ethhdr *eth = (struct ethhdr *)(skb->data);
10202958d17aSHariprasad Kelam u16 nix_offload_hlen = 0, inner_vhlen = 0;
1021edea0c5aSGeetha sowjanya bool udp_hdr_present = false, is_sync;
10222958d17aSHariprasad Kelam u8 *data = skb->data, *msgtype;
10232958d17aSHariprasad Kelam __be16 proto = eth->h_proto;
10242958d17aSHariprasad Kelam int network_depth = 0;
10252958d17aSHariprasad Kelam
10262958d17aSHariprasad Kelam /* NIX is programmed to offload outer VLAN header
10272958d17aSHariprasad Kelam * in case of single vlan protocol field holds Network header ETH_IP/V6
10282958d17aSHariprasad Kelam * in case of stacked vlan protocol field holds Inner vlan (8100)
10292958d17aSHariprasad Kelam */
10302958d17aSHariprasad Kelam if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
10312958d17aSHariprasad Kelam skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
10322958d17aSHariprasad Kelam if (skb->vlan_proto == htons(ETH_P_8021AD)) {
10332958d17aSHariprasad Kelam /* Get vlan protocol */
10342958d17aSHariprasad Kelam proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
10352958d17aSHariprasad Kelam /* SKB APIs like skb_transport_offset does not include
10362958d17aSHariprasad Kelam * offloaded vlan header length. Need to explicitly add
10372958d17aSHariprasad Kelam * the length
10382958d17aSHariprasad Kelam */
10392958d17aSHariprasad Kelam nix_offload_hlen = VLAN_HLEN;
10402958d17aSHariprasad Kelam inner_vhlen = VLAN_HLEN;
10412958d17aSHariprasad Kelam } else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
10422958d17aSHariprasad Kelam nix_offload_hlen = VLAN_HLEN;
10432958d17aSHariprasad Kelam }
10442958d17aSHariprasad Kelam } else if (eth_type_vlan(eth->h_proto)) {
10452958d17aSHariprasad Kelam proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
10462958d17aSHariprasad Kelam }
10472958d17aSHariprasad Kelam
10482958d17aSHariprasad Kelam switch (ntohs(proto)) {
10492958d17aSHariprasad Kelam case ETH_P_1588:
10502958d17aSHariprasad Kelam if (network_depth)
10512958d17aSHariprasad Kelam *offset = network_depth;
10522958d17aSHariprasad Kelam else
10532958d17aSHariprasad Kelam *offset = ETH_HLEN + nix_offload_hlen +
10542958d17aSHariprasad Kelam inner_vhlen;
10552958d17aSHariprasad Kelam break;
10562958d17aSHariprasad Kelam case ETH_P_IP:
10572958d17aSHariprasad Kelam case ETH_P_IPV6:
10582958d17aSHariprasad Kelam if (!otx2_validate_network_transport(skb))
10592958d17aSHariprasad Kelam return false;
10602958d17aSHariprasad Kelam
10612958d17aSHariprasad Kelam *offset = nix_offload_hlen + skb_transport_offset(skb) +
10622958d17aSHariprasad Kelam sizeof(struct udphdr);
1063edea0c5aSGeetha sowjanya udp_hdr_present = true;
1064edea0c5aSGeetha sowjanya
10652958d17aSHariprasad Kelam }
10662958d17aSHariprasad Kelam
10672958d17aSHariprasad Kelam msgtype = data + *offset;
10682958d17aSHariprasad Kelam /* Check whether the PTP messageId is SYNC */
1069edea0c5aSGeetha sowjanya is_sync = !(*msgtype & 0xf);
1070edea0c5aSGeetha sowjanya if (is_sync)
1071edea0c5aSGeetha sowjanya *udp_csum_crt = udp_hdr_present;
1072edea0c5aSGeetha sowjanya else
1073edea0c5aSGeetha sowjanya *offset = 0;
1074edea0c5aSGeetha sowjanya
1075edea0c5aSGeetha sowjanya return is_sync;
10762958d17aSHariprasad Kelam }
10772958d17aSHariprasad Kelam
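/* Handle TX timestamping. For a one-step PTP SYNC frame the origin timestamp
 * is written into the packet and the UDP checksum is recomputed when needed;
 * otherwise SKBTX_IN_PROGRESS is set for the regular two-step flow. In both
 * cases a SEND_MEM (SETTSTMP) sub-descriptor is appended so hardware records
 * the transmit timestamp. Without a HW timestamp request only a software
 * timestamp is taken.
 */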
1078c9c12d33SAleksey Makarov static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
1079c9c12d33SAleksey Makarov struct otx2_snd_queue *sq, int *offset)
1080c9c12d33SAleksey Makarov {
1081edea0c5aSGeetha sowjanya struct ethhdr *eth = (struct ethhdr *)(skb->data);
10822958d17aSHariprasad Kelam struct ptpv2_tstamp *origin_tstamp;
1083edea0c5aSGeetha sowjanya bool udp_csum_crt = false;
1084edea0c5aSGeetha sowjanya unsigned int udphoff;
10852958d17aSHariprasad Kelam struct timespec64 ts;
1086edea0c5aSGeetha sowjanya int ptp_offset = 0;
1087edea0c5aSGeetha sowjanya __wsum skb_csum;
1088c9c12d33SAleksey Makarov u64 iova;
1089c9c12d33SAleksey Makarov
10902958d17aSHariprasad Kelam if (unlikely(!skb_shinfo(skb)->gso_size &&
10912958d17aSHariprasad Kelam (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
1092edea0c5aSGeetha sowjanya if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC &&
1093edea0c5aSGeetha sowjanya otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum_crt))) {
10942958d17aSHariprasad Kelam origin_tstamp = (struct ptpv2_tstamp *)
10952958d17aSHariprasad Kelam ((u8 *)skb->data + ptp_offset +
10962958d17aSHariprasad Kelam PTP_SYNC_SEC_OFFSET);
10972958d17aSHariprasad Kelam ts = ns_to_timespec64(pfvf->ptp->tstamp);
10982958d17aSHariprasad Kelam origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
10992958d17aSHariprasad Kelam origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
11002958d17aSHariprasad Kelam origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
11012958d17aSHariprasad Kelam /* Point to correction field in PTP packet */
11022958d17aSHariprasad Kelam ptp_offset += 8;
1103edea0c5aSGeetha sowjanya
1104edea0c5aSGeetha sowjanya /* When the user disables HW checksum offload, the stack calculates
1105edea0c5aSGeetha sowjanya * the csum, but it does not cover the PTP timestamp inserted later.
1106edea0c5aSGeetha sowjanya * Recalculate the checksum manually, taking the timestamp into account.
1107edea0c5aSGeetha sowjanya */
1108edea0c5aSGeetha sowjanya if (udp_csum_crt) {
1109edea0c5aSGeetha sowjanya struct udphdr *uh = udp_hdr(skb);
1110edea0c5aSGeetha sowjanya
1111edea0c5aSGeetha sowjanya if (skb->ip_summed != CHECKSUM_PARTIAL && uh->check != 0) {
1112edea0c5aSGeetha sowjanya udphoff = skb_transport_offset(skb);
1113edea0c5aSGeetha sowjanya uh->check = 0;
1114edea0c5aSGeetha sowjanya skb_csum = skb_checksum(skb, udphoff, skb->len - udphoff,
1115edea0c5aSGeetha sowjanya 0);
1116edea0c5aSGeetha sowjanya if (ntohs(eth->h_proto) == ETH_P_IPV6)
1117edea0c5aSGeetha sowjanya uh->check = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1118edea0c5aSGeetha sowjanya &ipv6_hdr(skb)->daddr,
1119edea0c5aSGeetha sowjanya skb->len - udphoff,
1120edea0c5aSGeetha sowjanya ipv6_hdr(skb)->nexthdr,
1121edea0c5aSGeetha sowjanya skb_csum);
1122edea0c5aSGeetha sowjanya else
1123edea0c5aSGeetha sowjanya uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
1124edea0c5aSGeetha sowjanya ip_hdr(skb)->daddr,
1125edea0c5aSGeetha sowjanya skb->len - udphoff,
1126edea0c5aSGeetha sowjanya IPPROTO_UDP,
1127edea0c5aSGeetha sowjanya skb_csum);
1128edea0c5aSGeetha sowjanya }
11292958d17aSHariprasad Kelam }
11302958d17aSHariprasad Kelam } else {
1131c9c12d33SAleksey Makarov skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
11322958d17aSHariprasad Kelam }
1133c9c12d33SAleksey Makarov iova = sq->timestamps->iova + (sq->head * sizeof(u64));
11342958d17aSHariprasad Kelam otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
1135edea0c5aSGeetha sowjanya ptp_offset, pfvf->ptp->base_ns, udp_csum_crt);
1136c9c12d33SAleksey Makarov } else {
1137c9c12d33SAleksey Makarov skb_tx_timestamp(skb);
1138c9c12d33SAleksey Makarov }
1139c9c12d33SAleksey Makarov }
1140c9c12d33SAleksey Makarov
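/* Build and submit one SQE for an skb: verify descriptor availability,
 * linearize overly fragmented skbs, fall back to software TSO when hardware
 * segmentation is not possible, then add the SEND_HDR, extended header, SG
 * sub-descriptors and optional timestamp before flushing the SQE to HW.
 * Returns false when the SQ cannot currently accept the packet.
 */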
11413ca6c4c8SSunil Goutham bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
11423ca6c4c8SSunil Goutham struct sk_buff *skb, u16 qidx)
11433ca6c4c8SSunil Goutham {
11443ca6c4c8SSunil Goutham struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
11453ca6c4c8SSunil Goutham struct otx2_nic *pfvf = netdev_priv(netdev);
1146f0dfc4c8SRatheesh Kannoth int offset, num_segs, free_desc;
11473ca6c4c8SSunil Goutham struct nix_sqe_hdr_s *sqe_hdr;
11483ca6c4c8SSunil Goutham
1149f0dfc4c8SRatheesh Kannoth /* Check if there is enough room between producer
1150f0dfc4c8SRatheesh Kannoth * and consumer index.
11513ca6c4c8SSunil Goutham */
1152f0dfc4c8SRatheesh Kannoth free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
1153f0dfc4c8SRatheesh Kannoth if (free_desc < sq->sqe_thresh)
1154f0dfc4c8SRatheesh Kannoth return false;
11553ca6c4c8SSunil Goutham
1156f0dfc4c8SRatheesh Kannoth if (free_desc < otx2_get_sqe_count(pfvf, skb))
11573ca6c4c8SSunil Goutham return false;
11583ca6c4c8SSunil Goutham
11593ca6c4c8SSunil Goutham num_segs = skb_shinfo(skb)->nr_frags + 1;
11603ca6c4c8SSunil Goutham
11613ca6c4c8SSunil Goutham /* If SKB doesn't fit in a single SQE, linearize it.
11623ca6c4c8SSunil Goutham * TODO: Consider adding JUMP descriptor instead.
11633ca6c4c8SSunil Goutham */
11643ca6c4c8SSunil Goutham if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
11653ca6c4c8SSunil Goutham if (__skb_linearize(skb)) {
11663ca6c4c8SSunil Goutham dev_kfree_skb_any(skb);
11673ca6c4c8SSunil Goutham return true;
11683ca6c4c8SSunil Goutham }
11693ca6c4c8SSunil Goutham num_segs = skb_shinfo(skb)->nr_frags + 1;
11703ca6c4c8SSunil Goutham }
11713ca6c4c8SSunil Goutham
117286d74760SSunil Goutham if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
1173fd9d7859SHariprasad Kelam /* Insert the VLAN tag before handing the pkt to SW TSO */
1174096a93e1SSimon Horman if (skb_vlan_tag_present(skb)) {
1175fd9d7859SHariprasad Kelam skb = __vlan_hwaccel_push_inside(skb);
1176096a93e1SSimon Horman if (!skb)
1177096a93e1SSimon Horman return true;
1178096a93e1SSimon Horman }
117986d74760SSunil Goutham otx2_sq_append_tso(pfvf, sq, skb, qidx);
118086d74760SSunil Goutham return true;
118186d74760SSunil Goutham }
118286d74760SSunil Goutham
11833ca6c4c8SSunil Goutham /* Set SQE's SEND_HDR.
11843ca6c4c8SSunil Goutham * Do not clear the first 64bit as it contains constant info.
11853ca6c4c8SSunil Goutham */
11863ca6c4c8SSunil Goutham memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
11873ca6c4c8SSunil Goutham sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
11883ca6c4c8SSunil Goutham otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
11893ca6c4c8SSunil Goutham offset = sizeof(*sqe_hdr);
11903ca6c4c8SSunil Goutham
119186d74760SSunil Goutham /* Add extended header if needed */
119286d74760SSunil Goutham otx2_sqe_add_ext(pfvf, sq, skb, &offset);
119386d74760SSunil Goutham
11943ca6c4c8SSunil Goutham /* Add SG subdesc with data frags */
11953ca6c4c8SSunil Goutham if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
11963ca6c4c8SSunil Goutham otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
11973ca6c4c8SSunil Goutham return false;
11983ca6c4c8SSunil Goutham }
11993ca6c4c8SSunil Goutham
1200c9c12d33SAleksey Makarov otx2_set_txtstamp(pfvf, skb, sq, &offset);
1201c9c12d33SAleksey Makarov
12023ca6c4c8SSunil Goutham sqe_hdr->sizem1 = (offset / 16) - 1;
12033ca6c4c8SSunil Goutham
12043ca6c4c8SSunil Goutham netdev_tx_sent_queue(txq, skb->len);
12053ca6c4c8SSunil Goutham
12063ca6c4c8SSunil Goutham /* Flush SQE to HW */
12074c236d5dSGeetha sowjanya pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
12083ca6c4c8SSunil Goutham
12093ca6c4c8SSunil Goutham return true;
12103ca6c4c8SSunil Goutham }
12113184fb5bSTomasz Duszynski EXPORT_SYMBOL(otx2_sq_append_skb);
12123ca6c4c8SSunil Goutham
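/* Drain pending RX CQEs at teardown: unregister the XDP rxq info if an XDP
 * program is attached, return single-segment receive buffers to their pool,
 * free multi-segment buffers, and hand the processed CQEs back to HW.
 */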
1213b2e3406aSRatheesh Kannoth void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx)
1214abe02543SSunil Goutham {
1215abe02543SSunil Goutham struct nix_cqe_rx_s *cqe;
1216b2e3406aSRatheesh Kannoth struct otx2_pool *pool;
1217abe02543SSunil Goutham int processed_cqe = 0;
1218b2e3406aSRatheesh Kannoth u16 pool_id;
1219b2e3406aSRatheesh Kannoth u64 iova;
1220abe02543SSunil Goutham
122106059a1aSGeetha sowjanya if (pfvf->xdp_prog)
122206059a1aSGeetha sowjanya xdp_rxq_info_unreg(&cq->xdp_rxq);
122306059a1aSGeetha sowjanya
1224af3826dbSGeetha sowjanya if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
1225af3826dbSGeetha sowjanya return;
1226af3826dbSGeetha sowjanya
1227b2e3406aSRatheesh Kannoth pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
1228b2e3406aSRatheesh Kannoth pool = &pfvf->qset.pool[pool_id];
1229b2e3406aSRatheesh Kannoth
1230af3826dbSGeetha sowjanya while (cq->pend_cqe) {
1231af3826dbSGeetha sowjanya cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
1232b1bc8457SGeetha sowjanya processed_cqe++;
1233af3826dbSGeetha sowjanya cq->pend_cqe--;
1234af3826dbSGeetha sowjanya
1235af3826dbSGeetha sowjanya if (!cqe)
1236af3826dbSGeetha sowjanya continue;
1237b1bc8457SGeetha sowjanya if (cqe->sg.segs > 1) {
1238b1bc8457SGeetha sowjanya otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
1239b1bc8457SGeetha sowjanya continue;
1240b1bc8457SGeetha sowjanya }
1241abe02543SSunil Goutham iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
1242b2e3406aSRatheesh Kannoth
1243b2e3406aSRatheesh Kannoth otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
1244abe02543SSunil Goutham }
1245abe02543SSunil Goutham
1246abe02543SSunil Goutham /* Free CQEs to HW */
1247abe02543SSunil Goutham otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
1248abe02543SSunil Goutham ((u64)cq->cq_idx << 32) | processed_cqe);
1249abe02543SSunil Goutham }
12503ca6c4c8SSunil Goutham
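/* Drain pending TX CQEs at teardown: unmap and free skbs still attached to
 * completed SQEs, update BQL accounting for the corresponding netdev queue,
 * and hand the processed CQEs back to HW.
 */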
12513ca6c4c8SSunil Goutham void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
12523ca6c4c8SSunil Goutham {
125392662d9fSGeetha sowjanya int tx_pkts = 0, tx_bytes = 0;
12543ca6c4c8SSunil Goutham struct sk_buff *skb = NULL;
12553ca6c4c8SSunil Goutham struct otx2_snd_queue *sq;
12563ca6c4c8SSunil Goutham struct nix_cqe_tx_s *cqe;
125792662d9fSGeetha sowjanya struct netdev_queue *txq;
12583ca6c4c8SSunil Goutham int processed_cqe = 0;
12593ca6c4c8SSunil Goutham struct sg_list *sg;
1260ab6dddd2SSubbaraya Sundeep int qidx;
12613ca6c4c8SSunil Goutham
1262ab6dddd2SSubbaraya Sundeep qidx = cq->cq_idx - pfvf->hw.rx_queues;
1263ab6dddd2SSubbaraya Sundeep sq = &pfvf->qset.sq[qidx];
12643ca6c4c8SSunil Goutham
1265af3826dbSGeetha sowjanya if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
1266af3826dbSGeetha sowjanya return;
1267af3826dbSGeetha sowjanya
1268af3826dbSGeetha sowjanya while (cq->pend_cqe) {
1269af3826dbSGeetha sowjanya cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
1270af3826dbSGeetha sowjanya processed_cqe++;
1271af3826dbSGeetha sowjanya cq->pend_cqe--;
1272af3826dbSGeetha sowjanya
1273af3826dbSGeetha sowjanya if (!cqe)
1274af3826dbSGeetha sowjanya continue;
12753ca6c4c8SSunil Goutham sg = &sq->sg[cqe->comp.sqe_id];
12763ca6c4c8SSunil Goutham skb = (struct sk_buff *)sg->skb;
12773ca6c4c8SSunil Goutham if (skb) {
127892662d9fSGeetha sowjanya tx_bytes += skb->len;
127992662d9fSGeetha sowjanya tx_pkts++;
12803ca6c4c8SSunil Goutham otx2_dma_unmap_skb_frags(pfvf, sg);
12813ca6c4c8SSunil Goutham dev_kfree_skb_any(skb);
12823ca6c4c8SSunil Goutham sg->skb = (u64)NULL;
12833ca6c4c8SSunil Goutham }
12843ca6c4c8SSunil Goutham }
12853ca6c4c8SSunil Goutham
128692662d9fSGeetha sowjanya if (likely(tx_pkts)) {
128792662d9fSGeetha sowjanya if (qidx >= pfvf->hw.tx_queues)
128892662d9fSGeetha sowjanya qidx -= pfvf->hw.xdp_queues;
128992662d9fSGeetha sowjanya txq = netdev_get_tx_queue(pfvf->netdev, qidx);
129092662d9fSGeetha sowjanya netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
129192662d9fSGeetha sowjanya }
12923ca6c4c8SSunil Goutham /* Free CQEs to HW */
12933ca6c4c8SSunil Goutham otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
12943ca6c4c8SSunil Goutham ((u64)cq->cq_idx << 32) | processed_cqe);
12953ca6c4c8SSunil Goutham }
129650fe6c02SLinu Cherian
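/* Request the admin function over the mailbox to start or stop NIX LF
 * packet reception (NIX_LF_START_RX / NIX_LF_STOP_RX).
 */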
129750fe6c02SLinu Cherian int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
129850fe6c02SLinu Cherian {
129950fe6c02SLinu Cherian struct msg_req *msg;
130050fe6c02SLinu Cherian int err;
130150fe6c02SLinu Cherian
13024c3212f5SSunil Goutham mutex_lock(&pfvf->mbox.lock);
130350fe6c02SLinu Cherian if (enable)
130450fe6c02SLinu Cherian msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
130550fe6c02SLinu Cherian else
130650fe6c02SLinu Cherian msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
130750fe6c02SLinu Cherian
130850fe6c02SLinu Cherian if (!msg) {
13094c3212f5SSunil Goutham mutex_unlock(&pfvf->mbox.lock);
131050fe6c02SLinu Cherian return -ENOMEM;
131150fe6c02SLinu Cherian }
131250fe6c02SLinu Cherian
131350fe6c02SLinu Cherian err = otx2_sync_mbox_msg(&pfvf->mbox);
13144c3212f5SSunil Goutham mutex_unlock(&pfvf->mbox.lock);
131550fe6c02SLinu Cherian return err;
131650fe6c02SLinu Cherian }
131706059a1aSGeetha sowjanya
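/* Free skbs still attached to any SQE across all TX queues, unmapping their
 * DMA buffers and updating BQL accounting for each queue.
 */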
131892662d9fSGeetha sowjanya void otx2_free_pending_sqe(struct otx2_nic *pfvf)
131992662d9fSGeetha sowjanya {
132092662d9fSGeetha sowjanya int tx_pkts = 0, tx_bytes = 0;
132192662d9fSGeetha sowjanya struct sk_buff *skb = NULL;
132292662d9fSGeetha sowjanya struct otx2_snd_queue *sq;
132392662d9fSGeetha sowjanya struct netdev_queue *txq;
132492662d9fSGeetha sowjanya struct sg_list *sg;
132592662d9fSGeetha sowjanya int sq_idx, sqe;
132692662d9fSGeetha sowjanya
132792662d9fSGeetha sowjanya for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) {
132892662d9fSGeetha sowjanya sq = &pfvf->qset.sq[sq_idx];
132992662d9fSGeetha sowjanya for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
133092662d9fSGeetha sowjanya sg = &sq->sg[sqe];
133192662d9fSGeetha sowjanya skb = (struct sk_buff *)sg->skb;
133292662d9fSGeetha sowjanya if (skb) {
133392662d9fSGeetha sowjanya tx_bytes += skb->len;
133492662d9fSGeetha sowjanya tx_pkts++;
133592662d9fSGeetha sowjanya otx2_dma_unmap_skb_frags(pfvf, sg);
133692662d9fSGeetha sowjanya dev_kfree_skb_any(skb);
133792662d9fSGeetha sowjanya sg->skb = (u64)NULL;
133892662d9fSGeetha sowjanya }
133992662d9fSGeetha sowjanya }
134092662d9fSGeetha sowjanya
134192662d9fSGeetha sowjanya if (!tx_pkts)
134292662d9fSGeetha sowjanya continue;
134392662d9fSGeetha sowjanya txq = netdev_get_tx_queue(pfvf->netdev, sq_idx);
134492662d9fSGeetha sowjanya netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
134592662d9fSGeetha sowjanya tx_pkts = 0;
134692662d9fSGeetha sowjanya tx_bytes = 0;
134792662d9fSGeetha sowjanya }
134892662d9fSGeetha sowjanya }
134992662d9fSGeetha sowjanya
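/* Append a single-segment SG sub-descriptor for an XDP frame and record the
 * DMA address and length in the queue's sg_list for later cleanup.
 */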
135006059a1aSGeetha sowjanya static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
135106059a1aSGeetha sowjanya int len, int *offset)
135206059a1aSGeetha sowjanya {
135306059a1aSGeetha sowjanya struct nix_sqe_sg_s *sg = NULL;
135406059a1aSGeetha sowjanya u64 *iova = NULL;
135506059a1aSGeetha sowjanya
135606059a1aSGeetha sowjanya sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
135706059a1aSGeetha sowjanya sg->ld_type = NIX_SEND_LDTYPE_LDD;
135806059a1aSGeetha sowjanya sg->subdc = NIX_SUBDC_SG;
135906059a1aSGeetha sowjanya sg->segs = 1;
136006059a1aSGeetha sowjanya sg->seg1_size = len;
136106059a1aSGeetha sowjanya iova = (void *)sg + sizeof(*sg);
136206059a1aSGeetha sowjanya *iova = dma_addr;
136306059a1aSGeetha sowjanya *offset += sizeof(*sg) + sizeof(u64);
136406059a1aSGeetha sowjanya
136506059a1aSGeetha sowjanya sq->sg[sq->head].dma_addr[0] = dma_addr;
136606059a1aSGeetha sowjanya sq->sg[sq->head].size[0] = len;
136706059a1aSGeetha sowjanya sq->sg[sq->head].num_segs = 1;
136806059a1aSGeetha sowjanya }
136906059a1aSGeetha sowjanya
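/* Build a minimal SQE (SEND_HDR + SG) for an XDP frame and flush it to HW.
 * Returns false when the SQ is running low on free SQEs.
 */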
137006059a1aSGeetha sowjanya bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
137106059a1aSGeetha sowjanya {
137206059a1aSGeetha sowjanya struct nix_sqe_hdr_s *sqe_hdr;
137306059a1aSGeetha sowjanya struct otx2_snd_queue *sq;
137406059a1aSGeetha sowjanya int offset, free_sqe;
137506059a1aSGeetha sowjanya
137606059a1aSGeetha sowjanya sq = &pfvf->qset.sq[qidx];
137706059a1aSGeetha sowjanya free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
137806059a1aSGeetha sowjanya if (free_sqe < sq->sqe_thresh)
137906059a1aSGeetha sowjanya return false;
138006059a1aSGeetha sowjanya
138106059a1aSGeetha sowjanya memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
138206059a1aSGeetha sowjanya
138306059a1aSGeetha sowjanya sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
138406059a1aSGeetha sowjanya
138506059a1aSGeetha sowjanya if (!sqe_hdr->total) {
138606059a1aSGeetha sowjanya sqe_hdr->aura = sq->aura_id;
138706059a1aSGeetha sowjanya sqe_hdr->df = 1;
138806059a1aSGeetha sowjanya sqe_hdr->sq = qidx;
138906059a1aSGeetha sowjanya sqe_hdr->pnc = 1;
139006059a1aSGeetha sowjanya }
139106059a1aSGeetha sowjanya sqe_hdr->total = len;
139206059a1aSGeetha sowjanya sqe_hdr->sqe_id = sq->head;
139306059a1aSGeetha sowjanya
139406059a1aSGeetha sowjanya offset = sizeof(*sqe_hdr);
139506059a1aSGeetha sowjanya
139606059a1aSGeetha sowjanya otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
139706059a1aSGeetha sowjanya sqe_hdr->sizem1 = (offset / 16) - 1;
139806059a1aSGeetha sowjanya pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
139906059a1aSGeetha sowjanya
140006059a1aSGeetha sowjanya return true;
140106059a1aSGeetha sowjanya }
140206059a1aSGeetha sowjanya
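/* Run the attached XDP program on a received packet and act on its verdict:
 * XDP_TX bounces the frame out of an XDP TX queue, XDP_REDIRECT forwards it
 * (the caller flushes later based on *need_xdp_flush), and XDP_DROP unmaps
 * and frees the buffer. Returns true when the frame was consumed here;
 * false (e.g. XDP_PASS) lets the caller continue normal RX processing.
 */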
140306059a1aSGeetha sowjanya static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
140406059a1aSGeetha sowjanya struct bpf_prog *prog,
140506059a1aSGeetha sowjanya struct nix_cqe_rx_s *cqe,
140670b2b689SSebastian Andrzej Siewior struct otx2_cq_queue *cq,
140770b2b689SSebastian Andrzej Siewior bool *need_xdp_flush)
140806059a1aSGeetha sowjanya {
1409e7a36b56SGeetha sowjanya unsigned char *hard_start;
141006059a1aSGeetha sowjanya int qidx = cq->cq_idx;
141106059a1aSGeetha sowjanya struct xdp_buff xdp;
141206059a1aSGeetha sowjanya struct page *page;
141306059a1aSGeetha sowjanya u64 iova, pa;
141406059a1aSGeetha sowjanya u32 act;
141506059a1aSGeetha sowjanya int err;
141606059a1aSGeetha sowjanya
141706059a1aSGeetha sowjanya iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
141806059a1aSGeetha sowjanya pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
141906059a1aSGeetha sowjanya page = virt_to_page(phys_to_virt(pa));
142006059a1aSGeetha sowjanya
142106059a1aSGeetha sowjanya xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
142206059a1aSGeetha sowjanya
1423e7a36b56SGeetha sowjanya hard_start = (unsigned char *)phys_to_virt(pa);
1424e7a36b56SGeetha sowjanya xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
142506059a1aSGeetha sowjanya cqe->sg.seg_size, false);
142606059a1aSGeetha sowjanya
142706059a1aSGeetha sowjanya act = bpf_prog_run_xdp(prog, &xdp);
142806059a1aSGeetha sowjanya
142906059a1aSGeetha sowjanya switch (act) {
143006059a1aSGeetha sowjanya case XDP_PASS:
143106059a1aSGeetha sowjanya break;
143206059a1aSGeetha sowjanya case XDP_TX:
143306059a1aSGeetha sowjanya qidx += pfvf->hw.tx_queues;
143406059a1aSGeetha sowjanya cq->pool_ptrs++;
143506059a1aSGeetha sowjanya return otx2_xdp_sq_append_pkt(pfvf, iova,
143606059a1aSGeetha sowjanya cqe->sg.seg_size, qidx);
143706059a1aSGeetha sowjanya case XDP_REDIRECT:
143806059a1aSGeetha sowjanya cq->pool_ptrs++;
143906059a1aSGeetha sowjanya err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
144006059a1aSGeetha sowjanya
144106059a1aSGeetha sowjanya otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
144206059a1aSGeetha sowjanya DMA_FROM_DEVICE);
144370b2b689SSebastian Andrzej Siewior if (!err) {
144470b2b689SSebastian Andrzej Siewior *need_xdp_flush = true;
144506059a1aSGeetha sowjanya return true;
144670b2b689SSebastian Andrzej Siewior }
144706059a1aSGeetha sowjanya put_page(page);
144806059a1aSGeetha sowjanya break;
144906059a1aSGeetha sowjanya default:
1450c8064e5bSPaolo Abeni bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
145106059a1aSGeetha sowjanya break;
145206059a1aSGeetha sowjanya case XDP_ABORTED:
145306059a1aSGeetha sowjanya trace_xdp_exception(pfvf->netdev, prog, act);
145406059a1aSGeetha sowjanya break;
145506059a1aSGeetha sowjanya case XDP_DROP:
145606059a1aSGeetha sowjanya otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
145706059a1aSGeetha sowjanya DMA_FROM_DEVICE);
145806059a1aSGeetha sowjanya put_page(page);
145906059a1aSGeetha sowjanya cq->pool_ptrs++;
146006059a1aSGeetha sowjanya return true;
146106059a1aSGeetha sowjanya }
146206059a1aSGeetha sowjanya return false;
146306059a1aSGeetha sowjanya }
1464