// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/dma-mapping.h>
#include <linux/ip.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <uapi/linux/udp.h>
#include "funeth.h"
#include "funeth_ktls.h"
#include "funeth_txrx.h"
#include "funeth_trace.h"
#include "fun_queue.h"

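/* An XDP Tx queue reclaims up to FUN_XDP_CLEAN_BATCH completed packets
 * before transmitting whenever fewer than FUN_XDP_CLEAN_THRES descriptors
 * are free (see fun_xdp_tx()).
 */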
#define FUN_XDP_CLEAN_THRES 32
#define FUN_XDP_CLEAN_BATCH 16

/* DMA-map a packet and return the (length, DMA_address) pairs for its
 * segments. If a mapping error occurs -ENOMEM is returned. The packet is
 * described by an skb_shared_info (which may be NULL) plus one additional
 * linear buffer given by @data/@data_len.
 */
static int fun_map_pkt(struct device *dev, const struct skb_shared_info *si,
		       void *data, unsigned int data_len,
		       dma_addr_t *addr, unsigned int *len)
{
	const skb_frag_t *fp, *end;

	*len = data_len;
	*addr = dma_map_single(dev, data, *len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;

	if (!si)
		return 0;

	for (fp = si->frags, end = fp + si->nr_frags; fp < end; fp++) {
		*++len = skb_frag_size(fp);
		*++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
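	/* Unmap the frags mapped so far; afterwards @addr points just past
	 * the slot holding the dma_map_single() address of the linear data,
	 * which addr[-1] then unmaps.
	 */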
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], data_len, DMA_TO_DEVICE);
	return -ENOMEM;
}

/* Return the address just past the end of a Tx queue's descriptor ring.
 * It exploits the fact that the HW writeback area is just after the end
 * of the descriptor ring.
 */
static void *txq_end(const struct funeth_txq *q)
{
	return (void *)q->hw_wb;
}

/* Return the amount of space within a Tx ring from the given address to the
 * end.
 */
static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
{
	return txq_end(q) - p;
}

/* Return the number of Tx descriptors occupied by a Tx request. */
static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req)
{
	return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
}

/* Write a gather list to the Tx descriptor at @req from @ngle address/length
 * pairs.
 */
static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
					  struct fun_eth_tx_req *req,
					  const dma_addr_t *addrs,
					  const unsigned int *lens,
					  unsigned int ngle)
{
	struct fun_dataop_gl *gle;
	unsigned int i;

	req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;

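	/* Write entries up to the end of the ring; if the gather list is
	 * longer it wraps and continues at the start of the ring.
	 */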
	for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
	     i < ngle && txq_to_end(q, gle); i++, gle++)
		fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);

	if (txq_to_end(q, gle) == 0) {
		gle = (struct fun_dataop_gl *)q->desc;
		for ( ; i < ngle; i++, gle++)
			fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
	}

	return gle;
}

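/* Return the 16-bit word of the TCP header that holds the data offset and
 * flag fields, as needed for the LSO offload descriptor.
 */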
static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
{
	return *(__be16 *)&tcp_flag_word(th);
}

static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
				  unsigned int *tls_len)
{
#if IS_ENABLED(CONFIG_TLS_DEVICE)
	const struct fun_ktls_tx_ctx *tls_ctx;
	u32 datalen, seq;

	datalen = skb->len - skb_tcp_all_headers(skb);
	if (!datalen)
		return skb;

	if (likely(!tls_offload_tx_resync_pending(skb->sk))) {
		seq = ntohl(tcp_hdr(skb)->seq);
		tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);

		if (likely(tls_ctx->next_seq == seq)) {
			*tls_len = datalen;
			return skb;
		}
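		/* Out of sync: request a resync only if @seq is ahead of
		 * the driver's record state (unsigned difference below
		 * U32_MAX / 4); either way fall back to SW encryption.
		 */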
		if (seq - tls_ctx->next_seq < U32_MAX / 4) {
			tls_offload_tx_resync_request(skb->sk, seq,
						      tls_ctx->next_seq);
		}
	}

	FUN_QSTAT_INC(q, tx_tls_fallback);
	skb = tls_encrypt_skb(skb);
	if (!skb)
		FUN_QSTAT_INC(q, tx_tls_drops);

	return skb;
#else
	return NULL;
#endif
}

/* Write as many descriptors as needed for the supplied skb starting at the
 * current producer location. The caller has ensured that enough descriptors
 * are available.
 *
 * Returns the number of descriptors written, 0 on error.
 */
static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
				   unsigned int tls_len)
{
	unsigned int extra_bytes = 0, extra_pkts = 0;
	unsigned int idx = q->prod_cnt & q->mask;
	const struct skb_shared_info *shinfo;
	unsigned int lens[MAX_SKB_FRAGS + 1];
	dma_addr_t addrs[MAX_SKB_FRAGS + 1];
	struct fun_eth_tx_req *req;
	struct fun_dataop_gl *gle;
	const struct tcphdr *th;
	unsigned int l4_hlen;
	unsigned int ngle;
	u16 flags;

	shinfo = skb_shinfo(skb);
	if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data,
				 skb_headlen(skb), addrs, lens))) {
		FUN_QSTAT_INC(q, tx_map_err);
		return 0;
	}

	req = fun_tx_desc_addr(q, idx);
	req->op = FUN_ETH_OP_TX;
	req->len8 = 0;
	req->flags = 0;
	req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
	req->repr_idn = 0;
	req->encap_proto = 0;

	if (likely(shinfo->gso_size)) {
		if (skb->encapsulation) {
			u16 ol4_ofst;

			flags = FUN_ETH_OUTER_EN | FUN_ETH_INNER_LSO |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_OUTER_L3_LEN;
			if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL |
						SKB_GSO_UDP_TUNNEL_CSUM)) {
				flags |= FUN_ETH_UPDATE_OUTER_L4_LEN |
					 FUN_ETH_OUTER_UDP;
				if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
					flags |= FUN_ETH_UPDATE_OUTER_L4_CKSUM;
				ol4_ofst = skb_transport_offset(skb);
			} else {
				ol4_ofst = skb_inner_network_offset(skb);
			}

			if (ip_hdr(skb)->version == 4)
				flags |= FUN_ETH_UPDATE_OUTER_L3_CKSUM;
			else
				flags |= FUN_ETH_OUTER_IPV6;

			if (skb->inner_network_header) {
				if (inner_ip_hdr(skb)->version == 4)
					flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM |
						 FUN_ETH_UPDATE_INNER_L3_LEN;
				else
					flags |= FUN_ETH_INNER_IPV6 |
						 FUN_ETH_UPDATE_INNER_L3_LEN;
			}
			th = inner_tcp_hdr(skb);
			l4_hlen = __tcp_hdrlen(th);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     tcp_hdr_doff_flags(th), 0,
					     skb_inner_network_offset(skb),
					     skb_inner_transport_offset(skb),
					     skb_network_offset(skb), ol4_ofst);
			FUN_QSTAT_INC(q, tx_encap_tso);
		} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
			flags = FUN_ETH_INNER_LSO | FUN_ETH_INNER_UDP |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_INNER_L4_LEN |
				FUN_ETH_UPDATE_INNER_L3_LEN;

			if (ip_hdr(skb)->version == 4)
				flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
			else
				flags |= FUN_ETH_INNER_IPV6;

			l4_hlen = sizeof(struct udphdr);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     cpu_to_be16(l4_hlen << 10), 0,
					     skb_network_offset(skb),
					     skb_transport_offset(skb), 0, 0);
			FUN_QSTAT_INC(q, tx_uso);
		} else {
			/* HW considers one set of headers as inner */
			flags = FUN_ETH_INNER_LSO |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_INNER_L3_LEN;
			if (shinfo->gso_type & SKB_GSO_TCPV6)
				flags |= FUN_ETH_INNER_IPV6;
			else
				flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
			th = tcp_hdr(skb);
			l4_hlen = __tcp_hdrlen(th);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     tcp_hdr_doff_flags(th), 0,
					     skb_network_offset(skb),
					     skb_transport_offset(skb), 0, 0);
			FUN_QSTAT_INC(q, tx_tso);
		}

		u64_stats_update_begin(&q->syncp);
		q->stats.tx_cso += shinfo->gso_segs;
		u64_stats_update_end(&q->syncp);

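		/* LSO replicates the headers in each segment the HW emits;
		 * account for the extra packets and their header bytes.
		 */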
		extra_pkts = shinfo->gso_segs - 1;
		extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
			       l4_hlen) * extra_pkts;
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
		if (skb->csum_offset == offsetof(struct udphdr, check))
			flags |= FUN_ETH_INNER_UDP;
		fun_eth_offload_init(&req->offload, flags, 0, 0, 0, 0,
				     skb_checksum_start_offset(skb), 0, 0);
		FUN_QSTAT_INC(q, tx_cso);
	} else {
		fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
	}

	ngle = shinfo->nr_frags + 1;
	req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);

	gle = fun_write_gl(q, req, addrs, lens, ngle);

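	/* For TLS offload append the TLS metadata right after the gather
	 * list and advance the connection's expected record sequence.
	 */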
	if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
		struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
		struct fun_ktls_tx_ctx *tls_ctx;

		req->len8 += FUNETH_TLS_SZ / 8;
		req->flags = cpu_to_be16(FUN_ETH_TX_TLS);

		tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
		tls->tlsid = tls_ctx->tlsid;
		tls_ctx->next_seq += tls_len;

		u64_stats_update_begin(&q->syncp);
		q->stats.tx_tls_bytes += tls_len;
		q->stats.tx_tls_pkts += 1 + extra_pkts;
		u64_stats_update_end(&q->syncp);
	}

	u64_stats_update_begin(&q->syncp);
	q->stats.tx_bytes += skb->len + extra_bytes;
	q->stats.tx_pkts += 1 + extra_pkts;
	u64_stats_update_end(&q->syncp);

	q->info[idx].skb = skb;

	trace_funeth_tx(q, skb->len, idx, req->dataop.ngather);
	return tx_req_ndesc(req);
}

/* Return the number of available descriptors of a Tx queue.
 * HW assumes head==tail means the ring is empty so we need to keep one
 * descriptor unused.
 */
static unsigned int fun_txq_avail(const struct funeth_txq *q)
{
	return q->mask - q->prod_cnt + q->cons_cnt;
}

/* Stop a queue if it can't handle another worst-case packet. */
static void fun_tx_check_stop(struct funeth_txq *q)
{
	if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC))
		return;

	netif_tx_stop_queue(q->ndq);

	/* NAPI reclaim is freeing packets in parallel with us and we may race.
	 * We have stopped the queue but check again after synchronizing with
	 * reclaim.
	 */
	smp_mb();
	if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC))
		FUN_QSTAT_INC(q, tx_nstops);
	else
		netif_tx_start_queue(q->ndq);
}

/* Return true if a queue has enough space to restart. Current condition is
 * that the queue must be >= 1/4 empty.
 */
static bool fun_txq_may_restart(struct funeth_txq *q)
{
	return fun_txq_avail(q) >= q->mask / 4;
}

netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int qid = skb_get_queue_mapping(skb);
	struct funeth_txq *q = fp->txqs[qid];
	unsigned int tls_len = 0;
	unsigned int ndesc;

	if (tls_is_skb_tx_device_offloaded(skb)) {
		skb = fun_tls_tx(skb, q, &tls_len);
		if (unlikely(!skb))
			goto dropped;
	}

	ndesc = write_pkt_desc(skb, q, tls_len);
	if (unlikely(!ndesc)) {
		dev_kfree_skb_any(skb);
		goto dropped;
	}

	q->prod_cnt += ndesc;
	fun_tx_check_stop(q);

	skb_tx_timestamp(skb);

	if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more()))
		fun_txq_wr_db(q);
	else
		FUN_QSTAT_INC(q, tx_more);

	return NETDEV_TX_OK;

dropped:
	/* A dropped packet may be the last one in an xmit_more train,
	 * ring the doorbell just in case.
	 */
	if (!netdev_xmit_more())
		fun_txq_wr_db(q);
	return NETDEV_TX_OK;
}

/* Return a Tx queue's HW head index written back to host memory. */
static u16 txq_hw_head(const struct funeth_txq *q)
{
	return (u16)be64_to_cpu(*q->hw_wb);
}

/* Unmap the Tx packet starting at the given descriptor index and
 * return the number of Tx descriptors it occupied.
 */
static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx)
{
	const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
	unsigned int ngle = req->dataop.ngather;
	struct fun_dataop_gl *gle;

	if (ngle) {
		gle = (struct fun_dataop_gl *)req->dataop.imm;
		dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
				 be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);

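		/* The gather list may wrap past the end of the ring: unmap
		 * the entries up to the end, then continue from the start.
		 */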
		for (gle++; --ngle && txq_to_end(q, gle); gle++)
			dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
				       be32_to_cpu(gle->sgl_len),
				       DMA_TO_DEVICE);

		for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++)
			dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
				       be32_to_cpu(gle->sgl_len),
				       DMA_TO_DEVICE);
	}

	return tx_req_ndesc(req);
}

/* Reclaim completed Tx descriptors and free their packets. Restart a stopped
 * queue if we freed enough descriptors.
 *
 * Return true if we exhausted the budget while there is more work to be done.
 */
static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
{
	unsigned int npkts = 0, nbytes = 0, ndesc = 0;
	unsigned int head, limit, reclaim_idx;

	/* budget may be 0, e.g., netpoll */
	limit = budget ? budget : UINT_MAX;

	for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
	     head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) {
		/* The HW head is continually updated, ensure we don't read
		 * descriptor state before the head tells us to reclaim it.
		 * On the enqueue side the doorbell is an implicit write
		 * barrier.
		 */
		rmb();

		do {
			unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
			struct sk_buff *skb = q->info[reclaim_idx].skb;

			trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);

			nbytes += skb->len;
			napi_consume_skb(skb, budget);
			ndesc += pkt_desc;
			reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
			npkts++;
		} while (reclaim_idx != head && npkts < limit);
	}

	q->cons_cnt += ndesc;
	netdev_tx_completed_queue(q->ndq, npkts, nbytes);
	smp_mb(); /* pairs with the one in fun_tx_check_stop() */

	if (unlikely(netif_tx_queue_stopped(q->ndq) &&
		     fun_txq_may_restart(q))) {
		netif_tx_wake_queue(q->ndq);
		FUN_QSTAT_INC(q, tx_nrestarts);
	}

	return reclaim_idx != head;
}

/* The NAPI handler for Tx queues. */
int fun_txq_napi_poll(struct napi_struct *napi, int budget)
{
	struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
	struct funeth_txq *q = irq->txq;
	unsigned int db_val;

	if (fun_txq_reclaim(q, budget))
		return budget; /* exhausted budget */

	napi_complete(napi); /* exhausted pending work */
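	/* Re-arm the queue's IRQ via its doorbell, reporting the current
	 * consumer index.
	 */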
	db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask);
	writel(db_val, q->db);
	return 0;
}

/* Reclaim up to @budget completed Tx packets from a Tx XDP queue. */
static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
{
	unsigned int npkts = 0, ndesc = 0, head, reclaim_idx;

	for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
	     head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
		/* The HW head is continually updated, ensure we don't read
		 * descriptor state before the head tells us to reclaim it.
		 * On the enqueue side the doorbell is an implicit write
		 * barrier.
		 */
		rmb();

		do {
			unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);

			xdp_return_frame(q->info[reclaim_idx].xdpf);

			trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);

			reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
			ndesc += pkt_desc;
			npkts++;
		} while (reclaim_idx != head && npkts < budget);
	}

	q->cons_cnt += ndesc;
	return npkts;
}

bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
{
	unsigned int idx, nfrags = 1, ndesc = 1, tot_len = xdpf->len;
	const struct skb_shared_info *si = NULL;
	unsigned int lens[MAX_SKB_FRAGS + 1];
	dma_addr_t dma[MAX_SKB_FRAGS + 1];
	struct fun_eth_tx_req *req;

	if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
		fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);

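	/* A multi-frag frame needs one gather entry per fragment plus the
	 * linear part; work out how many SQEs that occupies.
	 */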
	if (unlikely(xdp_frame_has_frags(xdpf))) {
		si = xdp_get_shared_info_from_frame(xdpf);
		tot_len = xdp_get_frame_len(xdpf);
		nfrags += si->nr_frags;
		ndesc = DIV_ROUND_UP((sizeof(*req) + nfrags *
				      sizeof(struct fun_dataop_gl)),
				     FUNETH_SQE_SIZE);
	}

	if (unlikely(fun_txq_avail(q) < ndesc)) {
		FUN_QSTAT_INC(q, tx_xdp_full);
		return false;
	}

	if (unlikely(fun_map_pkt(q->dma_dev, si, xdpf->data, xdpf->len, dma,
				 lens))) {
		FUN_QSTAT_INC(q, tx_map_err);
		return false;
	}

	idx = q->prod_cnt & q->mask;
	req = fun_tx_desc_addr(q, idx);
	req->op = FUN_ETH_OP_TX;
	req->len8 = 0;
	req->flags = 0;
	req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
	req->repr_idn = 0;
	req->encap_proto = 0;
	fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
	req->dataop = FUN_DATAOP_HDR_INIT(nfrags, 0, nfrags, 0, tot_len);

	fun_write_gl(q, req, dma, lens, nfrags);

	q->info[idx].xdpf = xdpf;

	u64_stats_update_begin(&q->syncp);
	q->stats.tx_bytes += tot_len;
	q->stats.tx_pkts++;
	u64_stats_update_end(&q->syncp);

	trace_funeth_tx(q, tot_len, idx, nfrags);
	q->prod_cnt += ndesc;

	return true;
}

int fun_xdp_xmit_frames(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q, **xdpqs;
	int i, q_idx;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdpqs = rcu_dereference_bh(fp->xdpqs);
	if (unlikely(!xdpqs))
		return -ENETDOWN;

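	/* XDP Tx queues are indexed by CPU; bail if this CPU has no queue. */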
	q_idx = smp_processor_id();
	if (unlikely(q_idx >= fp->num_xdpqs))
		return -ENXIO;

	for (q = xdpqs[q_idx], i = 0; i < n; i++)
		if (!fun_xdp_tx(q, frames[i]))
			break;

	if (unlikely(flags & XDP_XMIT_FLUSH))
		fun_txq_wr_db(q);
	return i;
}

/* Purge a Tx queue of any queued packets. Should be called once HW access
 * to the packets has been revoked, e.g., after the queue has been disabled.
 */
static void fun_txq_purge(struct funeth_txq *q)
{
	while (q->cons_cnt != q->prod_cnt) {
		unsigned int idx = q->cons_cnt & q->mask;

		q->cons_cnt += fun_unmap_pkt(q, idx);
		dev_kfree_skb_any(q->info[idx].skb);
	}
	netdev_tx_reset_queue(q->ndq);
}

static void fun_xdpq_purge(struct funeth_txq *q)
{
	while (q->cons_cnt != q->prod_cnt) {
		unsigned int idx = q->cons_cnt & q->mask;

		q->cons_cnt += fun_unmap_pkt(q, idx);
		xdp_return_frame(q->info[idx].xdpf);
	}
}

/* Create a Tx queue, allocating all the host resources needed. */
static struct funeth_txq *fun_txq_create_sw(struct net_device *dev,
					    unsigned int qidx,
					    unsigned int ndesc,
					    struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q;
	int numa_node;

	if (irq)
		numa_node = fun_irq_node(irq); /* skb Tx queue */
	else
		numa_node = cpu_to_node(qidx); /* XDP Tx queue */

	q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
	if (!q)
		goto err;

	q->dma_dev = &fp->pdev->dev;
	q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE,
				     sizeof(*q->info), true, numa_node,
				     &q->dma_addr, (void **)&q->info,
				     &q->hw_wb);
	if (!q->desc)
		goto free_q;

	q->netdev = dev;
	q->mask = ndesc - 1;
	q->qidx = qidx;
	q->numa_node = numa_node;
	u64_stats_init(&q->syncp);
	q->init_state = FUN_QSTATE_INIT_SW;
	return q;

free_q:
	kfree(q);
err:
	netdev_err(dev, "Can't allocate memory for %s queue %u\n",
		   irq ? "Tx" : "XDP", qidx);
	return NULL;
}

static void fun_txq_free_sw(struct funeth_txq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true,
			  q->desc, q->dma_addr, q->info);

	fp->tx_packets += q->stats.tx_pkts;
	fp->tx_bytes += q->stats.tx_bytes;
	fp->tx_dropped += q->stats.tx_map_err;

	kfree(q);
}

/* Allocate the device portion of a Tx queue. */
int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	unsigned int irq_idx, ndesc = q->mask + 1;
	int err;

	q->irq = irq;
	*q->hw_wb = 0;
	q->prod_cnt = 0;
	q->cons_cnt = 0;
	irq_idx = irq ? irq->irq_idx : 0;

	err = fun_sq_create(fp->fdev,
			    FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS |
			    FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0,
			    FUN_HCI_ID_INVALID, ilog2(FUNETH_SQE_SIZE), ndesc,
			    q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec,
			    irq_idx, 0, fp->fdev->kern_end_qid, 0,
			    &q->hw_qid, &q->db);
	if (err)
		goto out;

	err = fun_create_and_bind_tx(fp, q->hw_qid);
	if (err < 0)
		goto free_devq;
	q->ethid = err;

	if (irq) {
		irq->txq = q;
		q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);
		q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec,
					      fp->tx_coal_count);
		writel(q->irq_db_val, q->db);
	}

	q->init_state = FUN_QSTATE_INIT_FULL;
	netif_info(fp, ifup, q->netdev,
		   "%s queue %u, depth %u, HW qid %u, IRQ idx %u, eth id %u, node %d\n",
		   irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,
		   q->ethid, q->numa_node);
	return 0;

free_devq:
	fun_destroy_sq(fp->fdev, q->hw_qid);
out:
	netdev_err(q->netdev,
		   "Failed to create %s queue %u on device, error %d\n",
		   irq ? "Tx" : "XDP", q->qidx, err);
	return err;
}

static void fun_txq_free_dev(struct funeth_txq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	if (q->init_state < FUN_QSTATE_INIT_FULL)
		return;

	netif_info(fp, ifdown, q->netdev,
		   "Freeing %s queue %u (id %u), IRQ %u, ethid %u\n",
		   q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,
		   q->irq ? q->irq->irq_idx : 0, q->ethid);

	fun_destroy_sq(fp->fdev, q->hw_qid);
	fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid);

	if (q->irq) {
		q->irq->txq = NULL;
		fun_txq_purge(q);
	} else {
		fun_xdpq_purge(q);
	}

	q->init_state = FUN_QSTATE_INIT_SW;
}

/* Create or advance a Tx queue, allocating all the host and device resources
 * needed to reach the target state.
 */
int funeth_txq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ndesc, struct fun_irq *irq, int state,
		      struct funeth_txq **qp)
{
	struct funeth_txq *q = *qp;
	int err;

	if (!q)
		q = fun_txq_create_sw(dev, qidx, ndesc, irq);
	if (!q)
		return -ENOMEM;

	if (q->init_state >= state)
		goto out;

	err = fun_txq_create_dev(q, irq);
	if (err) {
		if (!*qp)
			fun_txq_free_sw(q);
		return err;
	}

out:
	*qp = q;
	return 0;
}

/* Free Tx queue resources until it reaches the target state.
 * The queue must be already disconnected from the stack.
 */
struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
{
	if (state < FUN_QSTATE_INIT_FULL)
		fun_txq_free_dev(q);

	if (state == FUN_QSTATE_DESTROYED) {
		fun_txq_free_sw(q);
		q = NULL;
	}

	return q;
}