// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"

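/**
 * ixgbe_xsk_pool - Retrieve the AF_XDP buffer pool bound to a ring
 * @adapter: board private structure
 * @ring: ring whose queue index is used for the lookup
 *
 * Returns the xsk_buff_pool registered for the ring's queue id, or NULL
 * if no XDP program is loaded or zero-copy is not enabled on that queue.
 **/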
struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(adapter->netdev, qid);
}

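/**
 * ixgbe_xsk_pool_enable - Enable AF_XDP zero-copy on a queue pair
 * @adapter: board private structure
 * @pool: buffer pool supplied by the AF_XDP socket
 * @qid: Rx/Tx queue pair index
 *
 * DMA maps the pool, marks the queue as zero-copy and, if the interface
 * is running, restarts the ring pair and kicks NAPI so Rx can begin.
 *
 * Returns 0 on success, negative errno on failure.
 **/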
static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
				 struct xsk_buff_pool *pool,
				 u16 qid)
{
	struct net_device *netdev = adapter->netdev;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	set_bit(qid, adapter->af_xdp_zc_qps);

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
		if (err) {
			clear_bit(qid, adapter->af_xdp_zc_qps);
			xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}

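/**
 * ixgbe_xsk_pool_disable - Disable AF_XDP zero-copy on a queue pair
 * @adapter: board private structure
 * @qid: Rx/Tx queue pair index
 *
 * Returns 0 on success, -EINVAL if no pool is attached to the queue.
 **/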
static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	struct xsk_buff_pool *pool;
	bool if_running;

	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	clear_bit(qid, adapter->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}

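/**
 * ixgbe_xsk_pool_setup - Enable or disable an AF_XDP buffer pool
 * @adapter: board private structure
 * @pool: buffer pool to attach, or NULL to detach the current pool
 * @qid: Rx/Tx queue pair index
 *
 * Returns 0 on success, negative errno on failure.
 **/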
int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
			 struct xsk_buff_pool *pool,
			 u16 qid)
{
	return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
		ixgbe_xsk_pool_disable(adapter, qid);
}

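/**
 * ixgbe_run_xdp_zc - Run the XDP program on a zero-copy Rx buffer
 * @adapter: board private structure
 * @rx_ring: Rx ring the buffer was received on
 * @xdp: xdp_buff backed by an xsk pool buffer
 *
 * Returns IXGBE_XDP_PASS, IXGBE_XDP_TX, IXGBE_XDP_REDIR or
 * IXGBE_XDP_CONSUMED depending on the verdict of the XDP program.
 **/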
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ixgbe_ring *ring;
	struct xdp_frame *xdpf;
	u32 act;

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return IXGBE_XDP_REDIR;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto out_failure;
		ring = ixgbe_determine_xdp_ring(adapter);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_lock(&ring->tx_lock);
		result = ixgbe_xmit_xdp_ring(ring, xdpf);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_unlock(&ring->tx_lock);
		if (result == IXGBE_XDP_CONSUMED)
			goto out_failure;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	}
	return result;
}

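/**
 * ixgbe_alloc_rx_buffers_zc - Refill an Rx ring with xsk pool buffers
 * @rx_ring: Rx ring to post buffers to
 * @count: number of descriptors to refill
 *
 * Allocates buffers from the xsk pool, writes their DMA addresses into the
 * Rx descriptors and bumps the tail register if anything was posted.
 *
 * Returns true if all @count descriptors were refilled, false if the pool
 * ran out of buffers first.
 **/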
bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	dma_addr_t dma;
	bool ok = true;

	/* nothing to do */
	if (!count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}

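/**
 * ixgbe_construct_skb_zc - Copy a zero-copy Rx buffer into a new skb
 * @rx_ring: Rx ring the buffer was received on
 * @bi: Rx buffer holding the xdp_buff to copy
 *
 * Used on the XDP_PASS path: the frame data and metadata are copied into a
 * freshly allocated skb and the xsk buffer is recycled back to the pool.
 *
 * Returns the skb, or NULL on allocation failure.
 **/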
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      struct ixgbe_rx_buffer *bi)
{
	unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
	unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       bi->xdp->data_end - bi->xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(bi->xdp);
	bi->xdp = NULL;
	return skb;
}

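/**
 * ixgbe_inc_ntc - Advance next_to_clean, wrapping at the end of the ring
 * @rx_ring: Rx ring being cleaned
 *
 * Also prefetches the descriptor that will be cleaned next.
 **/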
static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}

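/**
 * ixgbe_clean_rx_irq_zc - Clean a zero-copy Rx ring within the NAPI budget
 * @q_vector: interrupt vector the ring belongs to
 * @rx_ring: Rx ring to clean
 * @budget: NAPI budget
 *
 * Runs the XDP program on each completed descriptor, refills the ring from
 * the xsk pool and updates the pool's need_wakeup state.
 *
 * Returns the number of packets processed, or @budget if a buffer
 * allocation failure occurred and the pool does not use need_wakeup.
 **/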
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_zc(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			ixgbe_inc_ntc(rx_ring);
			next_bi =
			       &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->discard = true;
			continue;
		}

		if (unlikely(bi->discard)) {
			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			bi->discard = false;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		bi->xdp->data_end = bi->xdp->data + size;
		xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);

		if (xdp_res) {
			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(bi->xdp);

			bi->xdp = NULL;
			total_rx_packets++;
			total_rx_bytes += size;

			cleaned_count++;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);

		ixgbe_xdp_ring_update_tail_locked(ring);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

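/**
 * ixgbe_xsk_clean_rx_ring - Return all outstanding Rx buffers to the pool
 * @rx_ring: Rx ring being torn down
 **/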
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct ixgbe_rx_buffer *bi;
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

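/**
 * ixgbe_xmit_zc - Transmit frames queued on the AF_XDP Tx ring
 * @xdp_ring: XDP Tx ring to place descriptors on
 * @budget: maximum number of descriptors to post
 *
 * Peeks descriptors from the xsk pool and posts them to the hardware ring,
 * bumping the tail once at the end.
 *
 * Returns true when there is no more Tx work to do, false if transmission
 * stopped early because the ring filled up or the link is down.
 **/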
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;
	u32 cmd_type;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
		    !netif_carrier_ok(xdp_ring->netdev)) {
			work_done = false;
			break;
		}

		if (!xsk_tx_peek_desc(pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;
		tx_bi->xdpf = NULL;
		tx_bi->gso_segs = 1;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= desc.len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_tx_release(pool);
	}

	return !!budget && work_done;
}

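/**
 * ixgbe_clean_xdp_tx_buffer - Free a completed XDP_TX/ndo_xdp_xmit buffer
 * @tx_ring: XDP Tx ring the frame was sent on
 * @tx_bi: Tx buffer holding the xdp_frame and DMA mapping to release
 **/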
static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

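/**
 * ixgbe_clean_xdp_tx_irq - Clean completed descriptors on an XDP Tx ring
 * @q_vector: interrupt vector the ring belongs to
 * @tx_ring: XDP Tx ring to clean
 * @napi_budget: NAPI budget
 *
 * Completes xsk frames back to the pool, frees XDP_TX frames and then
 * attempts to transmit more frames from the AF_XDP Tx queue.
 *
 * Returns true if Tx work is complete.
 **/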
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	unsigned int total_packets = 0, total_bytes = 0;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	tx_bi = &tx_ring->tx_buffer_info[ntc];
	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);

	while (ntc != ntu) {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		ntc++;
		if (unlikely(ntc == tx_ring->count)) {
			ntc = 0;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);
	}

	tx_ring->next_to_clean = ntc;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);

	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);

	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}

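/**
 * ixgbe_xsk_wakeup - Wake up a queue for AF_XDP processing
 * @dev: net_device the wakeup is issued for
 * @qid: queue id to wake up
 * @flags: XDP_WAKEUP_RX and/or XDP_WAKEUP_TX
 *
 * Rearms the queue's interrupt unless NAPI is already scheduled, in which
 * case the missed run is recorded and handled by the ongoing poll.
 *
 * Returns 0 on success, negative errno if the queue cannot be woken.
 **/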
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -ENXIO;

	if (qid >= adapter->num_xdp_queues)
		return -ENXIO;

	ring = adapter->xdp_ring[qid];

	if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
		return -ENETDOWN;

	if (!ring->xsk_pool)
		return -ENXIO;

	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}

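/**
 * ixgbe_xsk_clean_tx_ring - Release all outstanding buffers on an XDP Tx ring
 * @tx_ring: XDP Tx ring being torn down
 *
 * Frees any pending XDP_TX frames and reports leftover xsk frames as
 * completed to the pool.
 **/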
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);
}