1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright(c) 2018 Intel Corporation. */
3 
4 #ifndef I40E_TXRX_COMMON_
5 #define I40E_TXRX_COMMON_
6 
7 void i40e_fd_handle_status(struct i40e_ring *rx_ring,
8 			   union i40e_rx_desc *rx_desc, u8 prog_id);
9 int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
10 struct i40e_rx_buffer *i40e_clean_programming_status(
11 	struct i40e_ring *rx_ring,
12 	union i40e_rx_desc *rx_desc,
13 	u64 qw);
14 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
15 			     union i40e_rx_desc *rx_desc, struct sk_buff *skb);
16 void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
17 void i40e_update_rx_stats(struct i40e_ring *rx_ring,
18 			  unsigned int total_rx_bytes,
19 			  unsigned int total_rx_packets);
20 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res);
21 void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
22 
23 #define I40E_XDP_PASS		0
24 #define I40E_XDP_CONSUMED	BIT(0)
25 #define I40E_XDP_TX		BIT(1)
26 #define I40E_XDP_REDIR		BIT(2)
27 
28 /**
29  * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
30  **/
31 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
32 				u32 td_tag)
33 {
34 	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
35 			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
36 			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
37 			   ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
38 			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
39 }
40 
/**
 * i40e_update_tx_stats - Update the egress statistics for the Tx ring
 * @tx_ring: Tx ring to update
 * @total_packets: total packets sent
 * @total_bytes: total bytes sent
 **/
static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
					unsigned int total_packets,
					unsigned int total_bytes)
{
	/* seqcount-protected update so 64-bit readers see consistent values
	 * on 32-bit architectures
	 */
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	/* per-queue-vector totals feed interrupt moderation (not seqcount
	 * protected; updated only from the ring's NAPI context)
	 */
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;
}
58 
59 #define WB_STRIDE 4
60 
61 /**
62  * i40e_arm_wb - (Possibly) arms Tx write-back
63  * @tx_ring: Tx ring to update
64  * @vsi: the VSI
65  * @budget: the NAPI budget left
66  **/
67 static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
68 			       struct i40e_vsi *vsi,
69 			       int budget)
70 {
71 	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
72 		/* check to see if there are < 4 descriptors
73 		 * waiting to be written back, then kick the hardware to force
74 		 * them to be written back in case we stay in NAPI.
75 		 * In this mode on X722 we do not enable Interrupt.
76 		 */
77 		unsigned int j = i40e_get_tx_pending(tx_ring, false);
78 
79 		if (budget &&
80 		    ((j / WB_STRIDE) == 0) && j > 0 &&
81 		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
82 		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
83 			tx_ring->arm_wb = true;
84 	}
85 }
86 
87 void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
88 void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
89 bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
90 
91 #endif /* I40E_TXRX_COMMON_ */
92