/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2018 Intel Corporation. */

#ifndef I40E_TXRX_COMMON_
#define I40E_TXRX_COMMON_

void i40e_fd_handle_status(struct i40e_ring *rx_ring,
			   union i40e_rx_desc *rx_desc, u8 prog_id);
int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
struct i40e_rx_buffer *i40e_clean_programming_status(
	struct i40e_ring *rx_ring,
	union i40e_rx_desc *rx_desc,
	u64 qw);
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
			     u8 rx_ptype);
void i40e_receive_skb(struct i40e_ring *rx_ring,
		      struct sk_buff *skb, u16 vlan_tag);
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
void i40e_update_rx_stats(struct i40e_ring *rx_ring,
			  unsigned int total_rx_bytes,
			  unsigned int total_rx_packets);
void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res);
void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);

#define I40E_XDP_PASS		0
#define I40E_XDP_CONSUMED	BIT(0)
#define I40E_XDP_TX		BIT(1)
#define I40E_XDP_REDIR		BIT(2)

/**
 * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
 * @td_cmd: Tx descriptor command bits
 * @td_offset: Tx descriptor header offsets
 * @size: size of the Tx buffer in bytes
 * @td_tag: L2 tag to insert
 **/
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

/**
 * i40e_update_tx_stats - Update the egress statistics for the Tx ring
 * @tx_ring: Tx ring to update
 * @total_packets: total packets sent
 * @total_bytes: total bytes sent
 **/
static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
					unsigned int total_packets,
					unsigned int total_bytes)
{
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;
}

#define WB_STRIDE 4

/**
 * i40e_arm_wb - (Possibly) arms Tx write-back
 * @tx_ring: Tx ring to update
 * @vsi: the VSI the ring belongs to
 * @budget: the NAPI budget left
 **/
static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
			       struct i40e_vsi *vsi,
			       int budget)
{
	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable interrupts.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && j > 0 &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}
}

void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);

#endif /* I40E_TXRX_COMMON_ */
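
/*
 * Illustrative sketch, not part of the driver: a standalone user-space
 * program showing the qword packing that build_ctob() performs. The EX_*
 * constants below are local stand-ins that mirror the shifts defined in
 * i40e_type.h; in any real use the driver's own definitions apply, and the
 * driver additionally converts the result to little-endian via cpu_to_le64().
 * All names prefixed EX_/ex_ are hypothetical and exist only for this example.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_TX_DESC_DTYPE_DATA		0x0ULL	/* "data" descriptor type */
#define EX_TXD_QW1_CMD_SHIFT		4	/* command bits */
#define EX_TXD_QW1_OFFSET_SHIFT		16	/* header offsets */
#define EX_TXD_QW1_TX_BUF_SZ_SHIFT	34	/* buffer size in bytes */
#define EX_TXD_QW1_L2TAG1_SHIFT		48	/* L2 (VLAN) tag */

/* Pack cmd, offset, size and tag into a single 64-bit descriptor word,
 * mirroring the field layout used by build_ctob() above.
 */
static uint64_t ex_build_ctob(uint32_t cmd, uint32_t offset,
			      unsigned int size, uint32_t tag)
{
	return EX_TX_DESC_DTYPE_DATA |
	       ((uint64_t)cmd << EX_TXD_QW1_CMD_SHIFT) |
	       ((uint64_t)offset << EX_TXD_QW1_OFFSET_SHIFT) |
	       ((uint64_t)size << EX_TXD_QW1_TX_BUF_SZ_SHIFT) |
	       ((uint64_t)tag << EX_TXD_QW1_L2TAG1_SHIFT);
}

int main(void)
{
	/* Example: a 1514-byte buffer, illustrative command bits (0x3),
	 * no header offsets and no L2 tag.
	 */
	uint64_t qw = ex_build_ctob(0x3, 0, 1514, 0);

	printf("descriptor qword: 0x%016llx\n", (unsigned long long)qw);
	return 0;
}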
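
/*
 * Illustrative sketch, not part of the driver: the arming test in
 * i40e_arm_wb() uses integer division, so it fires only when between 1 and
 * WB_STRIDE - 1 descriptors are still pending. The hypothetical helper
 * ex_should_arm_wb() below restates that predicate in a more direct form;
 * the assertions in main() spell out the boundary cases.
 */
#include <assert.h>
#include <stdbool.h>

#define EX_WB_STRIDE 4

static bool ex_should_arm_wb(unsigned int pending)
{
	/* equivalent to ((pending / EX_WB_STRIDE) == 0) && pending > 0 */
	return pending > 0 && pending < EX_WB_STRIDE;
}

int main(void)
{
	assert(!ex_should_arm_wb(0));	/* nothing pending: no kick */
	assert(ex_should_arm_wb(1));	/* 1..3 pending: force write-back */
	assert(ex_should_arm_wb(3));
	assert(!ex_should_arm_wb(4));	/* a full stride pending: no kick */
	return 0;
}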