1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright(c) 2018 Intel Corporation. */
3 
4 #ifndef _I40E_XSK_H_
5 #define _I40E_XSK_H_
6 
/* This value must match the unroll count used by the loop_unrolled_for
 * pragmas below. Why 4? It is strictly empirical. It seems to be a good
 * compromise between the advantage of having simultaneous outstanding
 * reads to the DMA array that can hide each other's latency and the
 * disadvantage of having a larger code path.
 */
#define PKTS_PER_BATCH 4

/* Two-step stringification: expand PKTS_PER_BATCH first, then turn the
 * result into the string literal that _Pragma() requires. This ties the
 * unroll count to PKTS_PER_BATCH so the two cannot silently diverge.
 */
#define __i40e_xsk_pragma_str_1(x) #x
#define __i40e_xsk_pragma_str(x) __i40e_xsk_pragma_str_1(x)

#ifdef __clang__
#define loop_unrolled_for \
	_Pragma(__i40e_xsk_pragma_str(clang loop unroll_count(PKTS_PER_BATCH))) for
#elif __GNUC__ >= 8
#define loop_unrolled_for \
	_Pragma(__i40e_xsk_pragma_str(GCC unroll PKTS_PER_BATCH)) for
#else
/* No unroll pragma available on this compiler; plain loop. */
#define loop_unrolled_for for
#endif
22 
/* Forward declarations: this header only uses pointers to these types,
 * so full definitions are not needed. i40e_ring and net_device are
 * referenced by the prototypes below but were previously pulled in only
 * implicitly via include order in the .c files — declare them here so
 * the header is self-contained.
 */
struct i40e_ring;
struct i40e_vsi;
struct net_device;
struct xsk_buff_pool;
/* NOTE(review): zero_copy_allocator is not referenced by any prototype
 * in this header; looks like a leftover from the old AF_XDP zero-copy
 * allocator API — confirm and drop if unused elsewhere.
 */
struct zero_copy_allocator;
26 
/* Disable/re-enable the Rx/Tx ring pair @queue_pair on @vsi, used around
 * XSK pool attach/detach. NOTE(review): 0/-errno return convention
 * inferred from the int return type — confirm against the definitions.
 */
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
/* Attach or detach an AF_XDP buffer pool on queue @qid.
 * NOTE(review): pool == NULL meaning "disable" is the usual ndo_bpf
 * convention — verify in the implementation.
 */
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid);
/* Refill up to @cleaned_count Rx descriptors from the XSK pool.
 * NOTE(review): bool return presumably means "all requested buffers
 * allocated" — confirm in i40e_xsk.c.
 */
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
/* Zero-copy Rx poll: process up to @budget descriptors on @rx_ring;
 * NOTE(review): return is presumably the number of packets cleaned
 * (standard NAPI shape) — confirm in i40e_xsk.c.
 */
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);

/* Complete transmitted descriptors on the zero-copy XDP Tx ring.
 * NOTE(review): bool return presumably reports "work done" so NAPI can
 * be rearmed — confirm in i40e_xsk.c.
 */
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
/* Wake the driver to process AF_XDP queue @queue_id on @dev.
 * NOTE(review): name matches the ndo_xsk_wakeup hook, where @flags is
 * XDP_WAKEUP_{RX,TX} — confirm where this is wired up in netdev ops.
 */
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
/* Allocate / clear the per-ring software buffer-info array used by the
 * zero-copy Rx path.
 */
int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
38 
39 #endif /* _I40E_XSK_H_ */
40