/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_TXRX_LIB_H_
#define _ICE_TXRX_LIB_H_
#include "ice.h"

/**
 * ice_set_rx_bufs_act - propagate Rx buffer action to frags
 * @xdp: XDP buffer representing frame (linear and frags part)
 * @rx_ring: Rx ring struct
 * @act: action to store onto Rx buffers related to XDP buffer parts
 *
 * Set the action that should be taken before putting back the Rx buffers,
 * from the first frag up to one before the last. The last frag is handled
 * by the caller of this function, as it is the EOP frag currently being
 * processed. This function is supposed to be called only when the XDP
 * buffer contains frags.
 */
static inline void
ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
		    const unsigned int act)
{
	const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	u32 first = rx_ring->first_desc;
	u32 nr_frags = sinfo->nr_frags;
	u32 cnt = rx_ring->count;
	struct ice_rx_buf *buf;

	for (int i = 0; i < nr_frags; i++) {
		buf = &rx_ring->rx_buf[first];
		buf->act = act;

		if (++first == cnt)
			first = 0;
	}
}
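
/*
 * Usage sketch (hypothetical caller, not lifted from ice_txrx.c): after an
 * XDP verdict on a multi-buffer frame, propagate the verdict to all
 * non-EOP frags before the EOP buffer itself is handled:
 *
 *	if (unlikely(xdp_buff_has_frags(xdp)))
 *		ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
 */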

/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @status_err_n: Rx descriptor status_error0 or status_error1 bits
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask, which is really only used for boolean tests.
 * The status_err_n value doesn't need to be shifted because these
 * bits begin at offset zero.
 */
static inline bool
ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
{
	return !!(status_err_n & cpu_to_le16(stat_err_bits));
}
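
/*
 * Usage sketch (assumed caller context, eop being a local bool): testing
 * the EOF status bit on a freshly written-back descriptor, much as
 * ice_is_non_eop() below does:
 *
 *	eop = ice_test_staterr(rx_desc->wb.status_error0,
 *			       BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S));
 */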

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * If the buffer is an EOP buffer, this function returns false; otherwise
 * it returns true, indicating that this is in fact a non-EOP buffer.
 */
static inline bool
ice_is_non_eop(const struct ice_rx_ring *rx_ring,
	       const union ice_32b_rx_flex_desc *rx_desc)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
		return false;

	rx_ring->ring_stats->rx_stats.non_eop_descs++;

	return true;
}
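
/*
 * Usage sketch (hypothetical Rx clean loop): keep consuming descriptors
 * until the EOP one arrives, then hand the assembled frame up the stack:
 *
 *	if (ice_is_non_eop(rx_ring, rx_desc))
 *		continue;
 *	(build the skb / run XDP on the completed frame here)
 */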

/**
 * ice_build_ctob - build Tx descriptor cmd_type_offset_bsz qword
 * @td_cmd: Tx descriptor command bits
 * @td_offset: Tx descriptor header offsets
 * @size: size of the buffer, in bytes
 * @td_tag: L2 tag to insert
 *
 * Returns the little-endian quadword for a DTYPE_DATA Tx descriptor,
 * with each field shifted into its qword position.
 */
static inline __le64
ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd    << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag    << ICE_TXD_QW1_L2TAG1_S));
}
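
/*
 * Usage sketch (assumed descriptor fill path): produce a single data
 * descriptor for a buffer of size bytes with EOP requested; td_offset and
 * td_tag are zero here for simplicity:
 *
 *	tx_desc->cmd_type_offset_bsz =
 *		ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0, size, 0);
 */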

/**
 * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor
 * @rx_desc: Rx 32b flex descriptor with RXDID=2
 *
 * The OS and current PF implementation only support stripping a single VLAN
 * tag at a time, so there should only ever be 0 or 1 tags in the l2tag*
 * fields. If one is found, return the tag; otherwise return 0, meaning no
 * VLAN tag was found.
 */
static inline u16
ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc)
{
	u16 stat_err_bits;

	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
	if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
		return le16_to_cpu(rx_desc->wb.l2tag1);

	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
	if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits))
		return le16_to_cpu(rx_desc->wb.l2tag2_2nd);

	return 0;
}
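
/*
 * Usage sketch (assumed Rx hotpath use): fetch the stripped tag and pass
 * it along with the skb, mirroring ice_receive_skb() declared below:
 *
 *	u16 vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
 *
 *	ice_receive_skb(rx_ring, skb, vlan_tag);
 */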

/**
 * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
 * @xdp_ring: XDP Tx ring
 *
 * Notify hardware of newly produced descriptors by writing next_to_use
 * to the XDP Tx ring tail register.
 */
static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
{
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();
	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}

/**
 * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 *
 * Returns the index of the descriptor on which the RS bit was set.
 */
static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
{
	u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct ice_tx_desc *tx_desc;

	tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);

	return rs_idx;
}
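
/*
 * Usage sketch (assumed end of an XDP Tx batch; first_ntu is a
 * hypothetical snapshot of next_to_use taken before the batch): request a
 * completion writeback on the last produced descriptor, then bump the
 * tail so HW only ever sees a finished batch:
 *
 *	if (xdp_ring->next_to_use != first_ntu) {
 *		ice_set_rs_bit(xdp_ring);
 *		ice_xdp_ring_update_tail(xdp_ring);
 *	}
 */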

void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
int ice_xmit_xdp_ring(struct xdp_frame *xdpf, struct ice_tx_ring *xdp_ring);
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u16 ptype);
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
#endif /* !_ICE_TXRX_LIB_H_ */