// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_txrx_lib.h"
#include "ice_eswitch.h"

/**
 * ice_release_rx_desc - Record the new next_to_use value and bump the tail
 * @rx_ring: ring to bump
 * @val: new next_to_use value for the ring
 */
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{
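	/* the tail register is only ever written with 8-aligned values, so
	 * remember the 8-aligned portion of the previous next_to_use to
	 * detect when the tail actually needs another write
	 */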
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns the appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be
 * used by skb_set_hash, based on the PTYPE parsed by the HW Rx pipeline and
 * reported in the Rx descriptor.
 */
static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
		return PKT_HASH_TYPE_L2;

	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u16 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

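	/* only the flexible NIC descriptor profile (ICE_RXDID_FLEX_NIC) is
	 * known to report the RSS hash in this writeback layout, so skip
	 * other descriptor IDs
	 */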
	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

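	/* the HW reported an IPv4 header checksum error on either the inner
	 * (XSUM_IPE) or the outer (XSUM_EIPE) header
	 */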
	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;

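	/* IPV6EXADD flags IPv6 packets carrying extension headers, whose
	 * checksums are not fully verified by HW; treat them as failures
	 */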
	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum,
	 * we need to bump the checksum level by 1 to reflect that we have
	 * validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u16 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev
				       (rx_ring, rx_desc));

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);

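	/* when Rx timestamping is enabled on this ring, retrieve the HW
	 * timestamp for the packet and store it in the skb
	 */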
	if (rx_ring->ptp_rx)
		ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (with/without VLAN tag)
 */
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
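	/* re-insert the stripped VLAN tag only when Rx VLAN offload is
	 * enabled on the netdev and the tag carries a non-zero VLAN ID
	 */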
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
 * @data: packet data pointer
 * @size: packet data size
 * @xdp_ring: XDP ring for transmission
 *
 * Returns ICE_XDP_TX on success, ICE_XDP_CONSUMED if the ring is full or the
 * buffer could not be DMA mapped.
 */
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

	if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

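	/* obtain a DMA address for the frame and bail out on mapping errors */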
	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;
	tx_buf->gso_segs = 1;
	tx_buf->raw_buf = data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, size);
	dma_unmap_addr_set(tx_buf, dma, dma);

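	/* fill the Tx descriptor for a single-buffer frame; the
	 * ICE_TXD_LAST_DESC_CMD flags mark it as the final descriptor of
	 * the packet
	 */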
	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
						      size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_buf->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return ICE_XDP_TX;
}

/**
 * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
 * @xdp: XDP buffer
 * @xdp_ring: XDP Tx ring
 *
 * Returns ICE_XDP_TX on success, ICE_XDP_CONSUMED if the buffer could not be
 * converted or transmitted.
 */
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return ICE_XDP_CONSUMED;

	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
}

/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @rx_ring: Rx ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */
void ice_finalize_xdp_rx(struct ice_rx_ring *rx_ring, unsigned int xdp_res)
{
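	/* if any frame in this batch was redirected, flush the redirect maps
	 * so the queued frames are actually sent out
	 */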
	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & ICE_XDP_TX) {
		struct ice_tx_ring *xdp_ring =
			rx_ring->vsi->xdp_rings[rx_ring->q_index];

		ice_xdp_ring_update_tail(xdp_ring);
	}
}