/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_IPV6_GRO_H
#define _NET_IPV6_GRO_H

#include <linux/indirect_call_wrapper.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Start offset for remote checksum offload */
	u16	gro_remcsum_start;

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow:1;

	/* Used in tunnel GRO receive */
	u8	encap_mark:1;

	/* GRO checksum is valid */
	u8	csum_valid:1;

	/* Number of checksums via CHECKSUM_UNNECESSARY */
	u8	csum_cnt:3;

	/* Free the skb? */
	u8	free:2;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* Used in foo-over-udp, set in udp[46]_gro_receive */
	u8	is_ipv6:1;

	/* Used in GRE, set in fou/gue_gro_receive */
	u8	is_fou:1;

	/* Used to determine if flush_id can be ignored */
	u8	is_atomic:1;

	/* Number of gro_receive callbacks this packet already went through */
	u8	recursion_counter:4;

	/* GRO is done by frag_list pointer chaining. */
	u8	is_flist:1;

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
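
/*
 * Usage sketch (illustrative, not an API defined in this file): GRO
 * callbacks keep their per-packet state in skb->cb and reach it through
 * this macro. For example, a handler gives up on merging with
 *
 *	NAPI_GRO_CB(skb)->flush |= 1;
 *
 * and detects nested tunnels by testing NAPI_GRO_CB(skb)->encap_mark.
 */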

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}
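
/*
 * Sketch of the intended call pattern (hypothetical "foo" tunnel handler;
 * "inner_gro_receive" is an assumed inner-protocol callback, not declared
 * in this file). Chaining through call_gro_receive() instead of invoking
 * the inner handler directly bounds nesting to GRO_RECURSION_LIMIT and
 * flushes the packet once the limit is hit:
 *
 *	static struct sk_buff *foo_gro_receive(struct list_head *head,
 *					       struct sk_buff *skb)
 *	{
 *		...
 *		return call_gro_receive(inner_gro_receive, head, skb);
 *	}
 */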

typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}
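
/*
 * The helpers above are normally used as a fast/slow pair: try the frag0
 * shortcut first, and only fall back to pskb_may_pull() (which linearizes
 * the headers and invalidates frag0) when the wanted header is not fully
 * contained in frag0. A minimal sketch of the idiom, the same one
 * udp_gro_udphdr() below uses:
 *
 *	off  = skb_gro_offset(skb);
 *	hlen = off + sizeof(*hdr);
 *	hdr  = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		hdr = skb_gro_header_slow(skb, hlen, off);
 *		if (!hdr)
 *			return NULL;
 *	}
 */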

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
						wsum_negate(NAPI_GRO_CB(skb)->csum)));
}

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fall back to the normal
		 * path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
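
/*
 * Typical call site (sketch): a transport gro_receive handler validates
 * the checksum once per encapsulation level before aggregating, as
 * tcp4_gro_receive() does:
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 *
 * UDP, where a zero checksum can be legitimate, uses the _zero_check
 * variant with uh->check; protocols with no pseudo header use
 * skb_gro_checksum_simple_validate().
 */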

static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb,				\
					   compute_pseudo(skb, proto));	\
} while (0)
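
/*
 * Sketch: after a checksum has been validated, converting it to
 * CHECKSUM_COMPLETE lets inner checksums be verified without walking the
 * packet again; the UDP GRO path does e.g.
 *
 *	skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
 *				     inet_gro_compute_pseudo);
 */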

struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}
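
/*
 * Remote checksum offload flow, sketched after the VXLAN/GUE usage: set
 * up a gro_remcsum on the stack, let skb_gro_remcsum_process() patch the
 * inner checksum while aggregating, and rely on the cleanup helpers below
 * to undo the adjustment if GRO does not consume the packet:
 *
 *	struct gro_remcsum grc;
 *
 *	skb_gro_remcsum_init(&grc);
 *	ptr = skb_gro_remcsum_process(skb, ptr, off, hdrlen,
 *				      start, offset, &grc, nopartial);
 *	if (!ptr)
 *		goto flush;
 *	...
 *	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 */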

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header_fast(skb, grc->offset);
	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
		ptr = skb_gro_header_slow(skb, plen, grc->offset);
		if (!ptr)
			return;
	}

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}

#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif
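
/*
 * Sketch: tunnel gro_receive handlers funnel their exit through
 * skb_gro_flush_final() so that, when XFRM offload has handed the packet
 * to the asynchronous crypto path (pp == ERR_PTR(-EINPROGRESS)), the
 * flush state is left untouched:
 *
 *	pp = call_gro_receive(inner_gro_receive, head, skb);
 *	...
 *	skb_gro_flush_final(skb, pp, flush);
 *	return pp;
 */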

INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));

#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_INET(cb, f2, f1, head, skb);	\
})
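
/*
 * Sketch: with retpolines, INDIRECT_CALL_INET() turns the indirect call
 * into direct calls for the two expected targets, so a caller that would
 * otherwise use call_gro_receive() can write e.g.
 *
 *	pp = indirect_call_gro_receive_inet(ops->callbacks.gro_receive,
 *					    ipv6_gro_receive,
 *					    inet_gro_receive, head, skb);
 */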

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}
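
/*
 * Note (sketch): udp_gro_udphdr() returns NULL when the UDP header cannot
 * be made available, and callers are expected to flush in that case:
 *
 *	uh = udp_gro_udphdr(skb);
 *	if (unlikely(!uh))
 *		goto flush;
 */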

static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
					    skb_gro_len(skb), proto, 0));
}

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);

/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}

/* Queue one GRO_NORMAL SKB up for list processing. If the batch size is
 * exceeded, pass the whole batch up to the stack.
 */
static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
	list_add_tail(&skb->list, &napi->rx_list);
	napi->rx_count += segs;
	if (napi->rx_count >= gro_normal_batch)
		gro_normal_list(napi);
}
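
/*
 * Sketch of the accounting: callers pass segs == 1 for a plain skb and
 * the aggregated segment count for a GRO super-packet, so the
 * gro_normal_batch threshold is measured in wire segments rather than in
 * super-packets. Any leftover batch is flushed by gro_normal_list() when
 * the NAPI poll completes (napi_complete_done()).
 */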

#endif /* _NET_IPV6_GRO_H */