/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_IPV6_GRO_H
#define _NET_IPV6_GRO_H

#include <linux/indirect_call_wrapper.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>

struct napi_gro_cb {
	union {
		struct {
			/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
			void	*frag0;

			/* Length of frag0. */
			unsigned int frag0_len;
		};

		struct {
			/* used in skb_gro_receive() slow path */
			struct sk_buff *last;

			/* jiffies when first packet was created/queued */
			unsigned long age;
		};
	};

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

/* Used in napi_gro_cb::free */
#define NAPI_GRO_FREE             1
#define NAPI_GRO_FREE_STOLEN_HEAD 2
	/* portion of the cb set to zero at every gro iteration */
	struct_group(zeroed,

		/* Start offset for remote checksum offload */
		u16	gro_remcsum_start;

		/* This is non-zero if the packet may be of the same flow. */
		u8	same_flow:1;

		/* Used in tunnel GRO receive */
		u8	encap_mark:1;

		/* GRO checksum is valid */
		u8	csum_valid:1;

		/* Number of checksums via CHECKSUM_UNNECESSARY */
		u8	csum_cnt:3;

		/* Free the skb? */
		u8	free:2;

		/* Used in foo-over-udp, set in udp[46]_gro_receive */
		u8	is_ipv6:1;

		/* Used in GRE, set in fou/gue_gro_receive */
		u8	is_fou:1;

		/* Used to determine if flush_id can be ignored */
		u8	is_atomic:1;

		/* Number of gro_receive callbacks this packet already went through */
		u8	recursion_counter:4;

		/* GRO is done by frag_list pointer chaining. */
		u8	is_flist:1;
	);

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* L3 offsets */
	union {
		struct {
			u16 network_offset;
			u16 inner_network_offset;
		};
		u16 network_offsets[2];
	};
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
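
/* NAPI_GRO_CB() overlays GRO's per-packet state on skb->cb. Illustrative
 * sketch only, not a prescribed usage: a receive handler that decides a
 * packet must not be merged sets the flush bit through the accessor, e.g.
 *
 *	NAPI_GRO_CB(skb)->flush |= 1;
 *
 * and a tunnel handler checks encap_mark to reject nested encapsulation.
 */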

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}
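
/* Illustrative sketch (handler names are hypothetical): an encapsulation
 * protocol recurses into the inner protocol via call_gro_receive() so that
 * GRO_RECURSION_LIMIT is enforced along the chain:
 *
 *	static struct sk_buff *outer_gro_receive(struct list_head *head,
 *						 struct sk_buff *skb)
 *	{
 *		...
 *		return call_gro_receive(inner_gro_receive, head, skb);
 *	}
 */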

typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}

static inline void *skb_gro_header(struct sk_buff *skb,
				   unsigned int hlen, unsigned int offset)
{
	void *ptr;

	ptr = skb_gro_header_fast(skb, offset);
	if (skb_gro_header_hard(skb, hlen))
		ptr = skb_gro_header_slow(skb, hlen, offset);
	return ptr;
}
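
/* Illustrative sketch: a handler maps its header through the GRO window.
 * skb_gro_header() reads from frag0 on the fast path and falls back to
 * pskb_may_pull() otherwise ("foohdr" is a hypothetical header type):
 *
 *	unsigned int off = skb_gro_offset(skb);
 *	unsigned int hlen = off + sizeof(struct foohdr);
 *	struct foohdr *fh = skb_gro_header(skb, hlen, off);
 *
 *	if (!fh)
 *		goto flush;
 */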

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
						wsum_negate(NAPI_GRO_CB(skb)->csum)));
}
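
/* Illustrative sketch: a tunnel handler that advances the GRO window past
 * its header keeps a CHECKSUM_COMPLETE value coherent by feeding the pulled
 * bytes back into the running checksum ("fh"/"foohdr" are hypothetical):
 *
 *	skb_gro_postpull_rcsum(skb, fh, sizeof(struct foohdr));
 *	skb_gro_pull(skb, sizeof(struct foohdr));
 */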

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		 skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * have verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fall back to the normal path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
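
/* Illustrative sketch, modelled on the UDP-over-IPv4 receive path: validate
 * against the pseudo-header and treat a zero checksum as "no checksum"
 * ("uh" is assumed to point at the mapped UDP header):
 *
 *	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
 *						 inet_gro_compute_pseudo))
 *		goto flush;
 */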

static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb,				\
					   compute_pseudo(skb, proto));	\
} while (0)
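
/* Illustrative sketch: after a checksum has been verified by other means, a
 * handler may seed CHECKSUM_COMPLETE state so inner layers can reuse it:
 *
 *	skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
 *				     inet_gro_compute_pseudo);
 */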

struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header(skb, off + plen, off);
	if (!ptr)
		return NULL;

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}
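
/* Illustrative sketch of the remote-checksum-offload flow (offsets and the
 * header pointer are hypothetical; see the vxlan/gue receive paths for real
 * users):
 *
 *	struct gro_remcsum grc;
 *
 *	skb_gro_remcsum_init(&grc);
 *	hdr = skb_gro_remcsum_process(skb, hdr, off, sizeof(*hdr),
 *				      start, offset, &grc, nopartial);
 *	if (!hdr)
 *		goto flush;
 *	...
 *	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 */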

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header(skb, plen, grc->offset);
	if (!ptr)
		return;

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
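
/* With CONFIG_XFRM_OFFLOAD, a gro_receive handler may hand the skb off for
 * asynchronous processing and return ERR_PTR(-EINPROGRESS); in that case the
 * GRO cb must be left untouched, hence the PTR_ERR() checks below.
 */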

#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif

INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));

#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_INET(cb, f2, f1, head, skb);	\
})
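
/* Illustrative sketch: dispatching through the macro lets the common inet
 * and inet6 receive callbacks be called directly instead of through a
 * retpoline-protected indirect branch:
 *
 *	pp = indirect_call_gro_receive_inet(cb, ipv6_gro_receive,
 *					    inet_gro_receive, head, skb);
 */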

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header(skb, hlen, off);

	return uh;
}

static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
					    skb_gro_len(skb), proto, 0));
}

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);

/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}

/* Queue one GRO_NORMAL SKB up for list processing. If the batch size is
 * exceeded, pass the whole batch up to the stack.
 */
static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
	list_add_tail(&skb->list, &napi->rx_list);
	napi->rx_count += segs;
	if (napi->rx_count >= READ_ONCE(gro_normal_batch))
		gro_normal_list(napi);
}
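
/* Illustrative sketch: when GRO hands a packet to the stack without
 * coalescing it, core code queues it for listified delivery rather than
 * calling the stack once per packet; a single segment is queued as:
 *
 *	gro_normal_one(napi, skb, 1);
 */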

/* This function is an alternative to 'inet_iif' and 'inet_sdif' for the
 * cases where the fields of IPCB cannot be relied upon.
 *
 * The caller must verify that skb_valid_dst(skb) is false and that skb->dev
 * is initialized, and must hold the RCU read lock.
 */
static inline void inet_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
{
	*iif = inet_iif(skb) ?: skb->dev->ifindex;
	*sdif = 0;

#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (netif_is_l3_slave(skb->dev)) {
		struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);

		*sdif = *iif;
		*iif = master ? master->ifindex : 0;
	}
#endif
}
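
/* Illustrative sketch (lookup arguments abridged and assumed): UDP GRO
 * resolves the (iif, sdif) pair before the socket lookup so VRF slave
 * devices map to their L3 master:
 *
 *	int iif, sdif;
 *
 *	inet_get_iif_sdif(skb, &iif, &sdif);
 *	sk = __udp4_lib_lookup(net, iph->saddr, uh->source,
 *			       iph->daddr, uh->dest, iif, sdif,
 *			       net->ipv4.udp_table, NULL);
 */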

/* This function is an alternative to 'inet6_iif' and 'inet6_sdif' for the
 * cases where the fields of IP6CB cannot be relied upon.
 *
 * The caller must verify that skb_valid_dst(skb) is false and that skb->dev
 * is initialized, and must hold the RCU read lock.
 */
static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
{
	/* using skb->dev->ifindex because skb_dst(skb) is not initialized */
	*iif = skb->dev->ifindex;
	*sdif = 0;

#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (netif_is_l3_slave(skb->dev)) {
		struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);

		*sdif = *iif;
		*iif = master ? master->ifindex : 0;
	}
#endif
}

extern struct list_head offload_base;

#endif /* _NET_IPV6_GRO_H */