xref: /openbmc/linux/include/net/gro.h (revision e0d77d0f38aa60ca61b3ce6e60d64fad2aa0853d)
104f00ab2SLeon Romanovsky /* SPDX-License-Identifier: GPL-2.0-or-later */
204f00ab2SLeon Romanovsky 
304f00ab2SLeon Romanovsky #ifndef _NET_IPV6_GRO_H
404f00ab2SLeon Romanovsky #define _NET_IPV6_GRO_H
504f00ab2SLeon Romanovsky 
6e75ec151SAlexander Lobakin #include <linux/indirect_call_wrapper.h>
74721031cSEric Dumazet #include <linux/ip.h>
84721031cSEric Dumazet #include <linux/ipv6.h>
975082e7fSEric Dumazet #include <net/ip6_checksum.h>
104721031cSEric Dumazet #include <linux/skbuff.h>
114721031cSEric Dumazet #include <net/udp.h>
12e75ec151SAlexander Lobakin 
134721031cSEric Dumazet struct napi_gro_cb {
147b355b76SRichard Gobert 	union {
157b355b76SRichard Gobert 		struct {
164721031cSEric Dumazet 			/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
174721031cSEric Dumazet 			void	*frag0;
184721031cSEric Dumazet 
194721031cSEric Dumazet 			/* Length of frag0. */
204721031cSEric Dumazet 			unsigned int frag0_len;
217b355b76SRichard Gobert 		};
227b355b76SRichard Gobert 
237b355b76SRichard Gobert 		struct {
247b355b76SRichard Gobert 			/* used in skb_gro_receive() slow path */
257b355b76SRichard Gobert 			struct sk_buff *last;
267b355b76SRichard Gobert 
277b355b76SRichard Gobert 			/* jiffies when first packet was created/queued */
287b355b76SRichard Gobert 			unsigned long age;
297b355b76SRichard Gobert 		};
307b355b76SRichard Gobert 	};
314721031cSEric Dumazet 
324721031cSEric Dumazet 	/* This indicates where we are processing relative to skb->data. */
334721031cSEric Dumazet 	int	data_offset;
344721031cSEric Dumazet 
354721031cSEric Dumazet 	/* This is non-zero if the packet cannot be merged with the new skb. */
364721031cSEric Dumazet 	u16	flush;
374721031cSEric Dumazet 
384721031cSEric Dumazet 	/* Save the IP ID here and check when we get to the transport layer */
394721031cSEric Dumazet 	u16	flush_id;
404721031cSEric Dumazet 
414721031cSEric Dumazet 	/* Number of segments aggregated. */
424721031cSEric Dumazet 	u16	count;
434721031cSEric Dumazet 
44de5a1f3cSPaolo Abeni 	/* Used in ipv6_gro_receive() and foo-over-udp */
45de5a1f3cSPaolo Abeni 	u16	proto;
464721031cSEric Dumazet 
478467fadcSGal Pressman /* Used in napi_gro_cb::free */
488467fadcSGal Pressman #define NAPI_GRO_FREE             1
498467fadcSGal Pressman #define NAPI_GRO_FREE_STOLEN_HEAD 2
50de5a1f3cSPaolo Abeni 	/* portion of the cb set to zero at every gro iteration */
51de5a1f3cSPaolo Abeni 	struct_group(zeroed,
52de5a1f3cSPaolo Abeni 
53de5a1f3cSPaolo Abeni 		/* Start offset for remote checksum offload */
54de5a1f3cSPaolo Abeni 		u16	gro_remcsum_start;
554721031cSEric Dumazet 
564721031cSEric Dumazet 		/* This is non-zero if the packet may be of the same flow. */
574721031cSEric Dumazet 		u8	same_flow:1;
584721031cSEric Dumazet 
594721031cSEric Dumazet 		/* Used in tunnel GRO receive */
604721031cSEric Dumazet 		u8	encap_mark:1;
614721031cSEric Dumazet 
624721031cSEric Dumazet 		/* GRO checksum is valid */
634721031cSEric Dumazet 		u8	csum_valid:1;
644721031cSEric Dumazet 
654721031cSEric Dumazet 		/* Number of checksums via CHECKSUM_UNNECESSARY */
664721031cSEric Dumazet 		u8	csum_cnt:3;
674721031cSEric Dumazet 
684721031cSEric Dumazet 		/* Free the skb? */
694721031cSEric Dumazet 		u8	free:2;
704721031cSEric Dumazet 
714721031cSEric Dumazet 		/* Used in foo-over-udp, set in udp[46]_gro_receive */
724721031cSEric Dumazet 		u8	is_ipv6:1;
734721031cSEric Dumazet 
744721031cSEric Dumazet 		/* Used in GRE, set in fou/gue_gro_receive */
754721031cSEric Dumazet 		u8	is_fou:1;
764721031cSEric Dumazet 
774721031cSEric Dumazet 		/* Used to determine if flush_id can be ignored */
784721031cSEric Dumazet 		u8	is_atomic:1;
794721031cSEric Dumazet 
804721031cSEric Dumazet 		/* Number of gro_receive callbacks this packet already went through */
814721031cSEric Dumazet 		u8 recursion_counter:4;
824721031cSEric Dumazet 
834721031cSEric Dumazet 		/* GRO is done by frag_list pointer chaining. */
844721031cSEric Dumazet 		u8	is_flist:1;
85de5a1f3cSPaolo Abeni 	);
864721031cSEric Dumazet 
874721031cSEric Dumazet 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
884721031cSEric Dumazet 	__wsum	csum;
89*af276a5aSRichard Gobert 
90*af276a5aSRichard Gobert 	/* L3 offsets */
91*af276a5aSRichard Gobert 	union {
92*af276a5aSRichard Gobert 		struct {
93*af276a5aSRichard Gobert 			u16 network_offset;
94*af276a5aSRichard Gobert 			u16 inner_network_offset;
95*af276a5aSRichard Gobert 		};
96*af276a5aSRichard Gobert 		u16 network_offsets[2];
97*af276a5aSRichard Gobert 	};
984721031cSEric Dumazet };
994721031cSEric Dumazet 
1004721031cSEric Dumazet #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1014721031cSEric Dumazet 
1024721031cSEric Dumazet #define GRO_RECURSION_LIMIT 15
gro_recursion_inc_test(struct sk_buff * skb)1034721031cSEric Dumazet static inline int gro_recursion_inc_test(struct sk_buff *skb)
1044721031cSEric Dumazet {
1054721031cSEric Dumazet 	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
1064721031cSEric Dumazet }
1074721031cSEric Dumazet 
1084721031cSEric Dumazet typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
1094721031cSEric Dumazet static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
1104721031cSEric Dumazet 					       struct list_head *head,
1114721031cSEric Dumazet 					       struct sk_buff *skb)
1124721031cSEric Dumazet {
1134721031cSEric Dumazet 	if (unlikely(gro_recursion_inc_test(skb))) {
1144721031cSEric Dumazet 		NAPI_GRO_CB(skb)->flush |= 1;
1154721031cSEric Dumazet 		return NULL;
1164721031cSEric Dumazet 	}
1174721031cSEric Dumazet 
1184721031cSEric Dumazet 	return cb(head, skb);
1194721031cSEric Dumazet }
1204721031cSEric Dumazet 
1214721031cSEric Dumazet typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
1224721031cSEric Dumazet 					    struct sk_buff *);
1234721031cSEric Dumazet static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
1244721031cSEric Dumazet 						  struct sock *sk,
1254721031cSEric Dumazet 						  struct list_head *head,
1264721031cSEric Dumazet 						  struct sk_buff *skb)
1274721031cSEric Dumazet {
1284721031cSEric Dumazet 	if (unlikely(gro_recursion_inc_test(skb))) {
1294721031cSEric Dumazet 		NAPI_GRO_CB(skb)->flush |= 1;
1304721031cSEric Dumazet 		return NULL;
1314721031cSEric Dumazet 	}
1324721031cSEric Dumazet 
1334721031cSEric Dumazet 	return cb(sk, head, skb);
1344721031cSEric Dumazet }
1354721031cSEric Dumazet 
1364721031cSEric Dumazet static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1374721031cSEric Dumazet {
1384721031cSEric Dumazet 	return NAPI_GRO_CB(skb)->data_offset;
1394721031cSEric Dumazet }
1404721031cSEric Dumazet 
skb_gro_len(const struct sk_buff * skb)1414721031cSEric Dumazet static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1424721031cSEric Dumazet {
1434721031cSEric Dumazet 	return skb->len - NAPI_GRO_CB(skb)->data_offset;
1444721031cSEric Dumazet }
1454721031cSEric Dumazet 
skb_gro_pull(struct sk_buff * skb,unsigned int len)1464721031cSEric Dumazet static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1474721031cSEric Dumazet {
1484721031cSEric Dumazet 	NAPI_GRO_CB(skb)->data_offset += len;
1494721031cSEric Dumazet }
1504721031cSEric Dumazet 
skb_gro_header_fast(struct sk_buff * skb,unsigned int offset)1514721031cSEric Dumazet static inline void *skb_gro_header_fast(struct sk_buff *skb,
1524721031cSEric Dumazet 					unsigned int offset)
1534721031cSEric Dumazet {
1544721031cSEric Dumazet 	return NAPI_GRO_CB(skb)->frag0 + offset;
1554721031cSEric Dumazet }
1564721031cSEric Dumazet 
skb_gro_header_hard(struct sk_buff * skb,unsigned int hlen)1574721031cSEric Dumazet static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
1584721031cSEric Dumazet {
1594721031cSEric Dumazet 	return NAPI_GRO_CB(skb)->frag0_len < hlen;
1604721031cSEric Dumazet }
1614721031cSEric Dumazet 
skb_gro_frag0_invalidate(struct sk_buff * skb)1624721031cSEric Dumazet static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
1634721031cSEric Dumazet {
1644721031cSEric Dumazet 	NAPI_GRO_CB(skb)->frag0 = NULL;
1654721031cSEric Dumazet 	NAPI_GRO_CB(skb)->frag0_len = 0;
1664721031cSEric Dumazet }
1674721031cSEric Dumazet 
skb_gro_header_slow(struct sk_buff * skb,unsigned int hlen,unsigned int offset)1684721031cSEric Dumazet static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
1694721031cSEric Dumazet 					unsigned int offset)
1704721031cSEric Dumazet {
1714721031cSEric Dumazet 	if (!pskb_may_pull(skb, hlen))
1724721031cSEric Dumazet 		return NULL;
1734721031cSEric Dumazet 
1744721031cSEric Dumazet 	skb_gro_frag0_invalidate(skb);
1754721031cSEric Dumazet 	return skb->data + offset;
1764721031cSEric Dumazet }
1774721031cSEric Dumazet 
skb_gro_header(struct sk_buff * skb,unsigned int hlen,unsigned int offset)17835ffb665SRichard Gobert static inline void *skb_gro_header(struct sk_buff *skb,
17935ffb665SRichard Gobert 					unsigned int hlen, unsigned int offset)
18035ffb665SRichard Gobert {
18135ffb665SRichard Gobert 	void *ptr;
18235ffb665SRichard Gobert 
18335ffb665SRichard Gobert 	ptr = skb_gro_header_fast(skb, offset);
18435ffb665SRichard Gobert 	if (skb_gro_header_hard(skb, hlen))
18535ffb665SRichard Gobert 		ptr = skb_gro_header_slow(skb, hlen, offset);
18635ffb665SRichard Gobert 	return ptr;
18735ffb665SRichard Gobert }
18835ffb665SRichard Gobert 
skb_gro_network_header(struct sk_buff * skb)1894721031cSEric Dumazet static inline void *skb_gro_network_header(struct sk_buff *skb)
1904721031cSEric Dumazet {
1914721031cSEric Dumazet 	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
1924721031cSEric Dumazet 	       skb_network_offset(skb);
1934721031cSEric Dumazet }
1944721031cSEric Dumazet 
inet_gro_compute_pseudo(struct sk_buff * skb,int proto)1954721031cSEric Dumazet static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
1964721031cSEric Dumazet {
1974721031cSEric Dumazet 	const struct iphdr *iph = skb_gro_network_header(skb);
1984721031cSEric Dumazet 
1994721031cSEric Dumazet 	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
2004721031cSEric Dumazet 				  skb_gro_len(skb), proto, 0);
2014721031cSEric Dumazet }
2024721031cSEric Dumazet 
skb_gro_postpull_rcsum(struct sk_buff * skb,const void * start,unsigned int len)2034721031cSEric Dumazet static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2044721031cSEric Dumazet 					const void *start, unsigned int len)
2054721031cSEric Dumazet {
2064721031cSEric Dumazet 	if (NAPI_GRO_CB(skb)->csum_valid)
20745cac675SEric Dumazet 		NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
20845cac675SEric Dumazet 						wsum_negate(NAPI_GRO_CB(skb)->csum)));
2094721031cSEric Dumazet }
2104721031cSEric Dumazet 
2114721031cSEric Dumazet /* GRO checksum functions. These are logical equivalents of the normal
2124721031cSEric Dumazet  * checksum functions (in skbuff.h) except that they operate on the GRO
2134721031cSEric Dumazet  * offsets and fields in sk_buff.
2144721031cSEric Dumazet  */
2154721031cSEric Dumazet 
2164721031cSEric Dumazet __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2174721031cSEric Dumazet 
skb_at_gro_remcsum_start(struct sk_buff * skb)2184721031cSEric Dumazet static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2194721031cSEric Dumazet {
2204721031cSEric Dumazet 	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
2214721031cSEric Dumazet }
2224721031cSEric Dumazet 
__skb_gro_checksum_validate_needed(struct sk_buff * skb,bool zero_okay,__sum16 check)2234721031cSEric Dumazet static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2244721031cSEric Dumazet 						      bool zero_okay,
2254721031cSEric Dumazet 						      __sum16 check)
2264721031cSEric Dumazet {
2274721031cSEric Dumazet 	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2284721031cSEric Dumazet 		skb_checksum_start_offset(skb) <
2294721031cSEric Dumazet 		 skb_gro_offset(skb)) &&
2304721031cSEric Dumazet 		!skb_at_gro_remcsum_start(skb) &&
2314721031cSEric Dumazet 		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2324721031cSEric Dumazet 		(!zero_okay || check));
2334721031cSEric Dumazet }
2344721031cSEric Dumazet 
__skb_gro_checksum_validate_complete(struct sk_buff * skb,__wsum psum)2354721031cSEric Dumazet static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2364721031cSEric Dumazet 							   __wsum psum)
2374721031cSEric Dumazet {
2384721031cSEric Dumazet 	if (NAPI_GRO_CB(skb)->csum_valid &&
2394721031cSEric Dumazet 	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2404721031cSEric Dumazet 		return 0;
2414721031cSEric Dumazet 
2424721031cSEric Dumazet 	NAPI_GRO_CB(skb)->csum = psum;
2434721031cSEric Dumazet 
2444721031cSEric Dumazet 	return __skb_gro_checksum_complete(skb);
2454721031cSEric Dumazet }
2464721031cSEric Dumazet 
skb_gro_incr_csum_unnecessary(struct sk_buff * skb)2474721031cSEric Dumazet static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2484721031cSEric Dumazet {
2494721031cSEric Dumazet 	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2504721031cSEric Dumazet 		/* Consume a checksum from CHECKSUM_UNNECESSARY */
2514721031cSEric Dumazet 		NAPI_GRO_CB(skb)->csum_cnt--;
2524721031cSEric Dumazet 	} else {
2534721031cSEric Dumazet 		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2544721031cSEric Dumazet 		 * verified a new top level checksum or an encapsulated one
2554721031cSEric Dumazet 		 * during GRO. This saves work if we fallback to normal path.
2564721031cSEric Dumazet 		 */
2574721031cSEric Dumazet 		__skb_incr_checksum_unnecessary(skb);
2584721031cSEric Dumazet 	}
2594721031cSEric Dumazet }
2604721031cSEric Dumazet 
/* Validate a transport checksum during GRO.  Evaluates to 0 on success (and
 * then consumes/records a checksum level), nonzero on failure.
 */
#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

/* Standard validation: a zero checksum field is not acceptable. */
#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

/* Like the above, but a zero @check skips validation (e.g. UDP over IPv4). */
#define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

/* Validation without any pseudo-header contribution. */
#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
2824721031cSEric Dumazet 
__skb_gro_checksum_convert_check(struct sk_buff * skb)2834721031cSEric Dumazet static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2844721031cSEric Dumazet {
2854721031cSEric Dumazet 	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2864721031cSEric Dumazet 		!NAPI_GRO_CB(skb)->csum_valid);
2874721031cSEric Dumazet }
2884721031cSEric Dumazet 
__skb_gro_checksum_convert(struct sk_buff * skb,__wsum pseudo)2894721031cSEric Dumazet static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2904721031cSEric Dumazet 					      __wsum pseudo)
2914721031cSEric Dumazet {
2924721031cSEric Dumazet 	NAPI_GRO_CB(skb)->csum = ~pseudo;
2934721031cSEric Dumazet 	NAPI_GRO_CB(skb)->csum_valid = 1;
2944721031cSEric Dumazet }
2954721031cSEric Dumazet 
2964721031cSEric Dumazet #define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
2974721031cSEric Dumazet do {									\
2984721031cSEric Dumazet 	if (__skb_gro_checksum_convert_check(skb))			\
2994721031cSEric Dumazet 		__skb_gro_checksum_convert(skb, 			\
3004721031cSEric Dumazet 					   compute_pseudo(skb, proto));	\
3014721031cSEric Dumazet } while (0)
3024721031cSEric Dumazet 
3034721031cSEric Dumazet struct gro_remcsum {
3044721031cSEric Dumazet 	int offset;
3054721031cSEric Dumazet 	__wsum delta;
3064721031cSEric Dumazet };
3074721031cSEric Dumazet 
skb_gro_remcsum_init(struct gro_remcsum * grc)3084721031cSEric Dumazet static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
3094721031cSEric Dumazet {
3104721031cSEric Dumazet 	grc->offset = 0;
3114721031cSEric Dumazet 	grc->delta = 0;
3124721031cSEric Dumazet }
3134721031cSEric Dumazet 
skb_gro_remcsum_process(struct sk_buff * skb,void * ptr,unsigned int off,size_t hdrlen,int start,int offset,struct gro_remcsum * grc,bool nopartial)3144721031cSEric Dumazet static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
3154721031cSEric Dumazet 					    unsigned int off, size_t hdrlen,
3164721031cSEric Dumazet 					    int start, int offset,
3174721031cSEric Dumazet 					    struct gro_remcsum *grc,
3184721031cSEric Dumazet 					    bool nopartial)
3194721031cSEric Dumazet {
3204721031cSEric Dumazet 	__wsum delta;
3214721031cSEric Dumazet 	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
3224721031cSEric Dumazet 
3234721031cSEric Dumazet 	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
3244721031cSEric Dumazet 
3254721031cSEric Dumazet 	if (!nopartial) {
3264721031cSEric Dumazet 		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
3274721031cSEric Dumazet 		return ptr;
3284721031cSEric Dumazet 	}
3294721031cSEric Dumazet 
33035ffb665SRichard Gobert 	ptr = skb_gro_header(skb, off + plen, off);
3314721031cSEric Dumazet 	if (!ptr)
3324721031cSEric Dumazet 		return NULL;
3334721031cSEric Dumazet 
3344721031cSEric Dumazet 	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
3354721031cSEric Dumazet 			       start, offset);
3364721031cSEric Dumazet 
3374721031cSEric Dumazet 	/* Adjust skb->csum since we changed the packet */
3384721031cSEric Dumazet 	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
3394721031cSEric Dumazet 
3404721031cSEric Dumazet 	grc->offset = off + hdrlen + offset;
3414721031cSEric Dumazet 	grc->delta = delta;
3424721031cSEric Dumazet 
3434721031cSEric Dumazet 	return ptr;
3444721031cSEric Dumazet }
3454721031cSEric Dumazet 
skb_gro_remcsum_cleanup(struct sk_buff * skb,struct gro_remcsum * grc)3464721031cSEric Dumazet static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
3474721031cSEric Dumazet 					   struct gro_remcsum *grc)
3484721031cSEric Dumazet {
3494721031cSEric Dumazet 	void *ptr;
3504721031cSEric Dumazet 	size_t plen = grc->offset + sizeof(u16);
3514721031cSEric Dumazet 
3524721031cSEric Dumazet 	if (!grc->delta)
3534721031cSEric Dumazet 		return;
3544721031cSEric Dumazet 
35535ffb665SRichard Gobert 	ptr = skb_gro_header(skb, plen, grc->offset);
3564721031cSEric Dumazet 	if (!ptr)
3574721031cSEric Dumazet 		return;
3584721031cSEric Dumazet 
3594721031cSEric Dumazet 	remcsum_unadjust((__sum16 *)ptr, grc->delta);
3604721031cSEric Dumazet }
3614721031cSEric Dumazet 
3624721031cSEric Dumazet #ifdef CONFIG_XFRM_OFFLOAD
skb_gro_flush_final(struct sk_buff * skb,struct sk_buff * pp,int flush)3634721031cSEric Dumazet static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
3644721031cSEric Dumazet {
3654721031cSEric Dumazet 	if (PTR_ERR(pp) != -EINPROGRESS)
3664721031cSEric Dumazet 		NAPI_GRO_CB(skb)->flush |= flush;
3674721031cSEric Dumazet }
skb_gro_flush_final_remcsum(struct sk_buff * skb,struct sk_buff * pp,int flush,struct gro_remcsum * grc)3684721031cSEric Dumazet static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
3694721031cSEric Dumazet 					       struct sk_buff *pp,
3704721031cSEric Dumazet 					       int flush,
3714721031cSEric Dumazet 					       struct gro_remcsum *grc)
3724721031cSEric Dumazet {
3734721031cSEric Dumazet 	if (PTR_ERR(pp) != -EINPROGRESS) {
3744721031cSEric Dumazet 		NAPI_GRO_CB(skb)->flush |= flush;
3754721031cSEric Dumazet 		skb_gro_remcsum_cleanup(skb, grc);
3764721031cSEric Dumazet 		skb->remcsum_offload = 0;
3774721031cSEric Dumazet 	}
3784721031cSEric Dumazet }
3794721031cSEric Dumazet #else
skb_gro_flush_final(struct sk_buff * skb,struct sk_buff * pp,int flush)3804721031cSEric Dumazet static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
3814721031cSEric Dumazet {
3824721031cSEric Dumazet 	NAPI_GRO_CB(skb)->flush |= flush;
3834721031cSEric Dumazet }
skb_gro_flush_final_remcsum(struct sk_buff * skb,struct sk_buff * pp,int flush,struct gro_remcsum * grc)3844721031cSEric Dumazet static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
3854721031cSEric Dumazet 					       struct sk_buff *pp,
3864721031cSEric Dumazet 					       int flush,
3874721031cSEric Dumazet 					       struct gro_remcsum *grc)
3884721031cSEric Dumazet {
3894721031cSEric Dumazet 	NAPI_GRO_CB(skb)->flush |= flush;
3904721031cSEric Dumazet 	skb_gro_remcsum_cleanup(skb, grc);
3914721031cSEric Dumazet 	skb->remcsum_offload = 0;
3924721031cSEric Dumazet }
3934721031cSEric Dumazet #endif
394e75ec151SAlexander Lobakin 
39504f00ab2SLeon Romanovsky INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
39604f00ab2SLeon Romanovsky 							   struct sk_buff *));
39704f00ab2SLeon Romanovsky INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
39804f00ab2SLeon Romanovsky INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
39904f00ab2SLeon Romanovsky 							   struct sk_buff *));
40004f00ab2SLeon Romanovsky INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
40186af2c82SAlexander Lobakin 
4024721031cSEric Dumazet INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
4034721031cSEric Dumazet 							   struct sk_buff *));
4044721031cSEric Dumazet INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
4054721031cSEric Dumazet 
4064721031cSEric Dumazet INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
4074721031cSEric Dumazet 							   struct sk_buff *));
4084721031cSEric Dumazet INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
4094721031cSEric Dumazet 
/* Recursion-guarded indirect call to an inet/inet6 gro_receive handler,
 * using INDIRECT_CALL_INET to avoid a retpoline when possible.  Evaluates
 * to NULL (and marks the skb for flush) once the recursion limit is hit.
 */
#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_INET(cb, f2, f1, head, skb);	\
})
41686af2c82SAlexander Lobakin 
4174721031cSEric Dumazet struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
4184721031cSEric Dumazet 				struct udphdr *uh, struct sock *sk);
4194721031cSEric Dumazet int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
4204721031cSEric Dumazet 
udp_gro_udphdr(struct sk_buff * skb)4214721031cSEric Dumazet static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
4224721031cSEric Dumazet {
4234721031cSEric Dumazet 	struct udphdr *uh;
4244721031cSEric Dumazet 	unsigned int hlen, off;
4254721031cSEric Dumazet 
4264721031cSEric Dumazet 	off  = skb_gro_offset(skb);
4274721031cSEric Dumazet 	hlen = off + sizeof(*uh);
42835ffb665SRichard Gobert 	uh   = skb_gro_header(skb, hlen, off);
4294721031cSEric Dumazet 
4304721031cSEric Dumazet 	return uh;
4314721031cSEric Dumazet }
4324721031cSEric Dumazet 
ip6_gro_compute_pseudo(struct sk_buff * skb,int proto)4334721031cSEric Dumazet static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
4344721031cSEric Dumazet {
4354721031cSEric Dumazet 	const struct ipv6hdr *iph = skb_gro_network_header(skb);
4364721031cSEric Dumazet 
4374721031cSEric Dumazet 	return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
4384721031cSEric Dumazet 					    skb_gro_len(skb), proto, 0));
4394721031cSEric Dumazet }
4404721031cSEric Dumazet 
441e456a18aSEric Dumazet int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
442e456a18aSEric Dumazet 
443587652bbSEric Dumazet /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
gro_normal_list(struct napi_struct * napi)444587652bbSEric Dumazet static inline void gro_normal_list(struct napi_struct *napi)
445587652bbSEric Dumazet {
446587652bbSEric Dumazet 	if (!napi->rx_count)
447587652bbSEric Dumazet 		return;
448587652bbSEric Dumazet 	netif_receive_skb_list_internal(&napi->rx_list);
449587652bbSEric Dumazet 	INIT_LIST_HEAD(&napi->rx_list);
450587652bbSEric Dumazet 	napi->rx_count = 0;
451587652bbSEric Dumazet }
452587652bbSEric Dumazet 
453587652bbSEric Dumazet /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
454587652bbSEric Dumazet  * pass the whole batch up to the stack.
455587652bbSEric Dumazet  */
gro_normal_one(struct napi_struct * napi,struct sk_buff * skb,int segs)456587652bbSEric Dumazet static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
457587652bbSEric Dumazet {
458587652bbSEric Dumazet 	list_add_tail(&skb->list, &napi->rx_list);
459587652bbSEric Dumazet 	napi->rx_count += segs;
4608db24af3SKuniyuki Iwashima 	if (napi->rx_count >= READ_ONCE(gro_normal_batch))
461587652bbSEric Dumazet 		gro_normal_list(napi);
462587652bbSEric Dumazet }
463587652bbSEric Dumazet 
4647938cd15SRichard Gobert /* This function is the alternative of 'inet_iif' and 'inet_sdif'
4657938cd15SRichard Gobert  * functions in case we can not rely on fields of IPCB.
4667938cd15SRichard Gobert  *
4677938cd15SRichard Gobert  * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
4687938cd15SRichard Gobert  * The caller must hold the RCU read lock.
4697938cd15SRichard Gobert  */
inet_get_iif_sdif(const struct sk_buff * skb,int * iif,int * sdif)4707938cd15SRichard Gobert static inline void inet_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
4717938cd15SRichard Gobert {
4727938cd15SRichard Gobert 	*iif = inet_iif(skb) ?: skb->dev->ifindex;
4737938cd15SRichard Gobert 	*sdif = 0;
4747938cd15SRichard Gobert 
4757938cd15SRichard Gobert #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
4767938cd15SRichard Gobert 	if (netif_is_l3_slave(skb->dev)) {
4777938cd15SRichard Gobert 		struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
4787938cd15SRichard Gobert 
4797938cd15SRichard Gobert 		*sdif = *iif;
4807938cd15SRichard Gobert 		*iif = master ? master->ifindex : 0;
4817938cd15SRichard Gobert 	}
4827938cd15SRichard Gobert #endif
4837938cd15SRichard Gobert }
4847938cd15SRichard Gobert 
4857938cd15SRichard Gobert /* This function is the alternative of 'inet6_iif' and 'inet6_sdif'
4867938cd15SRichard Gobert  * functions in case we can not rely on fields of IP6CB.
4877938cd15SRichard Gobert  *
4887938cd15SRichard Gobert  * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
4897938cd15SRichard Gobert  * The caller must hold the RCU read lock.
4907938cd15SRichard Gobert  */
inet6_get_iif_sdif(const struct sk_buff * skb,int * iif,int * sdif)4917938cd15SRichard Gobert static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
4927938cd15SRichard Gobert {
4937938cd15SRichard Gobert 	/* using skb->dev->ifindex because skb_dst(skb) is not initialized */
4947938cd15SRichard Gobert 	*iif = skb->dev->ifindex;
4957938cd15SRichard Gobert 	*sdif = 0;
4967938cd15SRichard Gobert 
4977938cd15SRichard Gobert #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
4987938cd15SRichard Gobert 	if (netif_is_l3_slave(skb->dev)) {
4997938cd15SRichard Gobert 		struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
5007938cd15SRichard Gobert 
5017938cd15SRichard Gobert 		*sdif = *iif;
5027938cd15SRichard Gobert 		*iif = master ? master->ifindex : 0;
5037938cd15SRichard Gobert 	}
5047938cd15SRichard Gobert #endif
5057938cd15SRichard Gobert }
5067938cd15SRichard Gobert 
507d457a0e3SEric Dumazet extern struct list_head offload_base;
508587652bbSEric Dumazet 
50904f00ab2SLeon Romanovsky #endif /* _NET_IPV6_GRO_H */
510