197fb5e8dSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2e1d9a90aSSharath Chandra Vurukala /* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
3ceed73a2SSubash Abhinov Kasiviswanathan  *
4ceed73a2SSubash Abhinov Kasiviswanathan  * RMNET Data MAP protocol
5ceed73a2SSubash Abhinov Kasiviswanathan  */
6ceed73a2SSubash Abhinov Kasiviswanathan 
7ceed73a2SSubash Abhinov Kasiviswanathan #include <linux/netdevice.h>
8bbd21b24SSubash Abhinov Kasiviswanathan #include <linux/ip.h>
9bbd21b24SSubash Abhinov Kasiviswanathan #include <linux/ipv6.h>
10bbd21b24SSubash Abhinov Kasiviswanathan #include <net/ip6_checksum.h>
11e1d9a90aSSharath Chandra Vurukala #include <linux/bitfield.h>
12ceed73a2SSubash Abhinov Kasiviswanathan #include "rmnet_config.h"
13ceed73a2SSubash Abhinov Kasiviswanathan #include "rmnet_map.h"
14ceed73a2SSubash Abhinov Kasiviswanathan #include "rmnet_private.h"
15*64b5d1f8SDaniele Palmas #include "rmnet_vnd.h"
16ceed73a2SSubash Abhinov Kasiviswanathan 
17ceed73a2SSubash Abhinov Kasiviswanathan #define RMNET_MAP_DEAGGR_SPACING  64
18ceed73a2SSubash Abhinov Kasiviswanathan #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
19ceed73a2SSubash Abhinov Kasiviswanathan 
rmnet_map_get_csum_field(unsigned char protocol,const void * txporthdr)20bbd21b24SSubash Abhinov Kasiviswanathan static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
21bbd21b24SSubash Abhinov Kasiviswanathan 					 const void *txporthdr)
22bbd21b24SSubash Abhinov Kasiviswanathan {
23874a333fSAlex Elder 	if (protocol == IPPROTO_TCP)
24874a333fSAlex Elder 		return &((struct tcphdr *)txporthdr)->check;
25bbd21b24SSubash Abhinov Kasiviswanathan 
26874a333fSAlex Elder 	if (protocol == IPPROTO_UDP)
27874a333fSAlex Elder 		return &((struct udphdr *)txporthdr)->check;
28bbd21b24SSubash Abhinov Kasiviswanathan 
29874a333fSAlex Elder 	return NULL;
30bbd21b24SSubash Abhinov Kasiviswanathan }
31bbd21b24SSubash Abhinov Kasiviswanathan 
/* Validate the transport checksum of an IPv4 packet using the checksum
 * value the hardware placed in the MAP downlink trailer.
 *
 * @skb: packet whose data begins at the IPv4 header
 * @csum_trailer: MAP downlink checksum trailer for this packet
 * @priv: rmnet device private data, used for statistics accounting
 *
 * Returns 0 if the checksum is valid (or legitimately absent for UDP),
 * or a negative errno on a bad header, fragment, unsupported transport
 * protocol, or checksum mismatch.
 */
static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	struct iphdr *ip4h = (struct iphdr *)skb->data;
	void *txporthdr = skb->data + ip4h->ihl * 4;
	__sum16 *csum_field, pseudo_csum;
	__sum16 ip_payload_csum;

	/* Computing the checksum over just the IPv4 header--including its
	 * checksum field--should yield 0.  If it doesn't, the IP header
	 * is bad, so return an error and let the IP layer drop it.
	 */
	if (ip_fast_csum(ip4h, ip4h->ihl)) {
		priv->stats.csum_ip4_header_bad++;
		return -EINVAL;
	}

	/* We don't support checksum offload on IPv4 fragments */
	if (ip_is_fragment(ip4h)) {
		priv->stats.csum_fragmented_pkt++;
		return -EOPNOTSUPP;
	}

	/* Checksum offload is only supported for UDP and TCP protocols */
	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* RFC 768: UDP checksum is optional for IPv4, and is 0 if unused */
	if (!*csum_field && ip4h->protocol == IPPROTO_UDP) {
		priv->stats.csum_skipped++;
		return 0;
	}

	/* The checksum value in the trailer is computed over the entire
	 * IP packet, including the IP header and payload.  To derive the
	 * transport checksum from this, we first subtract the contribution
	 * of the IP header from the trailer checksum.  We then add the
	 * checksum computed over the pseudo header.
	 *
	 * We verified above that the IP header contributes zero to the
	 * trailer checksum.  Therefore the checksum in the trailer is
	 * just the checksum computed over the IP payload.
	 *
	 * If the IP payload arrives intact, adding the pseudo header
	 * checksum to the IP payload checksum will yield 0xffff (negative
	 * zero).  This means the trailer checksum and the pseudo checksum
	 * are additive inverses of each other.  Put another way, the
	 * message passes the checksum test if the trailer checksum value
	 * is the negated pseudo header checksum.
	 *
	 * Knowing this, we don't even need to examine the transport
	 * header checksum value; it is already accounted for in the
	 * checksum value found in the trailer.
	 */
	ip_payload_csum = csum_trailer->csum_value;

	pseudo_csum = csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					ip4h->protocol, 0);

	/* The cast is required to ensure only the low 16 bits are examined */
	if (ip_payload_csum != (__sum16)~pseudo_csum) {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}

	priv->stats.csum_ok++;
	return 0;
}
106bbd21b24SSubash Abhinov Kasiviswanathan 
#if IS_ENABLED(CONFIG_IPV6)
/* Validate the transport checksum of an IPv6 packet using the checksum
 * value the hardware placed in the MAP downlink trailer.
 *
 * @skb: packet whose data begins at the IPv6 header
 * @csum_trailer: MAP downlink checksum trailer for this packet
 * @priv: rmnet device private data, used for statistics accounting
 *
 * Returns 0 if the checksum is valid, or a negative errno for an
 * unsupported transport protocol or a checksum mismatch.
 */
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
	void *txporthdr = skb->data + sizeof(*ip6h);
	__sum16 *csum_field, pseudo_csum;
	__sum16 ip6_payload_csum;
	__be16 ip_header_csum;

	/* Checksum offload is only supported for UDP and TCP protocols;
	 * the packet cannot include any IPv6 extension headers
	 */
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* The checksum value in the trailer is computed over the entire
	 * IP packet, including the IP header and payload.  To derive the
	 * transport checksum from this, we first subtract the contribution
	 * of the IP header from the trailer checksum.  We then add the
	 * checksum computed over the pseudo header.
	 */
	ip_header_csum = (__force __be16)ip_fast_csum(ip6h, sizeof(*ip6h) / 4);
	ip6_payload_csum = csum16_sub(csum_trailer->csum_value, ip_header_csum);

	pseudo_csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ntohs(ip6h->payload_len),
				      ip6h->nexthdr, 0);

	/* It's sufficient to compare the IP payload checksum with the
	 * negated pseudo checksum to determine whether the packet
	 * checksum was good.  (See further explanation in comments
	 * in rmnet_map_ipv4_dl_csum_trailer()).
	 *
	 * The cast is required to ensure only the low 16 bits are
	 * examined.
	 */
	if (ip6_payload_csum != (__sum16)~pseudo_csum) {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}

	priv->stats.csum_ok++;
	return 0;
}
#else
/* Stub for kernels built without IPv6.  Never reached at runtime: the
 * caller checks IS_ENABLED(CONFIG_IPV6) before calling; this exists only
 * so the reference still links.
 */
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	return 0;
}
#endif
166bbd21b24SSubash Abhinov Kasiviswanathan 
rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr * ip4h)16756a967c4SSubash Abhinov Kasiviswanathan static void rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr *ip4h)
1685eb5f860SSubash Abhinov Kasiviswanathan {
1695eb5f860SSubash Abhinov Kasiviswanathan 	void *txphdr;
1705eb5f860SSubash Abhinov Kasiviswanathan 	u16 *csum;
1715eb5f860SSubash Abhinov Kasiviswanathan 
172753ba09aSDan Carpenter 	txphdr = (void *)ip4h + ip4h->ihl * 4;
1735eb5f860SSubash Abhinov Kasiviswanathan 
1745eb5f860SSubash Abhinov Kasiviswanathan 	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
1755eb5f860SSubash Abhinov Kasiviswanathan 		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
1765eb5f860SSubash Abhinov Kasiviswanathan 		*csum = ~(*csum);
1775eb5f860SSubash Abhinov Kasiviswanathan 	}
1785eb5f860SSubash Abhinov Kasiviswanathan }
1795eb5f860SSubash Abhinov Kasiviswanathan 
1805eb5f860SSubash Abhinov Kasiviswanathan static void
rmnet_map_ipv4_ul_csum_header(struct iphdr * iphdr,struct rmnet_map_ul_csum_header * ul_header,struct sk_buff * skb)1811d257f45SAlex Elder rmnet_map_ipv4_ul_csum_header(struct iphdr *iphdr,
1825eb5f860SSubash Abhinov Kasiviswanathan 			      struct rmnet_map_ul_csum_header *ul_header,
1835eb5f860SSubash Abhinov Kasiviswanathan 			      struct sk_buff *skb)
1845eb5f860SSubash Abhinov Kasiviswanathan {
18586ca860eSAlex Elder 	u16 val;
18686ca860eSAlex Elder 
18786ca860eSAlex Elder 	val = MAP_CSUM_UL_ENABLED_FLAG;
1881d257f45SAlex Elder 	if (iphdr->protocol == IPPROTO_UDP)
18986ca860eSAlex Elder 		val |= MAP_CSUM_UL_UDP_FLAG;
19086ca860eSAlex Elder 	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
1915eb5f860SSubash Abhinov Kasiviswanathan 
19250c62a11SAlex Elder 	ul_header->csum_start_offset = htons(skb_network_header_len(skb));
19386ca860eSAlex Elder 	ul_header->csum_info = htons(val);
1945eb5f860SSubash Abhinov Kasiviswanathan 
1955eb5f860SSubash Abhinov Kasiviswanathan 	skb->ip_summed = CHECKSUM_NONE;
1965eb5f860SSubash Abhinov Kasiviswanathan 
1975eb5f860SSubash Abhinov Kasiviswanathan 	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
1985eb5f860SSubash Abhinov Kasiviswanathan }
1995eb5f860SSubash Abhinov Kasiviswanathan 
2005eb5f860SSubash Abhinov Kasiviswanathan #if IS_ENABLED(CONFIG_IPV6)
20156a967c4SSubash Abhinov Kasiviswanathan static void
rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr * ip6h)20256a967c4SSubash Abhinov Kasiviswanathan rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr *ip6h)
2035eb5f860SSubash Abhinov Kasiviswanathan {
2045eb5f860SSubash Abhinov Kasiviswanathan 	void *txphdr;
2055eb5f860SSubash Abhinov Kasiviswanathan 	u16 *csum;
2065eb5f860SSubash Abhinov Kasiviswanathan 
207753ba09aSDan Carpenter 	txphdr = ip6h + 1;
2085eb5f860SSubash Abhinov Kasiviswanathan 
2095eb5f860SSubash Abhinov Kasiviswanathan 	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
2105eb5f860SSubash Abhinov Kasiviswanathan 		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
2115eb5f860SSubash Abhinov Kasiviswanathan 		*csum = ~(*csum);
2125eb5f860SSubash Abhinov Kasiviswanathan 	}
2135eb5f860SSubash Abhinov Kasiviswanathan }
2145eb5f860SSubash Abhinov Kasiviswanathan 
2155eb5f860SSubash Abhinov Kasiviswanathan static void
rmnet_map_ipv6_ul_csum_header(struct ipv6hdr * ipv6hdr,struct rmnet_map_ul_csum_header * ul_header,struct sk_buff * skb)2161d257f45SAlex Elder rmnet_map_ipv6_ul_csum_header(struct ipv6hdr *ipv6hdr,
2175eb5f860SSubash Abhinov Kasiviswanathan 			      struct rmnet_map_ul_csum_header *ul_header,
2185eb5f860SSubash Abhinov Kasiviswanathan 			      struct sk_buff *skb)
2195eb5f860SSubash Abhinov Kasiviswanathan {
22086ca860eSAlex Elder 	u16 val;
22186ca860eSAlex Elder 
22286ca860eSAlex Elder 	val = MAP_CSUM_UL_ENABLED_FLAG;
2231d257f45SAlex Elder 	if (ipv6hdr->nexthdr == IPPROTO_UDP)
22486ca860eSAlex Elder 		val |= MAP_CSUM_UL_UDP_FLAG;
22586ca860eSAlex Elder 	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
2265eb5f860SSubash Abhinov Kasiviswanathan 
22750c62a11SAlex Elder 	ul_header->csum_start_offset = htons(skb_network_header_len(skb));
22886ca860eSAlex Elder 	ul_header->csum_info = htons(val);
2295eb5f860SSubash Abhinov Kasiviswanathan 
2305eb5f860SSubash Abhinov Kasiviswanathan 	skb->ip_summed = CHECKSUM_NONE;
2315eb5f860SSubash Abhinov Kasiviswanathan 
2321d257f45SAlex Elder 	rmnet_map_complement_ipv6_txporthdr_csum_field(ipv6hdr);
2335eb5f860SSubash Abhinov Kasiviswanathan }
234b84b53eeSAlex Elder #else
/* Stub for kernels built without IPv6.  Never reached at runtime: the
 * caller checks IS_ENABLED(CONFIG_IPV6) first; this exists only so the
 * reference still links.
 */
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
}
2415eb5f860SSubash Abhinov Kasiviswanathan #endif
2425eb5f860SSubash Abhinov Kasiviswanathan 
/* Prepend a MAPv5 checksum header to an egress packet and, when the
 * packet is a TCP or UDP packet awaiting checksum completion
 * (CHECKSUM_PARTIAL), request hardware checksum offload by setting the
 * valid flag in that header.  Otherwise the header is left zeroed
 * (beyond its type field) and the checksum is counted as done in
 * software.
 *
 * NOTE(review): @port is currently unused here; kept for signature
 * symmetry with the caller's dispatch.
 */
static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
						struct rmnet_port *port,
						struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_v5_csum_header *ul_header;

	/* Prepend a zeroed MAPv5 header and record its type */
	ul_header = skb_push(skb, sizeof(*ul_header));
	memset(ul_header, 0, sizeof(*ul_header));
	ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
						MAPV5_HDRINFO_HDR_TYPE_FMASK);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		void *iph = ip_hdr(skb);
		__sum16 *check;
		void *trans;
		u8 proto;

		/* Locate the transport header for IPv4 or IPv6 */
		if (skb->protocol == htons(ETH_P_IP)) {
			u16 ip_len = ((struct iphdr *)iph)->ihl * 4;

			proto = ((struct iphdr *)iph)->protocol;
			trans = iph + ip_len;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			u16 ip_len = sizeof(struct ipv6hdr);

			proto = ((struct ipv6hdr *)iph)->nexthdr;
			trans = iph + ip_len;
		} else {
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
		}

		/* Only TCP and UDP can be offloaded */
		check = rmnet_map_get_csum_field(proto, trans);
		if (check) {
			skb->ip_summed = CHECKSUM_NONE;
			/* Ask for checksum offloading */
			ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
			priv->stats.csum_hw++;
			return;
		}
	}

sw_csum:
	priv->stats.csum_sw++;
}
290b6e5d27eSSharath Chandra Vurukala 
291ceed73a2SSubash Abhinov Kasiviswanathan /* Adds MAP header to front of skb->data
292ceed73a2SSubash Abhinov Kasiviswanathan  * Padding is calculated and set appropriately in MAP header. Mux ID is
293ceed73a2SSubash Abhinov Kasiviswanathan  * initialized to 0.
294ceed73a2SSubash Abhinov Kasiviswanathan  */
rmnet_map_add_map_header(struct sk_buff * skb,int hdrlen,struct rmnet_port * port,int pad)295ceed73a2SSubash Abhinov Kasiviswanathan struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
296b6e5d27eSSharath Chandra Vurukala 						  int hdrlen,
297b6e5d27eSSharath Chandra Vurukala 						  struct rmnet_port *port,
298b6e5d27eSSharath Chandra Vurukala 						  int pad)
299ceed73a2SSubash Abhinov Kasiviswanathan {
300ceed73a2SSubash Abhinov Kasiviswanathan 	struct rmnet_map_header *map_header;
301ceed73a2SSubash Abhinov Kasiviswanathan 	u32 padding, map_datalen;
302ceed73a2SSubash Abhinov Kasiviswanathan 
303ceed73a2SSubash Abhinov Kasiviswanathan 	map_datalen = skb->len - hdrlen;
304ceed73a2SSubash Abhinov Kasiviswanathan 	map_header = (struct rmnet_map_header *)
305ceed73a2SSubash Abhinov Kasiviswanathan 			skb_push(skb, sizeof(struct rmnet_map_header));
306ceed73a2SSubash Abhinov Kasiviswanathan 	memset(map_header, 0, sizeof(struct rmnet_map_header));
307ceed73a2SSubash Abhinov Kasiviswanathan 
308b6e5d27eSSharath Chandra Vurukala 	/* Set next_hdr bit for csum offload packets */
309b6e5d27eSSharath Chandra Vurukala 	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
310b6e5d27eSSharath Chandra Vurukala 		map_header->flags |= MAP_NEXT_HEADER_FLAG;
311b6e5d27eSSharath Chandra Vurukala 
312ceed73a2SSubash Abhinov Kasiviswanathan 	if (pad == RMNET_MAP_NO_PAD_BYTES) {
313ceed73a2SSubash Abhinov Kasiviswanathan 		map_header->pkt_len = htons(map_datalen);
314ceed73a2SSubash Abhinov Kasiviswanathan 		return map_header;
315ceed73a2SSubash Abhinov Kasiviswanathan 	}
316ceed73a2SSubash Abhinov Kasiviswanathan 
31716653c16SAlex Elder 	BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3);
318ceed73a2SSubash Abhinov Kasiviswanathan 	padding = ALIGN(map_datalen, 4) - map_datalen;
319ceed73a2SSubash Abhinov Kasiviswanathan 
320ceed73a2SSubash Abhinov Kasiviswanathan 	if (padding == 0)
321ceed73a2SSubash Abhinov Kasiviswanathan 		goto done;
322ceed73a2SSubash Abhinov Kasiviswanathan 
323ceed73a2SSubash Abhinov Kasiviswanathan 	if (skb_tailroom(skb) < padding)
324ceed73a2SSubash Abhinov Kasiviswanathan 		return NULL;
325ceed73a2SSubash Abhinov Kasiviswanathan 
326354ad9a8SChristophe JAILLET 	skb_put_zero(skb, padding);
327ceed73a2SSubash Abhinov Kasiviswanathan 
328ceed73a2SSubash Abhinov Kasiviswanathan done:
329ceed73a2SSubash Abhinov Kasiviswanathan 	map_header->pkt_len = htons(map_datalen + padding);
33016653c16SAlex Elder 	/* This is a data packet, so the CMD bit is 0 */
33116653c16SAlex Elder 	map_header->flags = padding & MAP_PAD_LEN_MASK;
332ceed73a2SSubash Abhinov Kasiviswanathan 
333ceed73a2SSubash Abhinov Kasiviswanathan 	return map_header;
334ceed73a2SSubash Abhinov Kasiviswanathan }
335ceed73a2SSubash Abhinov Kasiviswanathan 
336ceed73a2SSubash Abhinov Kasiviswanathan /* Deaggregates a single packet
337ceed73a2SSubash Abhinov Kasiviswanathan  * A whole new buffer is allocated for each portion of an aggregated frame.
338ceed73a2SSubash Abhinov Kasiviswanathan  * Caller should keep calling deaggregate() on the source skb until 0 is
339ceed73a2SSubash Abhinov Kasiviswanathan  * returned, indicating that there are no more packets to deaggregate. Caller
340ceed73a2SSubash Abhinov Kasiviswanathan  * is responsible for freeing the original skb.
341ceed73a2SSubash Abhinov Kasiviswanathan  */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct rmnet_map_v5_csum_header *next_hdr = NULL;
	struct rmnet_map_header *maph;
	void *data = skb->data;
	struct sk_buff *skbn;
	u8 nexthdr_type;
	u32 packet_len;

	/* Nothing left to deaggregate */
	if (skb->len == 0)
		return NULL;

	/* packet_len is the full on-wire size of the next sub-packet:
	 * MAP header + payload (+ checksum header/trailer below)
	 */
	maph = (struct rmnet_map_header *)skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(*maph);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		/* MAPv4 appends a checksum trailer after the payload */
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
	} else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
		/* MAPv5 data packets must carry a checksum header right
		 * after the MAP header; command packets carry none
		 */
		if (!(maph->flags & MAP_CMD_FLAG)) {
			packet_len += sizeof(*next_hdr);
			if (maph->flags & MAP_NEXT_HEADER_FLAG)
				next_hdr = data + sizeof(*maph);
			else
				/* Mapv5 data pkt without csum hdr is invalid */
				return NULL;
		}
	}

	/* The claimed sub-packet must fit in what remains of the frame */
	if (((int)skb->len - (int)packet_len) < 0)
		return NULL;

	/* Some hardware can send us empty frames. Catch them */
	if (!maph->pkt_len)
		return NULL;

	if (next_hdr) {
		/* Only the checksum-offload next-header type is supported */
		nexthdr_type = u8_get_bits(next_hdr->header_info,
					   MAPV5_HDRINFO_HDR_TYPE_FMASK);
		if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
			return NULL;
	}

	/* Copy this sub-packet into a fresh skb and consume it from the
	 * aggregate; caller keeps calling until we return NULL
	 */
	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	return skbn;
}
396bbd21b24SSubash Abhinov Kasiviswanathan 
397bbd21b24SSubash Abhinov Kasiviswanathan /* Validates packet checksums. Function takes a pointer to
398bbd21b24SSubash Abhinov Kasiviswanathan  * the beginning of a buffer which contains the IP payload +
399bbd21b24SSubash Abhinov Kasiviswanathan  * padding + checksum trailer.
400bbd21b24SSubash Abhinov Kasiviswanathan  * Only IPv4 and IPv6 are supported along with TCP & UDP.
401bbd21b24SSubash Abhinov Kasiviswanathan  * Fragmented or tunneled packets are not supported.
402bbd21b24SSubash Abhinov Kasiviswanathan  */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_dl_csum_trailer *csum_trailer;

	/* Trailer-based validation only applies when RX checksum offload
	 * is enabled on the device
	 */
	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		priv->stats.csum_sw++;
		return -EOPNOTSUPP;
	}

	/* The trailer sits @len bytes past the start of the IP packet */
	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);

	/* Hardware marks the trailer valid when it computed a checksum */
	if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) {
		priv->stats.csum_valid_unset++;
		return -EINVAL;
	}

	/* Dispatch on IP version; anything else is unsupported */
	if (skb->protocol == htons(ETH_P_IP))
		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);

	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);

	priv->stats.csum_err_invalid_ip_version++;

	return -EPROTONOSUPPORT;
}
4305eb5f860SSubash Abhinov Kasiviswanathan 
/* Prepend a MAPv4 uplink checksum header to an egress packet.  When the
 * device supports TX checksum offload and the packet needs checksum
 * completion (CHECKSUM_PARTIAL), the header is filled in to request
 * hardware offload; otherwise it is zeroed and the checksum is counted
 * as done in software.
 */
static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
						struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	/* Offload requires the device to advertise TX checksum support */
	if (unlikely(!(orig_dev->features &
		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	/* Only packets still awaiting checksum completion qualify */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto sw_csum;

	/* The IP header immediately follows the checksum header we
	 * just pushed
	 */
	iphdr = (char *)ul_header +
		sizeof(struct rmnet_map_ul_csum_header);

	if (skb->protocol == htons(ETH_P_IP)) {
		rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
		priv->stats.csum_hw++;
		return;
	}

	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
		rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
		priv->stats.csum_hw++;
		return;
	}

	priv->stats.csum_err_invalid_ip_version++;

sw_csum:
	/* A zeroed header tells the hardware not to offload */
	memset(ul_header, 0, sizeof(*ul_header));

	priv->stats.csum_sw++;
}
470e1d9a90aSSharath Chandra Vurukala 
471b6e5d27eSSharath Chandra Vurukala /* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
472b6e5d27eSSharath Chandra Vurukala  * packets that are supported for UL checksum offload.
473b6e5d27eSSharath Chandra Vurukala  */
rmnet_map_checksum_uplink_packet(struct sk_buff * skb,struct rmnet_port * port,struct net_device * orig_dev,int csum_type)474b6e5d27eSSharath Chandra Vurukala void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
475b6e5d27eSSharath Chandra Vurukala 				      struct rmnet_port *port,
476b6e5d27eSSharath Chandra Vurukala 				      struct net_device *orig_dev,
477b6e5d27eSSharath Chandra Vurukala 				      int csum_type)
478b6e5d27eSSharath Chandra Vurukala {
479b6e5d27eSSharath Chandra Vurukala 	switch (csum_type) {
480b6e5d27eSSharath Chandra Vurukala 	case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
481b6e5d27eSSharath Chandra Vurukala 		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
482b6e5d27eSSharath Chandra Vurukala 		break;
483b6e5d27eSSharath Chandra Vurukala 	case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
484b6e5d27eSSharath Chandra Vurukala 		rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
485b6e5d27eSSharath Chandra Vurukala 		break;
486b6e5d27eSSharath Chandra Vurukala 	default:
487b6e5d27eSSharath Chandra Vurukala 		break;
488b6e5d27eSSharath Chandra Vurukala 	}
489b6e5d27eSSharath Chandra Vurukala }
490b6e5d27eSSharath Chandra Vurukala 
/* Process a MAPv5 packet header
 *
 * Parses the MAPv5 next-header that follows the MAP header, updates RX
 * checksum statistics, marks the skb CHECKSUM_UNNECESSARY when hardware
 * validated it, and pulls the v5 header off the skb.
 *
 * @skb: packet whose data begins at the MAP header
 * @len: currently unused in this path — TODO(review): confirm whether it
 *       can be dropped from the interface or is kept for other callers
 *
 * Returns 0 on success, -EINVAL if the next-header is not a
 * checksum-offload header.
 */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_v5_csum_header *next_hdr;
	u8 nexthdr_type;

	/* The v5 csum header sits immediately after the MAP header */
	next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
			sizeof(struct rmnet_map_header));

	nexthdr_type = u8_get_bits(next_hdr->header_info,
				   MAPV5_HDRINFO_HDR_TYPE_FMASK);

	/* Only the checksum-offload header type is handled here */
	if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
		return -EINVAL;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		/* RX csum offload disabled: leave validation to the stack */
		priv->stats.csum_sw++;
	} else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
		/* Hardware already validated the checksum */
		priv->stats.csum_ok++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/* Offload enabled but hardware did not set the valid bit */
		priv->stats.csum_valid_unset++;
	}

	/* Pull csum v5 header */
	skb_pull(skb, sizeof(*next_hdr));

	return 0;
}
522*64b5d1f8SDaniele Palmas 
523*64b5d1f8SDaniele Palmas #define RMNET_AGG_BYPASS_TIME_NSEC 10000000L
524*64b5d1f8SDaniele Palmas 
reset_aggr_params(struct rmnet_port * port)525*64b5d1f8SDaniele Palmas static void reset_aggr_params(struct rmnet_port *port)
526*64b5d1f8SDaniele Palmas {
527*64b5d1f8SDaniele Palmas 	port->skbagg_head = NULL;
528*64b5d1f8SDaniele Palmas 	port->agg_count = 0;
529*64b5d1f8SDaniele Palmas 	port->agg_state = 0;
530*64b5d1f8SDaniele Palmas 	memset(&port->agg_time, 0, sizeof(struct timespec64));
531*64b5d1f8SDaniele Palmas }
532*64b5d1f8SDaniele Palmas 
rmnet_send_skb(struct rmnet_port * port,struct sk_buff * skb)533*64b5d1f8SDaniele Palmas static void rmnet_send_skb(struct rmnet_port *port, struct sk_buff *skb)
534*64b5d1f8SDaniele Palmas {
535*64b5d1f8SDaniele Palmas 	if (skb_needs_linearize(skb, port->dev->features)) {
536*64b5d1f8SDaniele Palmas 		if (unlikely(__skb_linearize(skb))) {
537*64b5d1f8SDaniele Palmas 			struct rmnet_priv *priv;
538*64b5d1f8SDaniele Palmas 
539*64b5d1f8SDaniele Palmas 			priv = netdev_priv(port->rmnet_dev);
540*64b5d1f8SDaniele Palmas 			this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
541*64b5d1f8SDaniele Palmas 			dev_kfree_skb_any(skb);
542*64b5d1f8SDaniele Palmas 			return;
543*64b5d1f8SDaniele Palmas 		}
544*64b5d1f8SDaniele Palmas 	}
545*64b5d1f8SDaniele Palmas 
546*64b5d1f8SDaniele Palmas 	dev_queue_xmit(skb);
547*64b5d1f8SDaniele Palmas }
548*64b5d1f8SDaniele Palmas 
rmnet_map_flush_tx_packet_work(struct work_struct * work)549*64b5d1f8SDaniele Palmas static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
550*64b5d1f8SDaniele Palmas {
551*64b5d1f8SDaniele Palmas 	struct sk_buff *skb = NULL;
552*64b5d1f8SDaniele Palmas 	struct rmnet_port *port;
553*64b5d1f8SDaniele Palmas 
554*64b5d1f8SDaniele Palmas 	port = container_of(work, struct rmnet_port, agg_wq);
555*64b5d1f8SDaniele Palmas 
556*64b5d1f8SDaniele Palmas 	spin_lock_bh(&port->agg_lock);
557*64b5d1f8SDaniele Palmas 	if (likely(port->agg_state == -EINPROGRESS)) {
558*64b5d1f8SDaniele Palmas 		/* Buffer may have already been shipped out */
559*64b5d1f8SDaniele Palmas 		if (likely(port->skbagg_head)) {
560*64b5d1f8SDaniele Palmas 			skb = port->skbagg_head;
561*64b5d1f8SDaniele Palmas 			reset_aggr_params(port);
562*64b5d1f8SDaniele Palmas 		}
563*64b5d1f8SDaniele Palmas 		port->agg_state = 0;
564*64b5d1f8SDaniele Palmas 	}
565*64b5d1f8SDaniele Palmas 
566*64b5d1f8SDaniele Palmas 	spin_unlock_bh(&port->agg_lock);
567*64b5d1f8SDaniele Palmas 	if (skb)
568*64b5d1f8SDaniele Palmas 		rmnet_send_skb(port, skb);
569*64b5d1f8SDaniele Palmas }
570*64b5d1f8SDaniele Palmas 
rmnet_map_flush_tx_packet_queue(struct hrtimer * t)571*64b5d1f8SDaniele Palmas static enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
572*64b5d1f8SDaniele Palmas {
573*64b5d1f8SDaniele Palmas 	struct rmnet_port *port;
574*64b5d1f8SDaniele Palmas 
575*64b5d1f8SDaniele Palmas 	port = container_of(t, struct rmnet_port, hrtimer);
576*64b5d1f8SDaniele Palmas 
577*64b5d1f8SDaniele Palmas 	schedule_work(&port->agg_wq);
578*64b5d1f8SDaniele Palmas 
579*64b5d1f8SDaniele Palmas 	return HRTIMER_NORESTART;
580*64b5d1f8SDaniele Palmas }
581*64b5d1f8SDaniele Palmas 
rmnet_map_tx_aggregate(struct sk_buff * skb,struct rmnet_port * port,struct net_device * orig_dev)582*64b5d1f8SDaniele Palmas unsigned int rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
583*64b5d1f8SDaniele Palmas 				    struct net_device *orig_dev)
584*64b5d1f8SDaniele Palmas {
585*64b5d1f8SDaniele Palmas 	struct timespec64 diff, last;
586*64b5d1f8SDaniele Palmas 	unsigned int len = skb->len;
587*64b5d1f8SDaniele Palmas 	struct sk_buff *agg_skb;
588*64b5d1f8SDaniele Palmas 	int size;
589*64b5d1f8SDaniele Palmas 
590*64b5d1f8SDaniele Palmas 	spin_lock_bh(&port->agg_lock);
591*64b5d1f8SDaniele Palmas 	memcpy(&last, &port->agg_last, sizeof(struct timespec64));
592*64b5d1f8SDaniele Palmas 	ktime_get_real_ts64(&port->agg_last);
593*64b5d1f8SDaniele Palmas 
594*64b5d1f8SDaniele Palmas 	if (!port->skbagg_head) {
595*64b5d1f8SDaniele Palmas 		/* Check to see if we should agg first. If the traffic is very
596*64b5d1f8SDaniele Palmas 		 * sparse, don't aggregate.
597*64b5d1f8SDaniele Palmas 		 */
598*64b5d1f8SDaniele Palmas new_packet:
599*64b5d1f8SDaniele Palmas 		diff = timespec64_sub(port->agg_last, last);
600*64b5d1f8SDaniele Palmas 		size = port->egress_agg_params.bytes - skb->len;
601*64b5d1f8SDaniele Palmas 
602*64b5d1f8SDaniele Palmas 		if (size < 0) {
603*64b5d1f8SDaniele Palmas 			/* dropped */
604*64b5d1f8SDaniele Palmas 			spin_unlock_bh(&port->agg_lock);
605*64b5d1f8SDaniele Palmas 			return 0;
606*64b5d1f8SDaniele Palmas 		}
607*64b5d1f8SDaniele Palmas 
608*64b5d1f8SDaniele Palmas 		if (diff.tv_sec > 0 || diff.tv_nsec > RMNET_AGG_BYPASS_TIME_NSEC ||
609*64b5d1f8SDaniele Palmas 		    size == 0)
610*64b5d1f8SDaniele Palmas 			goto no_aggr;
611*64b5d1f8SDaniele Palmas 
612*64b5d1f8SDaniele Palmas 		port->skbagg_head = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
613*64b5d1f8SDaniele Palmas 		if (!port->skbagg_head)
614*64b5d1f8SDaniele Palmas 			goto no_aggr;
615*64b5d1f8SDaniele Palmas 
616*64b5d1f8SDaniele Palmas 		dev_kfree_skb_any(skb);
617*64b5d1f8SDaniele Palmas 		port->skbagg_head->protocol = htons(ETH_P_MAP);
618*64b5d1f8SDaniele Palmas 		port->agg_count = 1;
619*64b5d1f8SDaniele Palmas 		ktime_get_real_ts64(&port->agg_time);
620*64b5d1f8SDaniele Palmas 		skb_frag_list_init(port->skbagg_head);
621*64b5d1f8SDaniele Palmas 		goto schedule;
622*64b5d1f8SDaniele Palmas 	}
623*64b5d1f8SDaniele Palmas 	diff = timespec64_sub(port->agg_last, port->agg_time);
624*64b5d1f8SDaniele Palmas 	size = port->egress_agg_params.bytes - port->skbagg_head->len;
625*64b5d1f8SDaniele Palmas 
626*64b5d1f8SDaniele Palmas 	if (skb->len > size) {
627*64b5d1f8SDaniele Palmas 		agg_skb = port->skbagg_head;
628*64b5d1f8SDaniele Palmas 		reset_aggr_params(port);
629*64b5d1f8SDaniele Palmas 		spin_unlock_bh(&port->agg_lock);
630*64b5d1f8SDaniele Palmas 		hrtimer_cancel(&port->hrtimer);
631*64b5d1f8SDaniele Palmas 		rmnet_send_skb(port, agg_skb);
632*64b5d1f8SDaniele Palmas 		spin_lock_bh(&port->agg_lock);
633*64b5d1f8SDaniele Palmas 		goto new_packet;
634*64b5d1f8SDaniele Palmas 	}
635*64b5d1f8SDaniele Palmas 
636*64b5d1f8SDaniele Palmas 	if (skb_has_frag_list(port->skbagg_head))
637*64b5d1f8SDaniele Palmas 		port->skbagg_tail->next = skb;
638*64b5d1f8SDaniele Palmas 	else
639*64b5d1f8SDaniele Palmas 		skb_shinfo(port->skbagg_head)->frag_list = skb;
640*64b5d1f8SDaniele Palmas 
641*64b5d1f8SDaniele Palmas 	port->skbagg_head->len += skb->len;
642*64b5d1f8SDaniele Palmas 	port->skbagg_head->data_len += skb->len;
643*64b5d1f8SDaniele Palmas 	port->skbagg_head->truesize += skb->truesize;
644*64b5d1f8SDaniele Palmas 	port->skbagg_tail = skb;
645*64b5d1f8SDaniele Palmas 	port->agg_count++;
646*64b5d1f8SDaniele Palmas 
647*64b5d1f8SDaniele Palmas 	if (diff.tv_sec > 0 || diff.tv_nsec > port->egress_agg_params.time_nsec ||
648*64b5d1f8SDaniele Palmas 	    port->agg_count >= port->egress_agg_params.count ||
649*64b5d1f8SDaniele Palmas 	    port->skbagg_head->len == port->egress_agg_params.bytes) {
650*64b5d1f8SDaniele Palmas 		agg_skb = port->skbagg_head;
651*64b5d1f8SDaniele Palmas 		reset_aggr_params(port);
652*64b5d1f8SDaniele Palmas 		spin_unlock_bh(&port->agg_lock);
653*64b5d1f8SDaniele Palmas 		hrtimer_cancel(&port->hrtimer);
654*64b5d1f8SDaniele Palmas 		rmnet_send_skb(port, agg_skb);
655*64b5d1f8SDaniele Palmas 		return len;
656*64b5d1f8SDaniele Palmas 	}
657*64b5d1f8SDaniele Palmas 
658*64b5d1f8SDaniele Palmas schedule:
659*64b5d1f8SDaniele Palmas 	if (!hrtimer_active(&port->hrtimer) && port->agg_state != -EINPROGRESS) {
660*64b5d1f8SDaniele Palmas 		port->agg_state = -EINPROGRESS;
661*64b5d1f8SDaniele Palmas 		hrtimer_start(&port->hrtimer,
662*64b5d1f8SDaniele Palmas 			      ns_to_ktime(port->egress_agg_params.time_nsec),
663*64b5d1f8SDaniele Palmas 			      HRTIMER_MODE_REL);
664*64b5d1f8SDaniele Palmas 	}
665*64b5d1f8SDaniele Palmas 	spin_unlock_bh(&port->agg_lock);
666*64b5d1f8SDaniele Palmas 
667*64b5d1f8SDaniele Palmas 	return len;
668*64b5d1f8SDaniele Palmas 
669*64b5d1f8SDaniele Palmas no_aggr:
670*64b5d1f8SDaniele Palmas 	spin_unlock_bh(&port->agg_lock);
671*64b5d1f8SDaniele Palmas 	skb->protocol = htons(ETH_P_MAP);
672*64b5d1f8SDaniele Palmas 	dev_queue_xmit(skb);
673*64b5d1f8SDaniele Palmas 
674*64b5d1f8SDaniele Palmas 	return len;
675*64b5d1f8SDaniele Palmas }
676*64b5d1f8SDaniele Palmas 
rmnet_map_update_ul_agg_config(struct rmnet_port * port,u32 size,u32 count,u32 time)677*64b5d1f8SDaniele Palmas void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
678*64b5d1f8SDaniele Palmas 				    u32 count, u32 time)
679*64b5d1f8SDaniele Palmas {
680*64b5d1f8SDaniele Palmas 	spin_lock_bh(&port->agg_lock);
681*64b5d1f8SDaniele Palmas 	port->egress_agg_params.bytes = size;
682*64b5d1f8SDaniele Palmas 	WRITE_ONCE(port->egress_agg_params.count, count);
683*64b5d1f8SDaniele Palmas 	port->egress_agg_params.time_nsec = time * NSEC_PER_USEC;
684*64b5d1f8SDaniele Palmas 	spin_unlock_bh(&port->agg_lock);
685*64b5d1f8SDaniele Palmas }
686*64b5d1f8SDaniele Palmas 
rmnet_map_tx_aggregate_init(struct rmnet_port * port)687*64b5d1f8SDaniele Palmas void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
688*64b5d1f8SDaniele Palmas {
689*64b5d1f8SDaniele Palmas 	hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
690*64b5d1f8SDaniele Palmas 	port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
691*64b5d1f8SDaniele Palmas 	spin_lock_init(&port->agg_lock);
692*64b5d1f8SDaniele Palmas 	rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
693*64b5d1f8SDaniele Palmas 	INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
694*64b5d1f8SDaniele Palmas }
695*64b5d1f8SDaniele Palmas 
rmnet_map_tx_aggregate_exit(struct rmnet_port * port)696*64b5d1f8SDaniele Palmas void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
697*64b5d1f8SDaniele Palmas {
698*64b5d1f8SDaniele Palmas 	hrtimer_cancel(&port->hrtimer);
699*64b5d1f8SDaniele Palmas 	cancel_work_sync(&port->agg_wq);
700*64b5d1f8SDaniele Palmas 
701*64b5d1f8SDaniele Palmas 	spin_lock_bh(&port->agg_lock);
702*64b5d1f8SDaniele Palmas 	if (port->agg_state == -EINPROGRESS) {
703*64b5d1f8SDaniele Palmas 		if (port->skbagg_head) {
704*64b5d1f8SDaniele Palmas 			dev_kfree_skb_any(port->skbagg_head);
705*64b5d1f8SDaniele Palmas 			reset_aggr_params(port);
706*64b5d1f8SDaniele Palmas 		}
707*64b5d1f8SDaniele Palmas 
708*64b5d1f8SDaniele Palmas 		port->agg_state = 0;
709*64b5d1f8SDaniele Palmas 	}
710*64b5d1f8SDaniele Palmas 	spin_unlock_bh(&port->agg_lock);
711*64b5d1f8SDaniele Palmas }
712