1 /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * RMNET Data MAP protocol
13  *
14  */
15 
16 #include <linux/netdevice.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <net/ip6_checksum.h>
20 #include "rmnet_config.h"
21 #include "rmnet_map.h"
22 #include "rmnet_private.h"
23 
24 #define RMNET_MAP_DEAGGR_SPACING  64
25 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
26 
27 static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
28 					 const void *txporthdr)
29 {
30 	__sum16 *check = NULL;
31 
32 	switch (protocol) {
33 	case IPPROTO_TCP:
34 		check = &(((struct tcphdr *)txporthdr)->check);
35 		break;
36 
37 	case IPPROTO_UDP:
38 		check = &(((struct udphdr *)txporthdr)->check);
39 		break;
40 
41 	default:
42 		check = NULL;
43 		break;
44 	}
45 
46 	return check;
47 }
48 
/* Validate the IPv4 transport checksum of @skb against the MAP DL
 * checksum trailer produced by hardware.
 *
 * The trailer value is a checksum over the whole IP packet, so the
 * IP header's contribution is removed and the pseudo-header's added
 * before comparing against the checksum field the sender stored.
 *
 * Returns 0 when the checksum is valid (or legitimately skipped per
 * RFC 768), -EOPNOTSUPP for fragments, -EPROTONOSUPPORT for
 * non-TCP/UDP payloads and -EINVAL on a mismatch. Updates the
 * matching counter in @priv->stats in every case.
 */
static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
	u16 csum_value, csum_value_final;
	struct iphdr *ip4h;
	void *txporthdr;
	__be16 addend;

	ip4h = (struct iphdr *)(skb->data);
	/* A fragment carries only part of the transport payload, so the
	 * trailer checksum cannot be matched against it.
	 */
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
		priv->stats.csum_fragmented_pkt++;
		return -EOPNOTSUPP;
	}

	txporthdr = skb->data + ip4h->ihl * 4;

	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);

	/* Only TCP and UDP expose a checksum field we can verify */
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
		priv->stats.csum_skipped++;
		return 0;
	}

	/* Trailer checksum covers the whole IP packet; fold out the IP
	 * header checksum to isolate the payload's contribution.
	 */
	csum_value = ~ntohs(csum_trailer->csum_value);
	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
				     (__force __be16)hdr_csum);

	/* Fold in the TCP/UDP pseudo-header, then remove the checksum
	 * value the sender stored; the result should reproduce that
	 * stored value when the payload is intact.
	 */
	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					 ip4h->protocol, 0);
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
			csum_value_final = ~csum_value_final;
			break;

		case IPPROTO_TCP:
			/* DL4 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
		priv->stats.csum_ok++;
		return 0;
	} else {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}
}
120 
121 #if IS_ENABLED(CONFIG_IPV6)
/* Validate the IPv6 transport checksum of @skb against the MAP DL
 * checksum trailer produced by hardware.
 *
 * Mirrors the IPv4 path: the trailer checksum covers the whole IPv6
 * packet, so the IPv6 header and pseudo-header contributions are
 * folded out/in before comparing against the sender's checksum field.
 *
 * Returns 0 on success, -EPROTONOSUPPORT for non-TCP/UDP payloads
 * and -EINVAL on a mismatch, updating @priv->stats accordingly.
 */
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
	u16 csum_value, csum_value_final;
	__be16 ip6_hdr_csum, addend;
	struct ipv6hdr *ip6h;
	void *txporthdr;
	u32 length;

	ip6h = (struct ipv6hdr *)(skb->data);

	/* NOTE(review): assumes the transport header directly follows the
	 * base IPv6 header, i.e. no extension headers - confirm against
	 * what the hardware deaggregation path can deliver.
	 */
	txporthdr = skb->data + sizeof(struct ipv6hdr);
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);

	/* Only TCP and UDP expose a checksum field we can verify */
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* Remove the IPv6 header's contribution from the trailer
	 * checksum to isolate the payload's contribution.
	 */
	csum_value = ~ntohs(csum_trailer->csum_value);
	ip6_hdr_csum = (__force __be16)
			~ntohs((__force __be16)ip_compute_csum(ip6h,
			       (int)(txporthdr - (void *)(skb->data))));
	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
				      ip6_hdr_csum);

	/* For UDP take the length from the UDP header itself; otherwise
	 * use the IPv6 payload length field.
	 */
	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
			     length, ip6h->nexthdr, 0));
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip6_payload_csum, addend);

	/* Remove the sender's stored checksum; the result should match
	 * that stored value when the payload is intact.
	 */
	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1
			 * DL6 One's complement rule for UDP checksum 0
			 */
			csum_value_final = ~csum_value_final;
			break;

		case IPPROTO_TCP:
			/* DL6 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
		priv->stats.csum_ok++;
		return 0;
	} else {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}
}
188 #endif
189 
190 static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
191 {
192 	struct iphdr *ip4h = (struct iphdr *)iphdr;
193 	void *txphdr;
194 	u16 *csum;
195 
196 	txphdr = iphdr + ip4h->ihl * 4;
197 
198 	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
199 		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
200 		*csum = ~(*csum);
201 	}
202 }
203 
/* Fill in the MAP UL checksum-offload header for an IPv4 packet.
 *
 * Records where the checksum computation starts (offset of the
 * transport header from the IP header) and where the result must be
 * inserted, then hands checksum responsibility to the hardware by
 * clearing ip_summed.
 */
static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	__be16 *hdr = (__be16 *)ul_header, offset;

	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)iphdr));
	ul_header->csum_start_offset = offset;
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;
	/* Flag UDP-over-IPv4 packets so they can be treated per RFC 768 */
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ip4_ind = 1;
	else
		ul_header->udp_ip4_ind = 0;

	/* Changing remaining fields to network order
	 * (the second 16-bit word of the header, written above as
	 * host-order bitfields)
	 */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	skb->ip_summed = CHECKSUM_NONE;

	/* NOTE(review): the transport checksum field is complemented here,
	 * presumably so the hardware folds it into the final checksum -
	 * confirm against the MAP UL offload specification.
	 */
	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}
230 
231 #if IS_ENABLED(CONFIG_IPV6)
232 static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
233 {
234 	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
235 	void *txphdr;
236 	u16 *csum;
237 
238 	txphdr = ip6hdr + sizeof(struct ipv6hdr);
239 
240 	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
241 		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
242 		*csum = ~(*csum);
243 	}
244 }
245 
/* Fill in the MAP UL checksum-offload header for an IPv6 packet.
 *
 * Mirrors the IPv4 variant: records the checksum start and insert
 * offsets, then hands checksum responsibility to the hardware by
 * clearing ip_summed. udp_ip4_ind is always 0 for IPv6.
 */
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	__be16 *hdr = (__be16 *)ul_header, offset;

	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)ip6hdr));
	ul_header->csum_start_offset = offset;
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;
	ul_header->udp_ip4_ind = 0;

	/* Changing remaining fields to network order
	 * (the second 16-bit word of the header, written above as
	 * host-order bitfields)
	 */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	skb->ip_summed = CHECKSUM_NONE;

	/* NOTE(review): the transport checksum field is complemented here,
	 * presumably so the hardware folds it into the final checksum -
	 * confirm against the MAP UL offload specification.
	 */
	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
268 #endif
269 
270 /* Adds MAP header to front of skb->data
271  * Padding is calculated and set appropriately in MAP header. Mux ID is
272  * initialized to 0.
273  */
274 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
275 						  int hdrlen, int pad)
276 {
277 	struct rmnet_map_header *map_header;
278 	u32 padding, map_datalen;
279 	u8 *padbytes;
280 
281 	map_datalen = skb->len - hdrlen;
282 	map_header = (struct rmnet_map_header *)
283 			skb_push(skb, sizeof(struct rmnet_map_header));
284 	memset(map_header, 0, sizeof(struct rmnet_map_header));
285 
286 	if (pad == RMNET_MAP_NO_PAD_BYTES) {
287 		map_header->pkt_len = htons(map_datalen);
288 		return map_header;
289 	}
290 
291 	padding = ALIGN(map_datalen, 4) - map_datalen;
292 
293 	if (padding == 0)
294 		goto done;
295 
296 	if (skb_tailroom(skb) < padding)
297 		return NULL;
298 
299 	padbytes = (u8 *)skb_put(skb, padding);
300 	memset(padbytes, 0, padding);
301 
302 done:
303 	map_header->pkt_len = htons(map_datalen + padding);
304 	map_header->pad_len = padding & 0x3F;
305 
306 	return map_header;
307 }
308 
309 /* Deaggregates a single packet
310  * A whole new buffer is allocated for each portion of an aggregated frame.
311  * Caller should keep calling deaggregate() on the source skb until 0 is
312  * returned, indicating that there are no more packets to deaggregate. Caller
313  * is responsible for freeing the original skb.
314  */
315 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
316 				      struct rmnet_port *port)
317 {
318 	struct rmnet_map_header *maph;
319 	struct sk_buff *skbn;
320 	u32 packet_len;
321 
322 	if (skb->len == 0)
323 		return NULL;
324 
325 	maph = (struct rmnet_map_header *)skb->data;
326 	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
327 
328 	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
329 		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
330 
331 	if (((int)skb->len - (int)packet_len) < 0)
332 		return NULL;
333 
334 	/* Some hardware can send us empty frames. Catch them */
335 	if (ntohs(maph->pkt_len) == 0)
336 		return NULL;
337 
338 	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
339 	if (!skbn)
340 		return NULL;
341 
342 	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
343 	skb_put(skbn, packet_len);
344 	memcpy(skbn->data, skb->data, packet_len);
345 	skb_pull(skb, packet_len);
346 
347 	return skbn;
348 }
349 
350 /* Validates packet checksums. Function takes a pointer to
351  * the beginning of a buffer which contains the IP payload +
352  * padding + checksum trailer.
353  * Only IPv4 and IPv6 are supported along with TCP & UDP.
354  * Fragmented or tunneled packets are not supported.
355  */
356 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
357 {
358 	struct rmnet_priv *priv = netdev_priv(skb->dev);
359 	struct rmnet_map_dl_csum_trailer *csum_trailer;
360 
361 	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
362 		priv->stats.csum_sw++;
363 		return -EOPNOTSUPP;
364 	}
365 
366 	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
367 
368 	if (!csum_trailer->valid) {
369 		priv->stats.csum_valid_unset++;
370 		return -EINVAL;
371 	}
372 
373 	if (skb->protocol == htons(ETH_P_IP)) {
374 		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
375 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
376 #if IS_ENABLED(CONFIG_IPV6)
377 		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
378 #else
379 		priv->stats.csum_err_invalid_ip_version++;
380 		return -EPROTONOSUPPORT;
381 #endif
382 	} else {
383 		priv->stats.csum_err_invalid_ip_version++;
384 		return -EPROTONOSUPPORT;
385 	}
386 
387 	return 0;
388 }
389 
390 /* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
391  * packets that are supported for UL checksum offload.
392  */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	/* The UL checksum header always precedes the packet, even when
	 * offload is not used (it is zeroed in that case below).
	 */
	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	/* Device cannot offload checksums: fall back to software */
	if (unlikely(!(orig_dev->features &
		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The IP header immediately follows the MAP csum header */
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_csum_header);

		if (skb->protocol == htons(ETH_P_IP)) {
			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
			return;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
			return;
#else
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
#endif
		} else {
			priv->stats.csum_err_invalid_ip_version++;
		}
	}

sw_csum:
	/* Offload not applicable: emit a header with csum_enabled
	 * cleared and count the packet as software-checksummed.
	 */
	ul_header->csum_start_offset = 0;
	ul_header->csum_insert_offset = 0;
	ul_header->csum_enabled = 0;
	ul_header->udp_ip4_ind = 0;

	priv->stats.csum_sw++;
}
435