/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data MAP protocol
 *
 */

#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"

#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
                                         const void *txporthdr)
{
        __sum16 *check = NULL;

        switch (protocol) {
        case IPPROTO_TCP:
                check = &(((struct tcphdr *)txporthdr)->check);
                break;

        case IPPROTO_UDP:
                check = &(((struct udphdr *)txporthdr)->check);
                break;

        default:
                check = NULL;
                break;
        }

        return check;
}

static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
                               struct rmnet_map_dl_csum_trailer *csum_trailer)
{
        __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
        u16 csum_value, csum_value_final;
        struct iphdr *ip4h;
        void *txporthdr;
        __be16 addend;

        ip4h = (struct iphdr *)(skb->data);
        if ((ntohs(ip4h->frag_off) & IP_MF) ||
            ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
                return -EOPNOTSUPP;

        txporthdr = skb->data + ip4h->ihl * 4;

        csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);

        if (!csum_field)
                return -EPROTONOSUPPORT;

        /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
        if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
                return 0;

        csum_value = ~ntohs(csum_trailer->csum_value);
        hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
        ip_payload_csum = csum16_sub((__force __sum16)csum_value,
                                     (__force __be16)hdr_csum);

        pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
                                         ntohs(ip4h->tot_len) - ip4h->ihl * 4,
                                         ip4h->protocol, 0);
        addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
        pseudo_csum = csum16_add(ip_payload_csum, addend);

        addend = (__force __be16)ntohs((__force __be16)*csum_field);
        csum_temp = ~csum16_sub(pseudo_csum, addend);
        csum_value_final = (__force u16)csum_temp;

        if (unlikely(csum_value_final == 0)) {
                switch (ip4h->protocol) {
                case IPPROTO_UDP:
                        /* RFC 768 - DL4 1's complement rule for UDP csum 0 */
                        csum_value_final = ~csum_value_final;
                        break;

                case IPPROTO_TCP:
                        /* DL4 Non-RFC compliant TCP checksum found */
                        if (*csum_field == (__force __sum16)0xFFFF)
                                csum_value_final = ~csum_value_final;
                        break;
                }
        }

        if (csum_value_final == ntohs((__force __be16)*csum_field))
                return 0;
        else
                return -EINVAL;
}

#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
                               struct rmnet_map_dl_csum_trailer *csum_trailer)
{
        __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
        u16 csum_value, csum_value_final;
        __be16 ip6_hdr_csum, addend;
        struct ipv6hdr *ip6h;
        void *txporthdr;
        u32 length;

        ip6h = (struct ipv6hdr *)(skb->data);

        txporthdr = skb->data + sizeof(struct ipv6hdr);
        csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);

        if (!csum_field)
                return -EPROTONOSUPPORT;

        csum_value = ~ntohs(csum_trailer->csum_value);
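        /* The trailer checksum covers the IPv6 header as well as the
         * transport payload, so remove the header's contribution here and
         * work with a payload-only sum from this point on.
         */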
        ip6_hdr_csum = (__force __be16)
                        ~ntohs((__force __be16)ip_compute_csum(ip6h,
                               (int)(txporthdr - (void *)(skb->data))));
        ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
                                      ip6_hdr_csum);

        length = (ip6h->nexthdr == IPPROTO_UDP) ?
                 ntohs(((struct udphdr *)txporthdr)->len) :
                 ntohs(ip6h->payload_len);
        pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                             length, ip6h->nexthdr, 0));
        addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
        pseudo_csum = csum16_add(ip6_payload_csum, addend);

        addend = (__force __be16)ntohs((__force __be16)*csum_field);
        csum_temp = ~csum16_sub(pseudo_csum, addend);
        csum_value_final = (__force u16)csum_temp;

        if (unlikely(csum_value_final == 0)) {
                switch (ip6h->nexthdr) {
                case IPPROTO_UDP:
                        /* RFC 2460 section 8.1
                         * DL6 One's complement rule for UDP checksum 0
                         */
                        csum_value_final = ~csum_value_final;
                        break;

                case IPPROTO_TCP:
                        /* DL6 Non-RFC compliant TCP checksum found */
                        if (*csum_field == (__force __sum16)0xFFFF)
                                csum_value_final = ~csum_value_final;
                        break;
                }
        }

        if (csum_value_final == ntohs((__force __be16)*csum_field))
                return 0;
        else
                return -EINVAL;
}
#endif

static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
        struct iphdr *ip4h = (struct iphdr *)iphdr;
        void *txphdr;
        u16 *csum;

        txphdr = iphdr + ip4h->ihl * 4;

        if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
                csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
                *csum = ~(*csum);
        }
}

static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
                              struct rmnet_map_ul_csum_header *ul_header,
                              struct sk_buff *skb)
{
        struct iphdr *ip4h = (struct iphdr *)iphdr;
        __be16 *hdr = (__be16 *)ul_header, offset;

        offset = htons((__force u16)(skb_transport_header(skb) -
                                     (unsigned char *)iphdr));
        ul_header->csum_start_offset = offset;
        ul_header->csum_insert_offset = skb->csum_offset;
        ul_header->csum_enabled = 1;
        if (ip4h->protocol == IPPROTO_UDP)
                ul_header->udp_ip4_ind = 1;
        else
                ul_header->udp_ip4_ind = 0;

        /* Changing remaining fields to network order */
        hdr++;
        *hdr = htons((__force u16)*hdr);

        skb->ip_summed = CHECKSUM_NONE;

        rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}

#if IS_ENABLED(CONFIG_IPV6)
static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
        struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
        void *txphdr;
        u16 *csum;

        txphdr = ip6hdr + sizeof(struct ipv6hdr);

        if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
                csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
                *csum = ~(*csum);
        }
}

static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
                              struct rmnet_map_ul_csum_header *ul_header,
                              struct sk_buff *skb)
{
        __be16 *hdr = (__be16 *)ul_header, offset;

        offset = htons((__force u16)(skb_transport_header(skb) -
                                     (unsigned char *)ip6hdr));
        ul_header->csum_start_offset = offset;
        ul_header->csum_insert_offset = skb->csum_offset;
        ul_header->csum_enabled = 1;
        ul_header->udp_ip4_ind = 0;

        /* Changing remaining fields to network order */
        hdr++;
        *hdr = htons((__force u16)*hdr);
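
        /* The offsets recorded above tell the MAP checksum offload engine
         * where to compute and insert the transport checksum, so no
         * further CHECKSUM_PARTIAL handling is needed from the stack.
         */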
        skb->ip_summed = CHECKSUM_NONE;

        rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
#endif

/* Adds MAP header to front of skb->data
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 */
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
                                                  int hdrlen, int pad)
{
        struct rmnet_map_header *map_header;
        u32 padding, map_datalen;
        u8 *padbytes;

        map_datalen = skb->len - hdrlen;
        map_header = (struct rmnet_map_header *)
                        skb_push(skb, sizeof(struct rmnet_map_header));
        memset(map_header, 0, sizeof(struct rmnet_map_header));

        if (pad == RMNET_MAP_NO_PAD_BYTES) {
                map_header->pkt_len = htons(map_datalen);
                return map_header;
        }

        padding = ALIGN(map_datalen, 4) - map_datalen;

        if (padding == 0)
                goto done;

        if (skb_tailroom(skb) < padding)
                return NULL;

        padbytes = (u8 *)skb_put(skb, padding);
        memset(padbytes, 0, padding);

done:
        map_header->pkt_len = htons(map_datalen + padding);
        map_header->pad_len = padding & 0x3F;

        return map_header;
}

/* Deaggregates a single packet
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until NULL is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
                                      struct rmnet_port *port)
{
        struct rmnet_map_header *maph;
        struct sk_buff *skbn;
        u32 packet_len;

        if (skb->len == 0)
                return NULL;
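
        /* Each packet in an aggregate frame is a MAP header followed by
         * pkt_len bytes of payload (the IP packet plus any pad bytes) and,
         * when downlink checksum offload is enabled, a per-packet checksum
         * trailer.
         */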
        maph = (struct rmnet_map_header *)skb->data;
        packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);

        if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)
                packet_len += sizeof(struct rmnet_map_dl_csum_trailer);

        if (((int)skb->len - (int)packet_len) < 0)
                return NULL;

        /* Some hardware can send us empty frames. Catch them */
        if (ntohs(maph->pkt_len) == 0)
                return NULL;

        skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
        if (!skbn)
                return NULL;

        skbn->dev = skb->dev;
        skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
        skb_put(skbn, packet_len);
        memcpy(skbn->data, skb->data, packet_len);
        skb_pull(skb, packet_len);

        return skbn;
}

/* Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the IP packet +
 * padding + checksum trailer.
 * Only IPv4 and IPv6 are supported along with TCP & UDP.
 * Fragmented or tunneled packets are not supported.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
        struct rmnet_map_dl_csum_trailer *csum_trailer;

        if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
                return -EOPNOTSUPP;

        csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);

        if (!csum_trailer->valid)
                return -EINVAL;

        if (skb->protocol == htons(ETH_P_IP))
                return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
        else if (skb->protocol == htons(ETH_P_IPV6))
#if IS_ENABLED(CONFIG_IPV6)
                return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
#else
                return -EPROTONOSUPPORT;
#endif

        return 0;
}

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
                                      struct net_device *orig_dev)
{
        struct rmnet_map_ul_csum_header *ul_header;
        void *iphdr;

        ul_header = (struct rmnet_map_ul_csum_header *)
                    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

        if (unlikely(!(orig_dev->features &
                     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
                goto sw_csum;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                iphdr = (char *)ul_header +
                        sizeof(struct rmnet_map_ul_csum_header);

                if (skb->protocol == htons(ETH_P_IP)) {
                        rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
                        return;
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
                        rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
                        return;
#else
                        goto sw_csum;
#endif
                }
        }

sw_csum:
        ul_header->csum_start_offset = 0;
        ul_header->csum_insert_offset = 0;
        ul_header->csum_enabled = 0;
        ul_header->udp_ip4_ind = 0;
}