// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
 *
 * RMNET Data MAP protocol
 */

#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/bitfield.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"

#define RMNET_MAP_DEAGGR_SPACING	64
#define RMNET_MAP_DEAGGR_HEADROOM	(RMNET_MAP_DEAGGR_SPACING / 2)

static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
					 const void *txporthdr)
{
	if (protocol == IPPROTO_TCP)
		return &((struct tcphdr *)txporthdr)->check;

	if (protocol == IPPROTO_UDP)
		return &((struct udphdr *)txporthdr)->check;

	return NULL;
}

static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	struct iphdr *ip4h = (struct iphdr *)skb->data;
	void *txporthdr = skb->data + ip4h->ihl * 4;
	__sum16 *csum_field, pseudo_csum;
	__sum16 ip_payload_csum;

	/* Computing the checksum over just the IPv4 header--including its
	 * checksum field--should yield 0.  If it doesn't, the IP header
	 * is bad, so return an error and let the IP layer drop it.
	 */
	if (ip_fast_csum(ip4h, ip4h->ihl)) {
		priv->stats.csum_ip4_header_bad++;
		return -EINVAL;
	}

	/* We don't support checksum offload on IPv4 fragments */
	if (ip_is_fragment(ip4h)) {
		priv->stats.csum_fragmented_pkt++;
		return -EOPNOTSUPP;
	}

	/* Checksum offload is only supported for UDP and TCP protocols */
	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* RFC 768: UDP checksum is optional for IPv4, and is 0 if unused */
	if (!*csum_field && ip4h->protocol == IPPROTO_UDP) {
		priv->stats.csum_skipped++;
		return 0;
	}

	/* The checksum value in the trailer is computed over the entire
	 * IP packet, including the IP header and payload.  To derive the
	 * transport checksum from this, we first subtract the contribution
	 * of the IP header from the trailer checksum.  We then add the
	 * checksum computed over the pseudo header.
	 *
	 * We verified above that the IP header contributes zero to the
	 * trailer checksum.  Therefore the checksum in the trailer is
	 * just the checksum computed over the IP payload.
	 *
	 * If the IP payload arrives intact, adding the pseudo header
	 * checksum to the IP payload checksum will yield 0xffff (negative
	 * zero).  This means the trailer checksum and the pseudo checksum
	 * are additive inverses of each other.  Put another way, the
	 * message passes the checksum test if the trailer checksum value
	 * is the negated pseudo header checksum.
	 *
	 * Knowing this, we don't even need to examine the transport
	 * header checksum value; it is already accounted for in the
	 * checksum value found in the trailer.
	 */
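	/* Worked example (illustrative values added in this rewrite, not
	 * part of the original source): if the pseudo header checksum
	 * computed below folds to 0x1234, an intact packet's trailer must
	 * carry ~0x1234 = 0xedcb, and the comparison that follows succeeds.
	 */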
	ip_payload_csum = csum_trailer->csum_value;

	pseudo_csum = csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					ip4h->protocol, 0);

	/* The cast is required to ensure only the low 16 bits are examined */
	if (ip_payload_csum != (__sum16)~pseudo_csum) {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}

	priv->stats.csum_ok++;
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
	void *txporthdr = skb->data + sizeof(*ip6h);
	__sum16 *csum_field, pseudo_csum;
	__sum16 ip6_payload_csum;
	__be16 ip_header_csum;

	/* Checksum offload is only supported for UDP and TCP protocols;
	 * the packet cannot include any IPv6 extension headers
	 */
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* The checksum value in the trailer is computed over the entire
	 * IP packet, including the IP header and payload.  To derive the
	 * transport checksum from this, we first subtract the contribution
	 * of the IP header from the trailer checksum.  We then add the
	 * checksum computed over the pseudo header.
	 */
	ip_header_csum = (__force __be16)ip_fast_csum(ip6h, sizeof(*ip6h) / 4);
	ip6_payload_csum = csum16_sub(csum_trailer->csum_value, ip_header_csum);

	pseudo_csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ntohs(ip6h->payload_len),
				      ip6h->nexthdr, 0);

	/* It's sufficient to compare the IP payload checksum with the
	 * negated pseudo checksum to determine whether the packet
	 * checksum was good.  (See further explanation in comments
	 * in rmnet_map_ipv4_dl_csum_trailer()).
	 *
	 * The cast is required to ensure only the low 16 bits are
	 * examined.
	 */
	if (ip6_payload_csum != (__sum16)~pseudo_csum) {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}

	priv->stats.csum_ok++;
	return 0;
}
#else
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	return 0;
}
#endif

static void rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr *ip4h)
{
	void *txphdr;
	u16 *csum;

	txphdr = (void *)ip4h + ip4h->ihl * 4;

	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv4_ul_csum_header(struct iphdr *iphdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	u16 val;

	val = MAP_CSUM_UL_ENABLED_FLAG;
	if (iphdr->protocol == IPPROTO_UDP)
		val |= MAP_CSUM_UL_UDP_FLAG;
	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;

	ul_header->csum_start_offset = htons(skb_network_header_len(skb));
	ul_header->csum_info = htons(val);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}

#if IS_ENABLED(CONFIG_IPV6)
static void
rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr *ip6h)
{
	void *txphdr;
	u16 *csum;

	txphdr = ip6h + 1;

	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv6_ul_csum_header(struct ipv6hdr *ipv6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	u16 val;

	val = MAP_CSUM_UL_ENABLED_FLAG;
	if (ipv6hdr->nexthdr == IPPROTO_UDP)
		val |= MAP_CSUM_UL_UDP_FLAG;
	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;

	ul_header->csum_start_offset = htons(skb_network_header_len(skb));
	ul_header->csum_info = htons(val);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv6_txporthdr_csum_field(ipv6hdr);
}
#else
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
}
#endif

static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
						struct rmnet_port *port,
						struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_v5_csum_header *ul_header;

	ul_header = skb_push(skb, sizeof(*ul_header));
	memset(ul_header, 0, sizeof(*ul_header));
	ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
						MAPV5_HDRINFO_HDR_TYPE_FMASK);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		void *iph = ip_hdr(skb);
		__sum16 *check;
		void *trans;
		u8 proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			u16 ip_len = ((struct iphdr *)iph)->ihl * 4;

			proto = ((struct iphdr *)iph)->protocol;
			trans = iph + ip_len;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			u16 ip_len = sizeof(struct ipv6hdr);

			proto = ((struct ipv6hdr *)iph)->nexthdr;
			trans = iph + ip_len;
		} else {
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
		}

		check = rmnet_map_get_csum_field(proto, trans);
		if (check) {
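			/* Rationale (inferred in this rewrite, not stated in
			 * the original source): the checksum is handed off to
			 * the hardware below, so ip_summed is cleared to
			 * CHECKSUM_NONE to keep later layers from trying to
			 * finish the partial checksum in software.
			 */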
			skb->ip_summed = CHECKSUM_NONE;
			/* Ask for checksum offloading */
			ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
			priv->stats.csum_hw++;
			return;
		}
	}

sw_csum:
	priv->stats.csum_sw++;
}

/* Adds MAP header to front of skb->data
 * Padding is calculated and set appropriately in MAP header.  Mux ID is
 * initialized to 0.
 */
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen,
						  struct rmnet_port *port,
						  int pad)
{
	struct rmnet_map_header *map_header;
	u32 padding, map_datalen;
	u8 *padbytes;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header *)
			skb_push(skb, sizeof(struct rmnet_map_header));
	memset(map_header, 0, sizeof(struct rmnet_map_header));

	/* Set next_hdr bit for csum offload packets */
	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
		map_header->flags |= MAP_NEXT_HEADER_FLAG;

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

	BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3);
	padding = ALIGN(map_datalen, 4) - map_datalen;

	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return NULL;

	padbytes = (u8 *)skb_put(skb, padding);
	memset(padbytes, 0, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	/* This is a data packet, so the CMD bit is 0 */
	map_header->flags = padding & MAP_PAD_LEN_MASK;

	return map_header;
}

/* Deaggregates a single packet
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until NULL is
 * returned, indicating that there are no more packets to deaggregate (see
 * the usage sketch after this function).  Caller is responsible for freeing
 * the original skb.
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct rmnet_map_v5_csum_header *next_hdr = NULL;
	struct rmnet_map_header *maph;
	void *data = skb->data;
	struct sk_buff *skbn;
	u8 nexthdr_type;
	u32 packet_len;

	if (skb->len == 0)
		return NULL;

	maph = (struct rmnet_map_header *)skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(*maph);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
	} else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
		if (!(maph->flags & MAP_CMD_FLAG)) {
			packet_len += sizeof(*next_hdr);
			if (maph->flags & MAP_NEXT_HEADER_FLAG)
				next_hdr = data + sizeof(*maph);
			else
				/* MAPv5 data pkt without csum hdr is invalid */
				return NULL;
		}
	}

	if (((int)skb->len - (int)packet_len) < 0)
		return NULL;

	/* Some hardware can send us empty frames.  Catch them */
	if (!maph->pkt_len)
		return NULL;

	if (next_hdr) {
		nexthdr_type = u8_get_bits(next_hdr->header_info,
					   MAPV5_HDRINFO_HDR_TYPE_FMASK);
		if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
			return NULL;
	}

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	return skbn;
}
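
/* Usage sketch (added for illustration; the loop mirrors the caller in
 * rmnet_handlers.c but is not part of this file): the ingress path keeps
 * calling rmnet_map_deaggregate() until it returns NULL, then frees the
 * original aggregated skb:
 *
 *	while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
 *		__rmnet_map_ingress_handler(skbn, port);
 *	consume_skb(skb);
 */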
/* Validates packet checksums.  Function takes a pointer to
 * the beginning of a buffer which contains the IP payload +
 * padding + checksum trailer.
 * Only IPv4 and IPv6 are supported along with TCP & UDP.
 * Fragmented or tunneled packets are not supported.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_dl_csum_trailer *csum_trailer;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		priv->stats.csum_sw++;
		return -EOPNOTSUPP;
	}

	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);

	if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) {
		priv->stats.csum_valid_unset++;
		return -EINVAL;
	}

	if (skb->protocol == htons(ETH_P_IP))
		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);

	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);

	priv->stats.csum_err_invalid_ip_version++;

	return -EPROTONOSUPPORT;
}

static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
						struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	if (unlikely(!(orig_dev->features &
		       (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto sw_csum;

	iphdr = (char *)ul_header +
		sizeof(struct rmnet_map_ul_csum_header);

	if (skb->protocol == htons(ETH_P_IP)) {
		rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
		priv->stats.csum_hw++;
		return;
	}

	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
		rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
		priv->stats.csum_hw++;
		return;
	}

	priv->stats.csum_err_invalid_ip_version++;

sw_csum:
	memset(ul_header, 0, sizeof(*ul_header));

	priv->stats.csum_sw++;
}
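
/* Worked example (added for illustration; values assume a TCP segment inside
 * an IPv4 header with no options): skb_network_header_len() is 20, so
 * csum_start_offset = htons(20); skb->csum_offset is
 * offsetof(struct tcphdr, check) = 16, so
 * csum_info = htons(MAP_CSUM_UL_ENABLED_FLAG | 16), and
 * rmnet_map_complement_ipv4_txporthdr_csum_field() flips the bits of the
 * checksum field that the stack pre-loaded with the pseudo header checksum.
 */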

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct net_device *orig_dev,
				      int csum_type)
{
	switch (csum_type) {
	case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
		break;
	case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
		rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
		break;
	default:
		break;
	}
}

/* Process a MAPv5 packet header */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_v5_csum_header *next_hdr;
	u8 nexthdr_type;

	next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
			sizeof(struct rmnet_map_header));

	nexthdr_type = u8_get_bits(next_hdr->header_info,
				   MAPV5_HDRINFO_HDR_TYPE_FMASK);

	if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
		return -EINVAL;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		priv->stats.csum_sw++;
	} else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
		priv->stats.csum_ok++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		priv->stats.csum_valid_unset++;
	}

	/* Pull csum v5 header */
	skb_pull(skb, sizeof(*next_hdr));

	return 0;
}