// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/gro.h>

#include "ip6_offload.h"

/* All GRO functions are always builtin, except UDP over ipv6, which lays in
 * ipv6 module, as it depends on UDPv6 lookup function, so we need special care
 * when ipv6 is built as a module
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif

#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_L4(cb, f2, f1, head, skb);	\
})

static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}

static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto, nexthdr;
	struct frag_hdr *fptr;
	unsigned int payload_len;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;
	bool gso_partial;

	skb_reset_network_header(skb);
	nexthdr = ipv6_has_hopopt_jumbo(skb);
	if (nexthdr) {
		const int hophdr_len = sizeof(struct hop_jumbo_hdr);
		int err;

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);

		/* remove the HBH header.
		 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
		 */
		memmove(skb_mac_header(skb) + hophdr_len,
			skb_mac_header(skb),
			ETH_HLEN + sizeof(struct ipv6hdr));
		skb->data += hophdr_len;
		skb->len -= hophdr_len;
		skb->network_header += hophdr_len;
		skb->mac_header += hophdr_len;
		ipv6h = (struct ipv6hdr *)skb->data;
		ipv6h->nexthdr = nexthdr;
	}
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
		udpfrag = proto == IPPROTO_UDP && encap &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
		if (!segs)
			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		if (gso_partial && skb_is_gso(skb))
			payload_len = skb_shinfo(skb)->gso_size +
				      SKB_GSO_CB(skb)->data_offset +
				      skb->head - (unsigned char *)(ipv6h + 1);
		else
			payload_len = skb->len - nhoff - sizeof(*ipv6h);
		ipv6h->payload_len = htons(payload_len);
		skb->network_header = (u8 *)ipv6h - skb->head;
		skb_reset_mac_len(skb);

		if (udpfrag) {
			int err = ip6_find_1stfragopt(skb, &prevhdr);

			if (err < 0) {
				kfree_skb_list(segs);
				return ERR_PTR(err);
			}
			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
			fptr->frag_off = htons(offset);
			if (skb->next)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}

/* Return the total length of all the extension hdrs, following the same
 * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;
			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}
		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}

INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
							 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		skb_gro_frag0_invalidate(skb);
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	list_for_each_entry(p, head, list) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
		    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
		    iph->nexthdr != iph2->nexthdr) {
not_same_flow:
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (unlikely(nlen > sizeof(struct ipv6hdr))) {
			if (memcmp(iph + 1, iph2 + 1,
				   nlen - sizeof(struct ipv6hdr)))
				goto not_same_flow;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!((first_word & htonl(0x0FF00000)) |
			(__force __be32)(iph->hop_limit ^ iph2->hop_limit));
		NAPI_GRO_CB(p)->flush |= flush;

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = 0;
	}

	NAPI_GRO_CB(skb)->is_atomic = true;
	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
					  ops->callbacks.gro_receive, head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
					      struct sk_buff *skb)
{
	/* Common GRO receive for SIT and IP6IP6 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return ipv6_gro_receive(head, skb);
}

static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
					  struct sk_buff *skb)
{
	/* GRO receive for IPv4 encapsulated in IPv6 (ip4ip6) */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}

INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph;
	int err = -ENOSYS;
	u32 payload_len;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
		skb_set_inner_network_header(skb, nhoff);
	}

	payload_len = skb->len - nhoff - sizeof(*iph);
	if (unlikely(payload_len > IPV6_MAXPLEN)) {
		struct hop_jumbo_hdr *hop_jumbo;
		int hoplen = sizeof(*hop_jumbo);

		/* Move network header left */
		memmove(skb_mac_header(skb) - hoplen, skb_mac_header(skb),
			skb->transport_header - skb->mac_header);
		skb->data -= hoplen;
		skb->len += hoplen;
		skb->mac_header -= hoplen;
		skb->network_header -= hoplen;
		iph = (struct ipv6hdr *)(skb->data + nhoff);
		hop_jumbo = (struct hop_jumbo_hdr *)(iph + 1);

		/* Build hop-by-hop options */
		hop_jumbo->nexthdr = iph->nexthdr;
		hop_jumbo->hdrlen = 0;
		hop_jumbo->tlv_type = IPV6_TLV_JUMBO;
		hop_jumbo->tlv_len = 4;
		hop_jumbo->jumbo_payload_len = htonl(payload_len + hoplen);

		iph->nexthdr = NEXTHDR_HOP;
		iph->payload_len = 0;
	} else {
		iph = (struct ipv6hdr *)(skb->data + nhoff);
		iph->payload_len = htons(payload_len);
	}

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out;

	err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
			       udp6_gro_complete, skb, nhoff);

out:
	return err;
}

static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return inet_gro_complete(skb, nhoff);
}

static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};

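/* Overview of the tunnel offloads defined below and registered in
 * ipv6_offload_init():
 *  - sit_offload:    IPv6 in IPv4 (SIT), outer gso_type SKB_GSO_IPXIP4,
 *                    inner payload handled by the IPv6 helpers.
 *  - ip4ip6_offload: IPv4 in IPv6, outer gso_type SKB_GSO_IPXIP6,
 *                    inner payload handled by the inet (IPv4) helpers.
 *  - ip6ip6_offload: IPv6 in IPv6, outer gso_type SKB_GSO_IPXIP6,
 *                    inner payload handled by the IPv6 helpers.
 */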
static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}

static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_segment	= sit_gso_segment,
		.gro_receive	= sit_ip6ip6_gro_receive,
		.gro_complete	= sit_gro_complete,
	},
};

static const struct net_offload ip4ip6_offload = {
	.callbacks = {
		.gso_segment	= ip4ip6_gso_segment,
		.gro_receive	= ip4ip6_gro_receive,
		.gro_complete	= ip4ip6_gro_complete,
	},
};

static const struct net_offload ip6ip6_offload = {
	.callbacks = {
		.gso_segment	= ip6ip6_gso_segment,
		.gro_receive	= sit_ip6ip6_gro_receive,
		.gro_complete	= ip6ip6_gro_complete,
	},
};

static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP);

	return 0;
}

fs_initcall(ipv6_offload_init);
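/* Illustrative sketch, not part of the original file: a protocol module can
 * plug into the same IPv6 GRO/GSO dispatch by registering a struct
 * net_offload for its protocol number, exactly as the tunnel offloads above
 * do.  The "foo6" names and IPPROTO_FOO are placeholders, not real symbols.
 *
 *	static const struct net_offload foo6_offload = {
 *		.callbacks = {
 *			.gso_segment	= foo6_gso_segment,
 *			.gro_receive	= foo6_gro_receive,
 *			.gro_complete	= foo6_gro_complete,
 *		},
 *	};
 *
 *	static int __init foo6_offload_init(void)
 *	{
 *		return inet6_add_offload(&foo6_offload, IPPROTO_FOO);
 *	}
 */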