// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/gro.h>

#include "ip6_offload.h"

/* All GRO functions are always builtin, except UDP over ipv6, which lives in
 * the ipv6 module, as it depends on the UDPv6 lookup function, so we need
 * special care when ipv6 is built as a module.
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif

#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb)    \
({                                                              \
        unlikely(gro_recursion_inc_test(skb)) ?                 \
                NAPI_GRO_CB(skb)->flush |= 1, NULL :            \
                INDIRECT_CALL_L4(cb, f2, f1, head, skb);        \
})
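
/* Illustrative note (not part of the original file): when IPv6 is built in,
 * indirect_call_gro_receive_l4() expands roughly to
 *
 *      unlikely(gro_recursion_inc_test(skb)) ?
 *              (NAPI_GRO_CB(skb)->flush |= 1, NULL) :
 *              (cb == tcp6_gro_receive ? tcp6_gro_receive(head, skb) :
 *               cb == udp6_gro_receive ? udp6_gro_receive(head, skb) :
 *                                        cb(head, skb));
 *
 * i.e. the common TCP/UDP receive handlers are invoked as direct calls
 * (cheap on retpoline kernels) and only unknown handlers go through the
 * indirect function pointer.  With CONFIG_IPV6=m this built-in file cannot
 * reference udp6_gro_receive, so INDIRECT_CALL_L4 drops it and only
 * tcp6_gro_receive gets the direct-call treatment.  gro_recursion_inc_test()
 * bounds the encapsulation recursion depth; on overflow the packet is
 * flushed instead of being held for GRO.
 */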

/* Advance skb->data past the chain of IPv6 extension headers (hop-by-hop
 * plus any protocol whose offload is flagged INET6_PROTO_GSO_EXTHDR) so it
 * ends up at the upper-layer header; returns the upper-layer protocol.
 */
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
        const struct net_offload *ops = NULL;

        for (;;) {
                struct ipv6_opt_hdr *opth;
                int len;

                if (proto != NEXTHDR_HOP) {
                        ops = rcu_dereference(inet6_offloads[proto]);

                        if (unlikely(!ops))
                                break;

                        if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
                                break;
                }

                if (unlikely(!pskb_may_pull(skb, 8)))
                        break;

                opth = (void *)skb->data;
                len = ipv6_optlen(opth);

                if (unlikely(!pskb_may_pull(skb, len)))
                        break;

                opth = (void *)skb->data;
                proto = opth->nexthdr;
                __skb_pull(skb, len);
        }

        return proto;
}

static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct ipv6hdr *ipv6h;
        const struct net_offload *ops;
        int proto;
        struct frag_hdr *fptr;
        unsigned int payload_len;
        u8 *prevhdr;
        int offset = 0;
        bool encap, udpfrag;
        int nhoff;
        bool gso_partial;

        skb_reset_network_header(skb);
        nhoff = skb_network_header(skb) - skb_mac_header(skb);
        if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
                goto out;

        encap = SKB_GSO_CB(skb)->encap_level > 0;
        if (encap)
                features &= skb->dev->hw_enc_features;
        SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

        ipv6h = ipv6_hdr(skb);
        __skb_pull(skb, sizeof(*ipv6h));
        segs = ERR_PTR(-EPROTONOSUPPORT);

        proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

        if (skb->encapsulation &&
            skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
                udpfrag = proto == IPPROTO_UDP && encap &&
                          (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
        else
                udpfrag = proto == IPPROTO_UDP && !skb->encapsulation &&
                          (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

        ops = rcu_dereference(inet6_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment)) {
                skb_reset_transport_header(skb);
                segs = ops->callbacks.gso_segment(skb, features);
        }

        if (IS_ERR_OR_NULL(segs))
                goto out;

        gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

        for (skb = segs; skb; skb = skb->next) {
                ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
                if (gso_partial && skb_is_gso(skb))
                        payload_len = skb_shinfo(skb)->gso_size +
                                      SKB_GSO_CB(skb)->data_offset +
                                      skb->head - (unsigned char *)(ipv6h + 1);
                else
                        payload_len = skb->len - nhoff - sizeof(*ipv6h);
                ipv6h->payload_len = htons(payload_len);
                skb->network_header = (u8 *)ipv6h - skb->head;
                skb_reset_mac_len(skb);

                if (udpfrag) {
                        int err = ip6_find_1stfragopt(skb, &prevhdr);
                        if (err < 0) {
                                kfree_skb_list(segs);
                                return ERR_PTR(err);
                        }
                        fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
                        fptr->frag_off = htons(offset);
                        if (skb->next)
                                fptr->frag_off |= htons(IP6_MF);
                        offset += (ntohs(ipv6h->payload_len) -
                                   sizeof(struct frag_hdr));
                }
                if (encap)
                        skb_reset_inner_headers(skb);
        }

out:
        return segs;
}
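
/* Worked example (illustrative, not part of the original file): for a plain
 * TCP-over-Ethernet super-packet with no extension headers, nhoff is 14 and
 * each resulting 1514-byte segment above gets
 *
 *      payload_len = skb->len - nhoff - sizeof(*ipv6h)
 *                  = 1514 - 14 - 40 = 1460
 *
 * i.e. the 20-byte TCP header plus 1440 bytes of data, which is exactly what
 * the IPv6 payload length field must cover.  In the SKB_GSO_PARTIAL case the
 * device performs the final split, so payload_len is instead pre-set to the
 * size of each sub-segment the hardware will emit (gso_size plus the headers
 * that follow the fixed IPv6 header).
 */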

/* Return the total length of all the extension hdrs, following the same
 * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
                            const struct net_offload **opps)
{
        struct ipv6_opt_hdr *opth = (void *)iph;
        int len = 0, proto, optlen = sizeof(*iph);

        proto = iph->nexthdr;
        for (;;) {
                if (proto != NEXTHDR_HOP) {
                        *opps = rcu_dereference(inet6_offloads[proto]);
                        if (unlikely(!(*opps)))
                                break;
                        if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
                                break;
                }
                opth = (void *)opth + optlen;
                optlen = ipv6_optlen(opth);
                len += optlen;
                proto = opth->nexthdr;
        }
        return len;
}

INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
                                                          struct sk_buff *skb)
{
        const struct net_offload *ops;
        struct sk_buff *pp = NULL;
        struct sk_buff *p;
        struct ipv6hdr *iph;
        unsigned int nlen;
        unsigned int hlen;
        unsigned int off;
        u16 flush = 1;
        int proto;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*iph);
        iph = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                iph = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!iph))
                        goto out;
        }

        skb_set_network_header(skb, off);
        skb_gro_pull(skb, sizeof(*iph));
        skb_set_transport_header(skb, skb_gro_offset(skb));

        flush += ntohs(iph->payload_len) != skb_gro_len(skb);

        proto = iph->nexthdr;
        ops = rcu_dereference(inet6_offloads[proto]);
        if (!ops || !ops->callbacks.gro_receive) {
                __pskb_pull(skb, skb_gro_offset(skb));
                skb_gro_frag0_invalidate(skb);
                proto = ipv6_gso_pull_exthdrs(skb, proto);
                skb_gro_pull(skb, -skb_transport_offset(skb));
                skb_reset_transport_header(skb);
                __skb_push(skb, skb_gro_offset(skb));

                ops = rcu_dereference(inet6_offloads[proto]);
                if (!ops || !ops->callbacks.gro_receive)
                        goto out;

                iph = ipv6_hdr(skb);
        }

        NAPI_GRO_CB(skb)->proto = proto;

        flush--;
        nlen = skb_network_header_len(skb);

        list_for_each_entry(p, head, list) {
                const struct ipv6hdr *iph2;
                __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                iph2 = (struct ipv6hdr *)(p->data + off);
                first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

                /* All fields must match except length and Traffic Class.
                 * XXX skbs on the gro_list have all been parsed and pulled
                 * already so we don't need to compare nlen
                 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
                 * memcmp() alone below is sufficient, right?
                 */
                if ((first_word & htonl(0xF00FFFFF)) ||
                    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
                    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
                    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
                if (unlikely(nlen > sizeof(struct ipv6hdr))) {
                        if (memcmp(iph + 1, iph2 + 1,
                                   nlen - sizeof(struct ipv6hdr)))
                                goto not_same_flow;
                }
                /* flush if Traffic Class fields are different */
                NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
                NAPI_GRO_CB(p)->flush |= flush;

                /* If the previous IP ID value was based on an atomic
                 * datagram we can overwrite the value and ignore it.
                 */
                if (NAPI_GRO_CB(skb)->is_atomic)
                        NAPI_GRO_CB(p)->flush_id = 0;
        }

        NAPI_GRO_CB(skb)->is_atomic = true;
        NAPI_GRO_CB(skb)->flush |= flush;

        skb_gro_postpull_rcsum(skb, iph, nlen);

        pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
                                          ops->callbacks.gro_receive, head, skb);

out:
        skb_gro_flush_final(skb, pp, flush);

        return pp;
}

static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
                                              struct sk_buff *skb)
{
        /* Common GRO receive for SIT and IP6IP6 */

        if (NAPI_GRO_CB(skb)->encap_mark) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        NAPI_GRO_CB(skb)->encap_mark = 1;

        return ipv6_gro_receive(head, skb);
}

static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
                                          struct sk_buff *skb)
{
        /* GRO receive for IPv4-in-IPv6 (4in6) tunnels */

        if (NAPI_GRO_CB(skb)->encap_mark) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        NAPI_GRO_CB(skb)->encap_mark = 1;

        return inet_gro_receive(head, skb);
}

INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
        const struct net_offload *ops;
        struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
        int err = -ENOSYS;

        if (skb->encapsulation) {
                skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
                skb_set_inner_network_header(skb, nhoff);
        }

        iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

        nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out;

        err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
                               udp6_gro_complete, skb, nhoff);

out:
        return err;
}

static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
        skb->encapsulation = 1;
        skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
        return ipv6_gro_complete(skb, nhoff);
}

static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
        skb->encapsulation = 1;
        skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
        return ipv6_gro_complete(skb, nhoff);
}

static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
        skb->encapsulation = 1;
        skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
        return inet_gro_complete(skb, nhoff);
}

static struct packet_offload ipv6_packet_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_IPV6),
        .callbacks = {
                .gso_segment = ipv6_gso_segment,
                .gro_receive = ipv6_gro_receive,
                .gro_complete = ipv6_gro_complete,
        },
};

static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
                                       netdev_features_t features)
{
        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
                return ERR_PTR(-EINVAL);

        return ipv6_gso_segment(skb, features);
}

static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
                                          netdev_features_t features)
{
        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
                return ERR_PTR(-EINVAL);

        return inet_gso_segment(skb, features);
}

static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
                                          netdev_features_t features)
{
        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
                return ERR_PTR(-EINVAL);

        return ipv6_gso_segment(skb, features);
}

static const struct net_offload sit_offload = {
        .callbacks = {
                .gso_segment = sit_gso_segment,
                .gro_receive = sit_ip6ip6_gro_receive,
                .gro_complete = sit_gro_complete,
        },
};

static const struct net_offload ip4ip6_offload = {
        .callbacks = {
                .gso_segment = ip4ip6_gso_segment,
                .gro_receive = ip4ip6_gro_receive,
                .gro_complete = ip4ip6_gro_complete,
        },
};

static const struct net_offload ip6ip6_offload = {
        .callbacks = {
                .gso_segment = ip6ip6_gso_segment,
                .gro_receive = sit_ip6ip6_gro_receive,
                .gro_complete = ip6ip6_gro_complete,
        },
};
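
/* Registration summary (illustrative, not part of the original file):
 *
 *      sit_offload     IPv6-in-IPv4 (SIT)      inet_add_offload(..., IPPROTO_IPV6)
 *      ip4ip6_offload  IPv4-in-IPv6 (4in6)     inet6_add_offload(..., IPPROTO_IPIP)
 *      ip6ip6_offload  IPv6-in-IPv6 (6in6)     inet6_add_offload(..., IPPROTO_IPV6)
 *
 * i.e. the sit offload is looked up from the IPv4 offload table for inner
 * protocol 41, while the other two are looked up from the IPv6 offload table
 * for inner protocols 4 and 41 respectively, matching the calls in
 * ipv6_offload_init() below.
 */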

static int __init ipv6_offload_init(void)
{
        if (tcpv6_offload_init() < 0)
                pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
        if (ipv6_exthdrs_offload_init() < 0)
                pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

        dev_add_offload(&ipv6_packet_offload);

        inet_add_offload(&sit_offload, IPPROTO_IPV6);
        inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
        inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP);

        return 0;
}

fs_initcall(ipv6_offload_init);