/*
 * IPV6 GSO/GRO offload support
 * Linux INET6 implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>

#include "ip6_offload.h"

/* All GRO functions are always builtin, except UDP over ipv6, which lives in
 * the ipv6 module, as it depends on the UDPv6 lookup function, so we need
 * special care when ipv6 is built as a module.
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif

#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb) \
({ \
        unlikely(gro_recursion_inc_test(skb)) ? \
                NAPI_GRO_CB(skb)->flush |= 1, NULL : \
                INDIRECT_CALL_L4(cb, f2, f1, head, skb); \
})

/* Pull hop-by-hop and other GSO-aware extension headers off the skb and
 * return the upper-layer protocol that follows them.
 */
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
        const struct net_offload *ops = NULL;

        for (;;) {
                struct ipv6_opt_hdr *opth;
                int len;

                if (proto != NEXTHDR_HOP) {
                        ops = rcu_dereference(inet6_offloads[proto]);

                        if (unlikely(!ops))
                                break;

                        if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
                                break;
                }

                if (unlikely(!pskb_may_pull(skb, 8)))
                        break;

                opth = (void *)skb->data;
                len = ipv6_optlen(opth);

                if (unlikely(!pskb_may_pull(skb, len)))
                        break;

                opth = (void *)skb->data;
                proto = opth->nexthdr;
                __skb_pull(skb, len);
        }

        return proto;
}

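/* GSO segmentation for IPv6: pull the IPv6 header and any extension
 * headers, hand the packet to the upper-layer gso_segment callback, then
 * fix up payload_len (and, for fragmented UDP, the fragment header
 * offset and IP6_MF bit) in every segment that comes back.
 */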
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct ipv6hdr *ipv6h;
        const struct net_offload *ops;
        int proto;
        struct frag_hdr *fptr;
        unsigned int payload_len;
        u8 *prevhdr;
        int offset = 0;
        bool encap, udpfrag;
        int nhoff;
        bool gso_partial;

        skb_reset_network_header(skb);
        nhoff = skb_network_header(skb) - skb_mac_header(skb);
        if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
                goto out;

        encap = SKB_GSO_CB(skb)->encap_level > 0;
        if (encap)
                features &= skb->dev->hw_enc_features;
        SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

        ipv6h = ipv6_hdr(skb);
        __skb_pull(skb, sizeof(*ipv6h));
        segs = ERR_PTR(-EPROTONOSUPPORT);

        proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

        if (skb->encapsulation &&
            skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
                udpfrag = proto == IPPROTO_UDP && encap &&
                          (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
        else
                udpfrag = proto == IPPROTO_UDP && !skb->encapsulation &&
                          (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

        ops = rcu_dereference(inet6_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment)) {
                skb_reset_transport_header(skb);
                segs = ops->callbacks.gso_segment(skb, features);
        }

        if (IS_ERR_OR_NULL(segs))
                goto out;

        gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

        for (skb = segs; skb; skb = skb->next) {
                ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
                if (gso_partial && skb_is_gso(skb))
                        payload_len = skb_shinfo(skb)->gso_size +
                                      SKB_GSO_CB(skb)->data_offset +
                                      skb->head - (unsigned char *)(ipv6h + 1);
                else
                        payload_len = skb->len - nhoff - sizeof(*ipv6h);
                ipv6h->payload_len = htons(payload_len);
                skb->network_header = (u8 *)ipv6h - skb->head;
                skb_reset_mac_len(skb);

                if (udpfrag) {
                        int err = ip6_find_1stfragopt(skb, &prevhdr);

                        if (err < 0) {
                                kfree_skb_list(segs);
                                return ERR_PTR(err);
                        }
                        fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
                        fptr->frag_off = htons(offset);
                        if (skb->next)
                                fptr->frag_off |= htons(IP6_MF);
                        offset += (ntohs(ipv6h->payload_len) -
                                   sizeof(struct frag_hdr));
                }
                if (encap)
                        skb_reset_inner_headers(skb);
        }

out:
        return segs;
}

/* Return the total length of all the extension hdrs, following the same
 * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
                            const struct net_offload **opps)
{
        struct ipv6_opt_hdr *opth = (void *)iph;
        int len = 0, proto, optlen = sizeof(*iph);

        proto = iph->nexthdr;
        for (;;) {
                if (proto != NEXTHDR_HOP) {
                        *opps = rcu_dereference(inet6_offloads[proto]);
                        if (unlikely(!(*opps)))
                                break;
                        if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
                                break;
                }
                opth = (void *)opth + optlen;
                optlen = ipv6_optlen(opth);
                len += optlen;
                proto = opth->nexthdr;
        }
        return len;
}

INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *,
                                                           struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
                                                           struct sk_buff *));
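/* GRO receive for IPv6: locate the IPv6 header, find the upper-layer
 * gro_receive callback (parsing extension headers when the immediate
 * nexthdr has no offload), and compare this packet against those already
 * held for merging.  Packets are treated as the same flow only when
 * version, flow label, addresses, nexthdr and hop limit all match; a
 * Traffic Class mismatch forces a flush instead.
 */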
INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
                                                          struct sk_buff *skb)
{
        const struct net_offload *ops;
        struct sk_buff *pp = NULL;
        struct sk_buff *p;
        struct ipv6hdr *iph;
        unsigned int nlen;
        unsigned int hlen;
        unsigned int off;
        u16 flush = 1;
        int proto;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*iph);
        iph = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                iph = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!iph))
                        goto out;
        }

        skb_set_network_header(skb, off);
        skb_gro_pull(skb, sizeof(*iph));
        skb_set_transport_header(skb, skb_gro_offset(skb));

        flush += ntohs(iph->payload_len) != skb_gro_len(skb);

        rcu_read_lock();
        proto = iph->nexthdr;
        ops = rcu_dereference(inet6_offloads[proto]);
        if (!ops || !ops->callbacks.gro_receive) {
                __pskb_pull(skb, skb_gro_offset(skb));
                skb_gro_frag0_invalidate(skb);
                proto = ipv6_gso_pull_exthdrs(skb, proto);
                skb_gro_pull(skb, -skb_transport_offset(skb));
                skb_reset_transport_header(skb);
                __skb_push(skb, skb_gro_offset(skb));

                ops = rcu_dereference(inet6_offloads[proto]);
                if (!ops || !ops->callbacks.gro_receive)
                        goto out_unlock;

                iph = ipv6_hdr(skb);
        }

        NAPI_GRO_CB(skb)->proto = proto;

        flush--;
        nlen = skb_network_header_len(skb);

        list_for_each_entry(p, head, list) {
                const struct ipv6hdr *iph2;
                __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                iph2 = (struct ipv6hdr *)(p->data + off);
                first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

                /* All fields must match except length and Traffic Class.
                 * XXX skbs on the gro_list have all been parsed and pulled
                 * already so we don't need to compare nlen
                 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
                 * memcmp() alone below is sufficient, right?
                 */
                if ((first_word & htonl(0xF00FFFFF)) ||
                    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
                    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
                    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
                if (unlikely(nlen > sizeof(struct ipv6hdr))) {
                        if (memcmp(iph + 1, iph2 + 1,
                                   nlen - sizeof(struct ipv6hdr)))
                                goto not_same_flow;
                }
                /* flush if Traffic Class fields are different */
                NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
                NAPI_GRO_CB(p)->flush |= flush;

                /* If the previous IP ID value was based on an atomic
                 * datagram we can overwrite the value and ignore it.
                 */
                if (NAPI_GRO_CB(skb)->is_atomic)
                        NAPI_GRO_CB(p)->flush_id = 0;
        }

        NAPI_GRO_CB(skb)->is_atomic = true;
        NAPI_GRO_CB(skb)->flush |= flush;

        skb_gro_postpull_rcsum(skb, iph, nlen);

        pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
                                          ops->callbacks.gro_receive, head, skb);

out_unlock:
        rcu_read_unlock();

out:
        skb_gro_flush_final(skb, pp, flush);

        return pp;
}

static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
                                              struct sk_buff *skb)
{
        /* Common GRO receive for SIT and IP6IP6 */

        if (NAPI_GRO_CB(skb)->encap_mark) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        NAPI_GRO_CB(skb)->encap_mark = 1;

        return ipv6_gro_receive(head, skb);
}

static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
                                          struct sk_buff *skb)
{
        /* GRO receive for IPv4 encapsulated in IPv6 (ip4ip6) */

        if (NAPI_GRO_CB(skb)->encap_mark) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        NAPI_GRO_CB(skb)->encap_mark = 1;

        return inet_gro_receive(head, skb);
}

INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
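/* GRO completion for IPv6: rewrite payload_len to cover the merged
 * packet, step past any extension headers, and invoke the upper-layer
 * gro_complete callback (TCP or UDP via INDIRECT_CALL_L4).
 */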
INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
        const struct net_offload *ops;
        struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
        int err = -ENOSYS;

        if (skb->encapsulation) {
                skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
                skb_set_inner_network_header(skb, nhoff);
        }

        iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

        rcu_read_lock();

        nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
                               udp6_gro_complete, skb, nhoff);

out_unlock:
        rcu_read_unlock();

        return err;
}

static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
        skb->encapsulation = 1;
        skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
        return ipv6_gro_complete(skb, nhoff);
}

static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
        skb->encapsulation = 1;
        skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
        return ipv6_gro_complete(skb, nhoff);
}

static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
        skb->encapsulation = 1;
        skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
        return inet_gro_complete(skb, nhoff);
}

static struct packet_offload ipv6_packet_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_IPV6),
        .callbacks = {
                .gso_segment = ipv6_gso_segment,
                .gro_receive = ipv6_gro_receive,
                .gro_complete = ipv6_gro_complete,
        },
};

static const struct net_offload sit_offload = {
        .callbacks = {
                .gso_segment = ipv6_gso_segment,
                .gro_receive = sit_ip6ip6_gro_receive,
                .gro_complete = sit_gro_complete,
        },
};

static const struct net_offload ip4ip6_offload = {
        .callbacks = {
                .gso_segment = inet_gso_segment,
                .gro_receive = ip4ip6_gro_receive,
                .gro_complete = ip4ip6_gro_complete,
        },
};

static const struct net_offload ip6ip6_offload = {
        .callbacks = {
                .gso_segment = ipv6_gso_segment,
                .gro_receive = sit_ip6ip6_gro_receive,
                .gro_complete = ip6ip6_gro_complete,
        },
};

static int __init ipv6_offload_init(void)
{
        if (tcpv6_offload_init() < 0)
                pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
        if (ipv6_exthdrs_offload_init() < 0)
                pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

        dev_add_offload(&ipv6_packet_offload);

        inet_add_offload(&sit_offload, IPPROTO_IPV6);
        inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
        inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP);

        return 0;
}

fs_initcall(ipv6_offload_init);