/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, ufo;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);

	/* Set up the inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

	ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	/* Try to offload the outer checksum if possible */
	offload_csum = !!(need_csum &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one, so strip the existing checksum feature flags and
	 * instead set the flag based on our outer checksum offload value.
	 */
	if (remcsum || ufo) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum || offload_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* Segment the inner packet. */
	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);

		/* If we are only performing partial GSO, the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (skb_is_gso(skb)) {
			uh->len = htons(skb_shinfo(skb)->gso_size +
					SKB_GSO_CB(skb)->data_offset +
					skb->head - (unsigned char *)uh);
		} else {
			uh->len = htons(len);
		}

		if (!need_csum)
			continue;

		uh->check = ~csum_fold(csum_add(partial,
						(__force __wsum)htonl(len)));

		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
	} while ((skb = skb->next));
out:
	return segs;
}
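
/* For illustration only: the per-segment checksum update in the loop above
 * is plain ones' complement arithmetic on the pseudo-header sum. A minimal
 * sketch under that assumption; the helper name is hypothetical and nothing
 * in this file calls it.
 */
static __sum16 __maybe_unused udp_tunnel_seg_check(__sum16 old_check,
						   u32 old_len, u32 seg_len)
{
	/* Back the original datagram length out of the folded checksum,
	 * exactly as "partial" is derived above...
	 */
	__wsum partial = csum_sub(csum_unfold(old_check),
				  (__force __wsum)htonl(old_len));

	/* ...then fold this segment's own length back in; the complement is
	 * what the loop above stores in uh->check for each segment.
	 */
	return ~csum_fold(csum_add(partial, (__force __wsum)htonl(seg_len)));
}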

struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	__be16 protocol = skb->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();

	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}

	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
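
/* For reference: which branch of the switch above runs is decided by how
 * the transmitting tunnel tagged the skb before segmentation. A minimal
 * sketch with a hypothetical helper name; real tunnels do this tagging in
 * their xmit paths (vxlan, for instance, marks an inner Ethernet frame
 * with ETH_P_TEB).
 */
static void __maybe_unused udp_tunnel_tag_inner(struct sk_buff *skb,
						bool inner_is_ethernet)
{
	if (inner_is_ethernet)
		/* ENCAP_TYPE_ETHER: the inner frame carries its own
		 * Ethernet header and is segmented through
		 * skb_mac_gso_segment() above.
		 */
		skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	else
		/* ENCAP_TYPE_IPPROTO: the inner payload is a bare IP
		 * protocol, resolved through inet_offloads above;
		 * IPPROTO_IPIP here is just an example value.
		 */
		skb_set_inner_ipproto(skb, IPPROTO_IPIP);
}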

static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_UDP_TUNNEL_CSUM |
				      SKB_GSO_TUNNEL_REMCSUM |
				      SKB_GSO_IPIP |
				      SKB_GSO_GRE | SKB_GSO_GRE_CSUM) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */

	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

	/* If there is no outer header we can fake a checksum offload
	 * because we have already done the checksum in software prior
	 * to segmenting the frame.
	 */
	if (!skb->encap_hdr_csum)
		features |= NETIF_F_HW_CSUM;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment().
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}

struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
				 struct udphdr *uh, udp_lookup_t lookup)
{
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;
	struct sock *sk;

	if (NAPI_GRO_CB(skb)->encap_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid))
		goto out;

	/* Mark that this skb passed once through the tunnel GRO layer */
	NAPI_GRO_CB(skb)->encap_mark = 1;

	rcu_read_lock();
	sk = (*lookup)(skb, uh->source, uh->dest);

	if (sk && udp_sk(sk)->gro_receive)
		goto unflush;
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports, and the checksums must be either both zero
		 * or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = udp_sk(sk)->gro_receive(sk, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}
EXPORT_SYMBOL(udp_gro_receive);

static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     inet_gro_compute_pseudo);
skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}
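
/* For reference: udp_gro_receive() above only aggregates when the looked-up
 * socket has a gro_receive callback installed. A minimal sketch of that
 * wiring; the helper name is hypothetical, and tunnels normally install
 * these callbacks through setup_udp_tunnel_sock() rather than by hand.
 */
static void __maybe_unused udp_tunnel_install_gro(struct sock *sk,
	struct sk_buff **(*gro_receive)(struct sock *sk,
					struct sk_buff **head,
					struct sk_buff *skb),
	int (*gro_complete)(struct sock *sk, struct sk_buff *skb, int nhoff))
{
	/* These are the udp_sock fields consulted by udp_gro_receive()
	 * above and udp_gro_complete() below.
	 */
	udp_sk(sk)->gro_receive = gro_receive;
	udp_sk(sk)->gro_complete = gro_complete;
}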

int udp_gro_complete(struct sk_buff *skb, int nhoff,
		     udp_lookup_t lookup)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;
	struct sock *sk;

	uh->len = newlen;

	rcu_read_lock();
	sk = (*lookup)(skb, uh->source, uh->dest);
	if (sk && udp_sk(sk)->gro_complete)
		err = udp_sk(sk)->gro_complete(sk, skb,
					       nhoff + sizeof(struct udphdr));
	rcu_read_unlock();

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	skb->encapsulation = 1;
	skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr));

	return err;
}
EXPORT_SYMBOL(udp_gro_complete);

static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	if (uh->check) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);
	} else {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}

	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}

static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}
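
/* For symmetry with udpv4_offload_init() above: a module-owned net_offload
 * would be unwound like this on exit. A minimal sketch; this built-in
 * offload is never actually removed, so no such function exists here.
 */
static void __maybe_unused udpv4_offload_exit(void)
{
	inet_del_offload(&udpv4_offload, IPPROTO_UDP);
}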