/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))

struct udp_offload_priv {
	struct udp_offload	*offload;
	struct rcu_head		rcu;
	struct udp_offload_priv __rcu *next;
};

static int udp4_ufo_send_check(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		return -EINVAL;

	if (likely(!skb->encapsulation)) {
		const struct iphdr *iph;
		struct udphdr *uh;

		iph = ip_hdr(skb);
		uh = udp_hdr(skb);

		uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
					       IPPROTO_UDP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		skb->ip_summed = CHECKSUM_PARTIAL;
	}

	return 0;
}

static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	int offset;
	__wsum csum;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features);
		goto out;
	}

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_UDP_TUNNEL_CSUM |
				      SKB_GSO_IPIP |
				      SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
				      SKB_GSO_MPLS) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */
	offset = skb_checksum_start_offset(skb);
	csum = skb_checksum(skb, offset, skb->len - offset, 0);
	offset += skb->csum_offset;
	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}
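/*
 * Worked example for the skb_gso_ok() branch above (illustrative note,
 * not part of the original file): packets from untrusted sources such
 * as virtio guests may carry a bogus gso_segs, so it is recomputed
 * before the hardware performs the actual segmentation.  For an skb
 * with skb->len = 4420 and gso_size (mss) = 1472:
 *
 *	gso_segs = DIV_ROUND_UP(4420, 1472) = 4
 *
 * Returning NULL (segs) then tells the caller that no software
 * segmentation is needed and the skb can be passed on as-is.
 */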
int udp_add_offload(struct udp_offload *uo)
{
	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

	if (!new_offload)
		return -ENOMEM;

	new_offload->offload = uo;

	spin_lock(&udp_offload_lock);
	new_offload->next = udp_offload_base;
	rcu_assign_pointer(udp_offload_base, new_offload);
	spin_unlock(&udp_offload_lock);

	return 0;
}
EXPORT_SYMBOL(udp_add_offload);

static void udp_offload_free_routine(struct rcu_head *head)
{
	struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);

	kfree(ou_priv);
}

void udp_del_offload(struct udp_offload *uo)
{
	struct udp_offload_priv __rcu **head = &udp_offload_base;
	struct udp_offload_priv *uo_priv;

	spin_lock(&udp_offload_lock);

	uo_priv = udp_deref_protected(*head);
	for (; uo_priv != NULL;
	     uo_priv = udp_deref_protected(*head)) {
		if (uo_priv->offload == uo) {
			rcu_assign_pointer(*head,
					   udp_deref_protected(uo_priv->next));
			goto unlock;
		}
		head = &uo_priv->next;
	}
	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
	spin_unlock(&udp_offload_lock);
	if (uo_priv != NULL)
		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);
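/*
 * Usage sketch (illustrative, not part of the original file): a UDP
 * tunnel driver registers per-destination-port GRO callbacks through
 * the pair above, much as vxlan does for its tunnel socket.  The names
 * my_offload, my_gro_receive and my_gro_complete are hypothetical
 * placeholders:
 *
 *	static struct udp_offload my_offload = {
 *		.port = htons(4789),		// e.g. the VXLAN UDP port
 *		.callbacks = {
 *			.gro_receive  = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	err = udp_add_offload(&my_offload);	// on tunnel socket create
 *	...
 *	udp_del_offload(&my_offload);		// on teardown; the list node
 *						// is freed only after an RCU
 *						// grace period
 */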
static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct udp_offload_priv *uo_priv;
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh, *uh2;
	unsigned int hlen, off;
	int flush = 1;

	if (NAPI_GRO_CB(skb)->udp_mark ||
	    (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
		goto out;

	/* mark that this skb passed once through the udp gro layer */
	NAPI_GRO_CB(skb)->udp_mark = 1;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		uh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!uh))
			goto out;
	}

	rcu_read_lock();
	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_receive)
			goto unflush;
	}
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = uo_priv->offload->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}

static int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct udp_offload_priv *uo_priv;
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	uh->len = newlen;

	rcu_read_lock();

	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_complete)
			break;
	}

	if (uo_priv != NULL)
		err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));

	rcu_read_unlock();
	return err;
}

static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_send_check = udp4_ufo_send_check,
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp_gro_receive,
		.gro_complete = udp_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}
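/*
 * Note (assumption about the surrounding tree, not part of the original
 * file): udpv4_offload_init() is invoked during IPv4 stack bring-up
 * rather than via module_init(); in kernels of this generation the
 * caller in net/ipv4/af_inet.c looks roughly like:
 *
 *	static int __init ipv4_offload_init(void)
 *	{
 *		if (udpv4_offload_init() < 0)
 *			pr_crit("%s: Cannot add UDP protocol offload\n",
 *				__func__);
 *		...
 *	}
 *	fs_initcall(ipv4_offload_init);
 */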