/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>

static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;
	bool need_csum, ufo;

	if (!skb->encapsulation)
		goto out;

	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	skb->encap_hdr_csum = need_csum;

	ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags based
	 * on the fact that we will be computing our checksum in software.
	 */
	if (ufo) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct gre_base_hdr *greh;
		__sum16 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		pcsum = (__sum16 *)(greh + 1);

		if (skb_is_gso(skb)) {
			unsigned int partial_adj;

			/* Adjust checksum to account for the fact that
			 * the partial checksum is based on actual size
			 * whereas headers should be based on MSS size.
			 */
			partial_adj = skb->len + skb_headroom(skb) -
				      SKB_GSO_CB(skb)->data_offset -
				      skb_shinfo(skb)->gso_size;
			*pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
		} else {
			*pcsum = 0;
		}

		*(pcsum + 1) = 0;
		*pcsum = gso_make_checksum(skb, 0);
	} while ((skb = skb->next));
out:
	return segs;
}

static struct sk_buff **gre_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

	if (NAPI_GRO_CB(skb)->encap_mark)
		goto out;

	NAPI_GRO_CB(skb)->encap_mark = 1;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Only support version 0 and K (key), C (csum) flags. Note that
	 * although the support for the S (seq#) flag can be added easily
	 * for GRO, this is problematic for GSO hence can not be enabled
	 * here because a GRO pkt may end up in the forwarding path, thus
	 * requiring GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	/* We can only support GRE_CSUM if we can track the location of
	 * the GRE header. In the case of FOU/GUE we cannot because the
	 * outer UDP header displaces the GRE header leaving us in a state
	 * of limbo.
	 */
	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
		goto out;

	type = greh->protocol;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out_unlock;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
		if (skb_gro_checksum_simple_validate(skb))
			goto out_unlock;

		skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0,
					     null_compute_pseudo);
	}

	for (p = *head; p; p = p->next) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure only pkts
		 * from the same tunnel are considered for aggregation.
		 * The criteria for "the same tunnel" includes:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2 + 1) != *(__be32 *)(greh + 1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

	skb_gro_pull(skb, grehlen);

	/* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = ptype->callbacks.gro_receive(head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	unsigned int grehlen = sizeof(*greh);
	int err = -ENOENT;
	__be16 type;

	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;

	type = greh->protocol;
	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

	rcu_read_unlock();

	skb_set_inner_mac_header(skb, nhoff + grehlen);

	return err;
}

static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};

static int __init gre_offload_init(void)
{
	int err;

	err = inet_add_offload(&gre_offload, IPPROTO_GRE);
#if IS_ENABLED(CONFIG_IPV6)
	if (err)
		return err;

	err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
	if (err)
		inet_del_offload(&gre_offload, IPPROTO_GRE);
#endif

	return err;
}
device_initcall(gre_offload_init);