/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>

static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t enc_features;
	int ghl;
	struct gre_base_hdr *greh;
	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;
	__be16 protocol = skb->protocol;
	int tnl_hlen;
	bool csum;

	if (unlikely(skb_shinfo(skb)->gso_type &
		     ~(SKB_GSO_TCPV4 |
		       SKB_GSO_TCPV6 |
		       SKB_GSO_UDP |
		       SKB_GSO_DODGY |
		       SKB_GSO_TCP_ECN |
		       SKB_GSO_GRE |
		       SKB_GSO_GRE_CSUM |
		       SKB_GSO_IPIP)))
		goto out;

	if (!skb->encapsulation)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
		goto out;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);

	/* GRE header length: everything between the outer transport header
	 * and the start of the inner frame.
	 */
	ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
	if (unlikely(ghl < sizeof(*greh)))
		goto out;

	csum = !!(greh->flags & GRE_CSUM);
	if (csum)
		skb->encap_hdr_csum = 1;

	/* setup inner skb. */
	skb->protocol = greh->protocol;
	skb->encapsulation = 0;

	if (unlikely(!pskb_may_pull(skb, ghl)))
		goto out;

	__skb_pull(skb, ghl);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & features;
	segs = skb_mac_gso_segment(skb, enc_features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
		goto out;
	}

	/* Fix up each resulting segment: rebuild the GRE header (and its
	 * checksum, when requested) and restore the outer headers.
	 */
	skb = segs;
	tnl_hlen = skb_tnl_header_len(skb);
	do {
		__skb_push(skb, ghl);
		if (csum) {
			__be32 *pcsum;

			if (skb_has_shared_frag(skb)) {
				int err;

				err = __skb_linearize(skb);
				if (err) {
					kfree_skb_list(segs);
					segs = ERR_PTR(err);
					goto out;
				}
			}

			skb_reset_transport_header(skb);

			greh = (struct gre_base_hdr *)
			       skb_transport_header(skb);
			pcsum = (__be32 *)(greh + 1);
			*pcsum = 0;
			*(__sum16 *)pcsum = gso_make_checksum(skb, 0);
		}
		__skb_push(skb, tnl_hlen - ghl);

		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;

		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb->mac_len = mac_len;
		skb->protocol = protocol;
	} while ((skb = skb->next));
out:
	return segs;
}

static struct sk_buff **gre_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Only support version 0 and K (key), C (csum) flags.
	 * Note that although the support for the S (seq#) flag can be added
	 * easily for GRO, this is problematic for GSO hence can not be
	 * enabled here because a GRO pkt may end up in the forwarding path,
	 * thus requiring GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	type = greh->protocol;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (ptype == NULL)
		goto out_unlock;

	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out_unlock;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
		if (skb_gro_checksum_simple_validate(skb))
			goto out_unlock;

		skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0,
					     null_compute_pseudo);
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure only pkts
		 * from the same tunnel are considered for aggregation.
		 * The criteria for "the same tunnel" includes:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

	skb_gro_pull(skb, grehlen);

	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	unsigned int grehlen = sizeof(*greh);
	int err = -ENOENT;
	__be16 type;

	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;

	type = greh->protocol;
	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype != NULL)
		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

	rcu_read_unlock();
	return err;
}

static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};

static int __init gre_offload_init(void)
{
	return inet_add_offload(&gre_offload, IPPROTO_GRE);
}
device_initcall(gre_offload_init);