/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>

/* GSO of a GRE packet is only possible when it actually carries an
 * encapsulated inner packet; reject anything else.
 */
static int gre_gso_send_check(struct sk_buff *skb)
{
	if (!skb->encapsulation)
		return -EINVAL;
	return 0;
}

/* Segment a GSO skb whose outer transport header is GRE: strip the GRE
 * header, let the inner protocol's GSO code split the payload, then push
 * the tunnel header back onto every resulting segment, recomputing the
 * optional GRE checksum per segment.
 */
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t enc_features;
	int ghl;	/* GRE header length: base header + optional fields */
	struct gre_base_hdr *greh;
	/* Saved outer-header state; restored on each segment below and
	 * used by skb_gso_error_unwind() if inner segmentation fails.
	 */
	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;
	__be16 protocol = skb->protocol;
	int tnl_hlen;
	bool csum;

	/* Only inner GSO types we know how to re-encapsulate. */
	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_GRE |
				  SKB_GSO_IPIP)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
		goto out;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);

	/* Everything between the outer transport header and the inner
	 * network header belongs to GRE (base header plus any optional
	 * csum/key/seq fields the sender included).
	 */
	ghl = skb_inner_network_header(skb) - skb_transport_header(skb);
	if (unlikely(ghl < sizeof(*greh)))
		goto out;

	csum = !!(greh->flags & GRE_CSUM);

	if (unlikely(!pskb_may_pull(skb, ghl)))
		goto out;

	/* setup inner skb. */
	skb->protocol = greh->protocol;
	skb->encapsulation = 0;

	__skb_pull(skb, ghl);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, enc_features);
	if (!segs || IS_ERR(segs)) {
		/* Put the outer headers back so the caller sees the skb
		 * in its original state.
		 */
		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
		goto out;
	}

	skb = segs;
	tnl_hlen = skb_tnl_header_len(skb);
	do {
		__skb_push(skb, ghl);
		if (csum) {
			__be32 *pcsum;

			/* The checksum is computed over pages we must not
			 * write into while they are shared; linearize to
			 * obtain a private copy first.
			 */
			if (skb_has_shared_frag(skb)) {
				int err;

				err = __skb_linearize(skb);
				if (err) {
					kfree_skb_list(segs);
					segs = ERR_PTR(err);
					goto out;
				}
			}

			/* GRE csum field sits immediately after the base
			 * header and covers the GRE header plus payload.
			 */
			greh = (struct gre_base_hdr *)(skb->data);
			pcsum = (__be32 *)(greh + 1);
			*pcsum = 0;
			*(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
		}
		/* Push the rest of the tunnel header (outer L3 etc.) and
		 * restore the saved outer-header state on this segment.
		 */
		__skb_push(skb, tnl_hlen - ghl);

		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;

		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb->mac_len = mac_len;
		skb->protocol = protocol;
	} while ((skb = skb->next));
out:
	return segs;
}

/* Compute the whole skb csum in s/w and store it, then verify GRO csum
 * starting from gro_offset.
118 */ 119 static __sum16 gro_skb_checksum(struct sk_buff *skb) 120 { 121 __sum16 sum; 122 123 skb->csum = skb_checksum(skb, 0, skb->len, 0); 124 NAPI_GRO_CB(skb)->csum = csum_sub(skb->csum, 125 csum_partial(skb->data, skb_gro_offset(skb), 0)); 126 sum = csum_fold(NAPI_GRO_CB(skb)->csum); 127 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) { 128 if (unlikely(!sum)) 129 netdev_rx_csum_fault(skb->dev); 130 } else 131 skb->ip_summed = CHECKSUM_COMPLETE; 132 133 return sum; 134 } 135 136 static struct sk_buff **gre_gro_receive(struct sk_buff **head, 137 struct sk_buff *skb) 138 { 139 struct sk_buff **pp = NULL; 140 struct sk_buff *p; 141 const struct gre_base_hdr *greh; 142 unsigned int hlen, grehlen; 143 unsigned int off; 144 int flush = 1; 145 struct packet_offload *ptype; 146 __be16 type; 147 148 off = skb_gro_offset(skb); 149 hlen = off + sizeof(*greh); 150 greh = skb_gro_header_fast(skb, off); 151 if (skb_gro_header_hard(skb, hlen)) { 152 greh = skb_gro_header_slow(skb, hlen, off); 153 if (unlikely(!greh)) 154 goto out; 155 } 156 157 /* Only support version 0 and K (key), C (csum) flags. Note that 158 * although the support for the S (seq#) flag can be added easily 159 * for GRO, this is problematic for GSO hence can not be enabled 160 * here because a GRO pkt may end up in the forwarding path, thus 161 * requiring GSO support to break it up correctly. 
162 */ 163 if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0) 164 goto out; 165 166 type = greh->protocol; 167 168 rcu_read_lock(); 169 ptype = gro_find_receive_by_type(type); 170 if (ptype == NULL) 171 goto out_unlock; 172 173 grehlen = GRE_HEADER_SECTION; 174 175 if (greh->flags & GRE_KEY) 176 grehlen += GRE_HEADER_SECTION; 177 178 if (greh->flags & GRE_CSUM) 179 grehlen += GRE_HEADER_SECTION; 180 181 hlen = off + grehlen; 182 if (skb_gro_header_hard(skb, hlen)) { 183 greh = skb_gro_header_slow(skb, hlen, off); 184 if (unlikely(!greh)) 185 goto out_unlock; 186 } 187 if (greh->flags & GRE_CSUM) { /* Need to verify GRE csum first */ 188 __sum16 csum = 0; 189 190 if (skb->ip_summed == CHECKSUM_COMPLETE) 191 csum = csum_fold(NAPI_GRO_CB(skb)->csum); 192 /* Don't trust csum error calculated/reported by h/w */ 193 if (skb->ip_summed == CHECKSUM_NONE || csum != 0) 194 csum = gro_skb_checksum(skb); 195 196 /* GRE CSUM is the 1's complement of the 1's complement sum 197 * of the GRE hdr plus payload so it should add up to 0xffff 198 * (and 0 after csum_fold()) just like the IPv4 hdr csum. 199 */ 200 if (csum) 201 goto out_unlock; 202 } 203 flush = 0; 204 205 for (p = *head; p; p = p->next) { 206 const struct gre_base_hdr *greh2; 207 208 if (!NAPI_GRO_CB(p)->same_flow) 209 continue; 210 211 /* The following checks are needed to ensure only pkts 212 * from the same tunnel are considered for aggregation. 213 * The criteria for "the same tunnel" includes: 214 * 1) same version (we only support version 0 here) 215 * 2) same protocol (we only support ETH_P_IP for now) 216 * 3) same set of flags 217 * 4) same key if the key field is present. 
218 */ 219 greh2 = (struct gre_base_hdr *)(p->data + off); 220 221 if (greh2->flags != greh->flags || 222 greh2->protocol != greh->protocol) { 223 NAPI_GRO_CB(p)->same_flow = 0; 224 continue; 225 } 226 if (greh->flags & GRE_KEY) { 227 /* compare keys */ 228 if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) { 229 NAPI_GRO_CB(p)->same_flow = 0; 230 continue; 231 } 232 } 233 } 234 235 skb_gro_pull(skb, grehlen); 236 237 /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/ 238 skb_gro_postpull_rcsum(skb, greh, grehlen); 239 240 pp = ptype->callbacks.gro_receive(head, skb); 241 242 out_unlock: 243 rcu_read_unlock(); 244 out: 245 NAPI_GRO_CB(skb)->flush |= flush; 246 247 return pp; 248 } 249 250 static int gre_gro_complete(struct sk_buff *skb, int nhoff) 251 { 252 struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff); 253 struct packet_offload *ptype; 254 unsigned int grehlen = sizeof(*greh); 255 int err = -ENOENT; 256 __be16 type; 257 258 type = greh->protocol; 259 if (greh->flags & GRE_KEY) 260 grehlen += GRE_HEADER_SECTION; 261 262 if (greh->flags & GRE_CSUM) 263 grehlen += GRE_HEADER_SECTION; 264 265 rcu_read_lock(); 266 ptype = gro_find_complete_by_type(type); 267 if (ptype != NULL) 268 err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); 269 270 rcu_read_unlock(); 271 return err; 272 } 273 274 static const struct net_offload gre_offload = { 275 .callbacks = { 276 .gso_send_check = gre_gso_send_check, 277 .gso_segment = gre_gso_segment, 278 .gro_receive = gre_gro_receive, 279 .gro_complete = gre_gro_complete, 280 }, 281 }; 282 283 static int __init gre_offload_init(void) 284 { 285 return inet_add_offload(&gre_offload, IPPROTO_GRE); 286 } 287 device_initcall(gre_offload_init); 288