// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>

static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;
	bool need_csum, gso_partial;

	if (!skb->encapsulation)
		goto out;

	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	skb->encap_hdr_csum = need_csum;

	features &= skb->dev->hw_enc_features;

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct gre_base_hdr *greh;
		__sum16 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		pcsum = (__sum16 *)(greh + 1);

		if (gso_partial && skb_is_gso(skb)) {
			unsigned int partial_adj;

			/* Adjust checksum to account for the fact that
			 * the partial checksum is based on actual size
			 * whereas headers should be based on MSS size.
			 */
			partial_adj = skb->len + skb_headroom(skb) -
				      SKB_GSO_CB(skb)->data_offset -
				      skb_shinfo(skb)->gso_size;
			*pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
		} else {
			*pcsum = 0;
		}

		*(pcsum + 1) = 0;
		*pcsum = gso_make_checksum(skb, 0);
	} while ((skb = skb->next));
out:
	return segs;
}

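/* gre_gro_receive - GRO receive handler for GRE
 *
 * Validates the GRE base header of the incoming skb (version 0 only,
 * with at most the C and K flags set), marks packets from a different
 * tunnel as not belonging to the same flow, and then hands the inner
 * payload to the offload handler registered for greh->protocol.
 */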
static struct sk_buff *gre_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

	if (NAPI_GRO_CB(skb)->encap_mark)
		goto out;

	NAPI_GRO_CB(skb)->encap_mark = 1;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Only support version 0 and K (key), C (csum) flags. Note that
	 * although support for the S (seq#) flag could be added easily
	 * for GRO, it is problematic for GSO and hence cannot be enabled
	 * here, because a GRO'd packet may end up in the forwarding path
	 * and thus require GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	/* We can only support GRE_CSUM if we can track the location of
	 * the GRE header.  In the case of FOU/GUE we cannot because the
	 * outer UDP header displaces the GRE header, leaving us in a state
	 * of limbo.
	 */
	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
		goto out;

	type = greh->protocol;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out_unlock;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
		if (skb_gro_checksum_simple_validate(skb))
			goto out_unlock;

		skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0,
					     null_compute_pseudo);
	}

	list_for_each_entry(p, head, list) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure that only
		 * packets from the same tunnel are considered for
		 * aggregation.  The criteria for "the same tunnel" include:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2 + 1) != *(__be32 *)(greh + 1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

	skb_gro_pull(skb, grehlen);

	/* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull(). */
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

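/* gre_gro_complete - finish a coalesced GRE super-packet
 *
 * Restores the metadata the stack needs on the merged skb: it is marked
 * as encapsulated and tagged SKB_GSO_GRE so it can be resegmented by
 * gre_gso_segment() if it is later forwarded, and the inner protocol's
 * gro_complete handler is invoked past the GRE header.
 */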
static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	unsigned int grehlen = sizeof(*greh);
	int err = -ENOENT;
	__be16 type;

	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;

	type = greh->protocol;
	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

	rcu_read_unlock();

	skb_set_inner_mac_header(skb, nhoff + grehlen);

	return err;
}

static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};

static int __init gre_offload_init(void)
{
	int err;

	err = inet_add_offload(&gre_offload, IPPROTO_GRE);
#if IS_ENABLED(CONFIG_IPV6)
	if (err)
		return err;

	err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
	if (err)
		inet_del_offload(&gre_offload, IPPROTO_GRE);
#endif

	return err;
}
device_initcall(gre_offload_init);
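
/* Registered once at initcall time; there is no module_exit() or
 * inet_del_offload() counterpart in this file, so the GRE offload
 * stays in place for the lifetime of the kernel.
 */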