/*
 * IPV6 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>

static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int err;

	skb_pull(skb, offset);

	err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq);
	if (err)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		err = secpath_set(skb);
		if (err)
			goto out;

		if (skb->sp->len == XFRM_MAX_DEPTH)
			goto out;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET6);
		if (!x)
			goto out;

		skb->sp->xvec[skb->sp->len++] = x;
		skb->sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out;
		}
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error.
	 */
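	/* An encap_type below -1 tells xfrm_input() that this packet
	 * arrived through the GRO codepath (-1 is reserved for async
	 * crypto resumption), so -2 here selects the GRO handling.
	 */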
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->nexthdr;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	__u32 seq;
	int err = 0;
	struct sk_buff *skb2;
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo)
		goto out;

	seq = xo->seq.low;

	x = skb->sp->xvec[skb->sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		goto out;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		goto out;

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	segs = x->outer_mode->gso_segment(x, skb, esp_features);
	if (IS_ERR_OR_NULL(segs))
		goto out;

	__skb_pull(skb, skb->data - skb_mac_header(skb));

	skb2 = segs;
	do {
		struct sk_buff *nskb = skb2->next;

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_GSO_SEGMENT;
		xo->seq.low = seq;
		xo->seq.hi = xfrm_replay_seqhi(x, seq);

		if (!(features & NETIF_F_HW_ESP))
			xo->flags |= CRYPTO_FALLBACK;

		x->outer_mode->xmit(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (err) {
			kfree_skb_list(segs);
			return ERR_PTR(err);
		}

		if (!skb_is_gso(skb2))
			seq++;
		else
			seq += skb_shinfo(skb2)->gso_segs;

		skb_push(skb2, skb2->mac_len);
		skb2 = nskb;
	} while (skb2);

out:
	return segs;
}

static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	skb->ip_summed = CHECKSUM_NONE;

	return esp6_input_done2(skb, 0);
}

static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
	    (x->xso.dev != skb->dev)) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */
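	/* ESP trailer sizing below, worked through for illustration
	 * with assumed figures (a 16-byte cipher block size and
	 * skb->len == 103, no TFC padding):
	 *   blksize    = ALIGN(16, 4)           = 16
	 *   esp.clen   = ALIGN(103 + 2 + 0, 16) = 112
	 *   esp.plen   = 112 - 103 - 0          = 9
	 *   esp.tailen = 0 + 9 + alen
	 * The "+ 2" accounts for the pad-length and next-header bytes
	 * that RFC 4303 places after the padding, before the ICV.
	 */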
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(xo->seq.low);
	} else {
		int len;

		len = skb->len - sizeof(struct ipv6hdr);
		if (len > IPV6_MAXPLEN)
			len = 0;

		ipv6_hdr(skb)->payload_len = htons(len);
	}

	if (hw_offload)
		return 0;

	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

	err = esp6_output_tail(x, skb, &esp);
	if (err < 0)
		return err;

	secpath_reset(skb);

	return 0;
}

static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};

static const struct xfrm_type_offload esp6_type_offload = {
	.description	= "ESP6 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp6_input_tail,
	.xmit		= esp6_xmit,
	.encap		= esp6_gso_encap,
};

static int __init esp6_offload_init(void)
{
	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
	if (xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type offload\n", __func__);

	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
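/* Optional, not in the original module metadata: let this module be
 * auto-loaded when the ESP xfrm offload type is requested. Assumes the
 * MODULE_ALIAS_XFRM_OFFLOAD_TYPE() helper from <net/xfrm.h> is
 * available in this tree.
 */
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);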