/*
 * IPV6 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>

static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
{
	int off = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr;

	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
		return offsetof(struct ipv6hdr, nexthdr);

	while (off < nhlen) {
		exthdr = (void *)ipv6_hdr + off;
		if (exthdr->nexthdr == NEXTHDR_ESP)
			return off;

		off += ipv6_optlen(exthdr);
	}

	return 0;
}
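
/* gro_receive handler for ESP-in-IPv6: parse the SPI and sequence
 * number, look up the matching xfrm state if the hardware did not
 * already do the crypto, attach the state to the skb's secpath and
 * hand the packet to xfrm_input(). Returning ERR_PTR(-EINPROGRESS)
 * tells the GRO layer that the skb has been consumed by the IPsec
 * stack.
 */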
static struct sk_buff *esp6_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int nhoff;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq);
	if (err)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET6);
		if (!x)
			goto out_reset;

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out_reset;
		}
	}

	xo->flags |= XFRM_GRO;

	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
	if (!nhoff)
		goto out;

	IP6CB(skb)->nhoff = nhoff;
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input(); it does
	 * all the error handling and frees the resources on error.
	 */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->nexthdr;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~NETIF_F_CSUM_MASK;

	xo->flags |= XFRM_GSO_SEGMENT;

	return x->outer_mode->gso_segment(x, skb, esp_features);
}

static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp6_input_done2(skb, 0);
}
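
/* xmit handler: build the ESP header and trailer around the payload
 * and advance the output sequence counter. If the device cannot
 * offload the crypto for this state, set CRYPTO_FALLBACK and run the
 * software encryption path (esp6_output_tail()) instead of leaving
 * the packet to the hardware.
 */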
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int len;
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;

	ipv6_hdr(skb)->payload_len = htons(len);

	if (hw_offload)
		return 0;

	err = esp6_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};

static const struct xfrm_type_offload esp6_type_offload = {
	.description	= "ESP6 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp6_input_tail,
	.xmit		= esp6_xmit,
	.encap		= esp6_gso_encap,
};

static int __init esp6_offload_init(void)
{
	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
	if (xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type offload\n", __func__);

	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);