/*
 * IPV6 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>

static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
{
	int off = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr;

	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
		return offsetof(struct ipv6hdr, nexthdr);

	while (off < nhlen) {
		exthdr = (void *)ipv6_hdr + off;
		if (exthdr->nexthdr == NEXTHDR_ESP)
			return off;

		off += ipv6_optlen(exthdr);
	}

	return 0;
}
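
/* Illustrative example for esp6_nexthdr_esp_offset() above: with no
 * extension headers the function returns
 * offsetof(struct ipv6hdr, nexthdr) == 6.  With a single 8-byte
 * hop-by-hop options header in front of ESP, the walk starts at
 * off = 40 (sizeof(struct ipv6hdr)) and returns 40, the offset of
 * that extension header's nexthdr byte.  A return value of 0 means
 * no ESP header was found within the first nhlen bytes.
 */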

static struct sk_buff *esp6_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int nhoff;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET6);
		if (!x)
			goto out;

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out;
		}
	}

	xo->flags |= XFRM_GRO;

	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
	if (!nhoff)
		goto out;

	IP6CB(skb)->nhoff = nhoff;
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error.
	 */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->nexthdr;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~NETIF_F_CSUM_MASK;

	xo->flags |= XFRM_GSO_SEGMENT;

	return x->outer_mode->gso_segment(x, skb, esp_features);
}

static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp6_input_done2(skb, 0);
}

static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,
		     netdev_features_t features)
{
	int len;
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;

	ipv6_hdr(skb)->payload_len = htons(len);

	if (hw_offload)
		return 0;

	err = esp6_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}
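
/* Worked example of the trailer arithmetic in esp6_xmit() above,
 * assuming an AEAD with a blocksize of 1 and a 16-byte ICV (e.g. an
 * AES-GCM transform; the figures are illustrative):
 * blksize = ALIGN(1, 4) = 4.  For skb->len = 1447 and tfclen = 0,
 * clen = ALIGN(1449, 4) = 1452, so plen = 1452 - 1447 = 5 (three
 * bytes of padding plus the two-byte pad-length/next-header trailer)
 * and tailen = 0 + 5 + 16 = 21 bytes are appended after the payload.
 */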

static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};

static const struct xfrm_type_offload esp6_type_offload = {
	.description	= "ESP6 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp6_input_tail,
	.xmit		= esp6_xmit,
	.encap		= esp6_gso_encap,
};

static int __init esp6_offload_init(void)
{
	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
	if (xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type offload\n", __func__);

	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);