// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV6 GSO/GRO offload support
 * Linux INET6 implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>

/* Return the offset of the nexthdr field that announces ESP, walking any
 * extension headers present within the first nhlen bytes; 0 if ESP is not
 * found.
 */
static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
{
	int off = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr;

	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
		return offsetof(struct ipv6hdr, nexthdr);

	while (off < nhlen) {
		exthdr = (void *)ipv6_hdr + off;
		if (exthdr->nexthdr == NEXTHDR_ESP)
			return off;

		off += ipv6_optlen(exthdr);
	}

	return 0;
}
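
/* GRO receive handler for ESP: parse the SPI and sequence number, attach
 * the matching xfrm state to the secpath unless the NIC already decrypted
 * the packet (CRYPTO_DONE), then hand the skb to xfrm_input() with
 * encap_type -2 to mark GRO input. Returning ERR_PTR(-EINPROGRESS) tells
 * the GRO layer the packet has been consumed here.
 */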
static struct sk_buff *esp6_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int nhoff;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET6);
		if (!x)
			goto out_reset;

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out_reset;
		}
	}

	xo->flags |= XFRM_GRO;

	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
	if (!nhoff)
		goto out;

	IP6CB(skb)->nhoff = nhoff;
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error.
	 */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->nexthdr;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__skb_push(skb, skb->mac_len);
	return skb_mac_gso_segment(skb, features);
}

static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet6_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm6_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm6_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm6_transport_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~NETIF_F_CSUM_MASK;

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm6_outer_mode_gso_segment(x, skb, esp_features);
}

static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp6_input_done2(skb, 0);
}
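
/* .xmit handler for ESP offload: fill in the ESP header, append the ESP
 * trailer when the packet is not handled entirely by the NIC, and fall
 * back to software encryption via esp6_output_tail() if the device cannot
 * do the ESP crypto for this xfrm_state (CRYPTO_FALLBACK).
 */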
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int len;
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;

	ipv6_hdr(skb)->payload_len = htons(len);

	if (hw_offload)
		return 0;

	err = esp6_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};

static const struct xfrm_type_offload esp6_type_offload = {
	.description	= "ESP6 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp6_input_tail,
	.xmit		= esp6_xmit,
	.encap		= esp6_gso_encap,
};

static int __init esp6_offload_init(void)
{
	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6);
	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);