/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;
        int err;

        skb_pull(skb, offset);

        err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq);
        if (err)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                err = secpath_set(skb);
                if (err)
                        goto out;

                if (skb->sp->len == XFRM_MAX_DEPTH)
                        goto out;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);
                if (!x)
                        goto out;

                skb->sp->xvec[skb->sp->len++] = x;
                skb->sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo) {
                        xfrm_state_put(x);
                        goto out;
                }
        }

        xo->flags |= XFRM_GRO;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We do not need to handle errors from xfrm_input(); it does all
         * the error handling and frees the resources on error.
         */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}
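/* Transmit path: esp4_gso_encap() runs via the ->encap hook before
 * segmentation and writes the outer ESP header (SPI and the low 32 bits
 * of the output sequence number), stashing the inner protocol in
 * xo->proto.  esp4_gso_segment() then lets the outer mode split the GSO
 * skb and walks the segment list, giving every segment its own sequence
 * number state in xo->seq; per-segment header fixup and the actual
 * encryption are left to esp_xmit() further down.
 */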
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}

static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        __u32 seq;
        int err = 0;
        struct sk_buff *skb2;
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!xo)
                goto out;

        seq = xo->seq.low;

        x = skb->sp->xvec[skb->sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                goto out;

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                goto out;

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if (!(features & NETIF_F_HW_ESP))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

        segs = x->outer_mode->gso_segment(x, skb, esp_features);
        if (IS_ERR_OR_NULL(segs))
                goto out;

        __skb_pull(skb, skb->data - skb_mac_header(skb));

        skb2 = segs;
        do {
                struct sk_buff *nskb = skb2->next;

                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_GSO_SEGMENT;
                xo->seq.low = seq;
                xo->seq.hi = xfrm_replay_seqhi(x, seq);

                if (!(features & NETIF_F_HW_ESP))
                        xo->flags |= CRYPTO_FALLBACK;

                x->outer_mode->xmit(x, skb2);

                err = x->type_offload->xmit(x, skb2, esp_features);
                if (err) {
                        kfree_skb_list(segs);
                        return ERR_PTR(err);
                }

                if (!skb_is_gso(skb2))
                        seq++;
                else
                        seq += skb_shinfo(skb2)->gso_segs;

                skb_push(skb2, skb2->mac_len);
                skb2 = nskb;
        } while (skb2);

out:
        return segs;
}

static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}
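/* esp_xmit() is called through the ->xmit hook of the ESP type offload
 * for every segment produced above, and for non-GSO ESP packets on the
 * device xmit path.  It computes the ESP trailer geometry; a worked
 * example, assuming a 1448 byte payload, a 16 byte AEAD block size and
 * no TFC padding:
 *
 *      blksize = ALIGN(16, 4)            = 16
 *      clen    = ALIGN(1448 + 2 + 0, 16) = 1456
 *      plen    = 1456 - 1448 - 0         = 8   (6 pad bytes + pad length
 *                                               byte + next header byte)
 *      tailen  = 0 + 8 + alen
 *
 * With real hardware offload only the headers are built here; otherwise
 * the CRYPTO_FALLBACK path runs esp_output_head()/esp_output_tail() to
 * do the encryption in software.
 */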
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
            (x->xso.dev != skb->dev)) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (!hw_offload || !skb_is_gso(skb)) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(xo->seq.low);
        } else {
                ip_hdr(skb)->tot_len = htons(skb->len);
                ip_send_check(ip_hdr(skb));
        }

        if (hw_offload)
                return 0;

        esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

        err = esp_output_tail(x, skb, &esp);
        if (err < 0)
                return err;

        secpath_reset(skb);

        return 0;
}

static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

static const struct xfrm_type_offload esp_type_offload = {
        .description = "ESP4 OFFLOAD",
        .owner = THIS_MODULE,
        .proto = IPPROTO_ESP,
        .input_tail = esp_input_tail,
        .xmit = esp_xmit,
        .encap = esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
        if (xfrm_unregister_type_offload(&esp_type_offload, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type offload\n", __func__);

        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
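/* Illustrative use (a sketch, not part of this module: recent iproute2
 * with xfrm offload support is assumed, and the addresses, key and
 * device name are placeholders; the "offload" clause requires a driver
 * advertising NETIF_F_HW_ESP):
 *
 *      ip xfrm state add src 10.1.0.1 dst 10.1.0.2 \
 *              proto esp spi 0x1 reqid 1 mode transport \
 *              aead 'rfc4106(gcm(aes))' \
 *              0x0102030405060708090a0b0c0d0e0f1011121314 128 \
 *              offload dev eth0 dir out
 *
 * If such a state is later transmitted by a device without
 * NETIF_F_HW_ESP, esp_xmit() above encrypts in software via the
 * CRYPTO_FALLBACK path instead.
 */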