// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error.
	 */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

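/* Build the outer ESP header on transmit: push the packet back to its
 * network header, write the SPI and output sequence number, and stash the
 * inner protocol in the offload context for the segmentation callbacks.
 */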
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__skb_push(skb, skb->mac_len);
	return skb_mac_gso_segment(skb, features);
}

static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

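/* GSO callback for IPPROTO_ESP: validate the SPI against the offloaded
 * state, strip the ESP header and IV, then hand the inner payload to the
 * per-mode segmentation helper above. Checksum and scatter-gather
 * features are masked out when the device cannot complete them after
 * encryption, forcing a software fallback for those parts.
 */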
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

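/* Transmit-side tail for offloaded ESP: compute padding and trailer
 * sizes, fill in the SPI and sequence numbers, and update the IP header.
 * If the device can do the crypto (hw_offload), return early and leave
 * encryption to hardware; otherwise fall back to esp_output_tail() in
 * software.
 */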
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.owner = THIS_MODULE,
	.proto = IPPROTO_ESP,
	.input_tail = esp_input_tail,
	.xmit = esp_xmit,
	.encap = esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");