// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer 2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}
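
/* validate_xmit_xfrm - prepare an skb, or a list of GSO segments, for
 * ESP hardware offload just before handing it to the driver.
 *
 * Packets that need no handling here (no offload context, already
 * processed, GRO or inbound-offload packets) are passed through
 * unchanged. If the per-CPU xfrm backlog is not empty, *again is set so
 * that the caller retries once the backlog has been drained. Otherwise
 * each (segmented) skb is run through the outer-mode prep above and
 * handed to the state's type_offload->xmit() hook, which either takes
 * ownership (-EINPROGRESS), fails, or returns it ready for transmission.
 */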
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	xo->flags |= XFRM_XMIT;

	if (skb_is_gso(skb)) {
		struct net_device *dev = skb->dev;

		if (unlikely(x->xso.dev != dev)) {
			struct sk_buff *segs;

			/* Packet got rerouted, fixup features and segment it. */
			esp_features = esp_features & ~(NETIF_F_HW_ESP
							| NETIF_F_GSO_ESP);

			segs = skb_gso_segment(skb, esp_features);
			if (IS_ERR(segs)) {
				kfree_skb(skb);
				atomic_long_inc(&dev->tx_dropped);
				return NULL;
			} else {
				consume_skb(skb);
				skb = segs;
			}
		}
	}

	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
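
/* xfrm_dev_state_add - bind an xfrm state to a device for hardware offload.
 *
 * Called when userspace requests offload for a state; @xuo carries the
 * requested ifindex and direction. For illustration only (the addresses,
 * key and device name below are placeholders), a recent iproute2 can
 * request this with something like:
 *
 *	ip xfrm state add src 10.0.0.1 dst 10.0.0.2 proto esp spi 0x1 \
 *		reqid 1 mode tunnel aead 'rfc4106(gcm(aes))' 0x... 128 \
 *		offload dev eth0 dir out
 *
 * Returns 0 when the state was offloaded, or when the device cannot take
 * it for non-fatal reasons and the state falls back to software
 * processing; a negative errno otherwise.
 */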
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad)
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	xso->num_exthdrs = 1;
	xso->flags = xuo->flags;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->num_exthdrs = 0;
		xso->flags = 0;
		xso->dev = NULL;
		dev_put(dev);

		if (err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

/* xfrm_dev_offload_ok - check whether a packet can go through the
 * offloaded state: the route must leave through the offload device,
 * there must be no nested xfrm, and the packet (or its GSO segments)
 * must fit within the state MTU. The driver gets a final veto through
 * its xdo_dev_offload_ok callback.
 */
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

/* xfrm_dev_resume - transmit an skb whose offload processing has
 * completed. If the TX queue is frozen or the driver cannot take the
 * packet, park it on the per-CPU xfrm backlog and let the NET_TX
 * softirq retry.
 */
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

/* xfrm_dev_backlog - drain the per-CPU backlog filled by
 * xfrm_dev_resume(); runs from the NET_TX softirq.
 */
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif
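
/*
 * Example: minimal driver-side wiring for this offload API. This is an
 * illustrative sketch only (the foo_* names are hypothetical and the
 * block is compiled out); it is not taken from any real driver. A NIC
 * driver implements struct xfrmdev_ops and advertises NETIF_F_HW_ESP
 * (plus NETIF_F_HW_ESP_TX_CSUM if it can checksum ESP frames) before
 * registration, so that xfrm_api_check() below accepts the device.
 */
#if 0	/* illustrative sketch, not part of this file */
static int foo_xdo_dev_state_add(struct xfrm_state *x)
{
	/* Program the SA (keys, SPI, mode, direction) into the hardware.
	 * Returning -EOPNOTSUPP makes xfrm_dev_state_add() fall back to
	 * software processing for this state.
	 */
	return 0;
}

static void foo_xdo_dev_state_delete(struct xfrm_state *x)
{
	/* Remove the SA from the hardware tables. */
}

static bool foo_xdo_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	/* Final per-packet veto, e.g. reject packets with IP options. */
	return true;
}

static const struct xfrmdev_ops foo_xfrmdev_ops = {
	.xdo_dev_state_add	= foo_xdo_dev_state_add,
	.xdo_dev_state_delete	= foo_xdo_dev_state_delete,
	.xdo_dev_offload_ok	= foo_xdo_dev_offload_ok,
};

static void foo_setup_xfrm_offload(struct net_device *dev)
{
	dev->xfrmdev_ops = &foo_xfrmdev_ops;
	dev->features |= NETIF_F_HW_ESP;
	dev->hw_features |= NETIF_F_HW_ESP;
}
#endif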
/* Sanity-check a device's offload advertisement: NETIF_F_HW_ESP_TX_CSUM
 * requires NETIF_F_HW_ESP, and NETIF_F_HW_ESP requires the state
 * add/delete callbacks. Without CONFIG_XFRM_OFFLOAD, no ESP features
 * may be advertised at all.
 */
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}