// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}
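
/* Rough sketch of the layer 2 packet layout that __xfrm_mode_tunnel_prep()
 * above produces (illustrative only; assumes x->props.header_len covers the
 * outer IP and ESP headers reserved by the stack):
 *
 *	+---------+--------------+---------+------------------+
 *	| MAC hdr | outer IP hdr | ESP hdr | inner packet ... |
 *	+---------+--------------+---------+------------------+
 *	^mac_header  ^network_header  ^transport_header
 *	                              (network_header + hsize,
 *	                               set for GSO segments)
 *
 * The pskb_pull() of skb->mac_len + x->props.header_len leaves skb->data
 * past the headers the offloading device will fill in; validate_xmit_xfrm()
 * later restores it with skb_push() before the skb reaches the driver.
 */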

static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	__u32 seq = xo->seq.low;

	seq += skb_shinfo(skb)->gso_segs;
	if (unlikely(seq < xo->seq.low))
		return true;

	return false;
}

struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct net_device *dev = skb->dev;
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
		return skb;

	/* This skb was already validated on the upper/virtual dev */
	if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
		return skb;

	/* If the per-CPU backlog already holds packets, ask the caller to
	 * retry later so transmit ordering is preserved.
	 */
	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
				unlikely(xmit_xfrm_check_overflow(skb)))) {
		struct sk_buff *segs;

		/* Packet got rerouted, fixup features and segment it. */
		esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

		segs = skb_gso_segment(skb, esp_features);
		if (IS_ERR(segs)) {
			kfree_skb(skb);
			dev_core_stats_tx_dropped_inc(dev);
			return NULL;
		} else {
			consume_skb(skb);
			skb = segs;
		}
	}

	/* No segment list: offload the skb as-is. */
	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	/* GSO segment list: hand each segment to the type offload xmit
	 * callback, unlinking segments that complete asynchronously.
	 */
	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
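
/* Worked example for xmit_xfrm_check_overflow() above: with
 * xo->seq.low == 0xfffffffe and gso_segs == 4, the 32-bit sum wraps to
 * 0x00000002 < seq.low, so validate_xmit_xfrm() falls back to software
 * segmentation instead of letting the low sequence number roll over
 * within a single GSO burst. validate_xmit_xfrm() itself is invoked from
 * the core transmit path (validate_xmit_skb() in net/core/dev.c).
 */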

int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo,
		       struct netlink_ext_ack *extack)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_dev_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;
	bool is_packet_offload;

	if (!x->type_offload) {
		NL_SET_ERR_MSG(extack, "Type doesn't support offload");
		return -EINVAL;
	}

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad) {
		NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded");
		return -EINVAL;
	}

	if (xuo->flags &
	    ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) {
		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
		return -EINVAL;
	}

	is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;
	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return (is_packet_offload) ? -EINVAL : 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return (is_packet_offload) ? -EINVAL : 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		NL_SET_ERR_MSG(extack, "Device doesn't support offload with ESN");
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
	xso->real_dev = dev;

	if (xuo->flags & XFRM_OFFLOAD_INBOUND)
		xso->dir = XFRM_DEV_OFFLOAD_IN;
	else
		xso->dir = XFRM_DEV_OFFLOAD_OUT;

	if (is_packet_offload)
		xso->type = XFRM_DEV_OFFLOAD_PACKET;
	else
		xso->type = XFRM_DEV_OFFLOAD_CRYPTO;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->dev = NULL;
		xso->dir = 0;
		xso->real_dev = NULL;
		netdev_put(dev, &xso->dev_tracker);
		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;

		/* User explicitly requested packet offload mode and configured
		 * policy in addition to the XFRM state. So be civil to users,
		 * and return an error instead of taking the fallback path.
		 *
		 * This WARN_ON() serves as documentation for driver authors:
		 * do not return -EOPNOTSUPP in packet offload mode.
		 */
		WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
		if (err != -EOPNOTSUPP || is_packet_offload) {
			NL_SET_ERR_MSG(extack, "Device failed to offload this state");
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
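
/* Driver-side sketch (hypothetical "foo" driver, illustration only): in
 * crypto offload mode a driver may return -EOPNOTSUPP from
 * xdo_dev_state_add() to request software fallback, but per the WARN_ON()
 * above it must not do so for packet offload states.
 *
 *	static int foo_xdo_dev_state_add(struct xfrm_state *x)
 *	{
 *		if (!x->aead)	// assume hw handles AEAD algorithms only
 *			return (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) ?
 *			       -EINVAL : -EOPNOTSUPP;
 *
 *		return foo_install_sa(x);	// hypothetical helper
 *	}
 */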

int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
			struct xfrm_user_offload *xuo, u8 dir,
			struct netlink_ext_ack *extack)
{
	struct xfrm_dev_offload *xdo = &xp->xdo;
	struct net_device *dev;
	int err;

	if (!xuo->flags || xuo->flags & ~XFRM_OFFLOAD_PACKET) {
		/* Only packet offload mode is supported, which means the
		 * user must set the XFRM_OFFLOAD_PACKET bit.
		 */
		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
		return -EINVAL;
	}

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev)
		return -EINVAL;

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_policy_add) {
		xdo->dev = NULL;
		dev_put(dev);
		NL_SET_ERR_MSG(extack, "Policy offload is not supported");
		return -EINVAL;
	}

	xdo->dev = dev;
	netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC);
	xdo->real_dev = dev;
	xdo->type = XFRM_DEV_OFFLOAD_PACKET;
	switch (dir) {
	case XFRM_POLICY_IN:
		xdo->dir = XFRM_DEV_OFFLOAD_IN;
		break;
	case XFRM_POLICY_OUT:
		xdo->dir = XFRM_DEV_OFFLOAD_OUT;
		break;
	case XFRM_POLICY_FWD:
		xdo->dir = XFRM_DEV_OFFLOAD_FWD;
		break;
	default:
		xdo->dev = NULL;
		dev_put(dev);
		NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
		return -EINVAL;
	}

	err = dev->xfrmdev_ops->xdo_dev_policy_add(xp);
	if (err) {
		xdo->dev = NULL;
		xdo->real_dev = NULL;
		xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
		xdo->dir = 0;
		netdev_put(dev, &xdo->dev_tracker);
		NL_SET_ERR_MSG(extack, "Device failed to offload this policy");
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_policy_add);

bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);
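
/* Simplified sketch of the asynchronous resume path (function name is
 * hypothetical; the real callers live in the ESP output code): when
 * x->type_offload->xmit() returns -EINPROGRESS in validate_xmit_xfrm(),
 * the crypto completion callback is expected to hand the skb back to the
 * stack through xfrm_dev_resume():
 *
 *	static void foo_output_done(struct sk_buff *skb, int err)
 *	{
 *		struct xfrm_offload *xo = xfrm_offload(skb);
 *
 *		if (xo && (xo->flags & XFRM_DEV_RESUME))
 *			xfrm_dev_resume(skb);
 *	}
 */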

void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP) {
		xfrm_dev_state_flush(dev_net(dev), dev, true);
		xfrm_dev_policy_flush(dev_net(dev), dev, true);
	}

	return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_api_check(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_api_check(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}
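
/* Registration sketch (hypothetical "foo" driver, illustration only): to
 * pass xfrm_api_check() above, a device advertising NETIF_F_HW_ESP must
 * provide at least xdo_dev_state_add() and xdo_dev_state_delete() before
 * the NETDEV_REGISTER/NETDEV_FEAT_CHANGE notifiers run:
 *
 *	static const struct xfrmdev_ops foo_xfrmdev_ops = {
 *		.xdo_dev_state_add	= foo_xdo_dev_state_add,
 *		.xdo_dev_state_delete	= foo_xdo_dev_state_delete,
 *		.xdo_dev_offload_ok	= foo_xdo_dev_offload_ok,
 *	};
 *
 *	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
 *	netdev->features |= NETIF_F_HW_ESP;
 */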