// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/gso.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}

static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	__u32 seq = xo->seq.low;

	seq += skb_shinfo(skb)->gso_segs;
	if (unlikely(seq < xo->seq.low))
		return true;

	return false;
}

struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct net_device *dev = skb->dev;
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
		return skb;

	/* The packet was sent to the HW IPsec packet offload engine,
	 * but to the wrong device. Drop the packet, so it won't skip
	 * the XFRM stack.
	 */
	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->xso.dev != dev) {
		kfree_skb(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NULL;
	}

	/* This skb was already validated on the upper/virtual dev */
	if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
				unlikely(xmit_xfrm_check_overflow(skb)))) {
		struct sk_buff *segs;

		/* Packet got rerouted, fixup features and segment it. */
		esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

		segs = skb_gso_segment(skb, esp_features);
		if (IS_ERR(segs)) {
			kfree_skb(skb);
			dev_core_stats_tx_dropped_inc(dev);
			return NULL;
		} else {
			consume_skb(skb);
			skb = segs;
		}
	}

	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo,
		       struct netlink_ext_ack *extack)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_dev_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;
	bool is_packet_offload;

	if (!x->type_offload) {
		NL_SET_ERR_MSG(extack, "Type doesn't support offload");
		return -EINVAL;
	}

	if (xuo->flags &
	    ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) {
		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
		return -EINVAL;
	}

	is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if ((!is_packet_offload && x->encap) || x->tfcpad) {
		NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded");
		return -EINVAL;
	}

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return (is_packet_offload) ? -EINVAL : 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return (is_packet_offload) ? -EINVAL : 0;
	}

	if (!is_packet_offload && x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		NL_SET_ERR_MSG(extack, "Device doesn't support offload with ESN");
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
	xso->real_dev = dev;

	if (xuo->flags & XFRM_OFFLOAD_INBOUND)
		xso->dir = XFRM_DEV_OFFLOAD_IN;
	else
		xso->dir = XFRM_DEV_OFFLOAD_OUT;

	if (is_packet_offload)
		xso->type = XFRM_DEV_OFFLOAD_PACKET;
	else
		xso->type = XFRM_DEV_OFFLOAD_CRYPTO;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x, extack);
	if (err) {
		xso->dev = NULL;
		xso->dir = 0;
		xso->real_dev = NULL;
		netdev_put(dev, &xso->dev_tracker);
		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;

		/* The user explicitly requested packet offload mode and
		 * configured a policy in addition to the XFRM state. So be
		 * civil to users, and return an error instead of taking the
		 * fallback path.
		 *
		 * This WARN_ON() can be seen as documentation for driver
		 * authors not to return -EOPNOTSUPP in packet offload mode.
		 */
		WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
		if (err != -EOPNOTSUPP || is_packet_offload) {
			NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this state");
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
			struct xfrm_user_offload *xuo, u8 dir,
			struct netlink_ext_ack *extack)
{
	struct xfrm_dev_offload *xdo = &xp->xdo;
	struct net_device *dev;
	int err;

	if (!xuo->flags || xuo->flags & ~XFRM_OFFLOAD_PACKET) {
		/* We support only packet offload mode, which means the
		 * user must set the XFRM_OFFLOAD_PACKET bit.
		 */
		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
		return -EINVAL;
	}

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev)
		return -EINVAL;

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_policy_add) {
		xdo->dev = NULL;
		dev_put(dev);
		NL_SET_ERR_MSG(extack, "Policy offload is not supported");
		return -EINVAL;
	}

	xdo->dev = dev;
	netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC);
	xdo->real_dev = dev;
	xdo->type = XFRM_DEV_OFFLOAD_PACKET;
	switch (dir) {
	case XFRM_POLICY_IN:
		xdo->dir = XFRM_DEV_OFFLOAD_IN;
		break;
	case XFRM_POLICY_OUT:
		xdo->dir = XFRM_DEV_OFFLOAD_OUT;
		break;
	case XFRM_POLICY_FWD:
		xdo->dir = XFRM_DEV_OFFLOAD_FWD;
		break;
	default:
		xdo->dev = NULL;
		netdev_put(dev, &xdo->dev_tracker);
		NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
		return -EINVAL;
	}

	err = dev->xfrmdev_ops->xdo_dev_policy_add(xp, extack);
	if (err) {
		xdo->dev = NULL;
		xdo->real_dev = NULL;
		xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
		xdo->dir = 0;
		netdev_put(dev, &xdo->dev_tracker);
		NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this policy");
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_policy_add);

bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
	    ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	     !xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

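/* Descriptive comment added for clarity: drain the per-CPU xfrm backlog.
 * Packets that xfrm_dev_resume() could not hand to the driver (the TX queue
 * was frozen or stopped) were queued on sd->xfrm_backlog and the NET_TX
 * softirq was raised; here each queued skb is retried via xfrm_dev_resume().
 */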
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP) {
		xfrm_dev_state_flush(dev_net(dev), dev, true);
		xfrm_dev_policy_flush(dev_net(dev), dev, true);
	}

	return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_api_check(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_api_check(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}