// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}

/* Return true if adding this skb's GSO segment count to the low 32 bits
 * of the sequence number would wrap it, in which case the skb cannot be
 * offloaded as-is.
 */
static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	__u32 seq = xo->seq.low;

	seq += skb_shinfo(skb)->gso_segs;
	if (unlikely(seq < xo->seq.low))
		return true;

	return false;
}
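
/*
 * Illustrative sketch (not compiled, hypothetical values): demonstrates
 * the unsigned-wrap comparison that xmit_xfrm_check_overflow() relies on.
 * When the low 32 bits of the sequence number would wrap across the
 * segments of a GSO skb, validate_xmit_xfrm() below falls back to
 * software segmentation instead of handing the skb to the device.
 */
#if 0
static bool example_seq_would_wrap(void)
{
	__u32 seq_low = 0xfffffffd;	/* three packets left before wrap */
	__u32 gso_segs = 4;		/* segments this GSO skb produces */

	/* Same comparison as xmit_xfrm_check_overflow(); true here. */
	return (__u32)(seq_low + gso_segs) < seq_low;
}
#endif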

/* Validate an IPsec skb just before it is handed to the driver: segment
 * it in software when the device cannot take it as-is, run the ESP
 * offload xmit hook and restore the mac header.  Returns NULL when the
 * skb was consumed (dropped or completing asynchronously), otherwise an
 * skb (list) ready for transmission.
 */
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct net_device *dev = skb->dev;
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
		return skb;

	/* This skb was already validated on the upper/virtual dev */
	if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
		return skb;

	/* Preserve ordering: if earlier packets still sit in the per-CPU
	 * backlog, queue behind them instead of overtaking.
	 */
	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
				unlikely(xmit_xfrm_check_overflow(skb)))) {
		struct sk_buff *segs;

		/* Packet got rerouted, fixup features and segment it. */
		esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

		segs = skb_gso_segment(skb, esp_features);
		if (IS_ERR(segs)) {
			kfree_skb(skb);
			dev_core_stats_tx_dropped_inc(dev);
			return NULL;
		} else {
			consume_skb(skb);
			skb = segs;
		}
	}

	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
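
/*
 * Illustrative sketch (not compiled): where validate_xmit_xfrm() sits in
 * the transmit path.  This abbreviates the call site in net/core/dev.c's
 * validate_xmit_skb(); the elided steps are only summarized in comments.
 */
#if 0
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb,
					 struct net_device *dev, bool *again)
{
	netdev_features_t features = netif_skb_features(skb);

	/* NULL means the skb was consumed (dropped or completing
	 * asynchronously); otherwise an skb (list) ready for the driver
	 * is returned, possibly segmented in software.
	 */
	skb = validate_xmit_xfrm(skb, features, again);
	if (unlikely(!skb))
		return NULL;

	/* ... checksum offload and GSO handling elided ... */
	return skb;
}
#endif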

int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo,
		       struct netlink_ext_ack *extack)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_dev_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload) {
		NL_SET_ERR_MSG(extack, "Type doesn't support offload");
		return -EINVAL;
	}

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad) {
		NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded");
		return -EINVAL;
	}

	if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND)) {
		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
		return -EINVAL;
	}

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		NL_SET_ERR_MSG(extack, "Device doesn't support offload with ESN");
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
	xso->real_dev = dev;

	if (xuo->flags & XFRM_OFFLOAD_INBOUND)
		xso->dir = XFRM_DEV_OFFLOAD_IN;
	else
		xso->dir = XFRM_DEV_OFFLOAD_OUT;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->dev = NULL;
		xso->dir = 0;
		xso->real_dev = NULL;
		netdev_put(dev, &xso->dev_tracker);

		if (err != -EOPNOTSUPP) {
			NL_SET_ERR_MSG(extack, "Device failed to offload this state");
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

/* Retry transmission of an skb that was deferred with XFRM_DEV_RESUME;
 * if the TX queue is frozen or the xmit does not complete, park the skb
 * on the per-CPU xfrm backlog and kick the TX softirq.
 */
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);
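
/*
 * Illustrative sketch (not compiled, all foo_* names hypothetical): the
 * minimum a driver advertising NETIF_F_HW_ESP must register so that
 * xfrm_dev_state_add() above and xfrm_api_check() below accept it.
 */
#if 0
static const struct xfrmdev_ops foo_xfrmdev_ops = {
	.xdo_dev_state_add	   = foo_xdo_dev_state_add,
	.xdo_dev_state_delete	   = foo_xdo_dev_state_delete,
	/* Required only to offload states with XFRM_STATE_ESN set: */
	.xdo_dev_state_advance_esn = foo_xdo_dev_state_advance_esn,
	/* Optional per-packet veto, consulted by xfrm_dev_offload_ok(): */
	.xdo_dev_offload_ok	   = foo_xdo_dev_offload_ok,
};

static void foo_setup_xfrm_offload(struct net_device *dev)
{
	dev->xfrmdev_ops = &foo_xfrmdev_ops;
	dev->features |= NETIF_F_HW_ESP;
	dev->hw_enc_features |= NETIF_F_HW_ESP;
}
#endif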

void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_api_check(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_api_check(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}
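
/*
 * Illustrative userspace usage (addresses, SPI and key are placeholders):
 * the "offload" attribute is carried to the kernel as a struct
 * xfrm_user_offload and handled by xfrm_dev_state_add() above.
 *
 *   ip xfrm state add src 192.0.2.1 dst 192.0.2.2 \
 *           proto esp spi 0x1000 reqid 1 mode tunnel \
 *           aead 'rfc4106(gcm(aes))' 0x0102030405060708090a0b0c0d0e0f1011121314 128 \
 *           offload dev eth0 dir out
 */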