/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct sk_buff *skb2;
	struct softnet_data *sd;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb)) {
		struct net_device *dev = skb->dev;

		if (unlikely(x->xso.dev != dev)) {
			struct sk_buff *segs;

			/* Packet got rerouted, fixup features and segment it. */
			esp_features = esp_features & ~(NETIF_F_HW_ESP
							| NETIF_F_GSO_ESP);

			segs = skb_gso_segment(skb, esp_features);
			if (IS_ERR(segs)) {
				kfree_skb(skb);
				atomic_long_inc(&dev->tx_dropped);
				return NULL;
			} else {
				consume_skb(skb);
				skb = segs;
			}
		}
	}

	if (!skb->next) {
		x->outer_mode->xmit(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb2 = skb;

	do {
		struct sk_buff *nskb = skb2->next;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		x->outer_mode->xmit(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;

			if (!skb)
				return NULL;

			goto skip_push;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));

skip_push:
		skb2 = nskb;
	} while (skb2);

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

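/*
 * xfrm_dev_state_add - enable hardware offload for an xfrm state.
 *
 * Resolves the offload device, either directly from xuo->ifindex or,
 * failing that, via a route lookup on the state's addresses, and hands
 * the state to the driver through xdo_dev_state_add(). ESN states are
 * only offloaded if the driver also implements
 * xdo_dev_state_advance_esn(). Returns 0 with xso->dev left NULL when
 * no capable device is found, so the state falls back to software
 * crypto; driver errors other than -EOPNOTSUPP are propagated.
 */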
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad)
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	xso->num_exthdrs = 1;
	xso->flags = xuo->flags;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->num_exthdrs = 0;
		xso->flags = 0;
		xso->dev = NULL;
		dev_put(dev);

		if (err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm && x->type->get_mtu)) {
		mtu = x->type->get_mtu(x, xdst->child_mtu_cached);

		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

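/*
 * xfrm_api_check - sanity check a device's ESP offload advertisement.
 *
 * Called from the netdevice notifier below on NETDEV_REGISTER and
 * NETDEV_FEAT_CHANGE. A device may only advertise NETIF_F_HW_ESP if it
 * provides the mandatory xfrmdev_ops callbacks, and NETIF_F_HW_ESP_TX_CSUM
 * only on top of NETIF_F_HW_ESP; with CONFIG_XFRM_OFFLOAD disabled,
 * neither feature flag is allowed at all.
 */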
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}

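/*
 * Illustrative sketch (not part of this file): a driver that advertises
 * NETIF_F_HW_ESP is expected to pass xfrm_api_check() above by wiring up
 * its xfrmdev_ops before registering the netdevice. The foo_* names below
 * are hypothetical driver callbacks, shown only as an example:
 *
 *	static const struct xfrmdev_ops foo_xfrmdev_ops = {
 *		.xdo_dev_state_add	   = foo_xdo_dev_state_add,
 *		.xdo_dev_state_delete	   = foo_xdo_dev_state_delete,
 *		.xdo_dev_state_free	   = foo_xdo_dev_state_free,
 *		.xdo_dev_offload_ok	   = foo_xdo_dev_offload_ok,
 *		.xdo_dev_state_advance_esn = foo_xdo_dev_state_advance_esn,
 *	};
 *
 *	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
 *	netdev->features |= NETIF_F_HW_ESP;
 */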