/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
        int err;
        unsigned long flags;
        struct xfrm_state *x;
        struct sk_buff *skb2;
        struct softnet_data *sd;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!xo)
                return skb;

        if (!(features & NETIF_F_HW_ESP))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

        x = skb->sp->xvec[skb->sp->len - 1];
        if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
                return skb;

        local_irq_save(flags);
        sd = this_cpu_ptr(&softnet_data);
        err = !skb_queue_empty(&sd->xfrm_backlog);
        local_irq_restore(flags);

        if (err) {
                *again = true;
                return skb;
        }

        if (skb_is_gso(skb)) {
                struct net_device *dev = skb->dev;

                if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
                        struct sk_buff *segs;

                        /* Packet got rerouted, fixup features and segment it. */
                        esp_features = esp_features & ~(NETIF_F_HW_ESP
                                                        | NETIF_F_GSO_ESP);

                        segs = skb_gso_segment(skb, esp_features);
                        if (IS_ERR(segs)) {
                                kfree_skb(skb);
                                atomic_long_inc(&dev->tx_dropped);
                                return NULL;
                        } else {
                                consume_skb(skb);
                                skb = segs;
                        }
                }
        }

        if (!skb->next) {
                x->outer_mode->xmit(x, skb);

                xo->flags |= XFRM_DEV_RESUME;

                err = x->type_offload->xmit(x, skb, esp_features);
                if (err) {
                        if (err == -EINPROGRESS)
                                return NULL;

                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return NULL;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));

                return skb;
        }

        skb2 = skb;

        do {
                struct sk_buff *nskb = skb2->next;
                skb2->next = NULL;

                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_DEV_RESUME;

                x->outer_mode->xmit(x, skb2);

                err = x->type_offload->xmit(x, skb2, esp_features);
                if (!err) {
                        skb2->next = nskb;
                } else if (err != -EINPROGRESS) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        skb2->next = nskb;
                        kfree_skb_list(skb2);
                        return NULL;
                } else {
                        if (skb == skb2)
                                skb = nskb;

                        if (!skb)
                                return NULL;

                        goto skip_push;
                }

                skb_push(skb2, skb2->data - skb_mac_header(skb2));

skip_push:
                skb2 = nskb;
        } while (skb2);

        return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

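/*
 * xfrm_dev_state_add - try to bind an xfrm_state to an offloading
 * netdevice. Reached when userspace requests hardware offload for a
 * state; a rough, hypothetical iproute2 invocation (sketch only,
 * device name illustrative) would be:
 *
 *      ip xfrm state add ... offload dev eth0 dir out
 *
 * The ifindex and direction flags arrive here in struct
 * xfrm_user_offload; if no device is attached to the request, one is
 * looked up from the state's addresses via routing.
 */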
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo)
{
        int err;
        struct dst_entry *dst;
        struct net_device *dev;
        struct xfrm_state_offload *xso = &x->xso;
        xfrm_address_t *saddr;
        xfrm_address_t *daddr;

        if (!x->type_offload)
                return -EINVAL;

        /* We don't yet support UDP encapsulation, TFC padding and ESN. */
        if (x->encap || x->tfcpad || (x->props.flags & XFRM_STATE_ESN))
                return -EINVAL;

        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
                if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
                        saddr = &x->props.saddr;
                        daddr = &x->id.daddr;
                } else {
                        saddr = &x->id.daddr;
                        daddr = &x->props.saddr;
                }

                dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
                                        x->props.family, x->props.output_mark);
                if (IS_ERR(dst))
                        return 0;

                dev = dst->dev;

                dev_hold(dev);
                dst_release(dst);
        }

        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
                xso->dev = NULL;
                dev_put(dev);
                return 0;
        }

        xso->dev = dev;
        xso->num_exthdrs = 1;
        xso->flags = xuo->flags;

        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
                dev_put(dev);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        int mtu;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct net_device *dev = x->xso.dev;

        if (!x->type_offload || x->encap)
                return false;

        if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
            (!xdst->child->xfrm && x->type->get_mtu)) {
                mtu = x->type->get_mtu(x, xdst->child_mtu_cached);

                if (skb->len <= mtu)
                        goto ok;

                if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
                        goto ok;
        }

        return false;

ok:
        if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
                return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

        return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

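/*
 * xfrm_dev_resume - resume transmission of an skb whose ESP handling
 * completed asynchronously. validate_xmit_xfrm() sets XFRM_DEV_RESUME
 * before calling the offload type handler; the asynchronous completion
 * path (e.g. the ESP output done callback, an assumption about the
 * caller) is expected to feed the skb back in here. If the TX queue is
 * busy, the skb is parked on the per-CPU xfrm_backlog and replayed
 * later from the NET_TX softirq via xfrm_dev_backlog() below.
 */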
void xfrm_dev_resume(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret = NETDEV_TX_BUSY;
        struct netdev_queue *txq;
        struct softnet_data *sd;
        unsigned long flags;

        rcu_read_lock();
        txq = netdev_pick_tx(dev, skb, NULL);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                skb = dev_hard_start_xmit(skb, dev, txq, &ret);
        HARD_TX_UNLOCK(dev, txq);

        if (!dev_xmit_complete(ret)) {
                local_irq_save(flags);
                sd = this_cpu_ptr(&softnet_data);
                skb_queue_tail(&sd->xfrm_backlog, skb);
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

void xfrm_dev_backlog(struct softnet_data *sd)
{
        struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
        struct sk_buff_head list;
        struct sk_buff *skb;

        if (skb_queue_empty(xfrm_backlog))
                return;

        __skb_queue_head_init(&list);

        spin_lock(&xfrm_backlog->lock);
        skb_queue_splice_init(xfrm_backlog, &list);
        spin_unlock(&xfrm_backlog->lock);

        while (!skb_queue_empty(&list)) {
                skb = __skb_dequeue(&list);
                xfrm_dev_resume(skb);
        }
}
#endif

static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                return NOTIFY_BAD;

        if ((dev->features & NETIF_F_HW_ESP) &&
            (!(dev->xfrmdev_ops &&
               dev->xfrmdev_ops->xdo_dev_state_add &&
               dev->xfrmdev_ops->xdo_dev_state_delete)))
                return NOTIFY_BAD;
#else
        if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
                return NOTIFY_BAD;
#endif

        return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_unregister(struct net_device *dev)
{
        xfrm_policy_cache_flush();
        return NOTIFY_DONE;
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
        if (dev->features & NETIF_F_HW_ESP)
                xfrm_dev_state_flush(dev_net(dev), dev, true);

        xfrm_policy_cache_flush();
        return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                return xfrm_dev_register(dev);

        case NETDEV_UNREGISTER:
                return xfrm_dev_unregister(dev);

        case NETDEV_FEAT_CHANGE:
                return xfrm_dev_feat_change(dev);

        case NETDEV_DOWN:
                return xfrm_dev_down(dev);
        }
        return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
        .notifier_call = xfrm_dev_event,
};

void __net_init xfrm_dev_init(void)
{
        register_netdevice_notifier(&xfrm_dev_notifier);
}
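
/*
 * Illustration only: a sketch of the minimum a driver advertising
 * NETIF_F_HW_ESP would wire up so that xfrm_api_check() above accepts
 * it. The foo_* names are hypothetical; xdo_dev_state_add and
 * xdo_dev_state_delete are what the check insists on, the remaining
 * callbacks are optional:
 *
 *      static const struct xfrmdev_ops foo_xfrmdev_ops = {
 *              .xdo_dev_state_add      = foo_xfrm_add_state,
 *              .xdo_dev_state_delete   = foo_xfrm_del_state,
 *              .xdo_dev_state_free     = foo_xfrm_free_state,
 *              .xdo_dev_offload_ok     = foo_xfrm_offload_ok,
 *      };
 *
 *      netdev->xfrmdev_ops = &foo_xfrmdev_ops;
 *      netdev->features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
 */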