// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
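/* Transport mode: advance skb->data past the transport offset plus the
 * x->props.header_len bytes of IPsec header the stack reserved.  GSO
 * segments still carry the post-segmentation transport header, so it is
 * first moved back by the same amount.
 */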
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

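/* Tunnel mode: for GSO segments the transport header is placed right
 * behind the outer IP header (hsize bytes), then skb->data is pulled
 * past the link-layer and IPsec headers.
 */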
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

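/* BEET mode: as for tunnel mode, but the pull subtracts the BEET pseudo
 * header allowance included in x->props.header_len for IPv4 inner
 * traffic, adjusted for the v4/v6 header size difference when the outer
 * family is IPv6.
 */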
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}

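/* Called from the core transmit path (validate_xmit_skb()) for packets
 * carrying a secpath.  Performs the ESP encapsulation that was deferred
 * for device offload, falling back to software GSO/crypto when the
 * packet was rerouted to a device without NETIF_F_HW_ESP, and preserves
 * ordering with packets already deferred to the per-CPU xfrm backlog.
 */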
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct net_device *dev = skb->dev;
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
		return skb;

	/* This skb was already validated on the upper/virtual dev */
	if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
		return skb;

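	/* If earlier packets are still parked on the per-CPU backlog,
	 * defer this one too so packets are not transmitted out of order.
	 */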
	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
		struct sk_buff *segs;

		/* Packet got rerouted, fixup features and segment it. */
		esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

		segs = skb_gso_segment(skb, esp_features);
		if (IS_ERR(segs)) {
			kfree_skb(skb);
			dev_core_stats_tx_dropped_inc(dev);
			return NULL;
		} else {
			consume_skb(skb);
			skb = segs;
		}
	}

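	/* Single skb (no segment list): encapsulate it and hand it back;
	 * -EINPROGRESS means the driver took ownership for async crypto.
	 */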
	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

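	/* Segment list: encapsulate each segment on its own.  Segments
	 * taken over by async crypto (-EINPROGRESS) are unlinked from
	 * the list; any other error drops the remaining segments.
	 */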
	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

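/* Bind a state to a device for hardware offload.  When userspace does
 * not name a device, it is derived from a route lookup on the state's
 * addresses.  Returns 0 when offload is simply unavailable so that the
 * SA falls back to the software path; invalid flags or missing driver
 * capabilities are hard errors.
 */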
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo,
		       struct netlink_ext_ack *extack)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_dev_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload) {
		NL_SET_ERR_MSG(extack, "Type doesn't support offload");
		return -EINVAL;
	}

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad) {
		NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded");
		return -EINVAL;
	}

	if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND)) {
		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
		return -EINVAL;
	}

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
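		/* No device index was given: infer the device from a
		 * route lookup, with source and destination swapped for
		 * inbound states.
		 */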
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		NL_SET_ERR_MSG(extack, "Device doesn't support offload with ESN");
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
	xso->real_dev = dev;

	if (xuo->flags & XFRM_OFFLOAD_INBOUND)
		xso->dir = XFRM_DEV_OFFLOAD_IN;
	else
		xso->dir = XFRM_DEV_OFFLOAD_OUT;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->dev = NULL;
		xso->dir = 0;
		xso->real_dev = NULL;
		netdev_put(dev, &xso->dev_tracker);

		if (err != -EOPNOTSUPP) {
			NL_SET_ERR_MSG(extack, "Device failed to offload this state");
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

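/* Decide at transmit time whether this skb may take the offload path:
 * the state must support offload, the route must lead directly to the
 * offloading device, and the packet (or each of its GSO segments) must
 * fit the MTU.  The driver gets the final say via xdo_dev_offload_ok().
 */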
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

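/* Resume transmission of a packet whose deferred (XFRM_DEV_RESUME)
 * asynchronous crypto has completed.  If the TX queue is frozen or
 * stopped, the skb is parked on the per-CPU xfrm backlog and the TX
 * softirq is raised to retry later.
 */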
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

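/* Flush the per-CPU xfrm backlog from the NET_TX softirq: splice the
 * queued skbs onto a private list and retry each one.
 */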
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

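/* Sanity-check a device's advertised ESP offload features against the
 * callbacks it actually implements: NETIF_F_HW_ESP without the
 * mandatory xfrmdev_ops, or ESP TX checksum offload without ESP
 * offload itself, is rejected.
 */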
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

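/* On NETDEV_DOWN/UNREGISTER, flush all states offloaded to the device
 * so no SA keeps a reference to hardware that is going away.
 */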
static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		return xfrm_api_check(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}