// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
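/* Prepare a transport mode packet for IPsec done at layer 2: rewind the
 * transport header of GSO segments by the state's header length, then
 * pull the data pointer past the transport offset plus the IPsec header.
 */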
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

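/* Tunnel mode counterpart: point the transport header of a GSO segment
 * right behind the outer IP header (hsize bytes into the packet), then
 * pull the data pointer past the MAC and IPsec headers.
 */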
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

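/* BEET mode additionally accounts for the pseudo header that may be
 * present for IPv4 selectors (and for the v4/v6 header size difference
 * when the outer family is IPv6): phlen is subtracted from the state's
 * header length when computing the pull.
 */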
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}

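/* Each GSO segment consumes one ESP sequence number: check whether
 * adding gso_segs to the low 32 bits of the sequence number would wrap,
 * in which case the packet cannot be offloaded as-is and must be
 * segmented in software first.
 */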
static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	__u32 seq = xo->seq.low;

	seq += skb_shinfo(skb)->gso_segs;
	if (unlikely(seq < xo->seq.low))
		return true;

	return false;
}

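/* Called from the core transmit path (validate_xmit_skb()) for packets
 * carrying offload state.  Returns the skb (possibly software-segmented
 * and ESP-processed) ready for transmission, NULL when the packet was
 * consumed (dropped or in progress on the async path), or the skb
 * untouched when there is nothing to do here.
 */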
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct net_device *dev = skb->dev;
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
		return skb;

	/* The packet was sent to the HW IPsec packet offload engine,
	 * but to the wrong device. Drop the packet, so it won't skip
	 * the XFRM stack.
	 */
	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->xso.dev != dev) {
		kfree_skb(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NULL;
	}

	/* This skb was already validated on the upper/virtual dev */
	if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
				unlikely(xmit_xfrm_check_overflow(skb)))) {
		struct sk_buff *segs;

		/* Packet got rerouted, fixup features and segment it. */
		esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

		segs = skb_gso_segment(skb, esp_features);
		if (IS_ERR(segs)) {
			kfree_skb(skb);
			dev_core_stats_tx_dropped_inc(dev);
			return NULL;
		} else {
			consume_skb(skb);
			skb = segs;
		}
	}

	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

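/* Set up device offload for a state: resolve the target netdevice (from
 * xuo->ifindex, or via a route lookup on the state's addresses), check
 * that it provides the required xfrmdev_ops, and call the driver's
 * xdo_dev_state_add().  For crypto offload an -EOPNOTSUPP from the
 * driver falls back to the software path; packet offload propagates the
 * error instead.
 */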
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo,
		       struct netlink_ext_ack *extack)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_dev_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;
	bool is_packet_offload;

	if (!x->type_offload) {
		NL_SET_ERR_MSG(extack, "Type doesn't support offload");
		return -EINVAL;
	}

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad) {
		NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded");
		return -EINVAL;
	}

	if (xuo->flags &
	    ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) {
		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
		return -EINVAL;
	}

	is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;
	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return (is_packet_offload) ? -EINVAL : 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return (is_packet_offload) ? -EINVAL : 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		NL_SET_ERR_MSG(extack, "Device doesn't support offload with ESN");
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
	xso->real_dev = dev;

	if (xuo->flags & XFRM_OFFLOAD_INBOUND)
		xso->dir = XFRM_DEV_OFFLOAD_IN;
	else
		xso->dir = XFRM_DEV_OFFLOAD_OUT;

	if (is_packet_offload)
		xso->type = XFRM_DEV_OFFLOAD_PACKET;
	else
		xso->type = XFRM_DEV_OFFLOAD_CRYPTO;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->dev = NULL;
		xso->dir = 0;
		xso->real_dev = NULL;
		netdev_put(dev, &xso->dev_tracker);
		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;

		/* The user explicitly requested packet offload mode and
		 * configured a policy in addition to the XFRM state. So be
		 * civil to users and return an error instead of taking the
		 * fallback path.
		 *
		 * This WARN_ON() serves as documentation for driver authors:
		 * do not return -EOPNOTSUPP in packet offload mode.
		 */
		WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
		if (err != -EOPNOTSUPP || is_packet_offload) {
			NL_SET_ERR_MSG(extack, "Device failed to offload this state");
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

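/* A minimal sketch of the driver side (illustrative only; the "foo"
 * names are made up and not part of this file).  A driver that wants
 * states offloaded to it provides the callbacks checked above and
 * advertises the ESP features:
 *
 *	static const struct xfrmdev_ops foo_xfrmdev_ops = {
 *		.xdo_dev_state_add	= foo_xdo_dev_state_add,
 *		.xdo_dev_state_delete	= foo_xdo_dev_state_delete,
 *		.xdo_dev_state_free	= foo_xdo_dev_state_free,
 *		.xdo_dev_offload_ok	= foo_xdo_dev_offload_ok,
 *	};
 *
 *	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
 *	netdev->features |= NETIF_F_HW_ESP;
 *	netdev->hw_enc_features |= NETIF_F_HW_ESP;
 */

/* Like xfrm_dev_state_add(), but for policies.  Only packet offload mode
 * supports policy offload; the XFRM policy direction is translated to a
 * device offload direction before calling the driver's
 * xdo_dev_policy_add().
 */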
int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
			struct xfrm_user_offload *xuo, u8 dir,
			struct netlink_ext_ack *extack)
{
	struct xfrm_dev_offload *xdo = &xp->xdo;
	struct net_device *dev;
	int err;

	if (!xuo->flags || xuo->flags & ~XFRM_OFFLOAD_PACKET) {
		/* We support only packet offload mode, which means the
		 * user must set the XFRM_OFFLOAD_PACKET bit.
		 */
		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
		return -EINVAL;
	}

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev)
		return -EINVAL;

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_policy_add) {
		xdo->dev = NULL;
		dev_put(dev);
		NL_SET_ERR_MSG(extack, "Policy offload is not supported");
		return -EINVAL;
	}

	xdo->dev = dev;
	netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC);
	xdo->real_dev = dev;
	xdo->type = XFRM_DEV_OFFLOAD_PACKET;
	switch (dir) {
	case XFRM_POLICY_IN:
		xdo->dir = XFRM_DEV_OFFLOAD_IN;
		break;
	case XFRM_POLICY_OUT:
		xdo->dir = XFRM_DEV_OFFLOAD_OUT;
		break;
	case XFRM_POLICY_FWD:
		xdo->dir = XFRM_DEV_OFFLOAD_FWD;
		break;
	default:
		xdo->dev = NULL;
		dev_put(dev);
		NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
		return -EINVAL;
	}

	err = dev->xfrmdev_ops->xdo_dev_policy_add(xp);
	if (err) {
		xdo->dev = NULL;
		xdo->real_dev = NULL;
		xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
		xdo->dir = 0;
		netdev_put(dev, &xdo->dev_tracker);
		NL_SET_ERR_MSG(extack, "Device failed to offload this policy");
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_policy_add);

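/* Decide whether an skb may take the offload path: the state needs an
 * offload-capable type and no encapsulation, the dst chain has to end at
 * the offloading device (packet offload is exempt from that check), the
 * packet has to fit the cached MTU (or be GSO and validate against it),
 * and finally the driver may veto via xdo_dev_offload_ok().
 */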
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
	    ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	     !xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

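/* Resume transmission of a packet that completed asynchronous IPsec
 * processing: try to push it straight out through the device queue and,
 * if the queue is frozen or stopped, park it on the per-CPU xfrm backlog
 * and schedule the NET_TX softirq to retry.
 */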
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

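/* Drain the per-CPU backlog filled by xfrm_dev_resume(); called from
 * net_tx_action().
 */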
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

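/* Validate a device's feature set: ESP TX checksum offload requires ESP
 * offload, and advertising ESP offload requires the mandatory
 * xfrmdev_ops callbacks.  Without CONFIG_XFRM_OFFLOAD no ESP feature
 * may be advertised at all.
 */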
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP) {
		xfrm_dev_state_flush(dev_net(dev), dev, true);
		xfrm_dev_policy_flush(dev_net(dev), dev, true);
	}

	return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		return xfrm_api_check(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}
538