// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

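/* GRO receive handler for ESP: parse the SPI and sequence number,
 * attach the matching xfrm state to the skb's sec_path and feed the
 * packet to xfrm_input() for (possibly asynchronous) processing.
 * Returning ERR_PTR(-EINPROGRESS) takes the packet out of the normal
 * GRO flow.
 */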
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq);
	if (err)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

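	/* Flag the packet as received on the GRO path so the xfrm layer
	 * can complete delivery through the GRO cell after processing.
	 */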
	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input; it does all
	 * the error handling and frees the resources on error.
	 */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

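/* Fill in the outer ESP header for a GSO packet: SPI and the low
 * 32 bits of the output sequence number. The original inner protocol
 * is saved in the offload context for the segmentation handlers.
 */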
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

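/* Tunnel mode: the payload is a complete inner IP frame, so push the
 * MAC header back on and let the generic MAC-layer GSO code segment it.
 */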
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__skb_push(skb, skb->mac_len);
	return skb_mac_gso_segment(skb, features);
}

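/* Transport mode: step over the ESP header and dispatch to the inner
 * transport protocol's own gso_segment callback.
 */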
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

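/* BEET mode: locate the inner transport header, accounting for an
 * optional pseudo header (IPPROTO_BEETPH) or IPv6 extension headers,
 * then hand off to the inner protocol's gso_segment callback.
 */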
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

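/* Dispatch to the segmentation handler that matches the state's outer
 * encapsulation mode.
 */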
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

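/* GSO entry point for ESP: validate the offload state, strip the ESP
 * header and IV, and mask out features the underlying device cannot
 * provide before segmenting the inner payload.
 */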
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~NETIF_F_CSUM_MASK;

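	/* Record that the skb is being GSO segmented so esp_xmit()
	 * advances the sequence number once per resulting segment.
	 */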
	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

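/* Complete ESP receive processing once decryption (in hardware or in
 * the GRO-driven software path) has finished.
 */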
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

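/* Transmit handler for ESP offload: set up the trailer bookkeeping,
 * fill in the ESP header and IP totals, and either leave encryption to
 * capable hardware or fall back to the software ESP output path.
 */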
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

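	/* Fall back to software crypto when the device cannot offload
	 * ESP or the state is bound to a different device.
	 */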
	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

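	/* Build the 64-bit ESN from the high and low sequence words. */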
	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload)
		return 0;

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

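/* GRO/GSO callbacks registered for IPPROTO_ESP in the inet offload
 * table.
 */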
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

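/* Offload callbacks the xfrm layer invokes for ESP states. */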
static const struct xfrm_type_offload esp_type_offload = {
	.description	= "ESP4 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

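/* Register the ESP offload handlers with the xfrm layer and the inet
 * offload table.
 */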
static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");