xref: /openbmc/linux/net/ipv4/esp4_offload.c (revision f015b900)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

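/* GRO receive handler for ESP: parse the SPI, look up (or reuse) the
 * xfrm state and hand the packet to xfrm_input() for asynchronous
 * processing.  Returns ERR_PTR(-EINPROGRESS) once the packet is owned
 * by the xfrm layer; on any failure the packet is restored and flushed
 * back to the regular receive path.
 */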
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

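/* Build the outer ESP header for a GSO packet: write the SPI and the
 * low 32 bits of the output sequence number, mark the packet protocol
 * as ESP and remember the inner protocol in the offload context for
 * later segmentation.
 */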
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

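/* Segment a tunnel-mode packet, choosing the Ethernet protocol type
 * from the inner address family.
 */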
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
						       : htons(ETH_P_IP);

	return skb_eth_gso_segment(skb, features, type);
}

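/* Segment a transport-mode packet: step over the ESP header and
 * delegate to the inner (L4) protocol's gso_segment callback.
 */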
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

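/* Segment a BEET-mode packet: skip the BEET pseudo header (IPv4) or
 * the extension headers (IPv6), fix up the GSO type for the inner
 * protocol and delegate to its gso_segment callback.
 */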
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

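/* Dispatch GSO segmentation according to the outer encapsulation mode
 * of the xfrm state.
 */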
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

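/* GSO callback for ESP: validate the offload state and SPI, strip the
 * ESP header and IV, then mask out the features the device cannot
 * provide for this packet (no HW ESP at all, or no HW ESP checksumming)
 * so the software fallback handles those parts during segmentation.
 */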
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

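/* Finish receive processing once the device (or the crypto layer) has
 * decrypted the ESP payload; without CRYPTO_DONE the checksum must be
 * recomputed in software.
 */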
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

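/* Transmit path for ESP offload.  If the device cannot encrypt this
 * packet, flag CRYPTO_FALLBACK and do the ESP trailer and encryption
 * in software via esp_output_head()/esp_output_tail(); otherwise fill
 * in the ESP header, advance the sequence numbers and let the driver
 * do the crypto.
 */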
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;
	return 0;
}

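/* Registration glue: GRO/GSO callbacks for IPPROTO_ESP and the xfrm
 * type offload operations used by the ESP state machine.
 */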
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

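/* Module init: register the xfrm type offload, then the inet protocol
 * offload for ESP.
 */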
static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

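/* Module exit: tear down both registrations. */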
static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");