xref: /openbmc/linux/net/xfrm/xfrm_output.c (revision 10c1d542)
/*
 * xfrm_output.c - Common IPsec encapsulation code.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);

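/* Make sure the skb has enough headroom for the dst's transform headers
 * plus the device's link-layer header, and enough tailroom for the
 * device; reallocate the head atomically if either is missing.
 */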
static int xfrm_skb_check_space(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
		- skb_headroom(skb);
	int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);

	if (nhead <= 0) {
		if (ntail <= 0)
			return 0;
		nhead = 0;
	} else if (ntail < 0)
		ntail = 0;

	return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
}

/* Children define the path of the packet through the
 * Linux networking stack.  Thus, destinations are stackable.
 */

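/* Drop the current (xfrm) dst and return a reference to the next dst
 * in the stacked chain, which the caller attaches to the skb.
 */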
static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));

	skb_dst_drop(skb);
	return child;
}

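/* Apply one IPsec transform to the packet: ensure head/tail room,
 * build the outer mode encapsulation, validate and account the state
 * under its lock, then hand the packet to the type/crypto output
 * (which may complete asynchronously and re-enter at "resume").
 * Non-tunnel transforms further down the dst stack are applied in the
 * same pass; the loop stops at the end of the stack or at the next
 * tunnel-mode state.
 */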
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);

	if (err <= 0)
		goto resume;

	do {
		err = xfrm_skb_check_space(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		if (x->props.output_mark)
			skb->mark = x->props.output_mark;

		err = x->outer_mode->output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		spin_lock_bh(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
			err = -EINVAL;
			goto error;
		}

		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		err = x->repl->overflow(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
			goto error;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		skb_dst_force(skb);

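		/* Hardware-offloaded states only need the outer headers
		 * built here; the software path calls the full protocol
		 * output, which may return -EINPROGRESS for async crypto.
		 */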
		if (xfrm_offload(skb)) {
			x->type_offload->encap(x, skb);
		} else {
			/* Inner headers are invalid now. */
			skb->encapsulation = 0;

			err = x->type->output(x, skb);
			if (err == -EINPROGRESS)
				goto out;
		}

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));

	return 0;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
out:
	return err;
}

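/* Continuation point for both the synchronous path and asynchronous
 * crypto completions.  Each successful transform is followed by a
 * fresh pass through the dst family's local_out hook; packets that
 * still carry an xfrm dst go back through NF_INET_POST_ROUTING and
 * xfrm_output2(), otherwise they are handed to dst_output().
 */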
int xfrm_output_resume(struct sk_buff *skb, int err)
{
	struct net *net = xs_net(skb_dst(skb)->xfrm);

	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
		nf_reset(skb);

		err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
		if (unlikely(err != 1))
			goto out;

		if (!skb_dst(skb)->xfrm)
			return dst_output(net, skb->sk, skb);

		err = nf_hook(skb_dst(skb)->ops->family,
			      NF_INET_POST_ROUTING, net, skb->sk, skb,
			      NULL, skb_dst(skb)->dev, xfrm_output2);
		if (unlikely(err != 1))
			goto out;
	}

	if (err == -EINPROGRESS)
		err = 0;

out:
	return err;
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);

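/* Netfilter okfn: re-enter the transform loop once the POST_ROUTING
 * hook has accepted the packet.
 */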
static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return xfrm_output_resume(skb, 1);
}

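/* Software GSO fallback: segment the packet, free the original, and
 * push each segment through xfrm_output2() individually.  A failure
 * frees the remaining segments and propagates the error.
 */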
static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *segs;

	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
	BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		segs->next = NULL;
		err = xfrm_output2(net, sk, segs);

		if (unlikely(err)) {
			kfree_skb_list(nskb);
			return err;
		}

		segs = nskb;
	} while (segs);

	return 0;
}

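/* Main transmit entry point for xfrm-transformed packets, called from
 * the per-family output paths.  Prefers crypto hardware offload when
 * the state and device support it, falls back to software GSO
 * segmentation or checksum completion as needed, and finally enters
 * the transform loop via xfrm_output2().
 */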
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int err;

	secpath_reset(skb);

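	/* Device offload: record the state in a (duplicated) secpath so
	 * the driver can find it, let GSO packets through untouched, and
	 * skip software checksumming if the NIC handles ESP TX csum.
	 */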
	if (xfrm_dev_offload_ok(skb, x)) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
		skb->encapsulation = 1;

		sp->olen++;
		sp->xvec[skb->sp->len++] = x;
		xfrm_state_hold(x);

		if (skb_is_gso(skb)) {
			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;

			return xfrm_output2(net, sk, skb);
		}

		if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
			goto out;
	}

	if (skb_is_gso(skb))
		return xfrm_output_gso(net, sk, skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return err;
		}
	}

out:
	return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);

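/* Pull the inner header information needed for encapsulation.  For
 * states selecting on AF_UNSPEC the inner mode is derived from the
 * dst's address family; otherwise the state's own inner mode is used.
 */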
int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct xfrm_mode *inner_mode;

	if (x->sel.family == AF_UNSPEC)
		inner_mode = xfrm_ip2inner_mode(x,
				xfrm_af2proto(skb_dst(skb)->ops->family));
	else
		inner_mode = x->inner_mode;

	if (inner_mode == NULL)
		return -EAFNOSUPPORT;
	return inner_mode->afinfo->extract_output(x, skb);
}
EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);

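/* Report an over-MTU (or similar local) error back to the sending
 * socket via the address family's local_error handler.  Note that
 * xfrm_state_get_afinfo() returns with the RCU read lock held, which
 * is why it is released explicitly here.
 */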
void xfrm_local_error(struct sk_buff *skb, int mtu)
{
	unsigned int proto;
	struct xfrm_state_afinfo *afinfo;

	if (skb->protocol == htons(ETH_P_IP))
		proto = AF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6))
		proto = AF_INET6;
	else
		return;

	afinfo = xfrm_state_get_afinfo(proto);
	if (afinfo)
		afinfo->local_error(skb, mtu);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_local_error);
293