xref: /openbmc/linux/net/xfrm/xfrm_ipcomp.c (revision 8569c914)
/*
 * IP Payload Compression Protocol (IPComp) - RFC3173.
 *
 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2003-2008 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Todo:
 *   - Tunable compression parameters.
 *   - Compression stats.
 *   - Adaptive compression.
 */

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/ipcomp.h>
#include <net/xfrm.h>

struct ipcomp_tfms {
	struct list_head list;
	struct crypto_comp **tfms;
	int users;
};

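/*
 * The per-cpu scratch buffers and per-cpu compression transforms are
 * shared by all IPComp states.  They are reference counted, and their
 * allocation and freeing are serialised by ipcomp_resource_mutex.
 */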
static DEFINE_MUTEX(ipcomp_resource_mutex);
static void **ipcomp_scratches;
static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list);

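/*
 * Decompress the IPComp payload into the per-cpu scratch buffer, then
 * copy the result back into the skb, growing it with page fragments
 * once the linear area runs out of tailroom.
 */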
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	const int cpu = get_cpu();
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		goto out;

	if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
		err = -EINVAL;
		goto out;
	}

	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	skb->truesize += len;
	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;

		err = -EMSGSIZE;
		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			goto out;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		frag->page = alloc_page(GFP_ATOMIC);

		err = -ENOMEM;
		if (!frag->page)
			goto out;

		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		memcpy(page_address(frag->page), scratch, len);

		frag->page_offset = 0;
		frag->size = len;
		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	err = 0;

out:
	put_cpu();
	return err;
}

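/*
 * Input path: strip the IPComp header, decompress the original payload
 * and return the next header protocol number on success.
 */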
int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int nexthdr;
	int err = -ENOMEM;
	struct ip_comp_hdr *ipch;

	if (skb_linearize_cow(skb))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	/* Remove ipcomp header and decompress original payload */
	ipch = (void *)skb->data;
	nexthdr = ipch->nexthdr;

	skb->transport_header = skb->network_header + sizeof(*ipch);
	__skb_pull(skb, sizeof(*ipch));
	err = ipcomp_decompress(x, skb);
	if (err)
		goto out;

	err = nexthdr;

out:
	return err;
}
EXPORT_SYMBOL_GPL(ipcomp_input);

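/*
 * Compress the payload into the per-cpu scratch buffer.  Give up with
 * -EMSGSIZE if the result plus the IPComp header would not be smaller
 * than the original; otherwise copy the compressed data back into the
 * skb and trim it to the new length.
 */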
static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	u8 *start = skb->data;
	const int cpu = get_cpu();
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err;

	local_bh_disable();
	err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
	local_bh_enable();
	if (err)
		goto out;

	if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) {
		err = -EMSGSIZE;
		goto out;
	}

	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
	put_cpu();

	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
	return 0;

out:
	put_cpu();
	return err;
}

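/*
 * Output path: compress payloads above the algorithm's threshold and
 * install the IPComp header.  Packets that are too small, or that fail
 * to compress, are sent out unchanged.
 */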
int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_comp_hdr *ipch;
	struct ipcomp_data *ipcd = x->data;

	if (skb->len < ipcd->threshold) {
		/* Don't bother compressing */
		goto out_ok;
	}

	if (skb_linearize_cow(skb))
		goto out_ok;

	err = ipcomp_compress(x, skb);
	if (err)
		goto out_ok;

	/* Install ipcomp header, convert into ipcomp datagram. */
	ipch = ip_comp_hdr(skb);
	ipch->nexthdr = *skb_mac_header(skb);
	ipch->flags = 0;
	ipch->cpi = htons((u16)ntohl(x->id.spi));
	*skb_mac_header(skb) = IPPROTO_COMP;
out_ok:
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}
EXPORT_SYMBOL_GPL(ipcomp_output);

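/*
 * Drop a reference to the per-cpu scratch buffers and free them when
 * the last user goes away.  Called with ipcomp_resource_mutex held.
 */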
static void ipcomp_free_scratches(void)
{
	int i;
	void **scratches;

	if (--ipcomp_scratch_users)
		return;

	scratches = ipcomp_scratches;
	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

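/*
 * Take a reference to the per-cpu scratch buffers, allocating them on
 * first use.  Called with ipcomp_resource_mutex held.
 */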
static void **ipcomp_alloc_scratches(void)
{
	int i;
	void **scratches;

	if (ipcomp_scratch_users++)
		return ipcomp_scratches;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	ipcomp_scratches = scratches;

	for_each_possible_cpu(i) {
		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
		if (!scratch)
			return NULL;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;
}

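/*
 * Drop a reference to a per-cpu transform set and free it, along with
 * its list entry, when the last user goes away.
 */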
static void ipcomp_free_tfms(struct crypto_comp **tfms)
{
	struct ipcomp_tfms *pos;
	int cpu;

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		if (pos->tfms == tfms)
			break;
	}

	WARN_ON(!pos);

	if (--pos->users)
		return;

	list_del(&pos->list);
	kfree(pos);

	if (!tfms)
		return;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
		crypto_free_comp(tfm);
	}
	free_percpu(tfms);
}

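/*
 * Find an existing per-cpu transform set for this algorithm, or
 * allocate a fresh one and add it to ipcomp_tfms_list.
 */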
static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
{
	struct ipcomp_tfms *pos;
	struct crypto_comp **tfms;
	int cpu;

	/* This can be any valid CPU ID so we don't need locking. */
	cpu = raw_smp_processor_id();

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		struct crypto_comp *tfm;

		tfms = pos->tfms;
		tfm = *per_cpu_ptr(tfms, cpu);

		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
			pos->users++;
			return tfms;
		}
	}

	pos = kmalloc(sizeof(*pos), GFP_KERNEL);
	if (!pos)
		return NULL;

	pos->users = 1;
	INIT_LIST_HEAD(&pos->list);
	list_add(&pos->list, &ipcomp_tfms_list);

	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
	if (!tfms)
		goto error;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
							    CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			goto error;
		*per_cpu_ptr(tfms, cpu) = tfm;
	}

	return tfms;

error:
	ipcomp_free_tfms(tfms);
	return NULL;
}

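/* Release the transforms and scratch buffers held by this state. */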
static void ipcomp_free_data(struct ipcomp_data *ipcd)
{
	if (ipcd->tfms)
		ipcomp_free_tfms(ipcd->tfms);
	ipcomp_free_scratches();
}

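/*
 * Tear down an IPComp state: delete any tunnel state it set up and
 * release its shared resources.
 */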
void ipcomp_destroy(struct xfrm_state *x)
{
	struct ipcomp_data *ipcd = x->data;

	if (!ipcd)
		return;
	xfrm_state_delete_tunnel(x);
	mutex_lock(&ipcomp_resource_mutex);
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
}
EXPORT_SYMBOL_GPL(ipcomp_destroy);

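/*
 * Initialise an IPComp state from its compression algorithm: take
 * references to the shared scratch buffers and per-cpu transforms and
 * record the algorithm's compression threshold.
 */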
int ipcomp_init_state(struct xfrm_state *x)
{
	int err;
	struct ipcomp_data *ipcd;
	struct xfrm_algo_desc *calg_desc;

	err = -EINVAL;
	if (!x->calg)
		goto out;

	if (x->encap)
		goto out;

	err = -ENOMEM;
	ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
	if (!ipcd)
		goto out;

	mutex_lock(&ipcomp_resource_mutex);
	if (!ipcomp_alloc_scratches())
		goto error;

	ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
	if (!ipcd->tfms)
		goto error;
	mutex_unlock(&ipcomp_resource_mutex);

	calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
	BUG_ON(!calg_desc);
	ipcd->threshold = calg_desc->uinfo.comp.threshold;
	x->data = ipcd;
	err = 0;
out:
	return err;

error:
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
	goto out;
}
EXPORT_SYMBOL_GPL(ipcomp_init_state);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");