/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16
#endif

static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Return: a new buffer with data pointers set to the specified size.
 *
 * NOTE:
 * Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;
#ifdef CONFIG_TIPC_CRYPTO
	unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
#else
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
#endif

	skb = alloc_skb_fclone(buf_size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}
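
/* Example (illustrative sketch, not part of the build): allocating room for
 * a BASIC_H_SIZE header plus 100 bytes of user data.
 *
 *	struct sk_buff *skb = tipc_buf_acquire(BASIC_H_SIZE + 100, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *
 * On return, skb->data already spans the requested BASIC_H_SIZE + 100 bytes,
 * while BUF_HEADROOM sits unused in front of it for a later link-level
 * header.
 */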

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	return buf;
}
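
/* Example (illustrative sketch, not part of the build): creating a dataless
 * connection-manager probe, mirroring the pattern used by the socket layer.
 * The address/port variables are placeholders.
 *
 *	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
 *			      peer_node, self, peer_port, own_port, TIPC_OK);
 *	if (skb)
 *		__skb_queue_tail(&xmitq, skb);
 */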

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf:     in:  the buffer to append. Always defined
 *            out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		*buf = NULL;
		if (skb_has_frag_list(frag) && __skb_linearize(frag))
			goto err;
		frag = skb_unshare(frag, GFP_ATOMIC);
		if (unlikely(!frag))
			goto err;
		head = *headbuf = frag;
		TIPC_SKB_CB(head)->tail = NULL;
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = 0;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
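
/* Example (illustrative sketch, not part of the build): feeding received
 * fragments into tipc_buf_append() until a complete message emerges. The
 * names frag_rxq and deliver() are hypothetical.
 *
 *	struct sk_buff *head = NULL, *frag;
 *
 *	while ((frag = __skb_dequeue(&frag_rxq))) {
 *		if (tipc_buf_append(&head, &frag))
 *			deliver(frag);
 *	}
 *
 * On completion, frag holds the reassembled message and head has been reset
 * to NULL by the callee; on error both pointers are reset to NULL, so the
 * loop can simply continue with the next fragment.
 */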

/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @dlen: size of data to be appended
 * @mss: max allowable size of buffer
 * @txq: queue to append to
 *
 * Return: the number of 1k blocks appended, or a negative errno on failure
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

	do {
		if (!skb || skb->len >= mss) {
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	} while (rem > 0);
	return total - accounted;
}
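
/* Example (illustrative sketch, not part of the build): a stream sender
 * using the returned block count for send-window accounting, roughly as the
 * TIPC socket layer does. The names blocks, send, maxpkt and txq are
 * placeholders.
 *
 *	rc = tipc_msg_append(hdr, m, send, maxpkt, txq);
 *	if (unlikely(rc < 0))
 *		return rc;
 *	blocks += rc;
 */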

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.  The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE || hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}
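
/* Example (illustrative sketch, not part of the build): validate before
 * reading header fields. The double pointer matters, since the flow control
 * branch above may replace the skb; callers must keep using the updated
 * pointer.
 *
 *	if (unlikely(!tipc_msg_validate(&skb))) {
 *		kfree_skb(skb);
 *		return;
 *	}
 *	hdr = buf_msg(skb);
 *
 * After this point the full header is known to be linear and in range.
 */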

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer? */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;
		/* Allocate a new fragment */
		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);
		/* Copy header & data to the fragment */
		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;
		/* Update the fragment's header */
		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}
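
/* Worked example for the arithmetic above (assuming INT_H_SIZE is 40 bytes):
 * with pktmax = 1500, dmax = 1460. A message of dsz = 3000 bytes then gives
 * nof_fragms = 3000 / 1460 + 1 = 3, and the loop emits fragments carrying
 * 1460, 1460 and 3000 % 1460 = 80 bytes of data, each behind a fresh
 * INT_H_SIZE header copied from @hdr.
 */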

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: buffer offset for fragmented messages (FIXME)
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Return: message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Return: "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
			    u32 max)
{
	struct tipc_msg *bmsg = buf_msg(bskb);
	u32 msz, bsz, offset, pad;

	msz = msg_size(msg);
	bsz = msg_size(bmsg);
	offset = align(bsz);
	pad = offset - bsz;

	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (offset + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
	msg_set_size(bmsg, offset + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}
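
/* Worked example for the padding math above: with the bundle currently at
 * bsz = 45 bytes, offset = align(45) = 48 and pad = 3, so the new message is
 * copied in at offset 48 and the outer size becomes 48 + msz. Keeping every
 * inner message on a 4-byte boundary is what lets tipc_msg_extract() walk a
 * bundle with pos += align(imsz).
 */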

/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb is a candidate for bundling, now or
 * later; when bundling has been done this time, the skb is consumed and the
 * skb pointer is set to NULL. Otherwise "false" if the skb cannot be bundled
 * at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
			 u32 dnode, bool *new_bundle)
{
	struct tipc_msg *msg, *inner, *outer;
	u32 tsz;

	/* First, check if the new buffer is suitable for bundling */
	msg = buf_msg(*skb);
	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (mss <= INT_H_SIZE + msg_size(msg))
		return false;

	/* OK so far, but is there a last/target buffer to bundle with? */
	if (unlikely(!tskb))
		return true;

	/* Is it a bundle already? Try to bundle the new message to it */
	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
		*new_bundle = false;
		goto bundle;
	}

	/* Make a new bundle of the two messages if possible */
	tsz = msg_size(buf_msg(tskb));
	if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
		return true;
	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
				      GFP_ATOMIC)))
		return true;
	inner = buf_msg(tskb);
	skb_push(tskb, INT_H_SIZE);
	outer = buf_msg(tskb);
	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
		      dnode);
	msg_set_importance(outer, msg_importance(inner));
	msg_set_size(outer, INT_H_SIZE + tsz);
	msg_set_msgcnt(outer, 1);
	*new_bundle = true;

bundle:
	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
		consume_skb(*skb);
		*skb = NULL;
	}
	return true;
}
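
/* Example (illustrative sketch, not part of the build): how a sender might
 * use the above against the tail of its transmit queue. The names txq, mss
 * and dnode are placeholders.
 *
 *	bool new_bundle;
 *
 *	if (tipc_msg_try_bundle(skb_peek_tail(&txq), &skb, mss, dnode,
 *				&new_bundle) && !skb)
 *		return;
 *	__skb_queue_tail(&txq, skb);
 *
 * A NULL skb after the call means the message was absorbed into the tail
 * buffer; otherwise it is queued as-is, whether or not it may be bundled
 * into later.
 */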

/**
 *  tipc_msg_extract(): extract bundled inner packet from buffer
 *  @skb: buffer to be extracted from.
 *  @iskb: extracted inner buffer, to be returned
 *  @pos: position in outer message of msg to be extracted; updated to the
 *        position of the next message on success
 *  Consumes outer buffer when last packet extracted
 *  Return: true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}
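
/* Example (illustrative sketch, not part of the build): unpacking every
 * message from a bundle. The name process() is hypothetical.
 *
 *	struct sk_buff *iskb;
 *	int pos = 0;
 *
 *	while (tipc_msg_extract(skb, &iskb, &pos))
 *		process(iskb);
 *
 * The final, failing call frees the outer skb, so the caller must not touch
 * it after the loop.
 */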

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb:  buffer containing message to be reversed; will be consumed
 * @err:  error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Return: true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

	if (msg_dest_droppable(_hdr))
		goto exit;
	if (msg_errcode(_hdr))
		goto exit;

	/* Never return SHORT header */
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;

	/* Don't return data along with SYN+; the sender has a clone */
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;

	/* Allocate new buffer to return */
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

	/* Build reverse header in new buffer */
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	skb_orphan(_skb);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}

bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			pr_err_ratelimited("Failed to clone buffer chain\n");
			return false;
		}
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: pointer to associated network namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Return: true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 scope = msg_lookup_scope(msg);
	u32 self = tipc_own_addr(net);
	u32 inst = msg_nameinst(msg);
	struct tipc_socket_addr sk;
	struct tipc_uaddr ua;

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
	if (msg_reroute_cnt(msg))
		return false;
	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope,
		   msg_nametype(msg), inst, inst);
	sk.node = tipc_scope2node(net, scope);
	if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
		return false;
	msg_incr_reroute_cnt(msg);
	if (sk.node != self)
		msg_set_prevnode(msg, self);
	msg_set_destnode(msg, sk.node);
	msg_set_destport(msg, sk.ref);
	*err = TIPC_OK;

	return true;
}

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed to assemble buffer\n");
	return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}

bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 * Return: true if the buffer was queued, false if it was dropped as a
 * duplicate of an already queued sequence number
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return true;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return true;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return true;
	}
	kfree_skb(skb);
	return false;
}
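
/* Example (illustrative sketch, not part of the build): out-of-order arrival
 * into an initially empty deferred queue dfq, with buffers carrying sequence
 * numbers 5, 3, 4 and a duplicate 4:
 *
 *	__tipc_skb_queue_sorted(&dfq, 5, skb5);    queue: 5
 *	__tipc_skb_queue_sorted(&dfq, 3, skb3);    queue: 3 5
 *	__tipc_skb_queue_sorted(&dfq, 4, skb4);    queue: 3 4 5
 *	__tipc_skb_queue_sorted(&dfq, 4, dup);     dup freed, returns false
 */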

void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}
857