xref: /openbmc/linux/net/tipc/msg.c (revision 4a3fad70)
/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"

#define MAX_FORWARD_SIZE 1024
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16

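/* Round a size up to the next multiple of four bytes, the alignment unit
 * used for TIPC message boundaries.
 */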
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 *       There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;

	skb = alloc_skb_fclone(buf_size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}

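/**
 * tipc_msg_init - initialize a TIPC message header in place
 * @own_node: node address recorded as previous (and, for long headers,
 *            originating) node
 * @m: message header to initialize
 * @user: message user id
 * @type: message type
 * @hsize: header size, in bytes
 * @dnode: destination node, set only when the header is longer than
 *         SHORT_H_SIZE
 */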
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

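/**
 * tipc_msg_create - allocate a buffer and build a complete, empty message
 * @user: message user id
 * @type: message type
 * @hdr_sz: header size, in bytes
 * @data_sz: size of the message data area, in bytes
 * @dnode: destination node
 * @onode: originating node
 * @dport: destination port
 * @oport: originating port
 * @errcode: error code carried in the header, if any
 *
 * Returns a new buffer on success, otherwise NULL.
 */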
struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf:     in:  the buffer to append. Always defined
 *            out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
			goto err;
		head = *headbuf = frag;
		*buf = NULL;
		TIPC_SKB_CB(head)->tail = NULL;
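		/* Remember the tail of any pre-existing fragment list, so
		 * later fragments can be appended without re-walking it.
		 */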
		if (skb_is_nonlinear(head)) {
			skb_walk_frags(head, tail) {
				TIPC_SKB_CB(head)->tail = tail;
			}
		} else {
			skb_frag_list_init(head);
		}
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = false;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should have.  The routine also
 * ensures that the entire message header is stored in the main fragment of
 * the message buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) > 4)) {
		skb = skb_copy(skb, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;
	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = true;
	return true;
}

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: Start offset into the user data (not used by this routine)
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
		   int offset, int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	int msz = mhsz + dsz;
	int pktno = 1;
	int pktsz;
	int pktrem = pktmax;
	int drem = dsz;
	struct tipc_msg pkthdr;
	struct sk_buff *skb;
	char *pktpos;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);
		if (unlikely(!skb))
			return -ENOMEM;
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}

/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @skb: the buffer to append to ("bundle")
 * @msg:  message to be appended
 * @mtu:  max allowable size for the bundle buffer
 * Consumes buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
{
	struct tipc_msg *bmsg;
	unsigned int bsz;
	unsigned int msz = msg_size(msg);
	u32 start, pad;
	u32 max = mtu - INT_H_SIZE;

	if (likely(msg_user(msg) == MSG_FRAGMENTER))
		return false;
	if (!skb)
		return false;
	bmsg = buf_msg(skb);
	bsz = msg_size(bmsg);
	start = align(bsz);
	pad = start - bsz;

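	/* Tunnel and broadcast protocol messages are never bundled, and the
	 * target buffer must be a MSG_BUNDLER with room for the padded
	 * message within the link MTU.
	 */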
	if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
		return false;
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
		return false;
	if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
		return false;
	if (unlikely(skb_tailroom(skb) < (pad + msz)))
		return false;
	if (unlikely(max < (start + msz)))
		return false;
	if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
	    (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
		return false;

	skb_put(skb, pad + msz);
	skb_copy_to_linear_data_offset(skb, start, msg, msz);
	msg_set_size(bmsg, start + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}

/**
 *  tipc_msg_extract(): extract bundled inner packet from buffer
 *  @skb: buffer to be extracted from.
 *  @iskb: extracted inner buffer, to be returned
 *  @pos: position in outer message of msg to be extracted.
 *        Returns position of next msg
 *  Consumes outer buffer when last packet extracted
 *  Returns true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *msg;
	int imsz, offset;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	msg = buf_msg(skb);
	offset = msg_hdr_sz(msg) + *pos;
	if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE)))
		goto none;

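	/* Clone the outer buffer, then pull and trim the clone down to the
	 * inner message before validating it.
	 */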
	*iskb = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!*iskb))
		goto none;
	skb_pull(*iskb, offset);
	imsz = msg_size(buf_msg(*iskb));
	skb_trim(*iskb, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;
	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}

/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
 * @skb: buffer to be created, appended to and returned in case of success
 * @msg: message to be appended
 * @mtu: max allowable size for the bundle buffer, inclusive header
 * @dnode: destination node for message. (Not always present in header)
 * Returns true if success, otherwise false
 */
bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
			  u32 mtu, u32 dnode)
{
	struct sk_buff *_skb;
	struct tipc_msg *bmsg;
	u32 msz = msg_size(msg);
	u32 max = mtu - INT_H_SIZE;

	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (msz > (max / 2))
		return false;

	_skb = tipc_buf_acquire(max, GFP_ATOMIC);
	if (!_skb)
		return false;

	skb_trim(_skb, INT_H_SIZE);
	bmsg = buf_msg(_skb);
	tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
		      INT_H_SIZE, dnode);
	if (msg_isdata(msg))
		msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
	else
		msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
	msg_set_seqno(bmsg, msg_seqno(msg));
	msg_set_ack(bmsg, msg_ack(msg));
	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
	tipc_msg_bundle(_skb, msg, mtu);
	*skb = _skb;
	return true;
}

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; may be replaced.
 * @err: error code to be set in message, if any
 * Consumes buffer on failure
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *hdr;
	struct tipc_msg ohdr;
	int dlen;

	if (skb_linearize(_skb))
		goto exit;
	hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
	if (msg_dest_droppable(hdr))
		goto exit;
	if (msg_errcode(hdr))
		goto exit;

	/* Take a copy of original header before altering message */
	memcpy(&ohdr, hdr, msg_hdr_sz(hdr));

	/* Never return SHORT header; expand by replacing buffer if necessary */
	if (msg_short(hdr)) {
		*skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
		if (!*skb)
			goto exit;
		memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
		kfree_skb(_skb);
		_skb = *skb;
		hdr = buf_msg(_skb);
		memcpy(hdr, &ohdr, BASIC_H_SIZE);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	}

	if (skb_cloned(_skb) &&
	    pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
		goto exit;

	/* reassign after skb header modifications */
	hdr = buf_msg(_skb);
	/* Now reverse the concerned fields */
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(&ohdr));
	msg_set_destport(hdr, msg_origport(&ohdr));
	msg_set_destnode(hdr, msg_prevnode(&ohdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
	skb_trim(_skb, msg_size(hdr));
	skb_orphan(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: the applicable net namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Returns true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport, dnode;
	u32 onode = tipc_own_addr(net);

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
	if (msg_reroute_cnt(msg))
		return false;
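	/* Look the service name up in the name table to find a new
	 * destination port and node.
	 */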
	dnode = addr_domain(net, msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(net, msg_nametype(msg),
				       msg_nameinst(msg), &dnode);
	if (!dport)
		return false;
	msg_incr_reroute_cnt(msg);
	if (dnode != onode)
		msg_set_prevnode(msg, onode);
	msg_set_destnode(msg, dnode);
	msg_set_destport(msg, dport);
	*err = TIPC_OK;

	if (!skb_cloned(skb))
		return true;

	/* Unclone buffer in case it was bundled */
	if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
		return false;

	return true;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}

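/* tipc_msg_pskb_copy() - copy a buffer chain, setting the destination node
 * of each copied message to @dst, and append the copies to @cpy.
 * Returns true on success; on allocation failure purges @cpy and returns false.
 */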
bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list the buffer is to be inserted into
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 */
void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return;
	}
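	/* A buffer with this sequence number is already queued: drop the
	 * duplicate.
	 */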
	kfree_skb(skb);
}

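/* tipc_skb_reject() - reverse the message in @skb, setting error code @err,
 * and queue the rejected message on @xmitq if the reversal succeeds.
 */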
void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}