/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"

#define MAX_FORWARD_SIZE 1024
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16

static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
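 *
 * The buffer is allocated with alloc_skb_fclone(), so a later clone of it
 * (e.g. for retransmission) can reuse the pre-allocated companion skb
 * instead of allocating again; the total allocation is rounded up to a
 * multiple of four bytes.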
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;

	skb = alloc_skb_fclone(buf_size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}

/* tipc_msg_init(): initialize a TIPC message header
 * @own_node: this node's address; set as previous node, and as origin node
 *            when the header is large enough to carry one
 * @m: header area to initialize; the first @hsize bytes are zeroed first
 * @user: message user (protocol level)
 * @type: message type
 * @hsize: header size; origin and destination node are only set when the
 *         header is larger than SHORT_H_SIZE
 * @dnode: destination node address
 */
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

/* tipc_msg_create(): allocate and initialize a complete message buffer
 * Returns a buffer of @hdr_sz + @data_sz bytes with header, port and error
 * fields filled in, or NULL if allocation fails. The data area is left
 * uninitialized for the caller to fill.
 */
struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf: in:  the buffer to append. Always defined
 *        out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
			goto err;
		head = *headbuf = frag;
		*buf = NULL;
		TIPC_SKB_CB(head)->tail = NULL;
		if (skb_is_nonlinear(head)) {
			skb_walk_frags(head, tail) {
				TIPC_SKB_CB(head)->tail = tail;
			}
		} else {
			skb_frag_list_init(head);
		}
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = false;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}

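/* A minimal sketch of the calling pattern tipc_buf_append() expects,
 * following the contract above; next_frag() and deliver() are placeholders
 * for whatever the caller uses (tipc_msg_assemble() and tipc_msg_reassemble()
 * below are real in-file callers):
 *
 *	struct sk_buff *head = NULL, *frag;
 *
 *	while ((frag = next_frag())) {
 *		if (tipc_buf_append(&head, &frag)) {
 *			deliver(frag);	(frag is now the complete message)
 *			continue;
 *		}
 *		if (!head)
 *			break;		(error: both buffers were freed)
 *	}
 */
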
/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;
	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = true;
	return true;
}

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Returns 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer?
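	 * The payload is copied fragment by fragment straight out of the
	 * linear data area below, so everything must be linear first.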
	 */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;
		/* Allocate a new fragment */
		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);
		/* Copy header & data to the fragment */
		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;
		/* Update the fragment's header */
		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed?
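	 * If the whole message (header plus data) fits in a single packet of
	 * size @pktmax it is sent in one buffer, and the fragmentation code
	 * further below is skipped.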
	 */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}

/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @skb: the buffer to append to ("bundle")
 * @msg: message to be appended
 * @mtu: max allowable size for the bundle buffer
 * Consumes buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
{
	struct tipc_msg *bmsg;
	unsigned int bsz;
	unsigned int msz = msg_size(msg);
	u32 start, pad;
	u32 max = mtu - INT_H_SIZE;

	if (likely(msg_user(msg) == MSG_FRAGMENTER))
		return false;
	if (!skb)
		return false;
	bmsg = buf_msg(skb);
	bsz = msg_size(bmsg);
	start = align(bsz);
	pad = start - bsz;

	if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
		return false;
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
		return false;
	if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
		return false;
	if (unlikely(skb_tailroom(skb) < (pad + msz)))
		return false;
	if (unlikely(max < (start + msz)))
		return false;
	if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
	    (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
		return false;

	skb_put(skb, pad + msz);
	skb_copy_to_linear_data_offset(skb, start, msg, msz);
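	/* Account for the newly appended message in the bundle header */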
	msg_set_size(bmsg, start + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted.
 *       Returns position of next msg
 * Consumes outer buffer when last packet extracted
 * Returns true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}

/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
 * @skb: buffer to be created, appended to and returned in case of success
 * @msg: message to be appended
 * @mtu: max allowable size for the bundle buffer, inclusive header
 * @dnode: destination node for message. (Not always present in header)
 * Returns true if success, otherwise false
 */
bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
			  u32 mtu, u32 dnode)
{
	struct sk_buff *_skb;
	struct tipc_msg *bmsg;
	u32 msz = msg_size(msg);
	u32 max = mtu - INT_H_SIZE;

	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (msz > (max / 2))
		return false;

	_skb = tipc_buf_acquire(max, GFP_ATOMIC);
	if (!_skb)
		return false;

	skb_trim(_skb, INT_H_SIZE);
	bmsg = buf_msg(_skb);
	tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
		      INT_H_SIZE, dnode);
	if (msg_isdata(msg))
		msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
	else
		msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
	msg_set_seqno(bmsg, msg_seqno(msg));
	msg_set_ack(bmsg, msg_ack(msg));
	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
	tipc_msg_bundle(_skb, msg, mtu);
	*skb = _skb;
	return true;
}

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

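	/* Don't bounce a message if the sender has flagged it as droppable
	 * on delivery failure, or if it already carries an error code (i.e.
	 * it is itself a rejected message; bouncing it again could loop).
	 */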
	if (msg_dest_droppable(_hdr))
		goto exit;
	if (msg_errcode(_hdr))
		goto exit;

	/* Never return SHORT header */
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;

	/* Don't return data along with SYN+, - sender has a clone */
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;

	/* Allocate new buffer to return */
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

	/* Build reverse header in new buffer */
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	skb_orphan(_skb);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}

/* tipc_msg_skb_clone(): clone every buffer in @msg onto the tail of @cpy;
 * purges @cpy and returns false if any clone fails
 */
bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			pr_err_ratelimited("Failed to clone buffer chain\n");
			return false;
		}
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Returns true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport, dnode;
	u32 onode = tipc_own_addr(net);

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
	if (msg_reroute_cnt(msg))
		return false;
	dnode = tipc_scope2node(net, msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(net, msg_nametype(msg),
				       msg_nameinst(msg), &dnode);
	if (!dport)
		return false;
	msg_incr_reroute_cnt(msg);
	if (dnode != onode)
		msg_set_prevnode(msg, onode);
	msg_set_destnode(msg, dnode);
	msg_set_destport(msg, dport);
	*err = TIPC_OK;

	if (!skb_cloned(skb))
		return true;

	return true;
}

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed to assemble buffer\n");
	return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}

/* tipc_msg_pskb_copy(): copy every buffer in @msg onto the tail of @cpy,
 * with the destination node of each copy set to @dst; purges @cpy and
 * returns false if any copy fails
 */
bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 * A buffer with a duplicate sequence number is freed rather than inserted.
 */
void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return;
	}
	kfree_skb(skb);
}

/* tipc_skb_reject(): reverse the message in @skb with error code @err and,
 * if the reversal succeeds, queue the rejected message on @xmitq
 */
void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}