/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16
#endif

static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
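
/* Worked example (illustrative, not part of the original file): align()
 * rounds up to the next multiple of four, e.g. align(61) = (61 + 3) & ~3u
 * = 64. tipc_buf_acquire() below applies the same rounding when sizing the
 * underlying allocation.
 */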

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 *       There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;
#ifdef CONFIG_TIPC_CRYPTO
	unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
#else
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
#endif

	skb = alloc_skb_fclone(buf_size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}
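
/* Illustrative sketch (not part of the original file): how a caller might
 * build a minimal connection-level message with tipc_msg_create(). The
 * user/type values are only examples and "xmitq" is a placeholder queue.
 *
 *	struct sk_buff *skb;
 *
 *	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
 *			      dnode, onode, dport, oport, TIPC_OK);
 *	if (skb)
 *		__skb_queue_tail(&xmitq, skb);
 */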

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf:     in:  the buffer to append. Always defined
 *            out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
			goto err;
		head = *headbuf = frag;
		*buf = NULL;
		TIPC_SKB_CB(head)->tail = NULL;
		if (skb_is_nonlinear(head)) {
			skb_walk_frags(head, tail) {
				TIPC_SKB_CB(head)->tail = tail;
			}
		} else {
			skb_frag_list_init(head);
		}
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = 0;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}

/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @mss: max allowable size of buffer
 * @dlen: size of data to be appended
 * @txq: queue to append to
 * Returns the number of 1k blocks appended or errno value
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

	do {
		if (!skb || skb->len >= mss) {
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	} while (rem > 0);
	return total - accounted;
}
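
/* Illustrative sketch (not part of the original file): a simplified stream
 * send path could grow its transmit queue like this. The names "hdr", "send",
 * "maxnagle", "txq" and "blocks" are placeholders; real callers also handle
 * link congestion and Nagle batching.
 *
 *	int rc;
 *
 *	rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
 *	if (unlikely(rc < 0))
 *		return rc;	// -ENOMEM or -EFAULT
 *	blocks += rc;		// caller accounts the queued 1k blocks
 */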

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.  The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}
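
/* Illustrative sketch (not part of the original file): a receive path would
 * typically validate a buffer before touching header fields; "inputq" is a
 * placeholder queue.
 *
 *	if (unlikely(!tipc_msg_validate(&skb))) {
 *		kfree_skb(skb);
 *		return;
 *	}
 *	hdr = buf_msg(skb);	// header is now known to be linear
 *	__skb_queue_tail(inputq, skb);
 */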

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Returns 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer? */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;
		/* Allocate a new fragment */
		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);
		/* Copy header & data to the fragment */
		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;
		/* Update the fragment's header */
		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}
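
/* Worked example (illustrative, assuming pktmax = 1460): the 40-byte internal
 * header gives dmax = 1460 - INT_H_SIZE = 1420. For a 3000-byte message,
 * nof_fragms = 3000 / 1420 + 1 = 3, and the fragments carry 1420, 1420 and
 * 3000 % 1420 = 160 bytes of payload respectively, each behind its own
 * internal header.
 */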

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Returns "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
			    u32 max)
{
	struct tipc_msg *bmsg = buf_msg(bskb);
	u32 msz, bsz, offset, pad;

	msz = msg_size(msg);
	bsz = msg_size(bmsg);
	offset = align(bsz);
	pad = offset - bsz;

	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (offset + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
	msg_set_size(bmsg, offset + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}
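
/* Worked example (illustrative): if the bundle currently holds 110 bytes,
 * align(110) = 112, so a 2-byte pad is added and the next bundled message is
 * copied in at offset 112, keeping every bundled header on a 4-byte boundary.
 */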

/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb has potential for bundling, either
 * now or later; if bundling was performed this time, the skb is consumed and
 * its pointer set to NULL.
 * Otherwise, "false" if the skb cannot be bundled at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
			 u32 dnode, bool *new_bundle)
{
	struct tipc_msg *msg, *inner, *outer;
	u32 tsz;

	/* First, check if the new buffer is suitable for bundling */
	msg = buf_msg(*skb);
	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (mss <= INT_H_SIZE + msg_size(msg))
		return false;

	/* Ok, but the last/target buffer can be empty? */
	if (unlikely(!tskb))
		return true;

	/* Is it a bundle already? Try to bundle the new message to it */
	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
		*new_bundle = false;
		goto bundle;
	}

	/* Make a new bundle of the two messages if possible */
	tsz = msg_size(buf_msg(tskb));
	if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
		return true;
	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
				      GFP_ATOMIC)))
		return true;
	inner = buf_msg(tskb);
	skb_push(tskb, INT_H_SIZE);
	outer = buf_msg(tskb);
	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
		      dnode);
	msg_set_importance(outer, msg_importance(inner));
	msg_set_size(outer, INT_H_SIZE + tsz);
	msg_set_msgcnt(outer, 1);
	*new_bundle = true;

bundle:
	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
		consume_skb(*skb);
		*skb = NULL;
	}
	return true;
}
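
/* Illustrative sketch (not part of the original file): a transmit path might
 * try to bundle each new message onto the tail of its backlog queue before
 * queueing it on its own. "backlogq" and "mtu" are placeholders.
 *
 *	bool new_bundle;
 *
 *	tskb = skb_peek_tail(backlogq);
 *	if (tipc_msg_try_bundle(tskb, &skb, mtu, dnode, &new_bundle)) {
 *		if (!skb)
 *			return 0;	// consumed into the bundle
 *	}
 *	__skb_queue_tail(backlogq, skb);	// not bundled (yet)
 */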

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted.
 *       Returns position of next msg
 * Consumes outer buffer when last packet extracted
 * Returns true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

	if (msg_dest_droppable(_hdr))
		goto exit;
	if (msg_errcode(_hdr))
		goto exit;

	/* Never return SHORT header */
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;

	/* Don't return data along with SYN+, - sender has a clone */
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;

	/* Allocate new buffer to return */
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

	/* Build reverse header in new buffer */
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	skb_orphan(_skb);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}

bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			pr_err_ratelimited("Failed to clone buffer chain\n");
			return false;
		}
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}
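
/* Illustrative sketch (not part of the original file): unpacking a received
 * bundle, as a receiver might do. "inputq" is a placeholder queue. The outer
 * buffer is consumed by the final (failing) extraction call.
 *
 *	struct sk_buff *iskb;
 *	int pos = 0;
 *
 *	while (tipc_msg_extract(skb, &iskb, &pos))
 *		__skb_queue_tail(inputq, iskb);
 */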

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Returns true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport, dnode;
	u32 onode = tipc_own_addr(net);

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
	if (msg_reroute_cnt(msg))
		return false;
	dnode = tipc_scope2node(net, msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(net, msg_nametype(msg),
				       msg_nameinst(msg), &dnode);
	if (!dport)
		return false;
	msg_incr_reroute_cnt(msg);
	if (dnode != onode)
		msg_set_prevnode(msg, onode);
	msg_set_destnode(msg, dnode);
	msg_set_destport(msg, dport);
	*err = TIPC_OK;

	return true;
}

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed to assemble buffer\n");
	return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}

bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}
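
/* Illustrative sketch (not part of the original file): a replicast-style
 * sender could duplicate one prepared packet chain per destination node with
 * tipc_msg_pskb_copy(). "dests", "pktq" and "localq" are placeholders.
 *
 *	list_for_each_entry(dst, dests, list) {
 *		if (!tipc_msg_pskb_copy(dst->node, pktq, &localq))
 *			return -ENOMEM;
 *		// transmit &localq towards dst->node here
 *	}
 */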

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return true;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return true;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return true;
	}
	kfree_skb(skb);
	return false;
}

void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}
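
/* Illustrative sketch (not part of the original file): a receive path could
 * use __tipc_skb_queue_sorted() to park out-of-order packets on a deferred
 * queue; "deferdq" and "expected" are placeholders. A "false" return means
 * the sequence number was already queued and the buffer has been freed.
 *
 *	if (unlikely(seqno != expected)) {
 *		__tipc_skb_queue_sorted(deferdq, seqno, skb);
 *		return;
 *	}
 */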