/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"

/* Max payload copied when reversing/rejecting a message back to the sender */
#define MAX_FORWARD_SIZE 1024

/* Round a length up to the next 4-byte boundary (TIPC word alignment) */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size)
{
	struct sk_buff *skb;
	/* Total allocation is rounded up to a 4-byte boundary */
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;

	skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}

/**
 * tipc_msg_init - initialize a TIPC message header
 * @own_node: originating node address, written as previous node (and as
 *            origin node when the header is longer than SHORT_H_SIZE)
 * @m: header area to initialize; the first @hsize bytes are zeroed first
 * @user: message user (protocol/service identity stored in the header)
 * @type: message type
 * @hsize: header size to set; also used as the initial message size
 * @dnode: destination node, only stored when @hsize > SHORT_H_SIZE
 *         (short headers have no node address fields)
 */
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

/**
 * tipc_msg_create - allocate and initialize a complete TIPC message buffer
 * @user: message user
 * @type: message type
 * @hdr_sz: header size
 * @data_sz: payload size; the buffer is sized for @hdr_sz + @data_sz but
 *           the payload area is left uninitialized for the caller to fill
 * @dnode: destination node
 * @onode: originating node
 * @dport: destination port
 * @oport: originating port
 * @errcode: error code to set in the header
 *
 * Returns the new buffer, or NULL if allocation fails.
 */
struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf: in: the buffer to append. Always defined
 *        out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	/* The fragmenter stores the fragment position in the type field */
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		/* A pending head means the previous sequence never finished */
		if (unlikely(head))
			goto err;
		/* Head must be writable since fields below are modified */
		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
			goto err;
		head = *headbuf = frag;
		skb_frag_list_init(head);
		TIPC_SKB_CB(head)->tail = NULL;
		*buf = NULL;
		return 0;
	}

	/* Non-first fragment without a head: sequence is broken */
	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		/* Coalescing failed: link frag onto head's frag_list instead */
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		/* Force a fresh validation of the reassembled message */
		TIPC_SKB_CB(head)->validated = false;
		if (unlikely(!tipc_msg_validate(head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	pr_warn_ratelimited("Unable to build fragment list\n");
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}

/* tipc_msg_validate - validate basic format of received message
 * @skb: buffer containing the message to validate
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff *skb)
{
	struct tipc_msg *msg;
	int msz, hsz;

	/* Skip the work if this buffer was already validated */
	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;
	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	/* Make the whole header available in the linear part of the skb */
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	/* pskb_may_pull() may reallocate; re-read the header pointer */
	msg = buf_msg(skb);
	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return false;

	msz = msg_size(msg);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = true;
	return true;
}

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: offset into the user data iterator (unused here; data is read
 *          from @m->msg_iter's current position)
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
		   int offset, int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	int msz = mhsz + dsz;
	int pktno = 1;
	int pktsz;
	int pktrem = pktmax;
	int drem = dsz;
	struct tipc_msg pkthdr;
	struct sk_buff *skb;
	char *pktpos;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz);
		if (unlikely(!skb))
			return -ENOMEM;
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter(pktpos, dsz, &m->msg_iter) == dsz)
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	/* The original message header travels inside the first fragment */
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (copy_from_iter(pktpos, pktrem, &m->msg_iter) != pktrem) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	/* Mark the fragment the loop exited on as the last one */
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	/* Release any buffers already queued and leave the list reusable */
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}

/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @skb: the buffer to append to ("bundle")
 * @msg: message to be appended
 * @mtu: max allowable size for the bundle buffer
 * Consumes buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
{
	struct tipc_msg *bmsg;
	unsigned int bsz;
	unsigned int msz = msg_size(msg);
	u32 start, pad;
	u32 max = mtu - INT_H_SIZE;

	if (likely(msg_user(msg) == MSG_FRAGMENTER))
		return false;
	if (!skb)
		return false;
	bmsg = buf_msg(skb);
	bsz = msg_size(bmsg);
	/* New message starts at the next 4-byte boundary inside the bundle */
	start = align(bsz);
	pad = start - bsz;

	if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
		return false;
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
		return false;
	if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
		return false;
	if (unlikely(skb_tailroom(skb) < (pad + msz)))
		return false;
	if (unlikely(max < (start + msz)))
		return false;
	/* Don't let a non-system message ride in a system-importance bundle */
	if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
	    (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
		return false;

	skb_put(skb, pad + msz);
	skb_copy_to_linear_data_offset(skb, start, msg, msz);
	msg_set_size(bmsg, start + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted.
 *       Returns position of next msg
 * Consumes outer buffer when last packet extracted
 * Returns true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *msg;
	int imsz, offset;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	msg = buf_msg(skb);
	offset = msg_hdr_sz(msg) + *pos;
	/* There must be room for at least a minimal inner header */
	if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE)))
		goto none;

	/* Clone the outer buffer, then trim the clone to the inner message */
	*iskb = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!*iskb))
		goto none;
	skb_pull(*iskb, offset);
	imsz = msg_size(buf_msg(*iskb));
	skb_trim(*iskb, imsz);
	if (unlikely(!tipc_msg_validate(*iskb)))
		goto none;
	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}

/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
 * @skb: buffer to be created, appended to and returned in case of success
 * @msg: message to be appended
 * @mtu: max allowable size for the bundle buffer, inclusive header
 * @dnode: destination node for message. (Not always present in header)
 * Returns true if success, otherwise false
 */
bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
			  u32 mtu, u32 dnode)
{
	struct sk_buff *_skb;
	struct tipc_msg *bmsg;
	u32 msz = msg_size(msg);
	u32 max = mtu - INT_H_SIZE;

	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	/* Only bundle if the message leaves room for at least one more */
	if (msz > (max / 2))
		return false;

	_skb = tipc_buf_acquire(max);
	if (!_skb)
		return false;

	/* Start with just the bundle header; payload is appended below */
	skb_trim(_skb, INT_H_SIZE);
	bmsg = buf_msg(_skb);
	tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
		      INT_H_SIZE, dnode);
	if (msg_isdata(msg))
		msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
	else
		msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
	/* The bundle inherits the sequence/ack state of the bundled message */
	msg_set_seqno(bmsg, msg_seqno(msg));
	msg_set_ack(bmsg, msg_ack(msg));
	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
	tipc_msg_bundle(_skb, msg, mtu);
	*skb = _skb;
	return true;
}

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; may be replaced.
 * @err: error code to be set in message, if any
 * Consumes buffer at failure
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *hdr = buf_msg(_skb);
	struct tipc_msg ohdr;
	/* Only up to MAX_FORWARD_SIZE bytes of payload are echoed back */
	int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);

	if (skb_linearize(_skb))
		goto exit;
	/* skb_linearize() may reallocate; re-read the header pointer */
	hdr = buf_msg(_skb);
	if (msg_dest_droppable(hdr))
		goto exit;
	/* Never reverse an already-reversed (error) message */
	if (msg_errcode(hdr))
		goto exit;

	/* Take a copy of original header before altering message */
	memcpy(&ohdr, hdr, msg_hdr_sz(hdr));

	/* Never return SHORT header; expand by replacing buffer if necessary */
	if (msg_short(hdr)) {
		*skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
		if (!*skb)
			goto exit;
		memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
		kfree_skb(_skb);
		_skb = *skb;
		hdr = buf_msg(_skb);
		memcpy(hdr, &ohdr, BASIC_H_SIZE);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	}

	/* Now reverse the concerned fields */
	msg_set_errcode(hdr, err);
	msg_set_origport(hdr, msg_destport(&ohdr));
	msg_set_destport(hdr, msg_origport(&ohdr));
	msg_set_destnode(hdr, msg_prevnode(&ohdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
	skb_trim(_skb, msg_size(hdr));
	skb_orphan(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: network namespace the lookup is performed in
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Returns true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport, dnode;
	u32 onode = tipc_own_addr(net);

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = -TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	/* skb_linearize() may reallocate; re-read the header pointer */
	msg = buf_msg(skb);
	/* Give up after the first reroute to avoid forwarding loops */
	if (msg_reroute_cnt(msg))
		return false;
	dnode = addr_domain(net, msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(net, msg_nametype(msg),
				       msg_nameinst(msg), &dnode);
	if (!dport)
		return false;
	msg_incr_reroute_cnt(msg);
	if (dnode != onode)
		msg_set_prevnode(msg, onode);
	msg_set_destnode(msg, dnode);
	msg_set_destport(msg, dport);
	*err = TIPC_OK;
	return true;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_sz;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		/* tipc_buf_append() returns 1 when reassembly is complete */
		if (tipc_buf_append(&head, &frag))
			break;
		/* NULL head here means tipc_buf_append() hit an error */
		if (!head)
			goto error;
	}
	return frag;
error:
	pr_warn("Failed do clone local mcast rcv buffer\n");
	kfree_skb(head);
	return NULL;
}