/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"

#define MAX_FORWARD_SIZE 1024

static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
		   u32 destnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, tipc_own_addr);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, tipc_own_addr);
		msg_set_destnode(m, destnode);
	}
}

struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
				uint data_sz, u32 dnode, u32 onode,
				u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_prevnode(msg, onode);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}

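/* Illustrative sketch (not part of this file's API): how a caller such as
 * the socket layer might use tipc_msg_create() to build a small
 * self-contained control message, here a connection abort carrying
 * TIPC_ERR_NO_PORT. The identifiers peer_node, peer_port and own_port are
 * placeholders assumed for the example; the transmit step is elided.
 *
 *	struct sk_buff *skb;
 *
 *	skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
 *			      SHORT_H_SIZE, 0, peer_node, tipc_own_addr,
 *			      peer_port, own_port, TIPC_ERR_NO_PORT);
 *	if (!skb)
 *		return;
 *	(hand skb to the link layer for transmission towards peer_node)
 */
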
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf:     in:  the buffer to append. Always defined
 *            out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
			goto err;
		head = *headbuf = frag;
		skb_frag_list_init(head);
		TIPC_SKB_CB(head)->tail = NULL;
		*buf = NULL;
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;

err:
	pr_warn_ratelimited("Unable to build fragment list\n");
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}

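/* Illustrative sketch (not part of this file's API) of how a receive path
 * can drive tipc_buf_append(): keep one persistent head pointer and feed
 * every arriving fragment into it until the function reports completion.
 * The deliver() call is a placeholder assumed for the example.
 *
 *	struct sk_buff *reasm = NULL;	(survives across received fragments)
 *
 *	(for each received fragment buffer 'skb':)
 *	if (tipc_buf_append(&reasm, &skb)) {
 *		(skb now points to the complete message, reasm is NULL)
 *		deliver(skb);
 *	} else if (!skb && !reasm) {
 *		(reassembly failed; both buffers have already been freed)
 *	}
 */
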
/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: Position in iov to start copying from
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	int msz = mhsz + dsz;
	int pktno = 1;
	int pktsz;
	int pktrem = pktmax;
	int drem = dsz;
	struct tipc_msg pkthdr;
	struct sk_buff *skb;
	char *pktpos;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz);
		if (unlikely(!skb))
			return -ENOMEM;
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov,
						 offset, dsz))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax);
	if (!skb)
		return -ENOMEM;
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;
		offset += pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}

/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @list: the buffer chain of the existing buffer ("bundle")
 * @skb: buffer to be appended
 * @mtu: max allowable size for the bundle buffer
 * Consumes buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
{
	struct sk_buff *bskb = skb_peek_tail(list);
	struct tipc_msg *bmsg = buf_msg(bskb);
	struct tipc_msg *msg = buf_msg(skb);
	unsigned int bsz = msg_size(bmsg);
	unsigned int msz = msg_size(msg);
	u32 start = align(bsz);
	u32 max = mtu - INT_H_SIZE;
	u32 pad = start - bsz;

	if (likely(msg_user(msg) == MSG_FRAGMENTER))
		return false;
	if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
		return false;
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
		return false;
	if (likely(msg_user(bmsg) != MSG_BUNDLER))
		return false;
	if (likely(!TIPC_SKB_CB(bskb)->bundling))
		return false;
	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (start + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
	msg_set_size(bmsg, start + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	kfree_skb(skb);
	return true;
}

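/* Illustrative sketch (not part of this file's API) of the pattern used by
 * the link transmit code: first try to append an outgoing packet to the
 * bundle at the tail of the send queue, and only if that fails fall back to
 * creating a fresh bundle with tipc_msg_make_bundle() below. The queue, mtu
 * and dnode variables are placeholders assumed for the example.
 *
 *	if (tipc_msg_bundle(queue, skb, mtu)) {
 *		(skb was copied into the existing bundle and freed)
 *	} else if (tipc_msg_make_bundle(queue, skb, mtu, dnode)) {
 *		(a new bundle buffer now sits at the queue tail, with the
 *		 contents of skb as its first bundled message)
 *	} else {
 *		(message cannot be bundled; queue it as a normal packet)
 *	}
 */
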
/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
 * @list: the buffer chain
 * @skb: buffer to be appended and replaced
 * @mtu: max allowable size for the bundle buffer, inclusive of header
 * @dnode: destination node for message. (Not always present in header)
 * Replaces buffer if successful
 * Returns true if success, otherwise false
 */
bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
			  u32 mtu, u32 dnode)
{
	struct sk_buff *bskb;
	struct tipc_msg *bmsg;
	struct tipc_msg *msg = buf_msg(skb);
	u32 msz = msg_size(msg);
	u32 max = mtu - INT_H_SIZE;

	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == CHANGEOVER_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (msz > (max / 2))
		return false;

	bskb = tipc_buf_acquire(max);
	if (!bskb)
		return false;

	skb_trim(bskb, INT_H_SIZE);
	bmsg = buf_msg(bskb);
	tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode);
	msg_set_seqno(bmsg, msg_seqno(msg));
	msg_set_ack(bmsg, msg_ack(msg));
	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
	TIPC_SKB_CB(bskb)->bundling = true;
	__skb_queue_tail(list, bskb);
	return tipc_msg_bundle(list, skb, mtu);
}

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @buf: buffer containing message to be reversed
 * @dnode: return value: node where to send message after reversal
 * @err: error code to be set in message
 * Consumes buffer on failure
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
{
	struct tipc_msg *msg = buf_msg(buf);
	uint imp = msg_importance(msg);
	struct tipc_msg ohdr;
	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);

	if (skb_linearize(buf))
		goto exit;
	if (msg_dest_droppable(msg))
		goto exit;
	if (msg_errcode(msg))
		goto exit;

	memcpy(&ohdr, msg, msg_hdr_sz(msg));
	imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE);
	if (msg_isdata(msg))
		msg_set_importance(msg, imp);
	msg_set_errcode(msg, err);
	msg_set_origport(msg, msg_destport(&ohdr));
	msg_set_destport(msg, msg_origport(&ohdr));
	msg_set_prevnode(msg, tipc_own_addr);
	if (!msg_short(msg)) {
		msg_set_orignode(msg, msg_destnode(&ohdr));
		msg_set_destnode(msg, msg_orignode(&ohdr));
	}
	msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
	skb_trim(buf, msg_size(msg));
	skb_orphan(buf);
	*dnode = msg_orignode(&ohdr);
	return true;
exit:
	kfree_skb(buf);
	return false;
}

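/* Illustrative sketch (not part of this file's API): rejecting a message
 * back to its sender with tipc_msg_reverse(). On success the buffer is
 * ready to be transmitted towards *dnode; on failure it has already been
 * freed. The transmit step is elided here.
 *
 *	u32 dnode;
 *
 *	if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) {
 *		(send buf back out towards dnode via the link layer)
 *	}
 */
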
/**
 * tipc_msg_eval: determine fate of message that found no destination
 * @buf: the buffer containing the message
 * @dnode: return value: next-hop node, if message to be forwarded
 *
 * Does not consume buffer
 * Returns 0 (TIPC_OK) if message is ok and a new lookup can be attempted,
 * otherwise a negative TIPC error code if the message is to be rejected
 */
int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 dport;

	if (msg_type(msg) != TIPC_NAMED_MSG)
		return -TIPC_ERR_NO_PORT;
	if (skb_linearize(buf))
		return -TIPC_ERR_NO_NAME;
	if (msg_data_sz(msg) > MAX_FORWARD_SIZE)
		return -TIPC_ERR_NO_NAME;
	if (msg_reroute_cnt(msg) > 0)
		return -TIPC_ERR_NO_NAME;

	*dnode = addr_domain(msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(msg_nametype(msg),
				       msg_nameinst(msg),
				       dnode);
	if (!dport)
		return -TIPC_ERR_NO_NAME;
	msg_incr_reroute_cnt(msg);
	msg_set_destnode(msg, *dnode);
	msg_set_destport(msg, dport);
	return TIPC_OK;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 * reassemble the clones into one message
 */
struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_sz;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	return frag;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return NULL;
}

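/* Illustrative sketch (not part of this file's API) of how a multicast send
 * path can use tipc_msg_reassemble() to obtain one flat copy of an outgoing
 * fragment chain for local delivery, while leaving the original chain
 * untouched for transmission. The txq name and the local delivery step are
 * assumptions made for the example.
 *
 *	struct sk_buff *copy;
 *
 *	copy = tipc_msg_reassemble(txq);
 *	if (copy) {
 *		(deliver 'copy' to local sockets; 'txq' is still intact)
 *	}
 */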