/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "bcast.h"
#include "name_distr.h"

#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */

#define BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */

/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct tipc_bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};

/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines. Concurrent access is
 * prevented through use of the spinlock "bc_lock".
 */
struct tipc_bcbearer {
	struct tipc_bearer bearer;
	struct tipc_media media;
	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};

/**
 * struct tipc_bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct tipc_bclink {
	struct tipc_link link;
	struct tipc_node node;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
};

static struct tipc_bcbearer bcast_bearer;
static struct tipc_bclink bcast_link;

static struct tipc_bcbearer *bcbearer = &bcast_bearer;
static struct tipc_bclink *bclink = &bcast_link;
static struct tipc_link *bcl = &bcast_link.link;

static DEFINE_SPINLOCK(bc_lock);

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);

static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

void tipc_bclink_add_node(u32 addr)
{
	spin_lock_bh(&bc_lock);
	tipc_nmap_add(&bclink->bcast_nodes, addr);
	spin_unlock_bh(&bc_lock);
}

void tipc_bclink_remove_node(u32 addr)
{
	spin_lock_bh(&bc_lock);
	tipc_nmap_remove(&bclink->bcast_nodes, addr);
	spin_unlock_bh(&bc_lock);
}

static void bclink_set_last_sent(void)
{
	if (bcl->next_out)
		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
	else
		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

u32 tipc_bclink_get_last_sent(void)
{
	return bcl->fsm_msg_cnt;
}

static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}


/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bc_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(void)
{
	return bclink->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */
static void bclink_retransmit_pkt(u32 after, u32 to)
{
	struct sk_buff *buf;

	buf = bcl->first_out;
	while (buf && less_eq(buf_seqno(buf), after))
		buf = buf->next;
	tipc_link_retransmit(bcl, buf, mod(to - after));
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
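 * (bc_lock is taken and released inside this function while the
 * broadcast transmit queue is walked.)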
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *crs;
	struct sk_buff *next;
	unsigned int released = 0;

	spin_lock_bh(&bc_lock);

	/* Bail out if tx queue is empty (no clean up is required) */
	crs = bcl->first_out;
	if (!crs)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (bclink->bcast_nodes.count)
			acked = bcl->fsm_msg_cnt;
		else
			acked = bcl->next_out_no;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(crs)) ||
		    less(bcl->fsm_msg_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
		crs = crs->next;

	/* Update packets that node is now acknowledging */

	while (crs && less_eq(buf_seqno(crs), acked)) {
		next = crs->next;

		if (crs != bcl->next_out)
			bcbuf_decr_acks(crs);
		else {
			bcbuf_set_acks(crs, 0);
			bcl->next_out = next;
			bclink_set_last_sent();
		}

		if (bcbuf_acks(crs) == 0) {
			bcl->first_out = next;
			bcl->out_queue_size--;
			kfree_skb(crs);
			released = 1;
		}
		crs = next;
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */

	if (unlikely(bcl->next_out)) {
		tipc_link_push_queue(bcl);
		bclink_set_last_sent();
	}
	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
		tipc_link_wakeup_ports(bcl, 0);
exit:
	spin_unlock_bh(&bc_lock);
}

/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * tipc_net_lock and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
{
	struct sk_buff *buf;

	/* Ignore "stale" link state info */

	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */

	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */

	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */

	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);

		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
				 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
				 : n_ptr->bclink.last_sent);

		spin_lock_bh(&bc_lock);
		tipc_bearer_send(&bcbearer->bearer, buf, NULL);
		bcl->stats.sent_nacks++;
		spin_unlock_bh(&bc_lock);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 *
 * Only tipc_net_lock set.
 */
static void bclink_peek_nack(struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);

	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;

	tipc_node_unlock(n_ptr);
}

/*
 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
 */
int tipc_bclink_send_msg(struct sk_buff *buf)
{
	int res;

	spin_lock_bh(&bc_lock);

	if (!bclink->bcast_nodes.count) {
		res = msg_data_sz(buf_msg(buf));
		kfree_skb(buf);
		goto exit;
	}

	res = tipc_link_send_buf(bcl, buf);
	if (likely(res >= 0)) {
		bclink_set_last_sent();
		bcl->stats.queue_sz_counts++;
		bcl->stats.accu_queue_sz += bcl->out_queue_size;
	}
exit:
	spin_unlock_bh(&bc_lock);
	return res;
}

/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bc_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */

	if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_send_proto_msg(
			node->active_links[node->addr & 1],
			STATE_MSG, 0, 0, 0, 0, 0);
		bcl->stats.sent_acks++;
	}
}

/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred;

	/* Screen out unwanted broadcast messages */

	if (msg_mc_netid(msg) != tipc_net_id)
		goto exit;

	node = tipc_node_find(msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */

	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tipc_own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			spin_lock_bh(&bc_lock);
			bcl->stats.recv_nacks++;
			bclink->retransmit_to = node;
			bclink_retransmit_pkt(msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			spin_unlock_bh(&bc_lock);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(msg);
		}
		goto exit;
	}

	/* Handle in-sequence broadcast message */

	seqno = msg_seqno(msg);
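	/* In-sequence packets are delivered right away; out-of-sequence
	 * ones are deferred or counted as duplicates further below
	 */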
	next_in = mod(node->bclink.last_in + 1);

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */

		if (likely(msg_isdata(msg))) {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			if (likely(msg_mcast(msg)))
				tipc_port_recv_mcast(buf, NULL);
			else
				kfree_skb(buf);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			tipc_link_recv_bundle(buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			int ret = tipc_link_recv_fragment(&node->bclink.defragm,
							  &buf, &msg);
			if (ret < 0)
				goto unlock;
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_fragments++;
			if (ret > 0)
				bcl->stats.recv_fragmented++;
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			tipc_net_route_msg(buf);
		} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			tipc_named_recv(buf);
		} else {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */

		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (!node->bclink.deferred_head) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(node->bclink.deferred_head);
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */

		buf = node->bclink.deferred_head;
		node->bclink.deferred_head = buf->next;
		node->bclink.deferred_size--;
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */

	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
					       &node->bclink.deferred_tail,
					       buf);
		node->bclink.deferred_size += deferred;
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	} else
		deferred = 0;

	spin_lock_bh(&bc_lock);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	spin_unlock_bh(&bc_lock);

unlock:
	tipc_node_unlock(node);
exit:
	kfree_skb(buf);
}

u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}


/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
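 * Bearer pairs are tried in descending priority order until every
 * destination in the broadcast node map has been covered.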
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		struct tipc_msg *msg;

		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg = buf_msg(buf);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		bcl->stats.sent_info++;

		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *b = p;
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */

		if (tipc_bearer_blocked(p)) {
			if (!s || tipc_bearer_blocked(s))
				continue; /* Can't use either bearer */
			b = s;
		}

		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(b, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(b, tbuf, &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}

		/* Swap bearers for next packet */
		if (s) {
			bcbearer->bpairs[bp_index].primary = s;
			bcbearer->bpairs[bp_index].secondary = p;
		}

		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(void)
{
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	int b_index;
	int pri;

	spin_lock_bh(&bc_lock);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		struct tipc_bearer *b = &tipc_bearers[b_index];

		if (!b->active || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
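				/* Bearers reach different node sets, so the
				 * secondary starts a bearer pair of its own
				 */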
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	spin_unlock_bh(&bc_lock);
}


int tipc_bclink_stats(char *buf, const u32 buf_size)
{
	int ret;
	struct tipc_stats *s;

	if (!bcl)
		return 0;

	spin_lock_bh(&bc_lock);

	s = &bcl->stats;

	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    " Window:%u packets\n",
			    bcl->name, bcl->queue_limit[0]);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " RX naks:%u defs:%u dups:%u\n",
			     s->recv_nacks, s->deferred_recv, s->duplicates);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX naks:%u acks:%u dups:%u\n",
			     s->sent_nacks, s->sent_acks, s->retransmitted);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " Congestion link:%u Send queue max:%u avg:%u\n",
			     s->link_congs, s->max_queue_sz,
			     s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	spin_unlock_bh(&bc_lock);
	return ret;
}

int tipc_bclink_reset_stats(void)
{
	if (!bcl)
		return -ENOPROTOOPT;

	spin_lock_bh(&bc_lock);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	spin_unlock_bh(&bc_lock);
	return 0;
}

int tipc_bclink_set_queue_limits(u32 limit)
{
	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	spin_lock_bh(&bc_lock);
	tipc_link_set_queue_limits(bcl, limit);
	spin_unlock_bh(&bc_lock);
	return 0;
}

void tipc_bclink_init(void)
{
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	INIT_LIST_HEAD(&bcl->waiting_ports);
	bcl->next_out_no = 1;
	spin_lock_init(&bclink->node.lock);
	bcl->owner = &bclink->node;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	spin_lock_init(&bcbearer->bearer.lock);
	bcl->b_ptr = &bcbearer->bearer;
	bcl->state = WORKING_WORKING;
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
}

void tipc_bclink_stop(void)
{
	spin_lock_bh(&bc_lock);
	tipc_link_stop(bcl);
	spin_unlock_bh(&bc_lock);

	memset(bclink, 0, sizeof(*bclink));
	memset(bcbearer, 0, sizeof(*bcbearer));
}


/**
 * tipc_nmap_add - add a node to a node map
 */
void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}

/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */
void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
{
	struct tipc_port_list *item = pl_ptr;
	int i;
	int item_sz = PLSIZE;
	int cnt = pl_ptr->count;

	for (; ; cnt -= item_sz, item = item->next) {
		if (cnt < PLSIZE)
			item_sz = cnt;
		for (i = 0; i < item_sz; i++)
			if (item->ports[i] == port)
				return;
		if (i < PLSIZE) {
			item->ports[i] = port;
			pl_ptr->count++;
			return;
		}
		if (!item->next) {
			item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
			if (!item->next) {
				pr_warn("Incomplete multicast delivery, no memory\n");
				return;
			}
			item->next->next = NULL;
		}
	}
}

/**
 * tipc_port_list_free - free dynamically created entries in port_list chain
 */
void tipc_port_list_free(struct tipc_port_list *pl_ptr)
{
	struct tipc_port_list *item;
	struct tipc_port_list *next;

	for (item = pl_ptr->next; item; item = next) {
		next = item->next;
		kfree(item);
	}
}