/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "bcast.h"

#define MAX_PKT_DEFAULT_MCAST 1500	/* bcast link max packet size (fixed) */

#define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */

/**
 * struct bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};

/**
 * struct bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bc_lock".
 */

struct bcbearer {
	struct tipc_bearer bearer;
	struct media media;
	struct bcbearer_pair bpairs[MAX_BEARERS];
	struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};

/**
 * struct bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */

struct bclink {
	struct link link;
	struct tipc_node node;
	struct tipc_node *retransmit_to;
};


static struct bcbearer *bcbearer;
static struct bclink *bclink;
static struct link *bcl;
static DEFINE_SPINLOCK(bc_lock);

/* broadcast-capable node map */
struct tipc_node_map tipc_bcast_nmap;

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);

static u32 buf_seqno(struct sk_buff *buf)
{
	return msg_seqno(buf_msg(buf));
}

static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
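/*
 * Illustration: a new broadcast buffer is stamped with the number of nodes
 * expected to acknowledge it (tipc_bcast_nmap.count at send time), stored
 * by value in the skb control block's 'handle' pointer field, hence the
 * casts above. Each node's acknowledgement decrements the count; once it
 * reaches zero every peer has seen the packet and the buffer can go:
 *
 *	bcbuf_set_acks(buf, 5);		// broadcast to 5 reachable nodes
 *	bcbuf_decr_acks(buf);		// one node has acknowledged
 *	if (bcbuf_acks(buf) == 0)	// all nodes acked -> safe to release
 *		buf_discard(buf);
 */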
static void bclink_set_last_sent(void)
{
	if (bcl->next_out)
		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
	else
		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

u32 tipc_bclink_get_last_sent(void)
{
	return bcl->fsm_msg_cnt;
}

/**
 * bclink_set_gap - set gap according to contents of current deferred pkt queue
 *
 * Called with 'node' locked, bc_lock unlocked
 */

static void bclink_set_gap(struct tipc_node *n_ptr)
{
	struct sk_buff *buf = n_ptr->bclink.deferred_head;

	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
		mod(n_ptr->bclink.last_in);
	if (unlikely(buf != NULL))
		n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}

/**
 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
 *
 * This mechanism endeavours to prevent all nodes in network from trying
 * to ACK or NACK at the same time.
 *
 * Note: TIPC uses a different trigger to distribute ACKs than it does to
 * distribute NACKs, but tries to use the same spacing (divide by 16).
 */

static int bclink_ack_allowed(u32 n)
{
	return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
}
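/*
 * Example (assuming TIPC_MIN_LINK_WIN == 16): a node whose tipc_own_tag
 * is 3 responds only to counts satisfying n % 16 == 3, so nodes holding
 * distinct tags never ACK or NACK in response to the same count:
 *
 *	bclink_ack_allowed(19);		// 19 % 16 == 3 -> allowed for tag 3
 *	bclink_ack_allowed(20);		// 20 % 16 == 4 -> suppressed
 */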
/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bc_lock locked
 */

struct tipc_node *tipc_bclink_retransmit_to(void)
{
	return bclink->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */

static void bclink_retransmit_pkt(u32 after, u32 to)
{
	struct sk_buff *buf;

	buf = bcl->first_out;
	while (buf && less_eq(buf_seqno(buf), after))
		buf = buf->next;
	tipc_link_retransmit(bcl, buf, mod(to - after));
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */

void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *crs;
	struct sk_buff *next;
	unsigned int released = 0;

	if (less_eq(acked, n_ptr->bclink.acked))
		return;

	spin_lock_bh(&bc_lock);

	/* Skip over packets that node has previously acknowledged */

	crs = bcl->first_out;
	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
		crs = crs->next;

	/* Update packets that node is now acknowledging */

	while (crs && less_eq(buf_seqno(crs), acked)) {
		next = crs->next;
		bcbuf_decr_acks(crs);
		if (bcbuf_acks(crs) == 0) {
			bcl->first_out = next;
			bcl->out_queue_size--;
			buf_discard(crs);
			released = 1;
		}
		crs = next;
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */

	if (unlikely(bcl->next_out)) {
		tipc_link_push_queue(bcl);
		bclink_set_last_sent();
	}
	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
		tipc_link_wakeup_ports(bcl, 0);
	spin_unlock_bh(&bc_lock);
}
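/*
 * Example: if this node last acknowledged seqno 10 and now acknowledges 13,
 * the expected-ack count of buffers 11..13 is decremented once each; any
 * buffer whose count reaches zero is unlinked from first_out and freed.
 * Releasing queue space may in turn let deferred traffic (next_out) be
 * pushed out and blocked ports be woken.
 */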
/**
 * bclink_send_ack - unicast an ACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_ack(struct tipc_node *n_ptr)
{
	struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];

	if (l_ptr != NULL)
		tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}

/**
 * bclink_send_nack - broadcast a NACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_nack(struct tipc_node *n_ptr)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
		return;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		msg = buf_msg(buf);
		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
		msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
		msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
		msg_set_bcast_tag(msg, tipc_own_tag);

		if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
			bcl->stats.sent_nacks++;
			buf_discard(buf);
		} else {
			tipc_bearer_schedule(bcl->b_ptr, bcl);
			bcl->proto_msg_queue = buf;
			bcl->stats.bearer_congs++;
		}

		/*
		 * Ensure we don't send another NACK msg to the node
		 * until 16 more deferred messages arrive from it
		 * (i.e. helps prevent all nodes from NACK'ing at same time)
		 */

		n_ptr->bclink.nack_sync = tipc_own_tag;
	}
}

/**
 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
 *
 * tipc_net_lock and node lock set
 */

void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
{
	if (!n_ptr->bclink.supported ||
	    less_eq(last_sent, mod(n_ptr->bclink.last_in)))
		return;

	bclink_set_gap(n_ptr);
	if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
		n_ptr->bclink.gap_to = last_sent;
	bclink_send_nack(n_ptr);
}

/**
 * tipc_bclink_peek_nack - process a NACK msg meant for another node
 *
 * Only tipc_net_lock set.
 */

static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
	struct tipc_node *n_ptr = tipc_node_find(dest);
	u32 my_after, my_to;

	if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
		return;
	tipc_node_lock(n_ptr);
	/*
	 * Modify gap to suppress unnecessary NACKs from this node
	 */
	my_after = n_ptr->bclink.gap_after;
	my_to = n_ptr->bclink.gap_to;

	if (less_eq(gap_after, my_after)) {
		if (less(my_after, gap_to) && less(gap_to, my_to))
			n_ptr->bclink.gap_after = gap_to;
		else if (less_eq(my_to, gap_to))
			n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
	} else if (less_eq(gap_after, my_to)) {
		if (less_eq(my_to, gap_to))
			n_ptr->bclink.gap_to = gap_after;
	} else {
		/*
		 * Expand gap if missing bufs not in deferred queue:
		 */
		struct sk_buff *buf = n_ptr->bclink.deferred_head;
		u32 prev = n_ptr->bclink.gap_to;

		for (; buf; buf = buf->next) {
			u32 seqno = buf_seqno(buf);

			if (mod(seqno - prev) != 1) {
				buf = NULL;
				break;
			}
			if (seqno == gap_after)
				break;
			prev = seqno;
		}
		if (buf == NULL)
			n_ptr->bclink.gap_to = gap_after;
	}
	/*
	 * Some nodes may send a complementary NACK now:
	 */
	if (bclink_ack_allowed(sender_tag + 1)) {
		if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
			bclink_send_nack(n_ptr);
			bclink_set_gap(n_ptr);
		}
	}
	tipc_node_unlock(n_ptr);
}

/**
 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
 */

int tipc_bclink_send_msg(struct sk_buff *buf)
{
	int res;

	spin_lock_bh(&bc_lock);

	res = tipc_link_send_buf(bcl, buf);
	if (likely(res > 0))
		bclink_set_last_sent();

	bcl->stats.queue_sz_counts++;
	bcl->stats.accu_queue_sz += bcl->out_queue_size;

	spin_unlock_bh(&bc_lock);
	return res;
}
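/*
 * Sketch of the expected caller pattern (assumed; the real callers live in
 * the port/multicast code): ownership of 'buf' passes to the link layer,
 * and, as the res > 0 test above shows, only a positive return means the
 * packet was accepted onto the broadcast link:
 *
 *	int res = tipc_bclink_send_msg(buf);
 *
 *	if (res <= 0) {
 *		... congestion or error; sender must back off ...
 *	}
 */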
/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */

void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
	u32 next_in;
	u32 seqno;
	struct sk_buff *deferred;

	if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
		     (msg_mc_netid(msg) != tipc_net_id))) {
		buf_discard(buf);
		return;
	}

	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_destnode(msg) == tipc_own_addr) {
			tipc_node_lock(node);
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			spin_lock_bh(&bc_lock);
			bcl->stats.recv_nacks++;
			bclink->retransmit_to = node;
			bclink_retransmit_pkt(msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			spin_unlock_bh(&bc_lock);
		} else {
			tipc_bclink_peek_nack(msg_destnode(msg),
					      msg_bcast_tag(msg),
					      msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
		}
		buf_discard(buf);
		return;
	}

	tipc_node_lock(node);
receive:
	deferred = node->bclink.deferred_head;
	next_in = mod(node->bclink.last_in + 1);
	seqno = msg_seqno(msg);

	if (likely(seqno == next_in)) {
		bcl->stats.recv_info++;
		node->bclink.last_in++;
		bclink_set_gap(node);
		if (unlikely(bclink_ack_allowed(seqno))) {
			bclink_send_ack(node);
			bcl->stats.sent_acks++;
		}
		if (likely(msg_isdata(msg))) {
			tipc_node_unlock(node);
			tipc_port_recv_mcast(buf, NULL);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			tipc_node_unlock(node);
			tipc_link_recv_bundle(buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			bcl->stats.recv_fragments++;
			if (tipc_link_recv_fragment(&node->bclink.defragm,
						    &buf, &msg))
				bcl->stats.recv_fragmented++;
			tipc_node_unlock(node);
			tipc_net_route_msg(buf);
		} else {
			tipc_node_unlock(node);
			tipc_net_route_msg(buf);
		}
		if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
			tipc_node_lock(node);
			buf = deferred;
			msg = buf_msg(buf);
			node->bclink.deferred_head = deferred->next;
			goto receive;
		}
		return;
	} else if (less(next_in, seqno)) {
		u32 gap_after = node->bclink.gap_after;
		u32 gap_to = node->bclink.gap_to;

		if (tipc_link_defer_pkt(&node->bclink.deferred_head,
					&node->bclink.deferred_tail,
					buf)) {
			node->bclink.nack_sync++;
			bcl->stats.deferred_recv++;
			if (seqno == mod(gap_after + 1))
				node->bclink.gap_after = seqno;
			else if (less(gap_after, seqno) && less(seqno, gap_to))
				node->bclink.gap_to = seqno;
		}
		if (bclink_ack_allowed(node->bclink.nack_sync)) {
			if (gap_to != gap_after)
				bclink_send_nack(node);
			bclink_set_gap(node);
		}
	} else {
		bcl->stats.duplicates++;
		buf_discard(buf);
	}
	tipc_node_unlock(node);
}

u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.supported &&
		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}
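/*
 * Receive sequencing example: with last_in == 7, an arriving packet 8 is
 * delivered at once, and the 'receive' loop then drains any deferred 9,
 * 10, ... already queued. If packet 10 arrives while 8 is still missing,
 * it is parked via tipc_link_defer_pkt() and the gap bookkeeping widens
 * until a NACK slot comes up (bclink_ack_allowed() on nack_sync),
 * prompting the sender to retransmit the hole. A packet at or below
 * last_in just bumps the duplicate counter and is discarded.
 */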
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send through as many bearers as necessary to reach all nodes
 * that support TIPC multicasting.
 *
 * Returns 0 if packet sent successfully, non-zero if not
 */

static int tipc_bcbearer_send(struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;

	/* Prepare buffer for broadcasting (if first time trying to send it) */

	if (likely(!msg_non_seq(buf_msg(buf)))) {
		struct tipc_msg *msg;

		assert(tipc_bcast_nmap.count != 0);
		bcbuf_set_acks(buf, tipc_bcast_nmap.count);
		msg = buf_msg(buf);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		bcl->stats.sent_info++;
	}

	/* Send buffer over bearers until all targets reached */

	bcbearer->remains = tipc_bcast_nmap;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;

		if (!p)
			break;	/* no more bearers to try */

		tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue;	/* bearer pair doesn't add anything */

		if (p->blocked ||
		    p->media->send_msg(buf, p, &p->media->bcast_addr)) {
			/* unable to send on primary bearer */
			if (!s || s->blocked ||
			    s->media->send_msg(buf, s,
					       &s->media->bcast_addr)) {
				/* unable to send on either bearer */
				continue;
			}
		}

		if (s) {
			bcbearer->bpairs[bp_index].primary = s;
			bcbearer->bpairs[bp_index].secondary = p;
		}

		if (bcbearer->remains_new.count == 0)
			return 0;

		bcbearer->remains = bcbearer->remains_new;
	}

	/*
	 * Unable to reach all targets (indicate success, since currently
	 * there isn't code in place to properly block & unblock the
	 * pseudo-bearer used by the broadcast link)
	 */

	return TIPC_OK;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */

void tipc_bcbearer_sort(void)
{
	struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct bcbearer_pair *bp_curr;
	int b_index;
	int pri;

	spin_lock_bh(&bc_lock);

	/* Group bearers by priority (can assume max of two per priority) */

	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		struct tipc_bearer *b = &tipc_bearers[b_index];

		if (!b->active || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}

	/* Create array of bearer pairs for broadcasting */

	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	spin_unlock_bh(&bc_lock);
}
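/*
 * Pairing example (illustrative bearer names and node sets): two bearers
 * of equal priority that reach an identical node set share one bpairs[]
 * slot as primary/secondary, and the send loop in tipc_bcbearer_send()
 * swaps them after each use to spread the load. If their node sets differ,
 * each gets its own slot so the loop can still narrow 'remains' to zero:
 *
 *	eth0 (pri 10, nodes {A,B,C}) + eth1 (pri 10, nodes {A,B,C})
 *		-> one slot: primary eth0, secondary eth1
 *	eth0 (pri 10, nodes {A,B}) + eth1 (pri 10, nodes {C})
 *		-> two slots: eth0 alone, then eth1 alone
 */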
/**
 * tipc_bcbearer_push - resolve bearer congestion
 *
 * Forces bclink to push out any unsent packets, until all packets are gone
 * or congestion reoccurs.
 * No locks set when function called
 */

void tipc_bcbearer_push(void)
{
	struct tipc_bearer *b_ptr;

	spin_lock_bh(&bc_lock);
	b_ptr = &bcbearer->bearer;
	if (b_ptr->blocked) {
		b_ptr->blocked = 0;
		tipc_bearer_lock_push(b_ptr);
	}
	spin_unlock_bh(&bc_lock);
}


int tipc_bclink_stats(char *buf, const u32 buf_size)
{
	struct print_buf pb;

	if (!bcl)
		return 0;

	tipc_printbuf_init(&pb, buf, buf_size);

	spin_lock_bh(&bc_lock);

	tipc_printf(&pb, "Link <%s>\n"
			 "  Window:%u packets\n",
		    bcl->name, bcl->queue_limit[0]);
	tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    bcl->stats.recv_info,
		    bcl->stats.recv_fragments,
		    bcl->stats.recv_fragmented,
		    bcl->stats.recv_bundles,
		    bcl->stats.recv_bundled);
	tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    bcl->stats.sent_info,
		    bcl->stats.sent_fragments,
		    bcl->stats.sent_fragmented,
		    bcl->stats.sent_bundles,
		    bcl->stats.sent_bundled);
	tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n",
		    bcl->stats.recv_nacks,
		    bcl->stats.deferred_recv,
		    bcl->stats.duplicates);
	tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n",
		    bcl->stats.sent_nacks,
		    bcl->stats.sent_acks,
		    bcl->stats.retransmitted);
	tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
		    bcl->stats.bearer_congs,
		    bcl->stats.link_congs,
		    bcl->stats.max_queue_sz,
		    bcl->stats.queue_sz_counts
		    ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
		    : 0);

	spin_unlock_bh(&bc_lock);
	return tipc_printbuf_validate(&pb);
}

int tipc_bclink_reset_stats(void)
{
	if (!bcl)
		return -ENOPROTOOPT;

	spin_lock_bh(&bc_lock);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	spin_unlock_bh(&bc_lock);
	return 0;
}

int tipc_bclink_set_queue_limits(u32 limit)
{
	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	spin_lock_bh(&bc_lock);
	tipc_link_set_queue_limits(bcl, limit);
	spin_unlock_bh(&bc_lock);
	return 0;
}

int tipc_bclink_init(void)
{
	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bcbearer || !bclink) {
		warn("Multicast link creation failed, no memory\n");
		kfree(bcbearer);
		bcbearer = NULL;
		kfree(bclink);
		bclink = NULL;
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-multicast");

	bcl = &bclink->link;
	INIT_LIST_HEAD(&bcl->waiting_ports);
	bcl->next_out_no = 1;
	spin_lock_init(&bclink->node.lock);
	bcl->owner = &bclink->node;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->b_ptr = &bcbearer->bearer;
	bcl->state = WORKING_WORKING;
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);

	return 0;
}

void tipc_bclink_stop(void)
{
	spin_lock_bh(&bc_lock);
	if (bcbearer) {
		tipc_link_stop(bcl);
		bcl = NULL;
		kfree(bclink);
		bclink = NULL;
		kfree(bcbearer);
		bcbearer = NULL;
	}
	spin_unlock_bh(&bc_lock);
}


/**
 * tipc_nmap_add - add a node to a node map
 */

void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */

void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}

/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */

void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
{
	struct port_list *item = pl_ptr;
	int i;
	int item_sz = PLSIZE;
	int cnt = pl_ptr->count;

	for (; ; cnt -= item_sz, item = item->next) {
		if (cnt < PLSIZE)
			item_sz = cnt;
		for (i = 0; i < item_sz; i++)
			if (item->ports[i] == port)
				return;
		if (i < PLSIZE) {
			item->ports[i] = port;
			pl_ptr->count++;
			return;
		}
		if (!item->next) {
			item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
			if (!item->next) {
				warn("Incomplete multicast delivery, no memory\n");
				return;
			}
			item->next->next = NULL;
		}
	}
}

/**
 * tipc_port_list_free - free dynamically created entries in port_list chain
 */

void tipc_port_list_free(struct port_list *pl_ptr)
{
	struct port_list *item;
	struct port_list *next;

	for (item = pl_ptr->next; item; item = next) {
		next = item->next;
		kfree(item);
	}
}
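/*
 * Worked bitmap example: nodes 1, 2 and 5 in nm_a give map[0] == 0x26
 * (bits 1, 2, 5); node 2 alone in nm_b gives map[0] == 0x04. The diff
 * keeps only the bits of A that are absent from B:
 *
 *	0x26 ^ (0x26 & 0x04) == 0x22	// bits 1 and 5 remain
 *
 * so nm_diff ends up with count == 2, exactly the nodes A reaches that
 * B does not.
 */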
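/*
 * Usage sketch (assumed caller pattern from the multicast delivery path):
 * the head of the chain lives on the caller's stack, only overflow blocks
 * beyond the first PLSIZE ports are kmalloc'ed, and tipc_port_list_free()
 * therefore never frees the head itself:
 *
 *	struct port_list dports = {0, NULL, };
 *
 *	tipc_port_list_add(&dports, port_ref);
 *	... deliver the message to every port in 'dports' ...
 *	tipc_port_list_free(&dports);
 */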