/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "name_distr.h"
#include "bcast.h"

#define MAX_PKT_DEFAULT_MCAST 1500	/* bcast link max packet size (fixed) */

#define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */

#define BCLINK_LOG_BUF_SIZE 0

/*
 * Loss rate for incoming broadcast frames; used to test retransmission code.
 * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
 */

#define TIPC_BCAST_LOSS_RATE 0

/**
 * struct bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct bcbearer_pair {
        struct bearer *primary;
        struct bearer *secondary;
};

/**
 * struct bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bc_lock".
 */

struct bcbearer {
        struct bearer bearer;
        struct media media;
        struct bcbearer_pair bpairs[MAX_BEARERS];
        struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
        struct tipc_node_map remains;
        struct tipc_node_map remains_new;
};

/**
 * struct bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */

struct bclink {
        struct link link;
        struct tipc_node node;
};


static struct bcbearer *bcbearer = NULL;
static struct bclink *bclink = NULL;
static struct link *bcl = NULL;
static DEFINE_SPINLOCK(bc_lock);

/* broadcast-capable node map */
struct tipc_node_map tipc_bcast_nmap;

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff);

static u32 buf_seqno(struct sk_buff *buf)
{
        return msg_seqno(buf_msg(buf));
}

static u32 bcbuf_acks(struct sk_buff *buf)
{
        return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
        TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
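
/*
 * Each buffer on the broadcast link's outbound queue tracks how many peer
 * nodes still need to acknowledge it.  Since the broadcast link makes no
 * other use of the skb control block's handle field, the count is stored
 * there directly (see the bcbuf_*() helpers above): a buffer starts with
 * one "ack" per broadcast-capable node, and tipc_bclink_acknowledge()
 * releases it once the count reaches zero.
 */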

/*
 * The broadcast link never runs the unicast link FSM, so its otherwise
 * unused fsm_msg_cnt field is borrowed to record the sequence number of
 * the last broadcast packet actually sent.
 */

static void bclink_set_last_sent(void)
{
        if (bcl->next_out)
                bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
        else
                bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

u32 tipc_bclink_get_last_sent(void)
{
        return bcl->fsm_msg_cnt;
}

/**
 * bclink_set_gap - set gap according to contents of current deferred pkt queue
 *
 * Called with 'node' locked, bc_lock unlocked
 */

static void bclink_set_gap(struct tipc_node *n_ptr)
{
        struct sk_buff *buf = n_ptr->bclink.deferred_head;

        n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
                mod(n_ptr->bclink.last_in);
        if (unlikely(buf != NULL))
                n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}

/**
 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
 *
 * This mechanism endeavours to prevent all nodes in network from trying
 * to ACK or NACK at the same time.
 *
 * Note: TIPC uses a different trigger to distribute ACKs than it does to
 * distribute NACKs, but tries to use the same spacing (divide by 16).
 */

static int bclink_ack_allowed(u32 n)
{
        return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
}
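
/*
 * Example of the staggering above: TIPC_MIN_LINK_WIN is 16, so a node
 * whose tipc_own_tag is 3 responds only to trigger values n where
 * n % 16 == 3.  Provided nodes hold distinct tags, they respond to
 * different trigger values, spreading ACK/NACK traffic over time.
 */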

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */

static void bclink_retransmit_pkt(u32 after, u32 to)
{
        struct sk_buff *buf;

        buf = bcl->first_out;
        while (buf && less_eq(buf_seqno(buf), after)) {
                buf = buf->next;
        }
        tipc_link_retransmit(bcl, buf, mod(to - after));
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */

void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
        struct sk_buff *crs;
        struct sk_buff *next;
        unsigned int released = 0;

        if (less_eq(acked, n_ptr->bclink.acked))
                return;

        spin_lock_bh(&bc_lock);

        /* Skip over packets that node has previously acknowledged */

        crs = bcl->first_out;
        while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
                crs = crs->next;
        }

        /* Update packets that node is now acknowledging */

        while (crs && less_eq(buf_seqno(crs), acked)) {
                next = crs->next;
                bcbuf_decr_acks(crs);
                if (bcbuf_acks(crs) == 0) {
                        bcl->first_out = next;
                        bcl->out_queue_size--;
                        buf_discard(crs);
                        released = 1;
                }
                crs = next;
        }
        n_ptr->bclink.acked = acked;

        /* Try resolving broadcast link congestion, if necessary */

        if (unlikely(bcl->next_out)) {
                tipc_link_push_queue(bcl);
                bclink_set_last_sent();
        }
        if (unlikely(released && !list_empty(&bcl->waiting_ports)))
                tipc_link_wakeup_ports(bcl, 0);
        spin_unlock_bh(&bc_lock);
}

/**
 * bclink_send_ack - unicast an ACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_ack(struct tipc_node *n_ptr)
{
        struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];

        if (l_ptr != NULL)
                tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}

/**
 * bclink_send_nack - broadcast a NACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_nack(struct tipc_node *n_ptr)
{
        struct sk_buff *buf;
        struct tipc_msg *msg;

        if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
                return;

        buf = tipc_buf_acquire(INT_H_SIZE);
        if (buf) {
                msg = buf_msg(buf);
                tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
                              INT_H_SIZE, n_ptr->addr);
                msg_set_mc_netid(msg, tipc_net_id);
                msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
                msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
                msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
                msg_set_bcast_tag(msg, tipc_own_tag);

                if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
                        bcl->stats.sent_nacks++;
                        buf_discard(buf);
                } else {
                        tipc_bearer_schedule(bcl->b_ptr, bcl);
                        bcl->proto_msg_queue = buf;
                        bcl->stats.bearer_congs++;
                }

                /*
                 * Ensure we don't send another NACK msg to the node
                 * until 16 more deferred messages arrive from it
                 * (i.e. helps prevent all nodes from NACK'ing at same time)
                 */

                n_ptr->bclink.nack_sync = tipc_own_tag;
        }
}

/**
 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
 *
 * tipc_net_lock and node lock set
 */

void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
{
        if (!n_ptr->bclink.supported ||
            less_eq(last_sent, mod(n_ptr->bclink.last_in)))
                return;

        bclink_set_gap(n_ptr);
        if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
                n_ptr->bclink.gap_to = last_sent;
        bclink_send_nack(n_ptr);
}

/**
 * tipc_bclink_peek_nack - process a NACK msg meant for another node
 *
 * Only tipc_net_lock set.
 */

static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
        struct tipc_node *n_ptr = tipc_node_find(dest);
        u32 my_after, my_to;

        if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
                return;
        tipc_node_lock(n_ptr);
        /*
         * Modify gap to suppress unnecessary NACKs from this node
         */
        my_after = n_ptr->bclink.gap_after;
        my_to = n_ptr->bclink.gap_to;

        if (less_eq(gap_after, my_after)) {
                if (less(my_after, gap_to) && less(gap_to, my_to))
                        n_ptr->bclink.gap_after = gap_to;
                else if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
        } else if (less_eq(gap_after, my_to)) {
                if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = gap_after;
        } else {
                /*
                 * Expand gap if missing bufs not in deferred queue:
                 */
                struct sk_buff *buf = n_ptr->bclink.deferred_head;
                u32 prev = n_ptr->bclink.gap_to;

                for (; buf; buf = buf->next) {
                        u32 seqno = buf_seqno(buf);

                        if (mod(seqno - prev) != 1) {
                                buf = NULL;
                                break;
                        }
                        if (seqno == gap_after)
                                break;
                        prev = seqno;
                }
                if (buf == NULL)
                        n_ptr->bclink.gap_to = gap_after;
        }
        /*
         * Some nodes may send a complementary NACK now:
         */
        if (bclink_ack_allowed(sender_tag + 1)) {
                if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
                        bclink_send_nack(n_ptr);
                        bclink_set_gap(n_ptr);
                }
        }
        tipc_node_unlock(n_ptr);
}
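
/*
 * Worked example of the gap adjustment above: suppose this node has
 * gap_after = 4, gap_to = 9 (i.e. packets 5..9 missing) and it overhears
 * a NACK with gap_after = 4, gap_to = 7.  That NACK already requests
 * retransmission of 5..7, so the first case above advances this node's
 * gap_after to 7, and a later NACK from here need only cover 8..9.
 */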

/**
 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
 */

int tipc_bclink_send_msg(struct sk_buff *buf)
{
        int res;

        spin_lock_bh(&bc_lock);

        res = tipc_link_send_buf(bcl, buf);
        if (unlikely(res == -ELINKCONG))
                buf_discard(buf);
        else
                bclink_set_last_sent();

        if (bcl->out_queue_size > bcl->stats.max_queue_sz)
                bcl->stats.max_queue_sz = bcl->out_queue_size;
        bcl->stats.queue_sz_counts++;
        bcl->stats.accu_queue_sz += bcl->out_queue_size;

        spin_unlock_bh(&bc_lock);
        return res;
}

/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */

void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
#if (TIPC_BCAST_LOSS_RATE)
        static int rx_count = 0;
#endif
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
        u32 next_in;
        u32 seqno;
        struct sk_buff *deferred;

        msg_dbg(msg, "<BC<<<");

        if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
                     (msg_mc_netid(msg) != tipc_net_id))) {
                buf_discard(buf);
                return;
        }

        if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
                msg_dbg(msg, "<BCNACK<<<");
                if (msg_destnode(msg) == tipc_own_addr) {
                        tipc_node_lock(node);
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_node_unlock(node);
                        spin_lock_bh(&bc_lock);
                        bcl->stats.recv_nacks++;
                        bcl->owner->next = node;   /* remember requestor */
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                        bcl->owner->next = NULL;
                        spin_unlock_bh(&bc_lock);
                } else {
                        tipc_bclink_peek_nack(msg_destnode(msg),
                                              msg_bcast_tag(msg),
                                              msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                }
                buf_discard(buf);
                return;
        }

#if (TIPC_BCAST_LOSS_RATE)
        if (++rx_count == TIPC_BCAST_LOSS_RATE) {
                rx_count = 0;
                buf_discard(buf);
                return;
        }
#endif

        tipc_node_lock(node);
receive:
        deferred = node->bclink.deferred_head;
        next_in = mod(node->bclink.last_in + 1);
        seqno = msg_seqno(msg);

        if (likely(seqno == next_in)) {
                bcl->stats.recv_info++;
                node->bclink.last_in++;
                bclink_set_gap(node);
                if (unlikely(bclink_ack_allowed(seqno))) {
                        bclink_send_ack(node);
                        bcl->stats.sent_acks++;
                }
                if (likely(msg_isdata(msg))) {
                        tipc_node_unlock(node);
                        tipc_port_recv_mcast(buf, NULL);
                } else if (msg_user(msg) == MSG_BUNDLER) {
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
                        tipc_node_unlock(node);
                        tipc_link_recv_bundle(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        bcl->stats.recv_fragments++;
                        if (tipc_link_recv_fragment(&node->bclink.defragm,
                                                    &buf, &msg))
                                bcl->stats.recv_fragmented++;
                        tipc_node_unlock(node);
                        tipc_net_route_msg(buf);
                } else {
                        tipc_node_unlock(node);
                        tipc_net_route_msg(buf);
                }
                if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
                        tipc_node_lock(node);
                        buf = deferred;
                        msg = buf_msg(buf);
                        node->bclink.deferred_head = deferred->next;
                        goto receive;
                }
                return;
        } else if (less(next_in, seqno)) {
                u32 gap_after = node->bclink.gap_after;
                u32 gap_to = node->bclink.gap_to;

                if (tipc_link_defer_pkt(&node->bclink.deferred_head,
                                        &node->bclink.deferred_tail,
                                        buf)) {
                        node->bclink.nack_sync++;
                        bcl->stats.deferred_recv++;
                        if (seqno == mod(gap_after + 1))
                                node->bclink.gap_after = seqno;
                        else if (less(gap_after, seqno) && less(seqno, gap_to))
                                node->bclink.gap_to = seqno;
                }
                if (bclink_ack_allowed(node->bclink.nack_sync)) {
                        if (gap_to != gap_after)
                                bclink_send_nack(node);
                        bclink_set_gap(node);
                }
        } else {
                bcl->stats.duplicates++;
                buf_discard(buf);
        }
        tipc_node_unlock(node);
}
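
/*
 * Reception above falls into three cases: an in-sequence packet is
 * delivered (followed, via the "receive" label, by any deferred packets
 * it unblocks); a packet beyond the expected sequence number is parked
 * on the deferred queue and may trigger a NACK; and anything older is
 * counted as a duplicate and dropped.
 */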

u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
        return (n_ptr->bclink.supported &&
                (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}


/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send through as many bearers as necessary to reach all nodes
 * that support TIPC multicasting.
 *
 * Returns 0 if packet sent successfully, non-zero if not
 */

static int tipc_bcbearer_send(struct sk_buff *buf,
                              struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
{
        int bp_index;

        /* Prepare buffer for broadcasting (if first time trying to send it) */

        if (likely(!msg_non_seq(buf_msg(buf)))) {
                struct tipc_msg *msg;

                assert(tipc_bcast_nmap.count != 0);
                bcbuf_set_acks(buf, tipc_bcast_nmap.count);
                msg = buf_msg(buf);
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                bcl->stats.sent_info++;
        }

        /* Send buffer over bearers until all targets reached */

        bcbearer->remains = tipc_bcast_nmap;

        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
                struct bearer *p = bcbearer->bpairs[bp_index].primary;
                struct bearer *s = bcbearer->bpairs[bp_index].secondary;

                if (!p)
                        break;  /* no more bearers to try */

                tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
                if (bcbearer->remains_new.count == bcbearer->remains.count)
                        continue;       /* bearer pair doesn't add anything */

                if (p->publ.blocked ||
                    p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
                        /* unable to send on primary bearer */
                        if (!s || s->publ.blocked ||
                            s->media->send_msg(buf, &s->publ,
                                               &s->media->bcast_addr)) {
                                /* unable to send on either bearer */
                                continue;
                        }
                }

                if (s) {
                        bcbearer->bpairs[bp_index].primary = s;
                        bcbearer->bpairs[bp_index].secondary = p;
                }

                if (bcbearer->remains_new.count == 0)
                        return 0;

                bcbearer->remains = bcbearer->remains_new;
        }

        /*
         * Unable to reach all targets (indicate success, since currently
         * there isn't code in place to properly block & unblock the
         * pseudo-bearer used by the broadcast link)
         */

        return TIPC_OK;
}
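
/*
 * Example of how tipc_bcbearer_send() above and tipc_bcbearer_sort()
 * below cooperate: with two Ethernet bearers of equal priority covering
 * the same set of nodes, tipc_bcbearer_sort() pairs them, the send
 * routine transmits on the primary (falling back to the secondary on
 * failure), and the pair is swapped after each send that gets through,
 * so successive broadcasts alternate between the two bearers - a simple
 * load-sharing scheme.
 */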

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */

void tipc_bcbearer_sort(void)
{
        struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
        struct bcbearer_pair *bp_curr;
        int b_index;
        int pri;

        spin_lock_bh(&bc_lock);

        /* Group bearers by priority (can assume max of two per priority) */

        memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
                struct bearer *b = &tipc_bearers[b_index];

                if (!b->active || !b->nodes.count)
                        continue;

                if (!bp_temp[b->priority].primary)
                        bp_temp[b->priority].primary = b;
                else
                        bp_temp[b->priority].secondary = b;
        }

        /* Create array of bearer pairs for broadcasting */

        bp_curr = bcbearer->bpairs;
        memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

        for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

                if (!bp_temp[pri].primary)
                        continue;

                bp_curr->primary = bp_temp[pri].primary;

                if (bp_temp[pri].secondary) {
                        if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
                                            &bp_temp[pri].secondary->nodes)) {
                                bp_curr->secondary = bp_temp[pri].secondary;
                        } else {
                                bp_curr++;
                                bp_curr->primary = bp_temp[pri].secondary;
                        }
                }

                bp_curr++;
        }

        spin_unlock_bh(&bc_lock);
}

/**
 * tipc_bcbearer_push - resolve bearer congestion
 *
 * Forces bclink to push out any unsent packets, until all packets are gone
 * or congestion reoccurs.
 * No locks set when function called
 */

void tipc_bcbearer_push(void)
{
        struct bearer *b_ptr;

        spin_lock_bh(&bc_lock);
        b_ptr = &bcbearer->bearer;
        if (b_ptr->publ.blocked) {
                b_ptr->publ.blocked = 0;
                tipc_bearer_lock_push(b_ptr);
        }
        spin_unlock_bh(&bc_lock);
}


int tipc_bclink_stats(char *buf, const u32 buf_size)
{
        struct print_buf pb;

        if (!bcl)
                return 0;

        tipc_printbuf_init(&pb, buf, buf_size);

        spin_lock_bh(&bc_lock);

        tipc_printf(&pb, "Link <%s>\n"
                         "  Window:%u packets\n",
                    bcl->name, bcl->queue_limit[0]);
        tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
                    bcl->stats.recv_info,
                    bcl->stats.recv_fragments,
                    bcl->stats.recv_fragmented,
                    bcl->stats.recv_bundles,
                    bcl->stats.recv_bundled);
        tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
                    bcl->stats.sent_info,
                    bcl->stats.sent_fragments,
                    bcl->stats.sent_fragmented,
                    bcl->stats.sent_bundles,
                    bcl->stats.sent_bundled);
        tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n",
                    bcl->stats.recv_nacks,
                    bcl->stats.deferred_recv,
                    bcl->stats.duplicates);
        tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n",
                    bcl->stats.sent_nacks,
                    bcl->stats.sent_acks,
                    bcl->stats.retransmitted);
        tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
                    bcl->stats.bearer_congs,
                    bcl->stats.link_congs,
                    bcl->stats.max_queue_sz,
                    bcl->stats.queue_sz_counts
                    ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
                    : 0);

        spin_unlock_bh(&bc_lock);
        return tipc_printbuf_validate(&pb);
}

int tipc_bclink_reset_stats(void)
{
        if (!bcl)
                return -ENOPROTOOPT;

        spin_lock_bh(&bc_lock);
        memset(&bcl->stats, 0, sizeof(bcl->stats));
        spin_unlock_bh(&bc_lock);
        return 0;
}

int tipc_bclink_set_queue_limits(u32 limit)
{
        if (!bcl)
                return -ENOPROTOOPT;
        if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
                return -EINVAL;

        spin_lock_bh(&bc_lock);
        tipc_link_set_queue_limits(bcl, limit);
        spin_unlock_bh(&bc_lock);
        return 0;
}

int tipc_bclink_init(void)
{
        bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
        bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
        if (!bcbearer || !bclink) {
 nomem:
                warn("Multicast link creation failed, no memory\n");
                kfree(bcbearer);
                bcbearer = NULL;
                kfree(bclink);
                bclink = NULL;
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
        bcbearer->bearer.media = &bcbearer->media;
        bcbearer->media.send_msg = tipc_bcbearer_send;
        sprintf(bcbearer->media.name, "tipc-multicast");

        bcl = &bclink->link;
        INIT_LIST_HEAD(&bcl->waiting_ports);
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
        bcl->owner = &bclink->node;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->b_ptr = &bcbearer->bearer;
        bcl->state = WORKING_WORKING;
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);

        if (BCLINK_LOG_BUF_SIZE) {
                char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);

                if (!pb)
                        goto nomem;
                tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
        }

        return 0;
}

void tipc_bclink_stop(void)
{
        spin_lock_bh(&bc_lock);
        if (bcbearer) {
                tipc_link_stop(bcl);
                if (BCLINK_LOG_BUF_SIZE)
                        kfree(bcl->print_buf.buf);
                bcl = NULL;
                kfree(bclink);
                bclink = NULL;
                kfree(bcbearer);
                bcbearer = NULL;
        }
        spin_unlock_bh(&bc_lock);
}


/**
 * tipc_nmap_add - add a node to a node map
 */

void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int w = n / WSIZE;
        u32 mask = (1 << (n % WSIZE));

        if ((nm_ptr->map[w] & mask) == 0) {
                nm_ptr->count++;
                nm_ptr->map[w] |= mask;
        }
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */

void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int w = n / WSIZE;
        u32 mask = (1 << (n % WSIZE));

        if ((nm_ptr->map[w] & mask) != 0) {
                nm_ptr->map[w] &= ~mask;
                nm_ptr->count--;
        }
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff)
{
        int stop = ARRAY_SIZE(nm_a->map);
        int w;
        int b;
        u32 map;

        memset(nm_diff, 0, sizeof(*nm_diff));
        for (w = 0; w < stop; w++) {
                map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
                nm_diff->map[w] = map;
                if (map != 0) {
                        for (b = 0 ; b < WSIZE; b++) {
                                if (map & (1 << b))
                                        nm_diff->count++;
                        }
                }
        }
}
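
/*
 * Note on the bitmap arithmetic above: for each word,
 * a ^ (a & b) is equivalent to a & ~b, i.e. the bits set in A but not
 * in B, which is exactly the set difference A - B that the kernel-doc
 * describes.
 */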

/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */

void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
{
        struct port_list *item = pl_ptr;
        int i;
        int item_sz = PLSIZE;
        int cnt = pl_ptr->count;

        for (; ; cnt -= item_sz, item = item->next) {
                if (cnt < PLSIZE)
                        item_sz = cnt;
                for (i = 0; i < item_sz; i++)
                        if (item->ports[i] == port)
                                return;
                if (i < PLSIZE) {
                        item->ports[i] = port;
                        pl_ptr->count++;
                        return;
                }
                if (!item->next) {
                        item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
                        if (!item->next) {
                                warn("Incomplete multicast delivery, no memory\n");
                                return;
                        }
                        item->next->next = NULL;
                }
        }
}
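
/*
 * A port_list is a chained sequence of fixed-size arrays: the head item
 * is supplied by the caller and holds the total port count, and overflow
 * entries are kmalloc'd PLSIZE ports at a time.  The loop in
 * tipc_port_list_add() above uses that count to find the first free slot
 * in the final array of the chain.
 */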

/**
 * tipc_port_list_free - free dynamically created entries in port_list chain
 */

void tipc_port_list_free(struct port_list *pl_ptr)
{
        struct port_list *item;
        struct port_list *next;

        for (item = pl_ptr->next; item; item = next) {
                next = item->next;
                kfree(item);
        }
}