1 /* 2 * net/tipc/node.c: TIPC node management routines 3 * 4 * Copyright (c) 2000-2006, Ericsson AB 5 * Copyright (c) 2005-2006, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include "core.h" 38 #include "config.h" 39 #include "node.h" 40 #include "cluster.h" 41 #include "net.h" 42 #include "addr.h" 43 #include "node_subscr.h" 44 #include "link.h" 45 #include "port.h" 46 #include "bearer.h" 47 #include "name_distr.h" 48 49 void node_print(struct print_buf *buf, struct node *n_ptr, char *str); 50 static void node_lost_contact(struct node *n_ptr); 51 static void node_established_contact(struct node *n_ptr); 52 53 struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */ 54 55 u32 tipc_own_tag = 0; 56 57 struct node *tipc_node_create(u32 addr) 58 { 59 struct cluster *c_ptr; 60 struct node *n_ptr; 61 struct node **curr_node; 62 63 n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC); 64 if (!n_ptr) { 65 warn("Node creation failed, no memory\n"); 66 return NULL; 67 } 68 69 c_ptr = tipc_cltr_find(addr); 70 if (!c_ptr) { 71 c_ptr = tipc_cltr_create(addr); 72 } 73 if (!c_ptr) { 74 kfree(n_ptr); 75 return NULL; 76 } 77 78 n_ptr->addr = addr; 79 spin_lock_init(&n_ptr->lock); 80 INIT_LIST_HEAD(&n_ptr->nsub); 81 n_ptr->owner = c_ptr; 82 tipc_cltr_attach_node(c_ptr, n_ptr); 83 n_ptr->last_router = -1; 84 85 /* Insert node into ordered list */ 86 for (curr_node = &tipc_nodes; *curr_node; 87 curr_node = &(*curr_node)->next) { 88 if (addr < (*curr_node)->addr) { 89 n_ptr->next = *curr_node; 90 break; 91 } 92 } 93 (*curr_node) = n_ptr; 94 return n_ptr; 95 } 96 97 void tipc_node_delete(struct node *n_ptr) 98 { 99 if (!n_ptr) 100 return; 101 102 #if 0 103 /* Not needed because links are already deleted via tipc_bearer_stop() */ 104 105 u32 l_num; 106 107 for (l_num = 0; l_num < MAX_BEARERS; l_num++) { 108 link_delete(n_ptr->links[l_num]); 109 } 110 #endif 111 112 dbg("node %x deleted\n", n_ptr->addr); 113 kfree(n_ptr); 114 } 115 116 117 /** 118 * tipc_node_link_up - handle addition of link 119 * 120 * Link becomes active (alone or shared) or standby, depending on its priority. 121 */ 122 123 void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr) 124 { 125 struct link **active = &n_ptr->active_links[0]; 126 127 n_ptr->working_links++; 128 129 info("Established link <%s> on network plane %c\n", 130 l_ptr->name, l_ptr->b_ptr->net_plane); 131 132 if (!active[0]) { 133 dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]); 134 active[0] = active[1] = l_ptr; 135 node_established_contact(n_ptr); 136 return; 137 } 138 if (l_ptr->priority < active[0]->priority) { 139 info("New link <%s> becomes standby\n", l_ptr->name); 140 return; 141 } 142 tipc_link_send_duplicate(active[0], l_ptr); 143 if (l_ptr->priority == active[0]->priority) { 144 active[0] = l_ptr; 145 return; 146 } 147 info("Old link <%s> becomes standby\n", active[0]->name); 148 if (active[1] != active[0]) 149 info("Old link <%s> becomes standby\n", active[1]->name); 150 active[0] = active[1] = l_ptr; 151 } 152 153 /** 154 * node_select_active_links - select active link 155 */ 156 157 static void node_select_active_links(struct node *n_ptr) 158 { 159 struct link **active = &n_ptr->active_links[0]; 160 u32 i; 161 u32 highest_prio = 0; 162 163 active[0] = active[1] = NULL; 164 165 for (i = 0; i < MAX_BEARERS; i++) { 166 struct link *l_ptr = n_ptr->links[i]; 167 168 if (!l_ptr || !tipc_link_is_up(l_ptr) || 169 (l_ptr->priority < highest_prio)) 170 continue; 171 172 if (l_ptr->priority > highest_prio) { 173 highest_prio = l_ptr->priority; 174 active[0] = active[1] = l_ptr; 175 } else { 176 active[1] = l_ptr; 177 } 178 } 179 } 180 181 /** 182 * tipc_node_link_down - handle loss of link 183 */ 184 185 void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr) 186 { 187 struct link **active; 188 189 n_ptr->working_links--; 190 191 if (!tipc_link_is_active(l_ptr)) { 192 info("Lost standby link <%s> on network plane %c\n", 193 l_ptr->name, l_ptr->b_ptr->net_plane); 194 return; 195 } 196 info("Lost link <%s> on network plane %c\n", 197 l_ptr->name, l_ptr->b_ptr->net_plane); 198 199 active = &n_ptr->active_links[0]; 200 if (active[0] == l_ptr) 201 active[0] = active[1]; 202 if (active[1] == l_ptr) 203 active[1] = active[0]; 204 if (active[0] == l_ptr) 205 node_select_active_links(n_ptr); 206 if (tipc_node_is_up(n_ptr)) 207 tipc_link_changeover(l_ptr); 208 else 209 node_lost_contact(n_ptr); 210 } 211 212 int tipc_node_has_active_links(struct node *n_ptr) 213 { 214 return (n_ptr && 215 ((n_ptr->active_links[0]) || (n_ptr->active_links[1]))); 216 } 217 218 int tipc_node_has_redundant_links(struct node *n_ptr) 219 { 220 return (n_ptr->working_links > 1); 221 } 222 223 static int tipc_node_has_active_routes(struct node *n_ptr) 224 { 225 return (n_ptr && (n_ptr->last_router >= 0)); 226 } 227 228 int tipc_node_is_up(struct node *n_ptr) 229 { 230 return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr)); 231 } 232 233 struct node *tipc_node_attach_link(struct link *l_ptr) 234 { 235 struct node *n_ptr = tipc_node_find(l_ptr->addr); 236 237 if (!n_ptr) 238 n_ptr = tipc_node_create(l_ptr->addr); 239 if (n_ptr) { 240 u32 bearer_id = l_ptr->b_ptr->identity; 241 char addr_string[16]; 242 243 if (n_ptr->link_cnt >= 2) { 244 char addr_string[16]; 245 246 err("Attempt to create third link to %s\n", 247 addr_string_fill(addr_string, n_ptr->addr)); 248 return NULL; 249 } 250 251 if (!n_ptr->links[bearer_id]) { 252 n_ptr->links[bearer_id] = l_ptr; 253 tipc_net.zones[tipc_zone(l_ptr->addr)]->links++; 254 n_ptr->link_cnt++; 255 return n_ptr; 256 } 257 err("Attempt to establish second link on <%s> to %s \n", 258 l_ptr->b_ptr->publ.name, 259 addr_string_fill(addr_string, l_ptr->addr)); 260 } 261 return NULL; 262 } 263 264 void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr) 265 { 266 n_ptr->links[l_ptr->b_ptr->identity] = NULL; 267 tipc_net.zones[tipc_zone(l_ptr->addr)]->links--; 268 n_ptr->link_cnt--; 269 } 270 271 /* 272 * Routing table management - five cases to handle: 273 * 274 * 1: A link towards a zone/cluster external node comes up. 275 * => Send a multicast message updating routing tables of all 276 * system nodes within own cluster that the new destination 277 * can be reached via this node. 278 * (node.establishedContact()=>cluster.multicastNewRoute()) 279 * 280 * 2: A link towards a slave node comes up. 281 * => Send a multicast message updating routing tables of all 282 * system nodes within own cluster that the new destination 283 * can be reached via this node. 284 * (node.establishedContact()=>cluster.multicastNewRoute()) 285 * => Send a message to the slave node about existence 286 * of all system nodes within cluster: 287 * (node.establishedContact()=>cluster.sendLocalRoutes()) 288 * 289 * 3: A new cluster local system node becomes available. 290 * => Send message(s) to this particular node containing 291 * information about all cluster external and slave 292 * nodes which can be reached via this node. 293 * (node.establishedContact()==>network.sendExternalRoutes()) 294 * (node.establishedContact()==>network.sendSlaveRoutes()) 295 * => Send messages to all directly connected slave nodes 296 * containing information about the existence of the new node 297 * (node.establishedContact()=>cluster.multicastNewRoute()) 298 * 299 * 4: The link towards a zone/cluster external node or slave 300 * node goes down. 301 * => Send a multcast message updating routing tables of all 302 * nodes within cluster that the new destination can not any 303 * longer be reached via this node. 304 * (node.lostAllLinks()=>cluster.bcastLostRoute()) 305 * 306 * 5: A cluster local system node becomes unavailable. 307 * => Remove all references to this node from the local 308 * routing tables. Note: This is a completely node 309 * local operation. 310 * (node.lostAllLinks()=>network.removeAsRouter()) 311 * => Send messages to all directly connected slave nodes 312 * containing information about loss of the node 313 * (node.establishedContact()=>cluster.multicastLostRoute()) 314 * 315 */ 316 317 static void node_established_contact(struct node *n_ptr) 318 { 319 struct cluster *c_ptr; 320 321 dbg("node_established_contact:-> %x\n", n_ptr->addr); 322 if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) { 323 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); 324 } 325 326 /* Syncronize broadcast acks */ 327 n_ptr->bclink.acked = tipc_bclink_get_last_sent(); 328 329 if (is_slave(tipc_own_addr)) 330 return; 331 if (!in_own_cluster(n_ptr->addr)) { 332 /* Usage case 1 (see above) */ 333 c_ptr = tipc_cltr_find(tipc_own_addr); 334 if (!c_ptr) 335 c_ptr = tipc_cltr_create(tipc_own_addr); 336 if (c_ptr) 337 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, 338 tipc_max_nodes); 339 return; 340 } 341 342 c_ptr = n_ptr->owner; 343 if (is_slave(n_ptr->addr)) { 344 /* Usage case 2 (see above) */ 345 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes); 346 tipc_cltr_send_local_routes(c_ptr, n_ptr->addr); 347 return; 348 } 349 350 if (n_ptr->bclink.supported) { 351 tipc_nmap_add(&tipc_cltr_bcast_nodes, n_ptr->addr); 352 if (n_ptr->addr < tipc_own_addr) 353 tipc_own_tag++; 354 } 355 356 /* Case 3 (see above) */ 357 tipc_net_send_external_routes(n_ptr->addr); 358 tipc_cltr_send_slave_routes(c_ptr, n_ptr->addr); 359 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE, 360 tipc_highest_allowed_slave); 361 } 362 363 static void node_lost_contact(struct node *n_ptr) 364 { 365 struct cluster *c_ptr; 366 struct node_subscr *ns, *tns; 367 char addr_string[16]; 368 u32 i; 369 370 /* Clean up broadcast reception remains */ 371 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0; 372 while (n_ptr->bclink.deferred_head) { 373 struct sk_buff* buf = n_ptr->bclink.deferred_head; 374 n_ptr->bclink.deferred_head = buf->next; 375 buf_discard(buf); 376 } 377 if (n_ptr->bclink.defragm) { 378 buf_discard(n_ptr->bclink.defragm); 379 n_ptr->bclink.defragm = NULL; 380 } 381 if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) { 382 tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000)); 383 } 384 385 /* Update routing tables */ 386 if (is_slave(tipc_own_addr)) { 387 tipc_net_remove_as_router(n_ptr->addr); 388 } else { 389 if (!in_own_cluster(n_ptr->addr)) { 390 /* Case 4 (see above) */ 391 c_ptr = tipc_cltr_find(tipc_own_addr); 392 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1, 393 tipc_max_nodes); 394 } else { 395 /* Case 5 (see above) */ 396 c_ptr = tipc_cltr_find(n_ptr->addr); 397 if (is_slave(n_ptr->addr)) { 398 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1, 399 tipc_max_nodes); 400 } else { 401 if (n_ptr->bclink.supported) { 402 tipc_nmap_remove(&tipc_cltr_bcast_nodes, 403 n_ptr->addr); 404 if (n_ptr->addr < tipc_own_addr) 405 tipc_own_tag--; 406 } 407 tipc_net_remove_as_router(n_ptr->addr); 408 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 409 LOWEST_SLAVE, 410 tipc_highest_allowed_slave); 411 } 412 } 413 } 414 if (tipc_node_has_active_routes(n_ptr)) 415 return; 416 417 info("Lost contact with %s\n", 418 addr_string_fill(addr_string, n_ptr->addr)); 419 420 /* Abort link changeover */ 421 for (i = 0; i < MAX_BEARERS; i++) { 422 struct link *l_ptr = n_ptr->links[i]; 423 if (!l_ptr) 424 continue; 425 l_ptr->reset_checkpoint = l_ptr->next_in_no; 426 l_ptr->exp_msg_count = 0; 427 tipc_link_reset_fragments(l_ptr); 428 } 429 430 /* Notify subscribers */ 431 list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) { 432 ns->node = NULL; 433 list_del_init(&ns->nodesub_list); 434 tipc_k_signal((Handler)ns->handle_node_down, 435 (unsigned long)ns->usr_handle); 436 } 437 } 438 439 /** 440 * tipc_node_select_next_hop - find the next-hop node for a message 441 * 442 * Called by when cluster local lookup has failed. 443 */ 444 445 struct node *tipc_node_select_next_hop(u32 addr, u32 selector) 446 { 447 struct node *n_ptr; 448 u32 router_addr; 449 450 if (!tipc_addr_domain_valid(addr)) 451 return NULL; 452 453 /* Look for direct link to destination processsor */ 454 n_ptr = tipc_node_find(addr); 455 if (n_ptr && tipc_node_has_active_links(n_ptr)) 456 return n_ptr; 457 458 /* Cluster local system nodes *must* have direct links */ 459 if (!is_slave(addr) && in_own_cluster(addr)) 460 return NULL; 461 462 /* Look for cluster local router with direct link to node */ 463 router_addr = tipc_node_select_router(n_ptr, selector); 464 if (router_addr) 465 return tipc_node_select(router_addr, selector); 466 467 /* Slave nodes can only be accessed within own cluster via a 468 known router with direct link -- if no router was found,give up */ 469 if (is_slave(addr)) 470 return NULL; 471 472 /* Inter zone/cluster -- find any direct link to remote cluster */ 473 addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0); 474 n_ptr = tipc_net_select_remote_node(addr, selector); 475 if (n_ptr && tipc_node_has_active_links(n_ptr)) 476 return n_ptr; 477 478 /* Last resort -- look for any router to anywhere in remote zone */ 479 router_addr = tipc_net_select_router(addr, selector); 480 if (router_addr) 481 return tipc_node_select(router_addr, selector); 482 483 return NULL; 484 } 485 486 /** 487 * tipc_node_select_router - select router to reach specified node 488 * 489 * Uses a deterministic and fair algorithm for selecting router node. 490 */ 491 492 u32 tipc_node_select_router(struct node *n_ptr, u32 ref) 493 { 494 u32 ulim; 495 u32 mask; 496 u32 start; 497 u32 r; 498 499 if (!n_ptr) 500 return 0; 501 502 if (n_ptr->last_router < 0) 503 return 0; 504 ulim = ((n_ptr->last_router + 1) * 32) - 1; 505 506 /* Start entry must be random */ 507 mask = tipc_max_nodes; 508 while (mask > ulim) 509 mask >>= 1; 510 start = ref & mask; 511 r = start; 512 513 /* Lookup upwards with wrap-around */ 514 do { 515 if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1) 516 break; 517 } while (++r <= ulim); 518 if (r > ulim) { 519 r = 1; 520 do { 521 if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1) 522 break; 523 } while (++r < start); 524 assert(r != start); 525 } 526 assert(r && (r <= ulim)); 527 return tipc_addr(own_zone(), own_cluster(), r); 528 } 529 530 void tipc_node_add_router(struct node *n_ptr, u32 router) 531 { 532 u32 r_num = tipc_node(router); 533 534 n_ptr->routers[r_num / 32] = 535 ((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]); 536 n_ptr->last_router = tipc_max_nodes / 32; 537 while ((--n_ptr->last_router >= 0) && 538 !n_ptr->routers[n_ptr->last_router]); 539 } 540 541 void tipc_node_remove_router(struct node *n_ptr, u32 router) 542 { 543 u32 r_num = tipc_node(router); 544 545 if (n_ptr->last_router < 0) 546 return; /* No routes */ 547 548 n_ptr->routers[r_num / 32] = 549 ((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32])); 550 n_ptr->last_router = tipc_max_nodes / 32; 551 while ((--n_ptr->last_router >= 0) && 552 !n_ptr->routers[n_ptr->last_router]); 553 554 if (!tipc_node_is_up(n_ptr)) 555 node_lost_contact(n_ptr); 556 } 557 558 #if 0 559 void node_print(struct print_buf *buf, struct node *n_ptr, char *str) 560 { 561 u32 i; 562 563 tipc_printf(buf, "\n\n%s", str); 564 for (i = 0; i < MAX_BEARERS; i++) { 565 if (!n_ptr->links[i]) 566 continue; 567 tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]); 568 } 569 tipc_printf(buf, "Active links: [%x,%x]\n", 570 n_ptr->active_links[0], n_ptr->active_links[1]); 571 } 572 #endif 573 574 u32 tipc_available_nodes(const u32 domain) 575 { 576 struct node *n_ptr; 577 u32 cnt = 0; 578 579 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { 580 if (!in_scope(domain, n_ptr->addr)) 581 continue; 582 if (tipc_node_is_up(n_ptr)) 583 cnt++; 584 } 585 return cnt; 586 } 587 588 struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) 589 { 590 u32 domain; 591 struct sk_buff *buf; 592 struct node *n_ptr; 593 struct tipc_node_info node_info; 594 u32 payload_size; 595 596 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 597 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 598 599 domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 600 if (!tipc_addr_domain_valid(domain)) 601 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 602 " (network address)"); 603 604 if (!tipc_nodes) 605 return tipc_cfg_reply_none(); 606 607 /* For now, get space for all other nodes 608 (will need to modify this when slave nodes are supported */ 609 610 payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1); 611 if (payload_size > 32768u) 612 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 613 " (too many nodes)"); 614 buf = tipc_cfg_reply_alloc(payload_size); 615 if (!buf) 616 return NULL; 617 618 /* Add TLVs for all nodes in scope */ 619 620 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { 621 if (!in_scope(domain, n_ptr->addr)) 622 continue; 623 node_info.addr = htonl(n_ptr->addr); 624 node_info.up = htonl(tipc_node_is_up(n_ptr)); 625 tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, 626 &node_info, sizeof(node_info)); 627 } 628 629 return buf; 630 } 631 632 struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) 633 { 634 u32 domain; 635 struct sk_buff *buf; 636 struct node *n_ptr; 637 struct tipc_link_info link_info; 638 u32 payload_size; 639 640 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 641 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 642 643 domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 644 if (!tipc_addr_domain_valid(domain)) 645 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 646 " (network address)"); 647 648 if (tipc_mode != TIPC_NET_MODE) 649 return tipc_cfg_reply_none(); 650 651 /* Get space for all unicast links + multicast link */ 652 653 payload_size = TLV_SPACE(sizeof(link_info)) * 654 (tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1); 655 if (payload_size > 32768u) 656 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 657 " (too many links)"); 658 buf = tipc_cfg_reply_alloc(payload_size); 659 if (!buf) 660 return NULL; 661 662 /* Add TLV for broadcast link */ 663 664 link_info.dest = htonl(tipc_own_addr & 0xfffff00); 665 link_info.up = htonl(1); 666 sprintf(link_info.str, tipc_bclink_name); 667 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); 668 669 /* Add TLVs for any other links in scope */ 670 671 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { 672 u32 i; 673 674 if (!in_scope(domain, n_ptr->addr)) 675 continue; 676 for (i = 0; i < MAX_BEARERS; i++) { 677 if (!n_ptr->links[i]) 678 continue; 679 link_info.dest = htonl(n_ptr->addr); 680 link_info.up = htonl(tipc_link_is_up(n_ptr->links[i])); 681 strcpy(link_info.str, n_ptr->links[i]->name); 682 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, 683 &link_info, sizeof(link_info)); 684 } 685 } 686 687 return buf; 688 } 689