/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, Ericsson AB
 * Copyright (c) 2005-2006, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "config.h"
#include "node.h"
#include "port.h"
#include "name_distr.h"

static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);

/* sorted (by address) list of nodes within cluster */
static struct tipc_node *tipc_nodes = NULL;

/* serializes concurrent node creation; see tipc_node_create() below */
static DEFINE_SPINLOCK(node_create_lock);

u32 tipc_own_tag = 0;

/**
 * tipc_node_create - create neighboring node
 *
 * Currently, this routine is called by neighbor discovery code, which holds
 * net_lock for reading only.  We must take node_create_lock to ensure a node
 * isn't created twice if two different bearers discover the node at the same
 * time.  (It would be preferable to switch to holding net_lock in write mode,
 * but this is a non-trivial change.)
 *
 * Returns the (new or already existing) node, or NULL on allocation or
 * cluster-creation failure.
 */

struct tipc_node *tipc_node_create(u32 addr)
{
	struct cluster *c_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node **curr_node;

	spin_lock_bh(&node_create_lock);

	/*
	 * The list is address-ordered, so we can stop scanning as soon as we
	 * pass the insertion point; if another bearer already created this
	 * node, just return the existing entry.
	 */
	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
		if (addr < n_ptr->addr)
			break;
		if (addr == n_ptr->addr) {
			spin_unlock_bh(&node_create_lock);
			return n_ptr;
		}
	}

	/* GFP_ATOMIC: we hold a BH-disabled spinlock, so we must not sleep */
	n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC);
	if (!n_ptr) {
		spin_unlock_bh(&node_create_lock);
		warn("Node creation failed, no memory\n");
		return NULL;
	}

	/* Attach node to its cluster, creating the cluster on first contact */
	c_ptr = tipc_cltr_find(addr);
	if (!c_ptr) {
		c_ptr = tipc_cltr_create(addr);
	}
	if (!c_ptr) {
		spin_unlock_bh(&node_create_lock);
		kfree(n_ptr);
		return NULL;
	}

	n_ptr->addr = addr;
	spin_lock_init(&n_ptr->lock);
	INIT_LIST_HEAD(&n_ptr->nsub);
	n_ptr->owner = c_ptr;
	tipc_cltr_attach_node(c_ptr, n_ptr);

	/* Insert node into ordered list */
	for (curr_node = &tipc_nodes; *curr_node;
	     curr_node = &(*curr_node)->next) {
		if (addr < (*curr_node)->addr) {
			n_ptr->next = *curr_node;
			break;
		}
	}
	/* curr_node now points at the link field to rewrite (or list tail) */
	(*curr_node) = n_ptr;

	spin_unlock_bh(&node_create_lock);
	return n_ptr;
}

/**
 * tipc_node_delete - free a node structure
 *
 * NOTE(review): this only frees the node itself; the caller is presumably
 * responsible for unlinking it from tipc_nodes and its cluster first —
 * confirm against callers (not visible in this file).
 */
void tipc_node_delete(struct tipc_node *n_ptr)
{
	if (!n_ptr)
		return;

	dbg("node %x deleted\n", n_ptr->addr);
	kfree(n_ptr);
}


/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */

void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
{
	struct link **active = &n_ptr->active_links[0];

	n_ptr->working_links++;

	info("Established link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	/* First working link to this node: contact is (re)established */
	if (!active[0]) {
		dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
		active[0] = active[1] = l_ptr;
		node_established_contact(n_ptr);
		return;
	}
	/* Lower priority than current active link(s): new link is standby */
	if (l_ptr->priority < active[0]->priority) {
		info("New link <%s> becomes standby\n", l_ptr->name);
		return;
	}
	tipc_link_send_duplicate(active[0], l_ptr);
	/* Equal priority: share the active role across both slots */
	if (l_ptr->priority == active[0]->priority) {
		active[0] = l_ptr;
		return;
	}
	/* Higher priority: new link displaces the old active link(s) */
	info("Old link <%s> becomes standby\n", active[0]->name);
	if (active[1] != active[0])
		info("Old link <%s> becomes standby\n", active[1]->name);
	active[0] = active[1] = l_ptr;
}

/**
 * node_select_active_links - select active link(s)
 *
 * Rebuilds active_links[] from scratch by scanning all working links:
 * the highest-priority link fills both slots; a second link of equal
 * priority takes the second slot (load sharing).
 */

static void node_select_active_links(struct tipc_node *n_ptr)
{
	struct link **active = &n_ptr->active_links[0];
	u32 i;
	u32 highest_prio = 0;

	active[0] = active[1] = NULL;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];

		if (!l_ptr || !tipc_link_is_up(l_ptr) ||
		    (l_ptr->priority < highest_prio))
			continue;

		if (l_ptr->priority > highest_prio) {
			highest_prio = l_ptr->priority;
			active[0] = active[1] = l_ptr;
		} else {
			/* equal priority: share via the second slot */
			active[1] = l_ptr;
		}
	}
}

/**
 * tipc_node_link_down - handle loss of link
 */
void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
{
	struct link **active;

	n_ptr->working_links--;

	/* Losing a standby link needs no active-link bookkeeping */
	if (!tipc_link_is_active(l_ptr)) {
		info("Lost standby link <%s> on network plane %c\n",
		     l_ptr->name, l_ptr->b_ptr->net_plane);
		return;
	}
	info("Lost link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	/*
	 * Replace l_ptr in whichever active slot(s) it occupies with the
	 * other slot's link; if l_ptr still fills both slots afterwards it
	 * was the only active link, so re-derive the active set from the
	 * remaining working links.
	 */
	active = &n_ptr->active_links[0];
	if (active[0] == l_ptr)
		active[0] = active[1];
	if (active[1] == l_ptr)
		active[1] = active[0];
	if (active[0] == l_ptr)
		node_select_active_links(n_ptr);
	if (tipc_node_is_up(n_ptr))
		tipc_link_changeover(l_ptr);	/* shift traffic to a surviving link */
	else
		node_lost_contact(n_ptr);	/* no links left to this node */
}

/* Returns non-zero if at least one link to the node is active */
int tipc_node_has_active_links(struct tipc_node *n_ptr)
{
	return n_ptr->active_links[0] != NULL;
}

/* Returns non-zero if more than one working link exists (redundancy) */
int tipc_node_has_redundant_links(struct tipc_node *n_ptr)
{
	return n_ptr->working_links > 1;
}

/* A node is "up" iff it has an active link */
int tipc_node_is_up(struct tipc_node *n_ptr)
{
	return tipc_node_has_active_links(n_ptr);
}

/**
 * tipc_node_attach_link - attach a newly created link to its peer node
 *
 * Creates the node structure on first contact.  Returns the node on
 * success, or NULL if the node would exceed two links or already has a
 * link on this bearer.
 */
struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
{
	struct tipc_node *n_ptr = tipc_node_find(l_ptr->addr);

	if (!n_ptr)
		n_ptr = tipc_node_create(l_ptr->addr);
	if (n_ptr) {
		u32 bearer_id = l_ptr->b_ptr->identity;
		char addr_string[16];

		/* at most two links per neighboring node */
		if (n_ptr->link_cnt >= 2) {
			err("Attempt to create third link to %s\n",
			    tipc_addr_string_fill(addr_string, n_ptr->addr));
			return NULL;
		}

		/* only one link per bearer to a given node */
		if (!n_ptr->links[bearer_id]) {
			n_ptr->links[bearer_id] = l_ptr;
			tipc_net.links++;
			n_ptr->link_cnt++;
			return n_ptr;
		}
		err("Attempt to establish second link on <%s> to %s\n",
		    l_ptr->b_ptr->publ.name,
		    tipc_addr_string_fill(addr_string, l_ptr->addr));
	}
	return NULL;
}

/**
 * tipc_node_detach_link - detach a link from its peer node
 *
 * Undoes the bookkeeping done by tipc_node_attach_link().
 */
void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
{
	n_ptr->links[l_ptr->b_ptr->identity] = NULL;
	tipc_net.links--;
	n_ptr->link_cnt--;
}

/*
 * Routing table management - five cases to handle:
 *
 * 1: A link towards a zone/cluster external node comes up.
 *    => Send a multicast message updating routing tables of all
 *       system nodes within own cluster that the new destination
 *       can be reached via this node.
 *       (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 2: A link towards a slave node comes up.
 *    => Send a multicast message updating routing tables of all
 *       system nodes within own cluster that the new destination
 *       can be reached via this node.
 *       (node.establishedContact()=>cluster.multicastNewRoute())
 *    => Send a message to the slave node about existence
 *       of all system nodes within cluster:
 *       (node.establishedContact()=>cluster.sendLocalRoutes())
 *
 * 3: A new cluster local system node becomes available.
 *    => Send message(s) to this particular node containing
 *       information about all cluster external and slave
 *       nodes which can be reached via this node.
 *       (node.establishedContact()==>network.sendExternalRoutes())
 *       (node.establishedContact()==>network.sendSlaveRoutes())
 *    => Send messages to all directly connected slave nodes
 *       containing information about the existence of the new node
 *       (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 4: The link towards a zone/cluster external node or slave
 *    node goes down.
 *    => Send a multicast message updating routing tables of all
 *       nodes within cluster that the new destination can not any
 *       longer be reached via this node.
 *       (node.lostAllLinks()=>cluster.bcastLostRoute())
 *
 * 5: A cluster local system node becomes unavailable.
 *    => Remove all references to this node from the local
 *       routing tables. Note: This is a completely node
 *       local operation.
 *       (node.lostAllLinks()=>network.removeAsRouter())
 *    => Send messages to all directly connected slave nodes
 *       containing information about loss of the node
 *       (node.establishedContact()=>cluster.multicastLostRoute())
 *
 */

/* First contact with a node: publish names and join broadcast bookkeeping */
static void node_established_contact(struct tipc_node *n_ptr)
{
	dbg("node_established_contact:-> %x\n", n_ptr->addr);
	/* defer name-table distribution to tasklet context */
	tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);

	/* Synchronize broadcast acks */
	n_ptr->bclink.acked = tipc_bclink_get_last_sent();

	if (n_ptr->bclink.supported) {
		tipc_nmap_add(&tipc_cltr_bcast_nodes, n_ptr->addr);
		if (n_ptr->addr < tipc_own_addr)
			tipc_own_tag++;
	}
}

/* Deferred handler: re-enable contact with a node once cleanup is complete */
static void node_cleanup_finished(unsigned long node_addr)
{
	struct tipc_node *n_ptr;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(node_addr);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		n_ptr->cleanup_required = 0;
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
}

/* All links to a node are gone: tear down per-node state and notify users */
static void node_lost_contact(struct tipc_node *n_ptr)
{
	struct tipc_node_subscr *ns, *tns;
	char addr_string[16];
	u32 i;

	/* Clean up broadcast reception remains */
	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
	while (n_ptr->bclink.deferred_head) {
		struct sk_buff* buf = n_ptr->bclink.deferred_head;
		n_ptr->bclink.deferred_head = buf->next;
		buf_discard(buf);
	}
	if (n_ptr->bclink.defragm) {
		buf_discard(n_ptr->bclink.defragm);
		n_ptr->bclink.defragm = NULL;
	}
	if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
		/* ack far ahead so pending broadcast buffers are released */
		tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
	}

	/* Update routing tables */
	if (n_ptr->bclink.supported) {
		tipc_nmap_remove(&tipc_cltr_bcast_nodes, n_ptr->addr);
		if (n_ptr->addr < tipc_own_addr)
			tipc_own_tag--;
	}

	info("Lost contact with %s\n",
	     tipc_addr_string_fill(addr_string, n_ptr->addr));

	/* Abort link changeover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];
		if (!l_ptr)
			continue;
		l_ptr->reset_checkpoint = l_ptr->next_in_no;
		l_ptr->exp_msg_count = 0;
		tipc_link_reset_fragments(l_ptr);
	}

	/* Notify subscribers */
	list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
		ns->node = NULL;
		list_del_init(&ns->nodesub_list);
		tipc_k_signal((Handler)ns->handle_node_down,
			      (unsigned long)ns->usr_handle);
	}

	/* Prevent re-contact with node until all cleanup is done */

	n_ptr->cleanup_required = 1;
	tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr);
}

/**
 * tipc_node_get_nodes - respond to "get nodes" configuration request
 *
 * Builds a reply containing one TIPC_TLV_NODE_INFO per known node within
 * the requested address domain, or an error TLV on a bad request.
 */
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_node_info node_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	read_lock_bh(&tipc_net_lock);
	if (!tipc_nodes) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_none();
	}

	/* For now, get space for all other nodes */

	payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1);
	/* NOTE(review): 32768u appears to be the maximum reply payload the
	 * configuration service supports — confirm against config.c */
	if (payload_size > 32768u) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many nodes)");
	}
	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf) {
		read_unlock_bh(&tipc_net_lock);
		return NULL;
	}

	/* Add TLVs for all nodes in scope */

	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		node_info.addr = htonl(n_ptr->addr);
		node_info.up = htonl(tipc_node_is_up(n_ptr));
		tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
				    &node_info, sizeof(node_info));
	}

	read_unlock_bh(&tipc_net_lock);
	return buf;
}

/**
 * tipc_node_get_links - respond to "get links" configuration request
 *
 * Builds a reply containing one TIPC_TLV_LINK_INFO for the broadcast link
 * plus one per unicast link within the requested address domain.
 */
struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_link_info link_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	if (tipc_mode != TIPC_NET_MODE)
		return tipc_cfg_reply_none();

	read_lock_bh(&tipc_net_lock);

	/* Get space for all unicast links + multicast link */

	payload_size = TLV_SPACE(sizeof(link_info)) * (tipc_net.links + 1);
	if (payload_size > 32768u) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many links)");
	}
	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf) {
		read_unlock_bh(&tipc_net_lock);
		return NULL;
	}

	/* Add TLV for broadcast link */

	/* the broadcast "destination" is this node's own cluster address */
	link_info.dest = htonl(tipc_own_addr & 0xfffff00);
	link_info.up = htonl(1);
	strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));

	/* Add TLVs for any other links in scope */

	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
		u32 i;

		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			if (!n_ptr->links[i])
				continue;
			link_info.dest = htonl(n_ptr->addr);
			link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
			/* NOTE(review): plain strcpy here, unlike the
			 * bounded strlcpy used for the broadcast link above;
			 * assumes link names always fit TIPC_MAX_LINK_NAME —
			 * verify against link name construction */
			strcpy(link_info.str, n_ptr->links[i]->name);
			tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
					    &link_info, sizeof(link_info));
		}
		tipc_node_unlock(n_ptr);
	}

	read_unlock_bh(&tipc_net_lock);
	return buf;
}