/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012 Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "config.h"
#include "node.h"
#include "name_distr.h"

#define NODE_HTABLE_SIZE 512

static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);

static struct hlist_head node_htable[NODE_HTABLE_SIZE];
LIST_HEAD(tipc_node_list);
static u32 tipc_num_nodes;
static u32 tipc_num_links;
static DEFINE_SPINLOCK(node_list_lock);

/*
 * A trivial power-of-two bitmask technique is used for speed, since this
 * operation is done for every incoming TIPC packet. The number of hash table
 * entries has been chosen so that no hash chain exceeds 8 nodes and will
 * usually be much smaller (typically only a single node).
 */
static unsigned int tipc_hashfn(u32 addr)
{
	return addr & (NODE_HTABLE_SIZE - 1);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
struct tipc_node *tipc_node_find(u32 addr)
{
	struct tipc_node *node;

	if (unlikely(!in_own_cluster_exact(addr)))
		return NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
		if (node->addr == addr) {
			rcu_read_unlock();
			return node;
		}
	}
	rcu_read_unlock();
	return NULL;
}
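
/**
 * tipc_node_create - create node object and add it to the node hash table
 * and the address-ordered node list
 *
 * Returns pointer to the new node, or NULL if allocation fails.
 */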
struct tipc_node *tipc_node_create(u32 addr)
{
	struct tipc_node *n_ptr, *temp_node;

	spin_lock_bh(&node_list_lock);

	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
	if (!n_ptr) {
		spin_unlock_bh(&node_list_lock);
		pr_warn("Node creation failed, no memory\n");
		return NULL;
	}

	n_ptr->addr = addr;
	spin_lock_init(&n_ptr->lock);
	INIT_HLIST_NODE(&n_ptr->hash);
	INIT_LIST_HEAD(&n_ptr->list);
	INIT_LIST_HEAD(&n_ptr->nsub);

	hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);

	list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
		if (n_ptr->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
	n_ptr->block_setup = WAIT_PEER_DOWN;
	n_ptr->signature = INVALID_NODE_SIG;

	tipc_num_nodes++;

	spin_unlock_bh(&node_list_lock);
	return n_ptr;
}

static void tipc_node_delete(struct tipc_node *n_ptr)
{
	list_del_rcu(&n_ptr->list);
	hlist_del_rcu(&n_ptr->hash);
	kfree_rcu(n_ptr, rcu);

	tipc_num_nodes--;
}

void tipc_node_stop(void)
{
	struct tipc_node *node, *t_node;

	spin_lock_bh(&node_list_lock);
	list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&node_list_lock);
}

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	struct tipc_link **active = &n_ptr->active_links[0];

	n_ptr->working_links++;

	pr_info("Established link <%s> on network plane %c\n",
		l_ptr->name, l_ptr->b_ptr->net_plane);

	if (!active[0]) {
		active[0] = active[1] = l_ptr;
		node_established_contact(n_ptr);
		return;
	}
	if (l_ptr->priority < active[0]->priority) {
		pr_info("New link <%s> becomes standby\n", l_ptr->name);
		return;
	}
	tipc_link_dup_queue_xmit(active[0], l_ptr);
	if (l_ptr->priority == active[0]->priority) {
		active[0] = l_ptr;
		return;
	}
	pr_info("Old link <%s> becomes standby\n", active[0]->name);
	if (active[1] != active[0])
		pr_info("Old link <%s> becomes standby\n", active[1]->name);
	active[0] = active[1] = l_ptr;
}

/**
 * node_select_active_links - select active link
 */
static void node_select_active_links(struct tipc_node *n_ptr)
{
	struct tipc_link **active = &n_ptr->active_links[0];
	u32 i;
	u32 highest_prio = 0;

	active[0] = active[1] = NULL;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link *l_ptr = n_ptr->links[i];

		if (!l_ptr || !tipc_link_is_up(l_ptr) ||
		    (l_ptr->priority < highest_prio))
			continue;

		if (l_ptr->priority > highest_prio) {
			highest_prio = l_ptr->priority;
			active[0] = active[1] = l_ptr;
		} else {
			active[1] = l_ptr;
		}
	}
}

/**
 * tipc_node_link_down - handle loss of link
 */
void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	struct tipc_link **active;

	n_ptr->working_links--;

	if (!tipc_link_is_active(l_ptr)) {
		pr_info("Lost standby link <%s> on network plane %c\n",
			l_ptr->name, l_ptr->b_ptr->net_plane);
		return;
	}
	pr_info("Lost link <%s> on network plane %c\n",
		l_ptr->name, l_ptr->b_ptr->net_plane);

	active = &n_ptr->active_links[0];
	if (active[0] == l_ptr)
		active[0] = active[1];
	if (active[1] == l_ptr)
		active[1] = active[0];
	if (active[0] == l_ptr)
		node_select_active_links(n_ptr);
	if (tipc_node_is_up(n_ptr))
		tipc_link_failover_send_queue(l_ptr);
	else
		node_lost_contact(n_ptr);
}

int tipc_node_active_links(struct tipc_node *n_ptr)
{
	return n_ptr->active_links[0] != NULL;
}

int tipc_node_is_up(struct tipc_node *n_ptr)
{
	return tipc_node_active_links(n_ptr);
}
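
/**
 * tipc_node_attach_link - register link in the node's per-bearer link array
 * and update the link counters
 */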
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
	spin_lock_bh(&node_list_lock);
	tipc_num_links++;
	spin_unlock_bh(&node_list_lock);
	n_ptr->link_cnt++;
}

void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		if (l_ptr != n_ptr->links[i])
			continue;
		n_ptr->links[i] = NULL;
		spin_lock_bh(&node_list_lock);
		tipc_num_links--;
		spin_unlock_bh(&node_list_lock);
		n_ptr->link_cnt--;
	}
}

static void node_established_contact(struct tipc_node *n_ptr)
{
	tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
	n_ptr->bclink.oos_state = 0;
	n_ptr->bclink.acked = tipc_bclink_get_last_sent();
	tipc_bclink_add_node(n_ptr->addr);
}

static void node_name_purge_complete(unsigned long node_addr)
{
	struct tipc_node *n_ptr;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(node_addr);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		n_ptr->block_setup &= ~WAIT_NAMES_GONE;
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
}
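
/**
 * node_lost_contact - flush broadcast link and changeover state for a lost
 * node, notify subscribers, and block re-contact until name table cleanup
 * has completed
 */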
static void node_lost_contact(struct tipc_node *n_ptr)
{
	char addr_string[16];
	u32 i;

	pr_info("Lost contact with %s\n",
		tipc_addr_string_fill(addr_string, n_ptr->addr));

	/* Flush broadcast link info associated with lost node */
	if (n_ptr->bclink.recv_permitted) {
		kfree_skb_list(n_ptr->bclink.deferred_head);
		n_ptr->bclink.deferred_size = 0;

		if (n_ptr->bclink.reasm_head) {
			kfree_skb(n_ptr->bclink.reasm_head);
			n_ptr->bclink.reasm_head = NULL;
			n_ptr->bclink.reasm_tail = NULL;
		}

		tipc_bclink_remove_node(n_ptr->addr);
		tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);

		n_ptr->bclink.recv_permitted = false;
	}

	/* Abort link changeover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link *l_ptr = n_ptr->links[i];
		if (!l_ptr)
			continue;
		l_ptr->reset_checkpoint = l_ptr->next_in_no;
		l_ptr->exp_msg_count = 0;
		tipc_link_reset_fragments(l_ptr);
	}

	/* Notify subscribers */
	tipc_nodesub_notify(n_ptr);

	/* Prevent re-contact with node until cleanup is done */
	n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
	tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
}

struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_node_info node_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	spin_lock_bh(&node_list_lock);
	if (!tipc_num_nodes) {
		spin_unlock_bh(&node_list_lock);
		return tipc_cfg_reply_none();
	}

	/* For now, get space for all other nodes */
	payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
	if (payload_size > 32768u) {
		spin_unlock_bh(&node_list_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many nodes)");
	}
	spin_unlock_bh(&node_list_lock);

	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf)
		return NULL;

	/* Add TLVs for all nodes in scope */
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		node_info.addr = htonl(n_ptr->addr);
		node_info.up = htonl(tipc_node_is_up(n_ptr));
		tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
				    &node_info, sizeof(node_info));
	}
	rcu_read_unlock();
	return buf;
}
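
/**
 * tipc_node_get_links - build configuration reply listing the broadcast link
 * plus every unicast link within the requested domain
 */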
struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_link_info link_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	if (!tipc_own_addr)
		return tipc_cfg_reply_none();

	spin_lock_bh(&node_list_lock);
	/* Get space for all unicast links + broadcast link */
	payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
	if (payload_size > 32768u) {
		spin_unlock_bh(&node_list_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many links)");
	}
	spin_unlock_bh(&node_list_lock);

	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf)
		return NULL;

	/* Add TLV for broadcast link */
	link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
	link_info.up = htonl(1);
	strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));

	/* Add TLVs for any other links in scope */
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		u32 i;

		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			if (!n_ptr->links[i])
				continue;
			link_info.dest = htonl(n_ptr->addr);
			link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
			strcpy(link_info.str, n_ptr->links[i]->name);
			tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
					    &link_info, sizeof(link_info));
		}
		tipc_node_unlock(n_ptr);
	}
	rcu_read_unlock();
	return buf;
}