/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012 Ericsson AB
 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "config.h"
#include "node.h"
#include "name_distr.h"

#define NODE_HTABLE_SIZE 512

static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);

static DEFINE_SPINLOCK(node_create_lock);

static struct hlist_head node_htable[NODE_HTABLE_SIZE];
LIST_HEAD(tipc_node_list);
static u32 tipc_num_nodes;

static atomic_t tipc_num_links = ATOMIC_INIT(0);

/*
 * A trivial power-of-two bitmask technique is used for speed, since this
 * operation is done for every incoming TIPC packet. The number of hash table
 * entries has been chosen so that no hash chain exceeds 8 nodes and will
 * usually be much smaller (typically only a single node).
 */
static unsigned int tipc_hashfn(u32 addr)
{
        return addr & (NODE_HTABLE_SIZE - 1);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
struct tipc_node *tipc_node_find(u32 addr)
{
        struct tipc_node *node;

        if (unlikely(!in_own_cluster_exact(addr)))
                return NULL;

        hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
                if (node->addr == addr)
                        return node;
        }
        return NULL;
}

/**
 * tipc_node_create - create neighboring node
 *
 * Currently, this routine is called by neighbor discovery code, which holds
 * net_lock for reading only. We must take node_create_lock to ensure a node
 * isn't created twice if two different bearers discover the node at the same
 * time.
 * (It would be preferable to switch to holding net_lock in write mode,
 * but this is a non-trivial change.)
 */
struct tipc_node *tipc_node_create(u32 addr)
{
        struct tipc_node *n_ptr, *temp_node;

        spin_lock_bh(&node_create_lock);

        n_ptr = tipc_node_find(addr);
        if (n_ptr) {
                spin_unlock_bh(&node_create_lock);
                return n_ptr;
        }

        n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
        if (!n_ptr) {
                spin_unlock_bh(&node_create_lock);
                pr_warn("Node creation failed, no memory\n");
                return NULL;
        }

        n_ptr->addr = addr;
        spin_lock_init(&n_ptr->lock);
        INIT_HLIST_NODE(&n_ptr->hash);
        INIT_LIST_HEAD(&n_ptr->list);
        INIT_LIST_HEAD(&n_ptr->nsub);

        hlist_add_head(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);

        /* Insert before the first node with a higher address, keeping
         * tipc_node_list sorted in ascending address order
         */
        list_for_each_entry(temp_node, &tipc_node_list, list) {
                if (n_ptr->addr < temp_node->addr)
                        break;
        }
        list_add_tail(&n_ptr->list, &temp_node->list);
        n_ptr->block_setup = WAIT_PEER_DOWN;
        n_ptr->signature = INVALID_NODE_SIG;

        tipc_num_nodes++;

        spin_unlock_bh(&node_create_lock);
        return n_ptr;
}

/*
 * tipc_node_delete - unlink node from hash table and node list, then free it
 */
void tipc_node_delete(struct tipc_node *n_ptr)
{
        list_del(&n_ptr->list);
        hlist_del(&n_ptr->hash);
        kfree(n_ptr);

        tipc_num_nodes--;
}

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
        struct tipc_link **active = &n_ptr->active_links[0];

        n_ptr->working_links++;

        pr_info("Established link <%s> on network plane %c\n",
                l_ptr->name, l_ptr->b_ptr->net_plane);

        if (!active[0]) {
                active[0] = active[1] = l_ptr;
                node_established_contact(n_ptr);
                return;
        }
        if (l_ptr->priority < active[0]->priority) {
                pr_info("New link <%s> becomes standby\n", l_ptr->name);
                return;
        }
        tipc_link_dup_send_queue(active[0], l_ptr);
        if (l_ptr->priority == active[0]->priority) {
                active[0] = l_ptr;
                return;
        }
        pr_info("Old link <%s> becomes standby\n", active[0]->name);
        if (active[1] != active[0])
                pr_info("Old link <%s> becomes standby\n", active[1]->name);
        active[0] = active[1] = l_ptr;
}

/**
 * node_select_active_links - select active link(s)
 */
static void node_select_active_links(struct tipc_node *n_ptr)
{
        struct tipc_link **active = &n_ptr->active_links[0];
        u32 i;
        u32 highest_prio = 0;

        active[0] = active[1] = NULL;

        for (i = 0; i < MAX_BEARERS; i++) {
                struct tipc_link *l_ptr = n_ptr->links[i];

                if (!l_ptr || !tipc_link_is_up(l_ptr) ||
                    (l_ptr->priority < highest_prio))
                        continue;

                if (l_ptr->priority > highest_prio) {
                        highest_prio = l_ptr->priority;
                        active[0] = active[1] = l_ptr;
                } else {
                        active[1] = l_ptr;
                }
        }
}

/**
 * tipc_node_link_down - handle loss of link
 */
void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
        struct tipc_link **active;

        n_ptr->working_links--;

        if (!tipc_link_is_active(l_ptr)) {
                pr_info("Lost standby link <%s> on network plane %c\n",
                        l_ptr->name, l_ptr->b_ptr->net_plane);
                return;
        }
        pr_info("Lost link <%s> on network plane %c\n",
                l_ptr->name, l_ptr->b_ptr->net_plane);

        active = &n_ptr->active_links[0];
        if (active[0] == l_ptr)
                active[0] = active[1];
        if (active[1] == l_ptr)
                active[1] = active[0];
        if (active[0] == l_ptr)
                node_select_active_links(n_ptr);
        if (tipc_node_is_up(n_ptr))
                tipc_link_failover_send_queue(l_ptr);
        else
                node_lost_contact(n_ptr);
}

int tipc_node_active_links(struct tipc_node *n_ptr)
{
        return n_ptr->active_links[0] != NULL;
}

int tipc_node_is_up(struct tipc_node *n_ptr)
{
        return tipc_node_active_links(n_ptr);
}

/*
 * tipc_node_attach_link - register link in its peer node's bearer slot
 */
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
        n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
        atomic_inc(&tipc_num_links);
        n_ptr->link_cnt++;
}

void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
        n_ptr->links[l_ptr->b_ptr->identity] = NULL;
        atomic_dec(&tipc_num_links);
        n_ptr->link_cnt--;
}

static void node_established_contact(struct tipc_node *n_ptr)
{
        tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
        n_ptr->bclink.oos_state = 0;
        n_ptr->bclink.acked = tipc_bclink_get_last_sent();
        tipc_bclink_add_node(n_ptr->addr);
}

/*
 * node_name_purge_complete - clear WAIT_NAMES_GONE once name table cleanup
 * for the failed node has run, allowing link setup to resume
 */
static void node_name_purge_complete(unsigned long node_addr)
{
        struct tipc_node *n_ptr;

        read_lock_bh(&tipc_net_lock);
        n_ptr = tipc_node_find(node_addr);
        if (n_ptr) {
                tipc_node_lock(n_ptr);
                n_ptr->block_setup &= ~WAIT_NAMES_GONE;
                tipc_node_unlock(n_ptr);
        }
        read_unlock_bh(&tipc_net_lock);
}

static void node_lost_contact(struct tipc_node *n_ptr)
{
        char addr_string[16];
        u32 i;

        pr_info("Lost contact with %s\n",
                tipc_addr_string_fill(addr_string, n_ptr->addr));

        /* Flush broadcast link info associated with lost node */
        if (n_ptr->bclink.recv_permitted) {
                kfree_skb_list(n_ptr->bclink.deferred_head);
                n_ptr->bclink.deferred_size = 0;

                if (n_ptr->bclink.reasm_head) {
                        kfree_skb(n_ptr->bclink.reasm_head);
                        n_ptr->bclink.reasm_head = NULL;
                        n_ptr->bclink.reasm_tail = NULL;
                }

                tipc_bclink_remove_node(n_ptr->addr);
                tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);

                n_ptr->bclink.recv_permitted = false;
        }

        /* Abort link changeover */
        for (i = 0; i < MAX_BEARERS; i++) {
                struct tipc_link *l_ptr = n_ptr->links[i];
                if (!l_ptr)
                        continue;
                l_ptr->reset_checkpoint = l_ptr->next_in_no;
                l_ptr->exp_msg_count = 0;
                tipc_link_reset_fragments(l_ptr);
        }

        /* Notify subscribers */
        tipc_nodesub_notify(n_ptr);

        /* Prevent re-contact with node until cleanup is done */
        n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
        tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
}

/*
 * tipc_node_get_nodes - return a TLV-encoded list of nodes within a domain
 */
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
        u32 domain;
        struct sk_buff *buf;
        struct tipc_node *n_ptr;
        struct tipc_node_info node_info;
        u32 payload_size;

        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

        domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
        if (!tipc_addr_domain_valid(domain))
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network address)");

        read_lock_bh(&tipc_net_lock);
        if (!tipc_num_nodes) {
                read_unlock_bh(&tipc_net_lock);
                return tipc_cfg_reply_none();
        }

        /* For now, get space for all other nodes */
        payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
        if (payload_size > 32768u) {
                read_unlock_bh(&tipc_net_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (too many nodes)");
        }
        buf = tipc_cfg_reply_alloc(payload_size);
        if (!buf) {
                read_unlock_bh(&tipc_net_lock);
                return NULL;
        }

        /* Add TLVs for all nodes in scope */
        list_for_each_entry(n_ptr, &tipc_node_list, list) {
                if (!tipc_in_scope(domain, n_ptr->addr))
                        continue;
                node_info.addr = htonl(n_ptr->addr);
                node_info.up = htonl(tipc_node_is_up(n_ptr));
                tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
                                    &node_info, sizeof(node_info));
        }

        read_unlock_bh(&tipc_net_lock);
        return buf;
}

/*
 * tipc_node_get_links - return a TLV-encoded list of links within a domain
 */
struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
{
        u32 domain;
        struct sk_buff *buf;
        struct tipc_node *n_ptr;
        struct tipc_link_info link_info;
        u32 payload_size;

        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

        domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
        if (!tipc_addr_domain_valid(domain))
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network address)");

        if (!tipc_own_addr)
                return tipc_cfg_reply_none();

        read_lock_bh(&tipc_net_lock);

        /* Get space for all unicast links + broadcast link */
        payload_size = TLV_SPACE(sizeof(link_info)) *
                (atomic_read(&tipc_num_links) + 1);
        if (payload_size > 32768u) {
                read_unlock_bh(&tipc_net_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (too many links)");
        }
        buf = tipc_cfg_reply_alloc(payload_size);
        if (!buf) {
                read_unlock_bh(&tipc_net_lock);
                return NULL;
        }

        /* Add TLV for broadcast link */
        link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
        link_info.up = htonl(1);
        strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
        tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));

        /* Add TLVs for any other links in scope */
        list_for_each_entry(n_ptr, &tipc_node_list, list) {
                u32 i;

                if (!tipc_in_scope(domain, n_ptr->addr))
                        continue;
                tipc_node_lock(n_ptr);
                for (i = 0; i < MAX_BEARERS; i++) {
                        if (!n_ptr->links[i])
                                continue;
                        link_info.dest = htonl(n_ptr->addr);
                        link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
                        strcpy(link_info.str, n_ptr->links[i]->name);
                        tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
                                            &link_info, sizeof(link_info));
                }
                tipc_node_unlock(n_ptr);
        }

        read_unlock_bh(&tipc_net_lock);
        return buf;
}