/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "config.h"
#include "node.h"
#include "name_distr.h"

static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);

static DEFINE_SPINLOCK(node_create_lock);

static struct hlist_head node_htable[NODE_HTABLE_SIZE];
LIST_HEAD(tipc_node_list);
static u32 tipc_num_nodes;

static atomic_t tipc_num_links = ATOMIC_INIT(0);
u32 tipc_own_tag;

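/*
 * Node objects are kept in a hash table keyed on network address, plus a
 * single list kept sorted by address that the config queries below walk.
 * A minimal sketch of the hash helper (the real tipc_hashfn() is defined
 * in node.h; the exact form shown here is an assumption, not a quote):
 *
 *	static inline unsigned int tipc_hashfn(u32 addr)
 *	{
 *		return addr & (NODE_HTABLE_SIZE - 1);
 *	}
 */
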
/**
 * tipc_node_find - locate specified node object, if it exists
 */

struct tipc_node *tipc_node_find(u32 addr)
{
	struct tipc_node *node;
	struct hlist_node *pos;

	if (unlikely(!in_own_cluster(addr)))
		return NULL;

	hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
		if (node->addr == addr)
			return node;
	}
	return NULL;
}
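
/*
 * Callers are expected to hold tipc_net_lock while looking a node up and
 * using the result, as node_cleanup_finished() below does; a sketch of
 * the pattern:
 *
 *	read_lock_bh(&tipc_net_lock);
 *	n_ptr = tipc_node_find(addr);
 *	if (n_ptr) {
 *		tipc_node_lock(n_ptr);
 *		... use n_ptr ...
 *		tipc_node_unlock(n_ptr);
 *	}
 *	read_unlock_bh(&tipc_net_lock);
 */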

/**
 * tipc_node_create - create neighboring node
 *
 * Currently, this routine is called by neighbor discovery code, which holds
 * net_lock for reading only.  We must take node_create_lock to ensure a node
 * isn't created twice if two different bearers discover the node at the same
 * time.  (It would be preferable to switch to holding net_lock in write mode,
 * but this is a non-trivial change.)
 */

struct tipc_node *tipc_node_create(u32 addr)
{
	struct tipc_node *n_ptr, *temp_node;

	spin_lock_bh(&node_create_lock);

	n_ptr = tipc_node_find(addr);
	if (n_ptr) {
		spin_unlock_bh(&node_create_lock);
		return n_ptr;
	}

	/* GFP_ATOMIC: allocating with node_create_lock held, in BH context */
	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
	if (!n_ptr) {
		spin_unlock_bh(&node_create_lock);
		warn("Node creation failed, no memory\n");
		return NULL;
	}

	n_ptr->addr = addr;
	spin_lock_init(&n_ptr->lock);
	INIT_HLIST_NODE(&n_ptr->hash);
	INIT_LIST_HEAD(&n_ptr->list);
	INIT_LIST_HEAD(&n_ptr->nsub);

	hlist_add_head(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);

	/* Insert node, keeping tipc_node_list sorted by ascending address */
	list_for_each_entry(temp_node, &tipc_node_list, list) {
		if (n_ptr->addr < temp_node->addr)
			break;
	}
	list_add_tail(&n_ptr->list, &temp_node->list);

	tipc_num_nodes++;

	spin_unlock_bh(&node_create_lock);
	return n_ptr;
}
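
/*
 * The node_create_lock serialization above matters when two bearers
 * discover the same peer at (nearly) the same time, since both discovery
 * handlers hold tipc_net_lock for reading only.  Roughly:
 *
 *	CPU0 (bearer A)                  CPU1 (bearer B)
 *	tipc_node_create(addr)           tipc_node_create(addr)
 *	  lock; find -> NULL; alloc        spins on node_create_lock
 *	  link node in; unlock             lock; find -> existing node
 *	                                   unlock; return same node
 */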

void tipc_node_delete(struct tipc_node *n_ptr)
{
	list_del(&n_ptr->list);
	hlist_del(&n_ptr->hash);
	kfree(n_ptr);

	tipc_num_nodes--;
}

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */

void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
{
	struct link **active = &n_ptr->active_links[0];

	n_ptr->working_links++;

	info("Established link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	if (!active[0]) {
		/* First working link: node comes up */
		active[0] = active[1] = l_ptr;
		node_established_contact(n_ptr);
		return;
	}
	if (l_ptr->priority < active[0]->priority) {
		info("New link <%s> becomes standby\n", l_ptr->name);
		return;
	}
	tipc_link_send_duplicate(active[0], l_ptr);
	if (l_ptr->priority == active[0]->priority) {
		/* Equal priority: share load with the current active link */
		active[0] = l_ptr;
		return;
	}
	/* Higher priority: new link takes over both active slots */
	info("Old link <%s> becomes standby\n", active[0]->name);
	if (active[1] != active[0])
		info("Old link <%s> becomes standby\n", active[1]->name);
	active[0] = active[1] = l_ptr;
}
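
/*
 * Worked example of the rules above (priorities are illustrative):
 *
 *	link L1 up, prio 10  -> active = {L1, L1}, node comes up
 *	link L2 up, prio  5  -> L2 stays standby
 *	link L3 up, prio 10  -> load sharing: active = {L3, L1}
 *	link L4 up, prio 20  -> takeover: active = {L4, L4}
 */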

/**
 * node_select_active_links - select which links should be active
 */

static void node_select_active_links(struct tipc_node *n_ptr)
{
	struct link **active = &n_ptr->active_links[0];
	u32 i;
	u32 highest_prio = 0;

	active[0] = active[1] = NULL;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];

		if (!l_ptr || !tipc_link_is_up(l_ptr) ||
		    (l_ptr->priority < highest_prio))
			continue;

		if (l_ptr->priority > highest_prio) {
			highest_prio = l_ptr->priority;
			active[0] = active[1] = l_ptr;
		} else {
			active[1] = l_ptr;
		}
	}
}
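
/*
 * After the scan, active[0] is the first up link found at the highest
 * priority and active[1] the last such link.  For example, with up links
 * of priorities {5, 10, 10} on bearers 0, 1 and 2, the prio-5 link is
 * skipped and the result is active = {bearer 1 link, bearer 2 link}.
 */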

/**
 * tipc_node_link_down - handle loss of link
 */

void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
{
	struct link **active;

	n_ptr->working_links--;

	if (!tipc_link_is_active(l_ptr)) {
		info("Lost standby link <%s> on network plane %c\n",
		     l_ptr->name, l_ptr->b_ptr->net_plane);
		return;
	}
	info("Lost link <%s> on network plane %c\n",
		l_ptr->name, l_ptr->b_ptr->net_plane);

	/* Fill the lost link's active slot(s) from the surviving one;
	 * if the lost link occupied both slots, rescan all links */
	active = &n_ptr->active_links[0];
	if (active[0] == l_ptr)
		active[0] = active[1];
	if (active[1] == l_ptr)
		active[1] = active[0];
	if (active[0] == l_ptr)
		node_select_active_links(n_ptr);
	if (tipc_node_is_up(n_ptr))
		tipc_link_changeover(l_ptr);
	else
		node_lost_contact(n_ptr);
}

int tipc_node_active_links(struct tipc_node *n_ptr)
{
	return n_ptr->active_links[0] != NULL;
}

int tipc_node_redundant_links(struct tipc_node *n_ptr)
{
	return n_ptr->working_links > 1;
}

int tipc_node_is_up(struct tipc_node *n_ptr)
{
	return tipc_node_active_links(n_ptr);
}

void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr)
{
	/* One link slot per bearer: slots are indexed by bearer identity */
	n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
	atomic_inc(&tipc_num_links);
	n_ptr->link_cnt++;
}

void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
{
	n_ptr->links[l_ptr->b_ptr->identity] = NULL;
	atomic_dec(&tipc_num_links);
	n_ptr->link_cnt--;
}

/*
 * Routing table management - five cases to handle:
 *
 * 1: A link towards a zone/cluster external node comes up.
 *    => Send a multicast message updating routing tables of all
 *    system nodes within own cluster that the new destination
 *    can be reached via this node.
 *    (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 2: A link towards a slave node comes up.
 *    => Send a multicast message updating routing tables of all
 *    system nodes within own cluster that the new destination
 *    can be reached via this node.
 *    (node.establishedContact()=>cluster.multicastNewRoute())
 *    => Send a message to the slave node about existence
 *    of all system nodes within cluster:
 *    (node.establishedContact()=>cluster.sendLocalRoutes())
 *
 * 3: A new cluster local system node becomes available.
 *    => Send message(s) to this particular node containing
 *    information about all cluster external and slave
 *    nodes which can be reached via this node.
 *    (node.establishedContact()==>network.sendExternalRoutes())
 *    (node.establishedContact()==>network.sendSlaveRoutes())
 *    => Send messages to all directly connected slave nodes
 *    containing information about the existence of the new node
 *    (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 4: The link towards a zone/cluster external node or slave
 *    node goes down.
 *    => Send a multicast message updating routing tables of all
 *    nodes within cluster that the destination can no longer
 *    be reached via this node.
 *    (node.lostAllLinks()=>cluster.bcastLostRoute())
 *
 * 5: A cluster local system node becomes unavailable.
 *    => Remove all references to this node from the local
 *    routing tables. Note: This is a completely node
 *    local operation.
 *    (node.lostAllLinks()=>network.removeAsRouter())
 *    => Send messages to all directly connected slave nodes
 *    containing information about loss of the node
 *    (node.lostAllLinks()=>cluster.multicastLostRoute())
 *
 */

static void node_established_contact(struct tipc_node *n_ptr)
{
	tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);

	/* Synchronize broadcast acks */
	n_ptr->bclink.acked = tipc_bclink_get_last_sent();

	if (n_ptr->bclink.supported) {
		tipc_nmap_add(&tipc_bcast_nmap, n_ptr->addr);
		if (n_ptr->addr < tipc_own_addr)
			tipc_own_tag++;
	}
}
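
/*
 * tipc_own_tag thus tracks how many broadcast-capable peers have a lower
 * address than this node; the broadcast link code (bcast.c) appears to
 * use it to stagger NACK traffic among cluster members, so it is adjusted
 * here and in node_lost_contact() as such peers come and go.
 */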

static void node_cleanup_finished(unsigned long node_addr)
{
	struct tipc_node *n_ptr;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(node_addr);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		n_ptr->cleanup_required = 0;
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
}

static void node_lost_contact(struct tipc_node *n_ptr)
{
	char addr_string[16];
	u32 i;

	/* Clean up broadcast reception remains */
	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
	while (n_ptr->bclink.deferred_head) {
		struct sk_buff *buf = n_ptr->bclink.deferred_head;
		n_ptr->bclink.deferred_head = buf->next;
		buf_discard(buf);
	}
	if (n_ptr->bclink.defragm) {
		buf_discard(n_ptr->bclink.defragm);
		n_ptr->bclink.defragm = NULL;
	}

	if (n_ptr->bclink.supported) {
		tipc_bclink_acknowledge(n_ptr,
					mod(n_ptr->bclink.acked + 10000));
		tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
		if (n_ptr->addr < tipc_own_addr)
			tipc_own_tag--;
	}

	info("Lost contact with %s\n",
	     tipc_addr_string_fill(addr_string, n_ptr->addr));

	/* Abort link changeover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];
		if (!l_ptr)
			continue;
		l_ptr->reset_checkpoint = l_ptr->next_in_no;
		l_ptr->exp_msg_count = 0;
		tipc_link_reset_fragments(l_ptr);
	}

	/* Notify subscribers */
	tipc_nodesub_notify(n_ptr);

	/* Prevent re-contact with node until all cleanup is done */

	n_ptr->cleanup_required = 1;
	tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr);
}
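
/*
 * The cleanup handshake above relies on tipc_k_signal() (handler.c),
 * which defers the requested call to tasklet context.  Sketch of the
 * sequence:
 *
 *	node_lost_contact()        sets n_ptr->cleanup_required and
 *	                           queues node_cleanup_finished()
 *	(deferred work runs)       ...
 *	node_cleanup_finished()    clears the flag, allowing contact
 *	                           with the node to be re-established
 */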

struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_node_info node_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	read_lock_bh(&tipc_net_lock);
	if (!tipc_num_nodes) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_none();
	}

	/* For now, get space for all other nodes */

	payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
	if (payload_size > 32768u) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many nodes)");
	}
	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf) {
		read_unlock_bh(&tipc_net_lock);
		return NULL;
	}

	/* Add TLVs for all nodes in scope */

	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		node_info.addr = htonl(n_ptr->addr);
		node_info.up = htonl(tipc_node_is_up(n_ptr));
		tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
				    &node_info, sizeof(node_info));
	}

	read_unlock_bh(&tipc_net_lock);
	return buf;
}
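
/*
 * The reply built above is a flat sequence of TLVs, one per node in the
 * requested domain (field layout simplified for illustration):
 *
 *	[TIPC_TLV_NODE_INFO: addr=<1.1.1>, up=1]
 *	[TIPC_TLV_NODE_INFO: addr=<1.1.2>, up=0]
 *	...
 *
 * tipc_node_get_links() below produces the same shape with
 * TIPC_TLV_LINK_INFO entries, led by one entry for the broadcast link.
 */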

struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_link_info link_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	if (tipc_mode != TIPC_NET_MODE)
		return tipc_cfg_reply_none();

	read_lock_bh(&tipc_net_lock);

	/* Get space for all unicast links + broadcast link */

	payload_size = TLV_SPACE(sizeof(link_info)) *
		(atomic_read(&tipc_num_links) + 1);
	if (payload_size > 32768u) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many links)");
	}
	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf) {
		read_unlock_bh(&tipc_net_lock);
		return NULL;
	}

	/* Add TLV for broadcast link */

	link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
	link_info.up = htonl(1);
	strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));

	/* Add TLVs for any other links in scope */

	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		u32 i;

		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			if (!n_ptr->links[i])
				continue;
			link_info.dest = htonl(n_ptr->addr);
			link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
			strcpy(link_info.str, n_ptr->links[i]->name);
			tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
					    &link_info, sizeof(link_info));
		}
		tipc_node_unlock(n_ptr);
	}

	read_unlock_bh(&tipc_net_lock);
	return buf;
}
493