xref: /openbmc/linux/net/tipc/name_distr.c (revision 81d67439)
/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "name_distr.h"

#define ITEM_SIZE sizeof(struct distr_item)

/**
 * struct distr_item - publication info distributed to other nodes
 * @type: name sequence type
 * @lower: name sequence lower bound
 * @upper: name sequence upper bound
 * @ref: publishing port reference
 * @key: publication key
 *
 * ===> All fields are stored in network byte order. <===
 *
 * The first three fields identify the name (or name sequence) being
 * published. The reference field uniquely identifies the port that
 * published the name sequence, and the key field uniquely identifies the
 * publication in the event that a port has multiple publications of the
 * same name sequence.
 *
 * Note: There is no field that identifies the publishing node because it is
 * the same for all items contained within a publication message.
 */

struct distr_item {
	__be32 type;
	__be32 lower;
	__be32 upper;
	__be32 ref;
	__be32 key;
};
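
/*
 * Each distr_item consists of five 32-bit fields, so ITEM_SIZE evaluates
 * to 20 bytes and a name distribution message always carries a whole
 * number of items.
 */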

/**
 * List of externally visible publications by this node --
 * that is, all publications having scope > TIPC_NODE_SCOPE.
 */

static LIST_HEAD(publ_root);
static u32 publ_cnt;

/**
 * publ_to_item - add publication info to a publication message
 */

static void publ_to_item(struct distr_item *i, struct publication *p)
{
	i->type = htonl(p->type);
	i->lower = htonl(p->lower);
	i->upper = htonl(p->upper);
	i->ref = htonl(p->ref);
	i->key = htonl(p->key);
}

/**
 * named_prepare_buf - allocate & initialize a publication message
 */

static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
{
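	/* Allocate room for an internal (long) message header plus item data */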
	struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
	struct tipc_msg *msg;

	if (buf != NULL) {
		msg = buf_msg(buf);
		tipc_msg_init(msg, NAME_DISTRIBUTOR, type, INT_H_SIZE, dest);
		msg_set_size(msg, INT_H_SIZE + size);
	}
	return buf;
}

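/**
 * named_cluster_distribute - send name table message to all other cluster nodes
 *
 * Sends one copy of the message to every node that currently has at least
 * one active link, then consumes the original message buffer.
 */
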
static void named_cluster_distribute(struct sk_buff *buf)
{
	struct sk_buff *buf_copy;
	struct tipc_node *n_ptr;

	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		if (tipc_node_active_links(n_ptr)) {
			buf_copy = skb_copy(buf, GFP_ATOMIC);
			if (!buf_copy)
				break;
			msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
			tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
		}
	}

	buf_discard(buf);
}

/**
 * tipc_named_publish - tell other nodes about a new publication by this node
 */

void tipc_named_publish(struct publication *publ)
{
	struct sk_buff *buf;
	struct distr_item *item;

	list_add_tail(&publ->local_list, &publ_root);
	publ_cnt++;

	buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
	if (!buf) {
		warn("Publication distribution failure\n");
		return;
	}

	item = (struct distr_item *)msg_data(buf_msg(buf));
	publ_to_item(item, publ);
	named_cluster_distribute(buf);
}

/**
 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
 */

void tipc_named_withdraw(struct publication *publ)
{
	struct sk_buff *buf;
	struct distr_item *item;

	list_del(&publ->local_list);
	publ_cnt--;

	buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
	if (!buf) {
		warn("Withdrawal distribution failure\n");
		return;
	}

	item = (struct distr_item *)msg_data(buf_msg(buf));
	publ_to_item(item, publ);
	named_cluster_distribute(buf);
}

/**
 * tipc_named_node_up - tell specified node about all publications by this node
 */

void tipc_named_node_up(unsigned long node)
{
	struct publication *publ;
	struct distr_item *item = NULL;
	struct sk_buff *buf = NULL;
	u32 left = 0;
	u32 rest;
	u32 max_item_buf;

	read_lock_bh(&tipc_nametbl_lock);
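	/*
	 * Round the maximum user message size down to a whole number of
	 * items, then compute the total number of item bytes to be sent.
	 */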
	max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
	max_item_buf *= ITEM_SIZE;
	rest = publ_cnt * ITEM_SIZE;

	list_for_each_entry(publ, &publ_root, local_list) {
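		/* Start a new bulk message if one is not already in progress */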
		if (!buf) {
			left = (rest <= max_item_buf) ? rest : max_item_buf;
			rest -= left;
			buf = named_prepare_buf(PUBLICATION, left, node);
			if (!buf) {
				warn("Bulk publication distribution failure\n");
				goto exit;
			}
			item = (struct distr_item *)msg_data(buf_msg(buf));
		}
		publ_to_item(item, publ);
		item++;
		left -= ITEM_SIZE;
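		/* Send the message once all items allotted to it have been added */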
		if (!left) {
			msg_set_link_selector(buf_msg(buf), node);
			tipc_link_send(buf, node, node);
			buf = NULL;
		}
	}
exit:
	read_unlock_bh(&tipc_nametbl_lock);
}

/**
 * named_purge_publ - remove publication associated with a failed node
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 * In rare cases the link may have come back up again when this
 * function is called, and we have two items representing the same
 * publication. Nudge this item's key to distinguish it from the other.
 */

static void named_purge_publ(struct publication *publ)
{
	struct publication *p;

	write_lock_bh(&tipc_nametbl_lock);
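	/*
	 * Nudge the key in place so this entry can no longer match a
	 * re-published copy of the same publication; the removal below
	 * then affects only this stale entry.
	 */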
	publ->key += 1222345;
	p = tipc_nametbl_remove_publ(publ->type, publ->lower,
				     publ->node, publ->ref, publ->key);
	if (p)
		tipc_nodesub_unsubscribe(&p->subscr);
	write_unlock_bh(&tipc_nametbl_lock);

	if (p != publ) {
		err("Unable to remove publication from failed node\n"
		    "(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
		    publ->type, publ->lower, publ->node, publ->ref, publ->key);
	}

	kfree(p);
}

/**
 * tipc_named_recv - process name table update message sent by another node
 */

void tipc_named_recv(struct sk_buff *buf)
{
	struct publication *publ;
	struct tipc_msg *msg = buf_msg(buf);
	struct distr_item *item = (struct distr_item *)msg_data(msg);
	u32 count = msg_data_sz(msg) / ITEM_SIZE;

	write_lock_bh(&tipc_nametbl_lock);
	while (count--) {
		if (msg_type(msg) == PUBLICATION) {
			publ = tipc_nametbl_insert_publ(ntohl(item->type),
							ntohl(item->lower),
							ntohl(item->upper),
							TIPC_CLUSTER_SCOPE,
							msg_orignode(msg),
							ntohl(item->ref),
							ntohl(item->key));
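			/*
			 * Have the publication purged automatically if the
			 * publishing node goes down.
			 */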
			if (publ) {
				tipc_nodesub_subscribe(&publ->subscr,
						       msg_orignode(msg),
						       publ,
						       (net_ev_handler)
						       named_purge_publ);
			}
		} else if (msg_type(msg) == WITHDRAWAL) {
			publ = tipc_nametbl_remove_publ(ntohl(item->type),
							ntohl(item->lower),
							msg_orignode(msg),
							ntohl(item->ref),
							ntohl(item->key));

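			/*
			 * Cancel the node subscription taken out when the
			 * publication was added, then free the publication.
			 */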
			if (publ) {
				tipc_nodesub_unsubscribe(&publ->subscr);
				kfree(publ);
			} else {
				err("Unable to remove publication by node 0x%x\n"
				    "(type=%u, lower=%u, ref=%u, key=%u)\n",
				    msg_orignode(msg),
				    ntohl(item->type), ntohl(item->lower),
				    ntohl(item->ref), ntohl(item->key));
			}
		} else {
			warn("Unrecognized name table message received\n");
		}
		item++;
	}
	write_unlock_bh(&tipc_nametbl_lock);
	buf_discard(buf);
}

/**
 * tipc_named_reinit - re-initialize local publication list
 *
 * This routine is called whenever TIPC networking is (re)enabled.
 * All existing publications by this node that have "cluster" or "zone" scope
 * are updated to reflect the node's current network address.
 * (If the node's address is unchanged, the update loop terminates immediately.)
 */

void tipc_named_reinit(void)
{
	struct publication *publ;

	write_lock_bh(&tipc_nametbl_lock);
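	/*
	 * Publications are appended to publ_root, so the first entry that
	 * already bears the current address implies the rest do too.
	 */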
	list_for_each_entry(publ, &publ_root, local_list) {
		if (publ->node == tipc_own_addr)
			break;
		publ->node = tipc_own_addr;
	}
	write_unlock_bh(&tipc_nametbl_lock);
}
320