/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, 2014-2019, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * Copyright (c) 2020-2021, Red Hat Inc
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "name_distr.h"

int sysctl_tipc_named_timeout __read_mostly = 2000;

/**
 * publ_to_item - add publication info to a publication message
 * @i: location of item in the message
 * @p: publication info
 */
static void publ_to_item(struct distr_item *i, struct publication *p)
{
        i->type = htonl(p->sr.type);
        i->lower = htonl(p->sr.lower);
        i->upper = htonl(p->sr.upper);
        i->port = htonl(p->sk.ref);
        i->key = htonl(p->key);
}

/**
 * named_prepare_buf - allocate & initialize a publication message
 * @net: the associated network namespace
 * @type: message type
 * @size: payload size
 * @dest: destination node
 *
 * The buffer returned is of size INT_H_SIZE + payload size
 */
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
                                         u32 dest)
{
        struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
        u32 self = tipc_own_addr(net);
        struct tipc_msg *msg;

        if (buf != NULL) {
                msg = buf_msg(buf);
                tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
                              type, INT_H_SIZE, dest);
                msg_set_size(msg, INT_H_SIZE + size);
        }
        return buf;
}

/**
 * tipc_named_publish - tell other nodes about a new publication by this node
 * @net: the associated network namespace
 * @p: the new publication
 */
struct sk_buff *tipc_named_publish(struct net *net, struct publication *p)
{
        struct name_table *nt = tipc_name_table(net);
        struct distr_item *item;
        struct sk_buff *skb;

        if (p->scope == TIPC_NODE_SCOPE) {
                list_add_tail_rcu(&p->binding_node, &nt->node_scope);
                return NULL;
        }
        write_lock_bh(&nt->cluster_scope_lock);
        list_add_tail(&p->binding_node, &nt->cluster_scope);
        write_unlock_bh(&nt->cluster_scope_lock);
        skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
        if (!skb) {
                pr_warn("Publication distribution failure\n");
                return NULL;
        }
        msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
        msg_set_non_legacy(buf_msg(skb));
        item = (struct distr_item *)msg_data(buf_msg(skb));
        publ_to_item(item, p);
        return skb;
}

/**
 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
 * @net: the associated network namespace
 * @p: the withdrawn publication
 */
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *p)
{
        struct name_table *nt = tipc_name_table(net);
        struct distr_item *item;
        struct sk_buff *skb;

        write_lock_bh(&nt->cluster_scope_lock);
        list_del(&p->binding_node);
        write_unlock_bh(&nt->cluster_scope_lock);
        if (p->scope == TIPC_NODE_SCOPE)
                return NULL;

        skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
        if (!skb) {
                pr_warn("Withdrawal distribution failure\n");
                return NULL;
        }
        msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
        msg_set_non_legacy(buf_msg(skb));
        item = (struct distr_item *)msg_data(buf_msg(skb));
        publ_to_item(item, p);
        return skb;
}
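/*
 * Illustrative sizing note for the bulk distribution below: with, say, a
 * 1500-byte bearer MTU, the 40-byte internal header (INT_H_SIZE) and
 * 20-byte items (ITEM_SIZE, one struct distr_item), named_distribute()
 * rounds the payload down to a whole number of items:
 *
 *   msg_dsz = ((1500 - 40) / 20) * 20 = 1460 bytes, i.e. 73 items per
 *   bulk message, so no distr_item is ever split across two buffers.
 *
 * The actual numbers depend on the MTU reported by tipc_node_get_mtu().
 */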
/**
 * named_distribute - prepare name info for bulk distribution to another node
 * @net: the associated network namespace
 * @list: list of messages (buffers) to be returned from this function
 * @dnode: node to be updated
 * @pls: linked list of publication items to be packed into buffer chain
 * @seqno: sequence number for this message
 */
static void named_distribute(struct net *net, struct sk_buff_head *list,
                             u32 dnode, struct list_head *pls, u16 seqno)
{
        struct publication *publ;
        struct sk_buff *skb = NULL;
        struct distr_item *item = NULL;
        u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
                       ITEM_SIZE) * ITEM_SIZE;
        u32 msg_rem = msg_dsz;
        struct tipc_msg *hdr;

        list_for_each_entry(publ, pls, binding_node) {
                /* Prepare next buffer: */
                if (!skb) {
                        skb = named_prepare_buf(net, PUBLICATION, msg_rem,
                                                dnode);
                        if (!skb) {
                                pr_warn("Bulk publication failure\n");
                                return;
                        }
                        hdr = buf_msg(skb);
                        msg_set_bc_ack_invalid(hdr, true);
                        msg_set_bulk(hdr);
                        msg_set_non_legacy(hdr);
                        item = (struct distr_item *)msg_data(hdr);
                }

                /* Pack publication into message: */
                publ_to_item(item, publ);
                item++;
                msg_rem -= ITEM_SIZE;

                /* Append full buffer to list: */
                if (!msg_rem) {
                        __skb_queue_tail(list, skb);
                        skb = NULL;
                        msg_rem = msg_dsz;
                }
        }
        if (skb) {
                hdr = buf_msg(skb);
                msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
                skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
                __skb_queue_tail(list, skb);
        }
        hdr = buf_msg(skb_peek_tail(list));
        msg_set_last_bulk(hdr);
        msg_set_named_seqno(hdr, seqno);
}

/**
 * tipc_named_node_up - tell specified node about all publications by this node
 * @net: the associated network namespace
 * @dnode: destination node
 * @capabilities: peer node's capabilities
 */
void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
{
        struct name_table *nt = tipc_name_table(net);
        struct tipc_net *tn = tipc_net(net);
        struct sk_buff_head head;
        u16 seqno;

        __skb_queue_head_init(&head);
        spin_lock_bh(&tn->nametbl_lock);
        if (!(capabilities & TIPC_NAMED_BCAST))
                nt->rc_dests++;
        seqno = nt->snd_nxt;
        spin_unlock_bh(&tn->nametbl_lock);

        read_lock_bh(&nt->cluster_scope_lock);
        named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
        tipc_node_xmit(net, &head, dnode, 0);
        read_unlock_bh(&nt->cluster_scope_lock);
}
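/*
 * The seqno snapshot taken under nametbl_lock in tipc_named_node_up() above
 * is stamped on the final bulk buffer by named_distribute() (see
 * msg_set_last_bulk()/msg_set_named_seqno()). The receiving side uses that
 * value in tipc_named_dequeue() to initialize its rcv_nxt counter before it
 * starts accepting regular, sequenced name table updates. The rc_dests
 * counter tracks peers that lack the TIPC_NAMED_BCAST capability; it is
 * decremented again in tipc_publ_notify() when such a peer goes down.
 */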
/**
 * tipc_publ_purge - remove publication associated with a failed node
 * @net: the associated network namespace
 * @p: the publication to remove
 * @addr: failed node's address
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 */
static void tipc_publ_purge(struct net *net, struct publication *p, u32 addr)
{
        struct tipc_net *tn = tipc_net(net);
        struct publication *_p;
        struct tipc_uaddr ua;

        tipc_uaddr(&ua, TIPC_SERVICE_RANGE, p->scope, p->sr.type,
                   p->sr.lower, p->sr.upper);
        spin_lock_bh(&tn->nametbl_lock);
        _p = tipc_nametbl_remove_publ(net, &ua, &p->sk, p->key);
        if (_p)
                tipc_node_unsubscribe(net, &_p->binding_node, addr);
        spin_unlock_bh(&tn->nametbl_lock);
        if (_p)
                kfree_rcu(_p, rcu);
}

void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
                      u32 addr, u16 capabilities)
{
        struct name_table *nt = tipc_name_table(net);
        struct tipc_net *tn = tipc_net(net);
        struct publication *publ, *tmp;

        list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
                tipc_publ_purge(net, publ, addr);
        spin_lock_bh(&tn->nametbl_lock);
        if (!(capabilities & TIPC_NAMED_BCAST))
                nt->rc_dests--;
        spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_update_nametbl - try to process a nametable update and notify
 * subscribers
 * @net: the associated network namespace
 * @i: location of item in the message
 * @node: node address
 * @dtype: name distributor message type
 *
 * tipc_nametbl_lock must be held.
 * Return: true if the name table was updated, otherwise false.
 */
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
                                u32 node, u32 dtype)
{
        struct publication *p = NULL;
        struct tipc_socket_addr sk;
        struct tipc_uaddr ua;
        u32 key = ntohl(i->key);

        tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE,
                   ntohl(i->type), ntohl(i->lower), ntohl(i->upper));
        sk.ref = ntohl(i->port);
        sk.node = node;

        if (dtype == PUBLICATION) {
                p = tipc_nametbl_insert_publ(net, &ua, &sk, key);
                if (p) {
                        tipc_node_subscribe(net, &p->binding_node, node);
                        return true;
                }
        } else if (dtype == WITHDRAWAL) {
                p = tipc_nametbl_remove_publ(net, &ua, &sk, key);
                if (p) {
                        tipc_node_unsubscribe(net, &p->binding_node, node);
                        kfree_rcu(p, rcu);
                        return true;
                }
                pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
                                    ua.sr.type, ua.sr.lower, node);
        } else {
                pr_warn_ratelimited("Unknown name table message received\n");
        }
        return false;
}

static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
                                          u16 *rcv_nxt, bool *open)
{
        struct sk_buff *skb, *tmp;
        struct tipc_msg *hdr;
        u16 seqno;

        spin_lock_bh(&namedq->lock);
        skb_queue_walk_safe(namedq, skb, tmp) {
                if (unlikely(skb_linearize(skb))) {
                        __skb_unlink(skb, namedq);
                        kfree_skb(skb);
                        continue;
                }
                hdr = buf_msg(skb);
                seqno = msg_named_seqno(hdr);
                if (msg_is_last_bulk(hdr)) {
                        *rcv_nxt = seqno;
                        *open = true;
                }

                if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
                        __skb_unlink(skb, namedq);
                        spin_unlock_bh(&namedq->lock);
                        return skb;
                }

                if (*open && (*rcv_nxt == seqno)) {
                        (*rcv_nxt)++;
                        __skb_unlink(skb, namedq);
                        spin_unlock_bh(&namedq->lock);
                        return skb;
                }

                if (less(seqno, *rcv_nxt)) {
                        __skb_unlink(skb, namedq);
                        kfree_skb(skb);
                        continue;
                }
        }
        spin_unlock_bh(&namedq->lock);
        return NULL;
}
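/*
 * Delivery rules applied by tipc_named_dequeue() above:
 * - bulk messages and messages from legacy peers (no sequence numbers) are
 *   returned to the caller as soon as they are found in the queue;
 * - once the last bulk message has been seen (*open is set and *rcv_nxt is
 *   initialized from its seqno), regular updates are returned strictly in
 *   sequence-number order;
 * - messages whose seqno lies behind *rcv_nxt are treated as duplicates
 *   and dropped.
 */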
/**
 * tipc_named_rcv - process name table update messages sent by another node
 * @net: the associated network namespace
 * @namedq: queue to receive from
 * @rcv_nxt: next expected sequence number from this peer
 * @open: set true once the last bulk message from the peer has been received
 */
void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
                    u16 *rcv_nxt, bool *open)
{
        struct tipc_net *tn = tipc_net(net);
        struct distr_item *item;
        struct tipc_msg *hdr;
        struct sk_buff *skb;
        u32 count, node;

        spin_lock_bh(&tn->nametbl_lock);
        while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
                hdr = buf_msg(skb);
                node = msg_orignode(hdr);
                item = (struct distr_item *)msg_data(hdr);
                count = msg_data_sz(hdr) / ITEM_SIZE;
                while (count--) {
                        tipc_update_nametbl(net, item, node, msg_type(hdr));
                        item++;
                }
                kfree_skb(skb);
        }
        spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_named_reinit - re-initialize local publications
 * @net: the associated network namespace
 *
 * This routine is called whenever TIPC networking is enabled.
 * All name table entries published by this node are updated to reflect
 * the node's new network address.
 */
void tipc_named_reinit(struct net *net)
{
        struct name_table *nt = tipc_name_table(net);
        struct tipc_net *tn = tipc_net(net);
        struct publication *p;
        u32 self = tipc_own_addr(net);

        spin_lock_bh(&tn->nametbl_lock);

        list_for_each_entry_rcu(p, &nt->node_scope, binding_node)
                p->sk.node = self;
        list_for_each_entry_rcu(p, &nt->cluster_scope, binding_node)
                p->sk.node = self;
        nt->rc_dests = 0;
        spin_unlock_bh(&tn->nametbl_lock);
}