1 /* 2 * net/tipc/node.c: TIPC node management routines 3 * 4 * Copyright (c) 2000-2006, 2012-2015, Ericsson AB 5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
35 */ 36 37 #include "core.h" 38 #include "link.h" 39 #include "node.h" 40 #include "name_distr.h" 41 #include "socket.h" 42 #include "bcast.h" 43 #include "discover.h" 44 45 #define INVALID_NODE_SIG 0x10000 46 47 /* Flags used to take different actions according to flag type 48 * TIPC_NOTIFY_NODE_DOWN: notify node is down 49 * TIPC_NOTIFY_NODE_UP: notify node is up 50 * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type 51 */ 52 enum { 53 TIPC_NOTIFY_NODE_DOWN = (1 << 3), 54 TIPC_NOTIFY_NODE_UP = (1 << 4), 55 TIPC_NOTIFY_LINK_UP = (1 << 6), 56 TIPC_NOTIFY_LINK_DOWN = (1 << 7) 57 }; 58 59 struct tipc_link_entry { 60 struct tipc_link *link; 61 spinlock_t lock; /* per link */ 62 u32 mtu; 63 struct sk_buff_head inputq; 64 struct tipc_media_addr maddr; 65 }; 66 67 struct tipc_bclink_entry { 68 struct tipc_link *link; 69 struct sk_buff_head inputq1; 70 struct sk_buff_head arrvq; 71 struct sk_buff_head inputq2; 72 struct sk_buff_head namedq; 73 }; 74 75 /** 76 * struct tipc_node - TIPC node structure 77 * @addr: network address of node 78 * @ref: reference counter to node object 79 * @lock: rwlock governing access to structure 80 * @net: the applicable net namespace 81 * @hash: links to adjacent nodes in unsorted hash chain 82 * @inputq: pointer to input queue containing messages for msg event 83 * @namedq: pointer to name table input queue with name table messages 84 * @active_links: bearer ids of active links, used as index into links[] array 85 * @links: array containing references to all links to node 86 * @action_flags: bit mask of different types of node actions 87 * @state: connectivity state vs peer node 88 * @sync_point: sequence number where synch/failover is finished 89 * @list: links to adjacent nodes in sorted list of cluster's nodes 90 * @working_links: number of working links to node (both active and standby) 91 * @link_cnt: number of links to node 92 * @capabilities: bitmap, indicating peer node's functional capabilities 93 * @signature: node 
instance identifier 94 * @link_id: local and remote bearer ids of changing link, if any 95 * @publ_list: list of publications 96 * @rcu: rcu struct for tipc_node 97 */ 98 struct tipc_node { 99 u32 addr; 100 struct kref kref; 101 rwlock_t lock; 102 struct net *net; 103 struct hlist_node hash; 104 int active_links[2]; 105 struct tipc_link_entry links[MAX_BEARERS]; 106 struct tipc_bclink_entry bc_entry; 107 int action_flags; 108 struct list_head list; 109 int state; 110 u16 sync_point; 111 int link_cnt; 112 u16 working_links; 113 u16 capabilities; 114 u32 signature; 115 u32 link_id; 116 struct list_head publ_list; 117 struct list_head conn_sks; 118 unsigned long keepalive_intv; 119 struct timer_list timer; 120 struct rcu_head rcu; 121 }; 122 123 /* Node FSM states and events: 124 */ 125 enum { 126 SELF_DOWN_PEER_DOWN = 0xdd, 127 SELF_UP_PEER_UP = 0xaa, 128 SELF_DOWN_PEER_LEAVING = 0xd1, 129 SELF_UP_PEER_COMING = 0xac, 130 SELF_COMING_PEER_UP = 0xca, 131 SELF_LEAVING_PEER_DOWN = 0x1d, 132 NODE_FAILINGOVER = 0xf0, 133 NODE_SYNCHING = 0xcc 134 }; 135 136 enum { 137 SELF_ESTABL_CONTACT_EVT = 0xece, 138 SELF_LOST_CONTACT_EVT = 0x1ce, 139 PEER_ESTABL_CONTACT_EVT = 0x9ece, 140 PEER_LOST_CONTACT_EVT = 0x91ce, 141 NODE_FAILOVER_BEGIN_EVT = 0xfbe, 142 NODE_FAILOVER_END_EVT = 0xfee, 143 NODE_SYNCH_BEGIN_EVT = 0xcbe, 144 NODE_SYNCH_END_EVT = 0xcee 145 }; 146 147 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, 148 struct sk_buff_head *xmitq, 149 struct tipc_media_addr **maddr); 150 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, 151 bool delete); 152 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq); 153 static void tipc_node_delete(struct tipc_node *node); 154 static void tipc_node_timeout(unsigned long data); 155 static void tipc_node_fsm_evt(struct tipc_node *n, int evt); 156 static struct tipc_node *tipc_node_find(struct net *net, u32 addr); 157 static void tipc_node_put(struct tipc_node *node); 158 
static bool tipc_node_is_up(struct tipc_node *n); 159 160 struct tipc_sock_conn { 161 u32 port; 162 u32 peer_port; 163 u32 peer_node; 164 struct list_head list; 165 }; 166 167 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { 168 [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC }, 169 [TIPC_NLA_LINK_NAME] = { 170 .type = NLA_STRING, 171 .len = TIPC_MAX_LINK_NAME 172 }, 173 [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 }, 174 [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG }, 175 [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG }, 176 [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG }, 177 [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED }, 178 [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED }, 179 [TIPC_NLA_LINK_RX] = { .type = NLA_U32 }, 180 [TIPC_NLA_LINK_TX] = { .type = NLA_U32 } 181 }; 182 183 static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = { 184 [TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC }, 185 [TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 }, 186 [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG } 187 }; 188 189 static struct tipc_link *node_active_link(struct tipc_node *n, int sel) 190 { 191 int bearer_id = n->active_links[sel & 1]; 192 193 if (unlikely(bearer_id == INVALID_BEARER_ID)) 194 return NULL; 195 196 return n->links[bearer_id].link; 197 } 198 199 int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel) 200 { 201 struct tipc_node *n; 202 int bearer_id; 203 unsigned int mtu = MAX_MSG_SIZE; 204 205 n = tipc_node_find(net, addr); 206 if (unlikely(!n)) 207 return mtu; 208 209 bearer_id = n->active_links[sel & 1]; 210 if (likely(bearer_id != INVALID_BEARER_ID)) 211 mtu = n->links[bearer_id].mtu; 212 tipc_node_put(n); 213 return mtu; 214 } 215 /* 216 * A trivial power-of-two bitmask technique is used for speed, since this 217 * operation is done for every incoming TIPC packet. The number of hash table 218 * entries has been chosen so that no hash chain exceeds 8 nodes and will 219 * usually be much smaller (typically only a single node). 
220 */ 221 static unsigned int tipc_hashfn(u32 addr) 222 { 223 return addr & (NODE_HTABLE_SIZE - 1); 224 } 225 226 static void tipc_node_kref_release(struct kref *kref) 227 { 228 struct tipc_node *n = container_of(kref, struct tipc_node, kref); 229 230 kfree(n->bc_entry.link); 231 kfree_rcu(n, rcu); 232 } 233 234 static void tipc_node_put(struct tipc_node *node) 235 { 236 kref_put(&node->kref, tipc_node_kref_release); 237 } 238 239 static void tipc_node_get(struct tipc_node *node) 240 { 241 kref_get(&node->kref); 242 } 243 244 /* 245 * tipc_node_find - locate specified node object, if it exists 246 */ 247 static struct tipc_node *tipc_node_find(struct net *net, u32 addr) 248 { 249 struct tipc_net *tn = tipc_net(net); 250 struct tipc_node *node; 251 unsigned int thash = tipc_hashfn(addr); 252 253 if (unlikely(!in_own_cluster_exact(net, addr))) 254 return NULL; 255 256 rcu_read_lock(); 257 hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) { 258 if (node->addr != addr) 259 continue; 260 if (!kref_get_unless_zero(&node->kref)) 261 node = NULL; 262 break; 263 } 264 rcu_read_unlock(); 265 return node; 266 } 267 268 static void tipc_node_read_lock(struct tipc_node *n) 269 { 270 read_lock_bh(&n->lock); 271 } 272 273 static void tipc_node_read_unlock(struct tipc_node *n) 274 { 275 read_unlock_bh(&n->lock); 276 } 277 278 static void tipc_node_write_lock(struct tipc_node *n) 279 { 280 write_lock_bh(&n->lock); 281 } 282 283 static void tipc_node_write_unlock(struct tipc_node *n) 284 { 285 struct net *net = n->net; 286 u32 addr = 0; 287 u32 flags = n->action_flags; 288 u32 link_id = 0; 289 struct list_head *publ_list; 290 291 if (likely(!flags)) { 292 write_unlock_bh(&n->lock); 293 return; 294 } 295 296 addr = n->addr; 297 link_id = n->link_id; 298 publ_list = &n->publ_list; 299 300 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP | 301 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP); 302 303 write_unlock_bh(&n->lock); 304 305 if (flags & 
TIPC_NOTIFY_NODE_DOWN) 306 tipc_publ_notify(net, publ_list, addr); 307 308 if (flags & TIPC_NOTIFY_NODE_UP) 309 tipc_named_node_up(net, addr); 310 311 if (flags & TIPC_NOTIFY_LINK_UP) 312 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr, 313 TIPC_NODE_SCOPE, link_id, addr); 314 315 if (flags & TIPC_NOTIFY_LINK_DOWN) 316 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr, 317 link_id, addr); 318 } 319 320 struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities) 321 { 322 struct tipc_net *tn = net_generic(net, tipc_net_id); 323 struct tipc_node *n, *temp_node; 324 int i; 325 326 spin_lock_bh(&tn->node_list_lock); 327 n = tipc_node_find(net, addr); 328 if (n) 329 goto exit; 330 n = kzalloc(sizeof(*n), GFP_ATOMIC); 331 if (!n) { 332 pr_warn("Node creation failed, no memory\n"); 333 goto exit; 334 } 335 n->addr = addr; 336 n->net = net; 337 n->capabilities = capabilities; 338 kref_init(&n->kref); 339 rwlock_init(&n->lock); 340 INIT_HLIST_NODE(&n->hash); 341 INIT_LIST_HEAD(&n->list); 342 INIT_LIST_HEAD(&n->publ_list); 343 INIT_LIST_HEAD(&n->conn_sks); 344 skb_queue_head_init(&n->bc_entry.namedq); 345 skb_queue_head_init(&n->bc_entry.inputq1); 346 __skb_queue_head_init(&n->bc_entry.arrvq); 347 skb_queue_head_init(&n->bc_entry.inputq2); 348 for (i = 0; i < MAX_BEARERS; i++) 349 spin_lock_init(&n->links[i].lock); 350 n->state = SELF_DOWN_PEER_LEAVING; 351 n->signature = INVALID_NODE_SIG; 352 n->active_links[0] = INVALID_BEARER_ID; 353 n->active_links[1] = INVALID_BEARER_ID; 354 if (!tipc_link_bc_create(net, tipc_own_addr(net), n->addr, 355 U16_MAX, 356 tipc_link_window(tipc_bc_sndlink(net)), 357 n->capabilities, 358 &n->bc_entry.inputq1, 359 &n->bc_entry.namedq, 360 tipc_bc_sndlink(net), 361 &n->bc_entry.link)) { 362 pr_warn("Broadcast rcv link creation failed, no memory\n"); 363 kfree(n); 364 n = NULL; 365 goto exit; 366 } 367 tipc_node_get(n); 368 setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n); 369 n->keepalive_intv = U32_MAX; 370 
hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]); 371 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 372 if (n->addr < temp_node->addr) 373 break; 374 } 375 list_add_tail_rcu(&n->list, &temp_node->list); 376 exit: 377 spin_unlock_bh(&tn->node_list_lock); 378 return n; 379 } 380 381 static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l) 382 { 383 unsigned long tol = tipc_link_tolerance(l); 384 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4; 385 unsigned long keepalive_intv = msecs_to_jiffies(intv); 386 387 /* Link with lowest tolerance determines timer interval */ 388 if (keepalive_intv < n->keepalive_intv) 389 n->keepalive_intv = keepalive_intv; 390 391 /* Ensure link's abort limit corresponds to current interval */ 392 tipc_link_set_abort_limit(l, tol / jiffies_to_msecs(n->keepalive_intv)); 393 } 394 395 static void tipc_node_delete(struct tipc_node *node) 396 { 397 list_del_rcu(&node->list); 398 hlist_del_rcu(&node->hash); 399 tipc_node_put(node); 400 401 del_timer_sync(&node->timer); 402 tipc_node_put(node); 403 } 404 405 void tipc_node_stop(struct net *net) 406 { 407 struct tipc_net *tn = tipc_net(net); 408 struct tipc_node *node, *t_node; 409 410 spin_lock_bh(&tn->node_list_lock); 411 list_for_each_entry_safe(node, t_node, &tn->node_list, list) 412 tipc_node_delete(node); 413 spin_unlock_bh(&tn->node_list_lock); 414 } 415 416 void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr) 417 { 418 struct tipc_node *n; 419 420 if (in_own_node(net, addr)) 421 return; 422 423 n = tipc_node_find(net, addr); 424 if (!n) { 425 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr); 426 return; 427 } 428 tipc_node_write_lock(n); 429 list_add_tail(subscr, &n->publ_list); 430 tipc_node_write_unlock(n); 431 tipc_node_put(n); 432 } 433 434 void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr) 435 { 436 struct tipc_node *n; 437 438 if (in_own_node(net, addr)) 439 
return; 440 441 n = tipc_node_find(net, addr); 442 if (!n) { 443 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr); 444 return; 445 } 446 tipc_node_write_lock(n); 447 list_del_init(subscr); 448 tipc_node_write_unlock(n); 449 tipc_node_put(n); 450 } 451 452 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port) 453 { 454 struct tipc_node *node; 455 struct tipc_sock_conn *conn; 456 int err = 0; 457 458 if (in_own_node(net, dnode)) 459 return 0; 460 461 node = tipc_node_find(net, dnode); 462 if (!node) { 463 pr_warn("Connecting sock to node 0x%x failed\n", dnode); 464 return -EHOSTUNREACH; 465 } 466 conn = kmalloc(sizeof(*conn), GFP_ATOMIC); 467 if (!conn) { 468 err = -EHOSTUNREACH; 469 goto exit; 470 } 471 conn->peer_node = dnode; 472 conn->port = port; 473 conn->peer_port = peer_port; 474 475 tipc_node_write_lock(node); 476 list_add_tail(&conn->list, &node->conn_sks); 477 tipc_node_write_unlock(node); 478 exit: 479 tipc_node_put(node); 480 return err; 481 } 482 483 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port) 484 { 485 struct tipc_node *node; 486 struct tipc_sock_conn *conn, *safe; 487 488 if (in_own_node(net, dnode)) 489 return; 490 491 node = tipc_node_find(net, dnode); 492 if (!node) 493 return; 494 495 tipc_node_write_lock(node); 496 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) { 497 if (port != conn->port) 498 continue; 499 list_del(&conn->list); 500 kfree(conn); 501 } 502 tipc_node_write_unlock(node); 503 tipc_node_put(node); 504 } 505 506 /* tipc_node_timeout - handle expiration of node timer 507 */ 508 static void tipc_node_timeout(unsigned long data) 509 { 510 struct tipc_node *n = (struct tipc_node *)data; 511 struct tipc_link_entry *le; 512 struct sk_buff_head xmitq; 513 int bearer_id; 514 int rc = 0; 515 516 __skb_queue_head_init(&xmitq); 517 518 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { 519 tipc_node_read_lock(n); 520 le = &n->links[bearer_id]; 521 
spin_lock_bh(&le->lock); 522 if (le->link) { 523 /* Link tolerance may change asynchronously: */ 524 tipc_node_calculate_timer(n, le->link); 525 rc = tipc_link_timeout(le->link, &xmitq); 526 } 527 spin_unlock_bh(&le->lock); 528 tipc_node_read_unlock(n); 529 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr); 530 if (rc & TIPC_LINK_DOWN_EVT) 531 tipc_node_link_down(n, bearer_id, false); 532 } 533 mod_timer(&n->timer, jiffies + n->keepalive_intv); 534 } 535 536 /** 537 * __tipc_node_link_up - handle addition of link 538 * Node lock must be held by caller 539 * Link becomes active (alone or shared) or standby, depending on its priority. 540 */ 541 static void __tipc_node_link_up(struct tipc_node *n, int bearer_id, 542 struct sk_buff_head *xmitq) 543 { 544 int *slot0 = &n->active_links[0]; 545 int *slot1 = &n->active_links[1]; 546 struct tipc_link *ol = node_active_link(n, 0); 547 struct tipc_link *nl = n->links[bearer_id].link; 548 549 if (!nl) 550 return; 551 552 tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT); 553 if (!tipc_link_is_up(nl)) 554 return; 555 556 n->working_links++; 557 n->action_flags |= TIPC_NOTIFY_LINK_UP; 558 n->link_id = tipc_link_id(nl); 559 560 /* Leave room for tunnel header when returning 'mtu' to users: */ 561 n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE; 562 563 tipc_bearer_add_dest(n->net, bearer_id, n->addr); 564 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id); 565 566 pr_debug("Established link <%s> on network plane %c\n", 567 tipc_link_name(nl), tipc_link_plane(nl)); 568 569 /* First link? 
=> give it both slots */ 570 if (!ol) { 571 *slot0 = bearer_id; 572 *slot1 = bearer_id; 573 tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT); 574 n->action_flags |= TIPC_NOTIFY_NODE_UP; 575 tipc_bcast_add_peer(n->net, nl, xmitq); 576 return; 577 } 578 579 /* Second link => redistribute slots */ 580 if (tipc_link_prio(nl) > tipc_link_prio(ol)) { 581 pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol)); 582 *slot0 = bearer_id; 583 *slot1 = bearer_id; 584 tipc_link_set_active(nl, true); 585 tipc_link_set_active(ol, false); 586 } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) { 587 tipc_link_set_active(nl, true); 588 *slot1 = bearer_id; 589 } else { 590 pr_debug("New link <%s> is standby\n", tipc_link_name(nl)); 591 } 592 593 /* Prepare synchronization with first link */ 594 tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq); 595 } 596 597 /** 598 * tipc_node_link_up - handle addition of link 599 * 600 * Link becomes active (alone or shared) or standby, depending on its priority. 601 */ 602 static void tipc_node_link_up(struct tipc_node *n, int bearer_id, 603 struct sk_buff_head *xmitq) 604 { 605 tipc_node_write_lock(n); 606 __tipc_node_link_up(n, bearer_id, xmitq); 607 tipc_node_write_unlock(n); 608 } 609 610 /** 611 * __tipc_node_link_down - handle loss of link 612 */ 613 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, 614 struct sk_buff_head *xmitq, 615 struct tipc_media_addr **maddr) 616 { 617 struct tipc_link_entry *le = &n->links[*bearer_id]; 618 int *slot0 = &n->active_links[0]; 619 int *slot1 = &n->active_links[1]; 620 int i, highest = 0, prio; 621 struct tipc_link *l, *_l, *tnl; 622 623 l = n->links[*bearer_id].link; 624 if (!l || tipc_link_is_reset(l)) 625 return; 626 627 n->working_links--; 628 n->action_flags |= TIPC_NOTIFY_LINK_DOWN; 629 n->link_id = tipc_link_id(l); 630 631 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr); 632 633 pr_debug("Lost link <%s> on network plane %c\n", 634 tipc_link_name(l), 
tipc_link_plane(l)); 635 636 /* Select new active link if any available */ 637 *slot0 = INVALID_BEARER_ID; 638 *slot1 = INVALID_BEARER_ID; 639 for (i = 0; i < MAX_BEARERS; i++) { 640 _l = n->links[i].link; 641 if (!_l || !tipc_link_is_up(_l)) 642 continue; 643 if (_l == l) 644 continue; 645 prio = tipc_link_prio(_l); 646 if (prio < highest) 647 continue; 648 if (prio > highest) { 649 highest = prio; 650 *slot0 = i; 651 *slot1 = i; 652 continue; 653 } 654 *slot1 = i; 655 } 656 657 if (!tipc_node_is_up(n)) { 658 if (tipc_link_peer_is_down(l)) 659 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT); 660 tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT); 661 tipc_link_fsm_evt(l, LINK_RESET_EVT); 662 tipc_link_reset(l); 663 tipc_link_build_reset_msg(l, xmitq); 664 *maddr = &n->links[*bearer_id].maddr; 665 node_lost_contact(n, &le->inputq); 666 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id); 667 return; 668 } 669 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id); 670 671 /* There is still a working link => initiate failover */ 672 *bearer_id = n->active_links[0]; 673 tnl = n->links[*bearer_id].link; 674 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT); 675 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); 676 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1); 677 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq); 678 tipc_link_reset(l); 679 tipc_link_fsm_evt(l, LINK_RESET_EVT); 680 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); 681 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT); 682 *maddr = &n->links[*bearer_id].maddr; 683 } 684 685 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) 686 { 687 struct tipc_link_entry *le = &n->links[bearer_id]; 688 struct tipc_link *l = le->link; 689 struct tipc_media_addr *maddr; 690 struct sk_buff_head xmitq; 691 692 if (!l) 693 return; 694 695 __skb_queue_head_init(&xmitq); 696 697 tipc_node_write_lock(n); 698 if (!tipc_link_is_establishing(l)) { 699 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); 700 if (delete) { 
701 kfree(l); 702 le->link = NULL; 703 n->link_cnt--; 704 } 705 } else { 706 /* Defuse pending tipc_node_link_up() */ 707 tipc_link_fsm_evt(l, LINK_RESET_EVT); 708 } 709 tipc_node_write_unlock(n); 710 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); 711 tipc_sk_rcv(n->net, &le->inputq); 712 } 713 714 static bool tipc_node_is_up(struct tipc_node *n) 715 { 716 return n->active_links[0] != INVALID_BEARER_ID; 717 } 718 719 void tipc_node_check_dest(struct net *net, u32 onode, 720 struct tipc_bearer *b, 721 u16 capabilities, u32 signature, 722 struct tipc_media_addr *maddr, 723 bool *respond, bool *dupl_addr) 724 { 725 struct tipc_node *n; 726 struct tipc_link *l; 727 struct tipc_link_entry *le; 728 bool addr_match = false; 729 bool sign_match = false; 730 bool link_up = false; 731 bool accept_addr = false; 732 bool reset = true; 733 char *if_name; 734 735 *dupl_addr = false; 736 *respond = false; 737 738 n = tipc_node_create(net, onode, capabilities); 739 if (!n) 740 return; 741 742 tipc_node_write_lock(n); 743 744 le = &n->links[b->identity]; 745 746 /* Prepare to validate requesting node's signature and media address */ 747 l = le->link; 748 link_up = l && tipc_link_is_up(l); 749 addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr)); 750 sign_match = (signature == n->signature); 751 752 /* These three flags give us eight permutations: */ 753 754 if (sign_match && addr_match && link_up) { 755 /* All is fine. Do nothing. */ 756 reset = false; 757 } else if (sign_match && addr_match && !link_up) { 758 /* Respond. The link will come up in due time */ 759 *respond = true; 760 } else if (sign_match && !addr_match && link_up) { 761 /* Peer has changed i/f address without rebooting. 762 * If so, the link will reset soon, and the next 763 * discovery will be accepted. So we can ignore it. 764 * It may also be an cloned or malicious peer having 765 * chosen the same node address and signature as an 766 * existing one. 
767 * Ignore requests until the link goes down, if ever. 768 */ 769 *dupl_addr = true; 770 } else if (sign_match && !addr_match && !link_up) { 771 /* Peer link has changed i/f address without rebooting. 772 * It may also be a cloned or malicious peer; we can't 773 * distinguish between the two. 774 * The signature is correct, so we must accept. 775 */ 776 accept_addr = true; 777 *respond = true; 778 } else if (!sign_match && addr_match && link_up) { 779 /* Peer node rebooted. Two possibilities: 780 * - Delayed re-discovery; this link endpoint has already 781 * reset and re-established contact with the peer, before 782 * receiving a discovery message from that node. 783 * (The peer happened to receive one from this node first). 784 * - The peer came back so fast that our side has not 785 * discovered it yet. Probing from this side will soon 786 * reset the link, since there can be no working link 787 * endpoint at the peer end, and the link will re-establish. 788 * Accept the signature, since it comes from a known peer. 789 */ 790 n->signature = signature; 791 } else if (!sign_match && addr_match && !link_up) { 792 /* The peer node has rebooted. 793 * Accept signature, since it is a known peer. 794 */ 795 n->signature = signature; 796 *respond = true; 797 } else if (!sign_match && !addr_match && link_up) { 798 /* Peer rebooted with new address, or a new/duplicate peer. 799 * Ignore until the link goes down, if ever. 800 */ 801 *dupl_addr = true; 802 } else if (!sign_match && !addr_match && !link_up) { 803 /* Peer rebooted with new address, or it is a new peer. 804 * Accept signature and address. 
805 */ 806 n->signature = signature; 807 accept_addr = true; 808 *respond = true; 809 } 810 811 if (!accept_addr) 812 goto exit; 813 814 /* Now create new link if not already existing */ 815 if (!l) { 816 if (n->link_cnt == 2) { 817 pr_warn("Cannot establish 3rd link to %x\n", n->addr); 818 goto exit; 819 } 820 if_name = strchr(b->name, ':') + 1; 821 if (!tipc_link_create(net, if_name, b->identity, b->tolerance, 822 b->net_plane, b->mtu, b->priority, 823 b->window, mod(tipc_net(net)->random), 824 tipc_own_addr(net), onode, 825 n->capabilities, 826 tipc_bc_sndlink(n->net), n->bc_entry.link, 827 &le->inputq, 828 &n->bc_entry.namedq, &l)) { 829 *respond = false; 830 goto exit; 831 } 832 tipc_link_reset(l); 833 tipc_link_fsm_evt(l, LINK_RESET_EVT); 834 if (n->state == NODE_FAILINGOVER) 835 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); 836 le->link = l; 837 n->link_cnt++; 838 tipc_node_calculate_timer(n, l); 839 if (n->link_cnt == 1) 840 if (!mod_timer(&n->timer, jiffies + n->keepalive_intv)) 841 tipc_node_get(n); 842 } 843 memcpy(&le->maddr, maddr, sizeof(*maddr)); 844 exit: 845 tipc_node_write_unlock(n); 846 if (reset && !tipc_link_is_reset(l)) 847 tipc_node_link_down(n, b->identity, false); 848 tipc_node_put(n); 849 } 850 851 void tipc_node_delete_links(struct net *net, int bearer_id) 852 { 853 struct tipc_net *tn = net_generic(net, tipc_net_id); 854 struct tipc_node *n; 855 856 rcu_read_lock(); 857 list_for_each_entry_rcu(n, &tn->node_list, list) { 858 tipc_node_link_down(n, bearer_id, true); 859 } 860 rcu_read_unlock(); 861 } 862 863 static void tipc_node_reset_links(struct tipc_node *n) 864 { 865 char addr_string[16]; 866 int i; 867 868 pr_warn("Resetting all links to %s\n", 869 tipc_addr_string_fill(addr_string, n->addr)); 870 871 for (i = 0; i < MAX_BEARERS; i++) { 872 tipc_node_link_down(n, i, false); 873 } 874 } 875 876 /* tipc_node_fsm_evt - node finite state machine 877 * Determines when contact is allowed with peer node 878 */ 879 static void 
tipc_node_fsm_evt(struct tipc_node *n, int evt) 880 { 881 int state = n->state; 882 883 switch (state) { 884 case SELF_DOWN_PEER_DOWN: 885 switch (evt) { 886 case SELF_ESTABL_CONTACT_EVT: 887 state = SELF_UP_PEER_COMING; 888 break; 889 case PEER_ESTABL_CONTACT_EVT: 890 state = SELF_COMING_PEER_UP; 891 break; 892 case SELF_LOST_CONTACT_EVT: 893 case PEER_LOST_CONTACT_EVT: 894 break; 895 case NODE_SYNCH_END_EVT: 896 case NODE_SYNCH_BEGIN_EVT: 897 case NODE_FAILOVER_BEGIN_EVT: 898 case NODE_FAILOVER_END_EVT: 899 default: 900 goto illegal_evt; 901 } 902 break; 903 case SELF_UP_PEER_UP: 904 switch (evt) { 905 case SELF_LOST_CONTACT_EVT: 906 state = SELF_DOWN_PEER_LEAVING; 907 break; 908 case PEER_LOST_CONTACT_EVT: 909 state = SELF_LEAVING_PEER_DOWN; 910 break; 911 case NODE_SYNCH_BEGIN_EVT: 912 state = NODE_SYNCHING; 913 break; 914 case NODE_FAILOVER_BEGIN_EVT: 915 state = NODE_FAILINGOVER; 916 break; 917 case SELF_ESTABL_CONTACT_EVT: 918 case PEER_ESTABL_CONTACT_EVT: 919 case NODE_SYNCH_END_EVT: 920 case NODE_FAILOVER_END_EVT: 921 break; 922 default: 923 goto illegal_evt; 924 } 925 break; 926 case SELF_DOWN_PEER_LEAVING: 927 switch (evt) { 928 case PEER_LOST_CONTACT_EVT: 929 state = SELF_DOWN_PEER_DOWN; 930 break; 931 case SELF_ESTABL_CONTACT_EVT: 932 case PEER_ESTABL_CONTACT_EVT: 933 case SELF_LOST_CONTACT_EVT: 934 break; 935 case NODE_SYNCH_END_EVT: 936 case NODE_SYNCH_BEGIN_EVT: 937 case NODE_FAILOVER_BEGIN_EVT: 938 case NODE_FAILOVER_END_EVT: 939 default: 940 goto illegal_evt; 941 } 942 break; 943 case SELF_UP_PEER_COMING: 944 switch (evt) { 945 case PEER_ESTABL_CONTACT_EVT: 946 state = SELF_UP_PEER_UP; 947 break; 948 case SELF_LOST_CONTACT_EVT: 949 state = SELF_DOWN_PEER_LEAVING; 950 break; 951 case SELF_ESTABL_CONTACT_EVT: 952 case PEER_LOST_CONTACT_EVT: 953 case NODE_SYNCH_END_EVT: 954 case NODE_FAILOVER_BEGIN_EVT: 955 break; 956 case NODE_SYNCH_BEGIN_EVT: 957 case NODE_FAILOVER_END_EVT: 958 default: 959 goto illegal_evt; 960 } 961 break; 962 case 
SELF_COMING_PEER_UP: 963 switch (evt) { 964 case SELF_ESTABL_CONTACT_EVT: 965 state = SELF_UP_PEER_UP; 966 break; 967 case PEER_LOST_CONTACT_EVT: 968 state = SELF_LEAVING_PEER_DOWN; 969 break; 970 case SELF_LOST_CONTACT_EVT: 971 case PEER_ESTABL_CONTACT_EVT: 972 break; 973 case NODE_SYNCH_END_EVT: 974 case NODE_SYNCH_BEGIN_EVT: 975 case NODE_FAILOVER_BEGIN_EVT: 976 case NODE_FAILOVER_END_EVT: 977 default: 978 goto illegal_evt; 979 } 980 break; 981 case SELF_LEAVING_PEER_DOWN: 982 switch (evt) { 983 case SELF_LOST_CONTACT_EVT: 984 state = SELF_DOWN_PEER_DOWN; 985 break; 986 case SELF_ESTABL_CONTACT_EVT: 987 case PEER_ESTABL_CONTACT_EVT: 988 case PEER_LOST_CONTACT_EVT: 989 break; 990 case NODE_SYNCH_END_EVT: 991 case NODE_SYNCH_BEGIN_EVT: 992 case NODE_FAILOVER_BEGIN_EVT: 993 case NODE_FAILOVER_END_EVT: 994 default: 995 goto illegal_evt; 996 } 997 break; 998 case NODE_FAILINGOVER: 999 switch (evt) { 1000 case SELF_LOST_CONTACT_EVT: 1001 state = SELF_DOWN_PEER_LEAVING; 1002 break; 1003 case PEER_LOST_CONTACT_EVT: 1004 state = SELF_LEAVING_PEER_DOWN; 1005 break; 1006 case NODE_FAILOVER_END_EVT: 1007 state = SELF_UP_PEER_UP; 1008 break; 1009 case NODE_FAILOVER_BEGIN_EVT: 1010 case SELF_ESTABL_CONTACT_EVT: 1011 case PEER_ESTABL_CONTACT_EVT: 1012 break; 1013 case NODE_SYNCH_BEGIN_EVT: 1014 case NODE_SYNCH_END_EVT: 1015 default: 1016 goto illegal_evt; 1017 } 1018 break; 1019 case NODE_SYNCHING: 1020 switch (evt) { 1021 case SELF_LOST_CONTACT_EVT: 1022 state = SELF_DOWN_PEER_LEAVING; 1023 break; 1024 case PEER_LOST_CONTACT_EVT: 1025 state = SELF_LEAVING_PEER_DOWN; 1026 break; 1027 case NODE_SYNCH_END_EVT: 1028 state = SELF_UP_PEER_UP; 1029 break; 1030 case NODE_FAILOVER_BEGIN_EVT: 1031 state = NODE_FAILINGOVER; 1032 break; 1033 case NODE_SYNCH_BEGIN_EVT: 1034 case SELF_ESTABL_CONTACT_EVT: 1035 case PEER_ESTABL_CONTACT_EVT: 1036 break; 1037 case NODE_FAILOVER_END_EVT: 1038 default: 1039 goto illegal_evt; 1040 } 1041 break; 1042 default: 1043 pr_err("Unknown node fsm state 
%x\n", state); 1044 break; 1045 } 1046 n->state = state; 1047 return; 1048 1049 illegal_evt: 1050 pr_err("Illegal node fsm evt %x in state %x\n", evt, state); 1051 } 1052 1053 static void node_lost_contact(struct tipc_node *n, 1054 struct sk_buff_head *inputq) 1055 { 1056 char addr_string[16]; 1057 struct tipc_sock_conn *conn, *safe; 1058 struct tipc_link *l; 1059 struct list_head *conns = &n->conn_sks; 1060 struct sk_buff *skb; 1061 uint i; 1062 1063 pr_debug("Lost contact with %s\n", 1064 tipc_addr_string_fill(addr_string, n->addr)); 1065 1066 /* Clean up broadcast state */ 1067 tipc_bcast_remove_peer(n->net, n->bc_entry.link); 1068 1069 /* Abort any ongoing link failover */ 1070 for (i = 0; i < MAX_BEARERS; i++) { 1071 l = n->links[i].link; 1072 if (l) 1073 tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT); 1074 } 1075 1076 /* Notify publications from this node */ 1077 n->action_flags |= TIPC_NOTIFY_NODE_DOWN; 1078 1079 /* Notify sockets connected to node */ 1080 list_for_each_entry_safe(conn, safe, conns, list) { 1081 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, 1082 SHORT_H_SIZE, 0, tipc_own_addr(n->net), 1083 conn->peer_node, conn->port, 1084 conn->peer_port, TIPC_ERR_NO_NODE); 1085 if (likely(skb)) 1086 skb_queue_tail(inputq, skb); 1087 list_del(&conn->list); 1088 kfree(conn); 1089 } 1090 } 1091 1092 /** 1093 * tipc_node_get_linkname - get the name of a link 1094 * 1095 * @bearer_id: id of the bearer 1096 * @node: peer node address 1097 * @linkname: link name output buffer 1098 * 1099 * Returns 0 on success 1100 */ 1101 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr, 1102 char *linkname, size_t len) 1103 { 1104 struct tipc_link *link; 1105 int err = -EINVAL; 1106 struct tipc_node *node = tipc_node_find(net, addr); 1107 1108 if (!node) 1109 return err; 1110 1111 if (bearer_id >= MAX_BEARERS) 1112 goto exit; 1113 1114 tipc_node_read_lock(node); 1115 link = node->links[bearer_id].link; 1116 if (link) { 1117 strncpy(linkname, 
tipc_link_name(link), len); 1118 err = 0; 1119 } 1120 exit: 1121 tipc_node_read_unlock(node); 1122 tipc_node_put(node); 1123 return err; 1124 } 1125 1126 /* Caller should hold node lock for the passed node */ 1127 static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node) 1128 { 1129 void *hdr; 1130 struct nlattr *attrs; 1131 1132 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 1133 NLM_F_MULTI, TIPC_NL_NODE_GET); 1134 if (!hdr) 1135 return -EMSGSIZE; 1136 1137 attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE); 1138 if (!attrs) 1139 goto msg_full; 1140 1141 if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr)) 1142 goto attr_msg_full; 1143 if (tipc_node_is_up(node)) 1144 if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP)) 1145 goto attr_msg_full; 1146 1147 nla_nest_end(msg->skb, attrs); 1148 genlmsg_end(msg->skb, hdr); 1149 1150 return 0; 1151 1152 attr_msg_full: 1153 nla_nest_cancel(msg->skb, attrs); 1154 msg_full: 1155 genlmsg_cancel(msg->skb, hdr); 1156 1157 return -EMSGSIZE; 1158 } 1159 1160 /** 1161 * tipc_node_xmit() is the general link level function for message sending 1162 * @net: the applicable net namespace 1163 * @list: chain of buffers containing message 1164 * @dnode: address of destination node 1165 * @selector: a number used for deterministic link selection 1166 * Consumes the buffer chain, except when returning -ELINKCONG 1167 * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF 1168 */ 1169 int tipc_node_xmit(struct net *net, struct sk_buff_head *list, 1170 u32 dnode, int selector) 1171 { 1172 struct tipc_link_entry *le = NULL; 1173 struct tipc_node *n; 1174 struct sk_buff_head xmitq; 1175 int bearer_id; 1176 int rc; 1177 1178 if (in_own_node(net, dnode)) { 1179 tipc_sk_rcv(net, list); 1180 return 0; 1181 } 1182 1183 n = tipc_node_find(net, dnode); 1184 if (unlikely(!n)) { 1185 skb_queue_purge(list); 1186 return -EHOSTUNREACH; 1187 } 1188 1189 tipc_node_read_lock(n); 1190 bearer_id = 
n->active_links[selector & 1]; 1191 if (unlikely(bearer_id == INVALID_BEARER_ID)) { 1192 tipc_node_read_unlock(n); 1193 tipc_node_put(n); 1194 skb_queue_purge(list); 1195 return -EHOSTUNREACH; 1196 } 1197 1198 __skb_queue_head_init(&xmitq); 1199 le = &n->links[bearer_id]; 1200 spin_lock_bh(&le->lock); 1201 rc = tipc_link_xmit(le->link, list, &xmitq); 1202 spin_unlock_bh(&le->lock); 1203 tipc_node_read_unlock(n); 1204 1205 if (likely(rc == 0)) 1206 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr); 1207 else if (rc == -ENOBUFS) 1208 tipc_node_link_down(n, bearer_id, false); 1209 1210 tipc_node_put(n); 1211 1212 return rc; 1213 } 1214 1215 /* tipc_node_xmit_skb(): send single buffer to destination 1216 * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE 1217 * messages, which will not be rejected 1218 * The only exception is datagram messages rerouted after secondary 1219 * lookup, which are rare and safe to dispose of anyway. 1220 * TODO: Return real return value, and let callers use 1221 * tipc_wait_for_sendpkt() where applicable 1222 */ 1223 int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode, 1224 u32 selector) 1225 { 1226 struct sk_buff_head head; 1227 int rc; 1228 1229 skb_queue_head_init(&head); 1230 __skb_queue_tail(&head, skb); 1231 rc = tipc_node_xmit(net, &head, dnode, selector); 1232 if (rc == -ELINKCONG) 1233 kfree_skb(skb); 1234 return 0; 1235 } 1236 1237 void tipc_node_broadcast(struct net *net, struct sk_buff *skb) 1238 { 1239 struct sk_buff *txskb; 1240 struct tipc_node *n; 1241 u32 dst; 1242 1243 rcu_read_lock(); 1244 list_for_each_entry_rcu(n, tipc_nodes(net), list) { 1245 dst = n->addr; 1246 if (in_own_node(net, dst)) 1247 continue; 1248 if (!tipc_node_is_up(n)) 1249 continue; 1250 txskb = pskb_copy(skb, GFP_ATOMIC); 1251 if (!txskb) 1252 break; 1253 msg_set_destnode(buf_msg(txskb), dst); 1254 tipc_node_xmit_skb(net, txskb, dst, 0); 1255 } 1256 rcu_read_unlock(); 1257 1258 kfree_skb(skb); 1259 } 1260 1261 /** 
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast link reset may happen at reassembly failure */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_BC_ACK) {
		tipc_node_read_lock(n);
		tipc_link_build_ack_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	/* Deliver. 'arrvq' is under inputq2's lock protection.
	 * Lock order inputq2 -> inputq1 must match the consumer side
	 * (tipc_sk_mcast_rcv) to avoid deadlock — do not reorder.
	 */
	if (!skb_queue_empty(&be->inputq1)) {
		spin_lock_bh(&be->inputq2.lock);
		spin_lock_bh(&be->inputq1.lock);
		skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
		spin_unlock_bh(&be->inputq1.lock);
		spin_unlock_bh(&be->inputq2.lock);
		tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
	}
	tipc_node_put(n);
}

/**
 * tipc_node_check_state - check and if necessary update node state
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * Returns true if state is ok, otherwise consumes buffer and returns false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	/* Sequence number of the message wrapped inside a tunnel packet */
	u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);


	/* Fast path: fully established and no tunnel traffic involved */
	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && tipc_link_is_up(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			/* Move undelivered packets from the failed link's
			 * input queue over to the tunnel link's
			 */
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}
		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No synching needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		syncpt = iseqno + exp_pkts - 1;
		if (!tipc_link_is_up(l)) {
			tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
			__tipc_node_link_up(n, bearer_id, xmitq);
		}
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		/* Make 'tnl' the synching link and 'pl' the other one */
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		/* Hold back ordinary traffic on the tunnel link until the
		 * parallel link has caught up to the synch point
		 */
		return false;
	}
	return true;
}

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_node *n;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int bearer_id = b->identity;
	struct tipc_link_entry *le;
	u16 bc_ack = msg_bcast_ack(hdr);
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	/* Ensure message is well-formed */
	if (unlikely(!tipc_msg_validate(skb)))
		goto discard;

	/* Handle arrival of discovery or broadcast packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (unlikely(usr == LINK_CONFIG))
			return tipc_disc_rcv(net, skb, b);
		else
			return tipc_node_bc_rcv(net, skb, bearer_id);
	}

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	/* Ensure broadcast reception is in synch with peer's send state */
	if (unlikely(usr == LINK_PROTOCOL))
		tipc_bcast_sync_rcv(net, n->bc_entry.link, hdr);
	else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
		tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);

	/* Receive packet directly if
	   conditions permit */
	tipc_node_read_lock(n);
	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
		spin_lock_bh(&le->lock);
		if (le->link) {
			rc = tipc_link_rcv(le->link, skb, &xmitq);
			/* NULL marks the buffer as consumed by the link */
			skb = NULL;
		}
		spin_unlock_bh(&le->lock);
	}
	tipc_node_read_unlock(n);

	/* Check/update node state before receiving */
	if (unlikely(skb)) {
		tipc_node_write_lock(n);
		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
			if (le->link) {
				rc = tipc_link_rcv(le->link, skb, &xmitq);
				skb = NULL;
			}
		}
		tipc_node_write_unlock(n);
	}

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id, false);

	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
		tipc_named_rcv(net, &n->bc_entry.namedq);

	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);
discard:
	/* kfree_skb(NULL) is a no-op, so consumed buffers are safe here */
	kfree_skb(skb);
}

/* tipc_nl_node_dump - netlink dump callback listing all known nodes.
 * Resumes from cb->args[] state ([0]=done flag, [1]=last dumped address).
 */
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		/* Skip entries until we reach the resume point */
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}

/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or 0 if no matching link is found.
1608 */ 1609 static struct tipc_node *tipc_node_find_by_name(struct net *net, 1610 const char *link_name, 1611 unsigned int *bearer_id) 1612 { 1613 struct tipc_net *tn = net_generic(net, tipc_net_id); 1614 struct tipc_link *l; 1615 struct tipc_node *n; 1616 struct tipc_node *found_node = NULL; 1617 int i; 1618 1619 *bearer_id = 0; 1620 rcu_read_lock(); 1621 list_for_each_entry_rcu(n, &tn->node_list, list) { 1622 tipc_node_read_lock(n); 1623 for (i = 0; i < MAX_BEARERS; i++) { 1624 l = n->links[i].link; 1625 if (l && !strcmp(tipc_link_name(l), link_name)) { 1626 *bearer_id = i; 1627 found_node = n; 1628 break; 1629 } 1630 } 1631 tipc_node_read_unlock(n); 1632 if (found_node) 1633 break; 1634 } 1635 rcu_read_unlock(); 1636 1637 return found_node; 1638 } 1639 1640 int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info) 1641 { 1642 int err; 1643 int res = 0; 1644 int bearer_id; 1645 char *name; 1646 struct tipc_link *link; 1647 struct tipc_node *node; 1648 struct sk_buff_head xmitq; 1649 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; 1650 struct net *net = sock_net(skb->sk); 1651 1652 __skb_queue_head_init(&xmitq); 1653 1654 if (!info->attrs[TIPC_NLA_LINK]) 1655 return -EINVAL; 1656 1657 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, 1658 info->attrs[TIPC_NLA_LINK], 1659 tipc_nl_link_policy); 1660 if (err) 1661 return err; 1662 1663 if (!attrs[TIPC_NLA_LINK_NAME]) 1664 return -EINVAL; 1665 1666 name = nla_data(attrs[TIPC_NLA_LINK_NAME]); 1667 1668 if (strcmp(name, tipc_bclink_name) == 0) 1669 return tipc_nl_bc_link_set(net, attrs); 1670 1671 node = tipc_node_find_by_name(net, name, &bearer_id); 1672 if (!node) 1673 return -EINVAL; 1674 1675 tipc_node_read_lock(node); 1676 1677 link = node->links[bearer_id].link; 1678 if (!link) { 1679 res = -EINVAL; 1680 goto out; 1681 } 1682 1683 if (attrs[TIPC_NLA_LINK_PROP]) { 1684 struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; 1685 1686 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], 1687 props); 1688 if (err) 
{ 1689 res = err; 1690 goto out; 1691 } 1692 1693 if (props[TIPC_NLA_PROP_TOL]) { 1694 u32 tol; 1695 1696 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); 1697 tipc_link_set_tolerance(link, tol, &xmitq); 1698 } 1699 if (props[TIPC_NLA_PROP_PRIO]) { 1700 u32 prio; 1701 1702 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); 1703 tipc_link_set_prio(link, prio, &xmitq); 1704 } 1705 if (props[TIPC_NLA_PROP_WIN]) { 1706 u32 win; 1707 1708 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); 1709 tipc_link_set_queue_limits(link, win); 1710 } 1711 } 1712 1713 out: 1714 tipc_node_read_unlock(node); 1715 tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr); 1716 return res; 1717 } 1718 1719 int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info) 1720 { 1721 struct net *net = genl_info_net(info); 1722 struct tipc_nl_msg msg; 1723 char *name; 1724 int err; 1725 1726 msg.portid = info->snd_portid; 1727 msg.seq = info->snd_seq; 1728 1729 if (!info->attrs[TIPC_NLA_LINK_NAME]) 1730 return -EINVAL; 1731 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); 1732 1733 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1734 if (!msg.skb) 1735 return -ENOMEM; 1736 1737 if (strcmp(name, tipc_bclink_name) == 0) { 1738 err = tipc_nl_add_bc_link(net, &msg); 1739 if (err) { 1740 nlmsg_free(msg.skb); 1741 return err; 1742 } 1743 } else { 1744 int bearer_id; 1745 struct tipc_node *node; 1746 struct tipc_link *link; 1747 1748 node = tipc_node_find_by_name(net, name, &bearer_id); 1749 if (!node) 1750 return -EINVAL; 1751 1752 tipc_node_read_lock(node); 1753 link = node->links[bearer_id].link; 1754 if (!link) { 1755 tipc_node_read_unlock(node); 1756 nlmsg_free(msg.skb); 1757 return -EINVAL; 1758 } 1759 1760 err = __tipc_nl_add_link(net, &msg, link, 0); 1761 tipc_node_read_unlock(node); 1762 if (err) { 1763 nlmsg_free(msg.skb); 1764 return err; 1765 } 1766 } 1767 1768 return genlmsg_reply(msg.skb, info); 1769 } 1770 1771 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct 
genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	/* The broadcast link is handled by the bclink code */
	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	/* le->lock serializes against concurrent link rx/tx on this entry */
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	/* *prev_link carries the resume point across dump invocations;
	 * it is left at the failing index on error, reset to 0 on success
	 */
	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}

/* tipc_nl_node_dump_link - netlink dump callback listing all links of all
 * nodes. Resumes from cb->args[] state ([0]=node addr, [1]=link index,
 * [2]=done flag).
 */
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		/* First invocation: start with the broadcast link */
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}