1 /* 2 * net/tipc/node.c: TIPC node management routines 3 * 4 * Copyright (c) 2000-2006, 2012-2015, Ericsson AB 5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "discover.h"

/* Sentinel outside the valid signature range: peer signature not yet known */
#define INVALID_NODE_SIG 0x10000

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up (publish link state name)
 * TIPC_NOTIFY_LINK_DOWN: notify link is down (withdraw link state name)
 */
enum {
	TIPC_NOTIFY_NODE_DOWN = (1 << 3),
	TIPC_NOTIFY_NODE_UP = (1 << 4),
	TIPC_NOTIFY_LINK_UP = (1 << 6),
	TIPC_NOTIFY_LINK_DOWN = (1 << 7)
};

/* Per-bearer unicast link endpoint towards the peer node */
struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock; /* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

/* Broadcast receive link endpoint and its associated queues */
struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
};

/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @state: connectivity state vs peer node
 * @sync_point: sequence number where synch/failover is finished
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @publ_list: list of publications
 * @conn_sks: list of connections to sockets on this node
 * @keepalive_intv: keepalive timer interval, in jiffies
 * @timer: node's keepalive timer
 * @rcu: rcu struct for tipc_node
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
};

/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN = 0xdd,
	SELF_UP_PEER_UP = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING = 0xac,
	SELF_COMING_PEER_UP = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER = 0xf0,
	NODE_SYNCHING = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT = 0xfee,
	NODE_SYNCH_BEGIN_EVT = 0xcbe,
	NODE_SYNCH_END_EVT = 0xcee
};

static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(unsigned long data);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static void tipc_node_put(struct tipc_node *node);
static bool tipc_node_is_up(struct tipc_node *n);

/* Record of one socket connection towards a peer node, kept on conn_sks */
struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
};

static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
	[TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
	[TIPC_NLA_NODE_UP] = { .type = NLA_FLAG }
};

/* Return the currently active link selected by 'sel', or NULL if none */
static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

/* Return the MTU of the active link towards 'addr' selected by 'sel';
 * falls back to MAX_MSG_SIZE if the node or an active link is unknown.
 */
int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}
/*
 * A trivial power-of-two bitmask technique is used for speed, since this
 * operation is done for every incoming TIPC packet. The number of hash table
 * entries has been chosen so that no hash chain exceeds 8 nodes and will
 * usually be much smaller (typically only a single node).
 */
static unsigned int tipc_hashfn(u32 addr)
{
	return addr & (NODE_HTABLE_SIZE - 1);
}

/* Called when the last reference to the node is dropped */
static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *node = container_of(kref, struct tipc_node, kref);

	tipc_node_delete(node);
}

static void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 * Returns the node with its reference count incremented, or NULL;
 * the caller must release it with tipc_node_put().
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;

	if (unlikely(!in_own_cluster_exact(net, addr)))
		return NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
				 hash) {
		if (node->addr == addr) {
			tipc_node_get(node);
			rcu_read_unlock();
			return node;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void tipc_node_read_lock(struct tipc_node *n)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
{
	write_lock_bh(&n->lock);
}

/* Release the node write lock, then perform any notifications that were
 * flagged in action_flags while the lock was held. The flags are snapshotted
 * and cleared under the lock so each pending action runs exactly once.
 */
static void tipc_node_write_unlock(struct tipc_node *n)
{
	struct net *net = n->net;
	u32 addr = 0;
	u32 flags = n->action_flags;
	u32 link_id = 0;
	struct list_head *publ_list;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	addr = n->addr;
	link_id = n->link_id;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP)
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, addr);

	if (flags & TIPC_NOTIFY_LINK_DOWN)
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      link_id, addr);
}

/* Create (or find) the node representing peer 'addr'.
 * Returns the node, or NULL on allocation failure. The returned node holds
 * the reference taken by tipc_node_find() or tipc_node_get() respectively.
 */
struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n, *temp_node;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr);
	if (n)
		goto exit;
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	n->addr = addr;
	n->net = net;
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	if (!tipc_link_bc_create(net, tipc_own_addr(net), n->addr,
				 U16_MAX,
				 tipc_link_window(tipc_bc_sndlink(net)),
				 n->capabilities,
				 &n->bc_entry.inputq1,
				 &n->bc_entry.namedq,
				 tipc_bc_sndlink(net),
				 &n->bc_entry.link)) {
		pr_warn("Broadcast rcv link creation failed, no memory\n");
		kfree(n);
		n = NULL;
		goto exit;
	}
	/* Extra reference on behalf of the node timer */
	tipc_node_get(n);
	setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n);
	/* Real interval is set when the first link is attached */
	n->keepalive_intv = U32_MAX;
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	/* Keep node_list sorted by ascending address */
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}

/* Recalculate the node keepalive interval from link 'l's tolerance;
 * the link with the lowest tolerance determines the timer interval.
 */
static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
	unsigned long keepalive_intv = msecs_to_jiffies(intv);

	/* Link with lowest tolerance determines timer interval */
	if (keepalive_intv < n->keepalive_intv)
		n->keepalive_intv = keepalive_intv;

	/* Ensure link's abort limit corresponds to current interval */
	tipc_link_set_abort_limit(l, tol / jiffies_to_msecs(n->keepalive_intv));
}

/* Final node teardown; reached only via tipc_node_put() -> kref release */
static void tipc_node_delete(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	kfree(node->bc_entry.link);
	kfree_rcu(node, rcu);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
		/* Drop the timer's reference only if we stopped the timer */
		if (del_timer(&node->timer))
			tipc_node_put(node);
		tipc_node_put(node);
	}
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if
 (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock(n);
	tipc_node_put(n);
}

/* Register a socket connection towards peer node 'dnode', so the socket can
 * be notified if contact with that node is lost.
 * Returns 0 on success, -EHOSTUNREACH on unknown node or allocation failure.
 */
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(unsigned long data)
{
	struct tipc_node *n = (struct tipc_node *)data;
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int bearer_id;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		spin_lock_bh(&le->lock);
		if (le->link) {
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
		}
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	/* Re-arm; keep the node reference unless the timer was already pending */
	if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
		tipc_node_get(n);
	tipc_node_put(n);
}

/**
 * __tipc_node_link_up - handle addition of link
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl)
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	tipc_node_write_unlock(n);
}

/**
 * __tipc_node_link_down - handle loss of link
 * Node write lock must be held by caller. On return, *bearer_id and *maddr
 * refer to the bearer which should carry the generated packets in 'xmitq'
 * (the failover tunnel link's bearer if another link is still up).
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!tipc_node_is_up(n)) {
		/* Last link lost => reset and report lost contact */
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}

static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_link *l = le->link;
	struct tipc_media_addr *maddr;
	struct sk_buff_head xmitq;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
		if (delete) {
703 kfree(l); 704 le->link = NULL; 705 n->link_cnt--; 706 } 707 } else { 708 /* Defuse pending tipc_node_link_up() */ 709 tipc_link_fsm_evt(l, LINK_RESET_EVT); 710 } 711 tipc_node_write_unlock(n); 712 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); 713 tipc_sk_rcv(n->net, &le->inputq); 714 } 715 716 static bool tipc_node_is_up(struct tipc_node *n) 717 { 718 return n->active_links[0] != INVALID_BEARER_ID; 719 } 720 721 void tipc_node_check_dest(struct net *net, u32 onode, 722 struct tipc_bearer *b, 723 u16 capabilities, u32 signature, 724 struct tipc_media_addr *maddr, 725 bool *respond, bool *dupl_addr) 726 { 727 struct tipc_node *n; 728 struct tipc_link *l; 729 struct tipc_link_entry *le; 730 bool addr_match = false; 731 bool sign_match = false; 732 bool link_up = false; 733 bool accept_addr = false; 734 bool reset = true; 735 char *if_name; 736 737 *dupl_addr = false; 738 *respond = false; 739 740 n = tipc_node_create(net, onode, capabilities); 741 if (!n) 742 return; 743 744 tipc_node_write_lock(n); 745 746 le = &n->links[b->identity]; 747 748 /* Prepare to validate requesting node's signature and media address */ 749 l = le->link; 750 link_up = l && tipc_link_is_up(l); 751 addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr)); 752 sign_match = (signature == n->signature); 753 754 /* These three flags give us eight permutations: */ 755 756 if (sign_match && addr_match && link_up) { 757 /* All is fine. Do nothing. */ 758 reset = false; 759 } else if (sign_match && addr_match && !link_up) { 760 /* Respond. The link will come up in due time */ 761 *respond = true; 762 } else if (sign_match && !addr_match && link_up) { 763 /* Peer has changed i/f address without rebooting. 764 * If so, the link will reset soon, and the next 765 * discovery will be accepted. So we can ignore it. 766 * It may also be an cloned or malicious peer having 767 * chosen the same node address and signature as an 768 * existing one. 
769 * Ignore requests until the link goes down, if ever. 770 */ 771 *dupl_addr = true; 772 } else if (sign_match && !addr_match && !link_up) { 773 /* Peer link has changed i/f address without rebooting. 774 * It may also be a cloned or malicious peer; we can't 775 * distinguish between the two. 776 * The signature is correct, so we must accept. 777 */ 778 accept_addr = true; 779 *respond = true; 780 } else if (!sign_match && addr_match && link_up) { 781 /* Peer node rebooted. Two possibilities: 782 * - Delayed re-discovery; this link endpoint has already 783 * reset and re-established contact with the peer, before 784 * receiving a discovery message from that node. 785 * (The peer happened to receive one from this node first). 786 * - The peer came back so fast that our side has not 787 * discovered it yet. Probing from this side will soon 788 * reset the link, since there can be no working link 789 * endpoint at the peer end, and the link will re-establish. 790 * Accept the signature, since it comes from a known peer. 791 */ 792 n->signature = signature; 793 } else if (!sign_match && addr_match && !link_up) { 794 /* The peer node has rebooted. 795 * Accept signature, since it is a known peer. 796 */ 797 n->signature = signature; 798 *respond = true; 799 } else if (!sign_match && !addr_match && link_up) { 800 /* Peer rebooted with new address, or a new/duplicate peer. 801 * Ignore until the link goes down, if ever. 802 */ 803 *dupl_addr = true; 804 } else if (!sign_match && !addr_match && !link_up) { 805 /* Peer rebooted with new address, or it is a new peer. 806 * Accept signature and address. 
807 */ 808 n->signature = signature; 809 accept_addr = true; 810 *respond = true; 811 } 812 813 if (!accept_addr) 814 goto exit; 815 816 /* Now create new link if not already existing */ 817 if (!l) { 818 if (n->link_cnt == 2) { 819 pr_warn("Cannot establish 3rd link to %x\n", n->addr); 820 goto exit; 821 } 822 if_name = strchr(b->name, ':') + 1; 823 if (!tipc_link_create(net, if_name, b->identity, b->tolerance, 824 b->net_plane, b->mtu, b->priority, 825 b->window, mod(tipc_net(net)->random), 826 tipc_own_addr(net), onode, 827 n->capabilities, 828 tipc_bc_sndlink(n->net), n->bc_entry.link, 829 &le->inputq, 830 &n->bc_entry.namedq, &l)) { 831 *respond = false; 832 goto exit; 833 } 834 tipc_link_reset(l); 835 tipc_link_fsm_evt(l, LINK_RESET_EVT); 836 if (n->state == NODE_FAILINGOVER) 837 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); 838 le->link = l; 839 n->link_cnt++; 840 tipc_node_calculate_timer(n, l); 841 if (n->link_cnt == 1) 842 if (!mod_timer(&n->timer, jiffies + n->keepalive_intv)) 843 tipc_node_get(n); 844 } 845 memcpy(&le->maddr, maddr, sizeof(*maddr)); 846 exit: 847 tipc_node_write_unlock(n); 848 if (reset && !tipc_link_is_reset(l)) 849 tipc_node_link_down(n, b->identity, false); 850 tipc_node_put(n); 851 } 852 853 void tipc_node_delete_links(struct net *net, int bearer_id) 854 { 855 struct tipc_net *tn = net_generic(net, tipc_net_id); 856 struct tipc_node *n; 857 858 rcu_read_lock(); 859 list_for_each_entry_rcu(n, &tn->node_list, list) { 860 tipc_node_link_down(n, bearer_id, true); 861 } 862 rcu_read_unlock(); 863 } 864 865 static void tipc_node_reset_links(struct tipc_node *n) 866 { 867 char addr_string[16]; 868 int i; 869 870 pr_warn("Resetting all links to %s\n", 871 tipc_addr_string_fill(addr_string, n->addr)); 872 873 for (i = 0; i < MAX_BEARERS; i++) { 874 tipc_node_link_down(n, i, false); 875 } 876 } 877 878 /* tipc_node_fsm_evt - node finite state machine 879 * Determines when contact is allowed with peer node 880 */ 881 static void 
tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	n->state = state;
	return;

illegal_evt:
	/* Illegal events are logged but deliberately do not change state */
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
}

/* node_lost_contact - all links to the peer are gone; abort failovers,
 * flag pending "node down" notifications, and queue connection-abort
 * messages towards all sockets connected to the peer.
 * Node write lock must be held by caller.
 */
static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	char addr_string[16];
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %s\n",
		 tipc_addr_string_fill(addr_string, n->addr));

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;

	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @bearer_id: id of the bearer
 * @node: peer node address
 * @linkname: link name output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		/* NOTE(review): strncpy() leaves 'linkname' unterminated if
		 * the link name length >= len — confirm callers always pass
		 * a TIPC_MAX_LINK_NAME sized buffer.
		 */
		strncpy(linkname,
tipc_link_name(link), len); 1120 err = 0; 1121 } 1122 exit: 1123 tipc_node_read_unlock(node); 1124 tipc_node_put(node); 1125 return err; 1126 } 1127 1128 /* Caller should hold node lock for the passed node */ 1129 static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node) 1130 { 1131 void *hdr; 1132 struct nlattr *attrs; 1133 1134 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 1135 NLM_F_MULTI, TIPC_NL_NODE_GET); 1136 if (!hdr) 1137 return -EMSGSIZE; 1138 1139 attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE); 1140 if (!attrs) 1141 goto msg_full; 1142 1143 if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr)) 1144 goto attr_msg_full; 1145 if (tipc_node_is_up(node)) 1146 if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP)) 1147 goto attr_msg_full; 1148 1149 nla_nest_end(msg->skb, attrs); 1150 genlmsg_end(msg->skb, hdr); 1151 1152 return 0; 1153 1154 attr_msg_full: 1155 nla_nest_cancel(msg->skb, attrs); 1156 msg_full: 1157 genlmsg_cancel(msg->skb, hdr); 1158 1159 return -EMSGSIZE; 1160 } 1161 1162 /** 1163 * tipc_node_xmit() is the general link level function for message sending 1164 * @net: the applicable net namespace 1165 * @list: chain of buffers containing message 1166 * @dnode: address of destination node 1167 * @selector: a number used for deterministic link selection 1168 * Consumes the buffer chain, except when returning -ELINKCONG 1169 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE 1170 */ 1171 int tipc_node_xmit(struct net *net, struct sk_buff_head *list, 1172 u32 dnode, int selector) 1173 { 1174 struct tipc_link_entry *le = NULL; 1175 struct tipc_node *n; 1176 struct sk_buff_head xmitq; 1177 int bearer_id = -1; 1178 int rc = -EHOSTUNREACH; 1179 1180 __skb_queue_head_init(&xmitq); 1181 n = tipc_node_find(net, dnode); 1182 if (likely(n)) { 1183 tipc_node_read_lock(n); 1184 bearer_id = n->active_links[selector & 1]; 1185 if (bearer_id >= 0) { 1186 le = &n->links[bearer_id]; 1187 
spin_lock_bh(&le->lock);
			rc = tipc_link_xmit(le->link, list, &xmitq);
			spin_unlock_bh(&le->lock);
		}
		tipc_node_read_unlock(n);
		if (likely(!rc))
			tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
		else if (rc == -ENOBUFS)
			tipc_node_link_down(n, bearer_id, false);
		tipc_node_put(n);
		return rc;
	}

	/* No such peer: deliver locally if the destination is this node */
	if (likely(in_own_node(net, dnode))) {
		tipc_sk_rcv(net, list);
		return 0;
	}
	return rc;
}

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 * TODO: Return real return value, and let callers use
 * tipc_wait_for_sendpkt() where applicable
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;
	int rc;

	skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	rc = tipc_node_xmit(net, &head, dnode, selector);
	/* -ELINKCONG is the only case where the chain was NOT consumed,
	 * so free the buffer here; all errors are deliberately swallowed
	 * (see TODO above).
	 */
	if (rc == -ELINKCONG)
		kfree_skb(skb);
	return 0;
}

/* tipc_node_broadcast - replicate @skb to every known, up, remote node
 *
 * Unicast-replication fallback: a private copy is sent to each peer.
 * Consumes @skb. A failed pskb_copy() silently stops the iteration.
 */
void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
{
	struct sk_buff *txskb;
	struct tipc_node *n;
	u32 dst;

	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!tipc_node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();

	kfree_skb(skb);
}

/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast link reset may happen at reassembly failure */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_BC_ACK) {
		tipc_node_read_lock(n);
		tipc_link_build_ack_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	/* Deliver. 'arrvq' is under inputq2's lock protection.
	 * Lock order inputq2 -> inputq1 must be preserved everywhere
	 * these two queues are nested.
	 */
	if (!skb_queue_empty(&be->inputq1)) {
		spin_lock_bh(&be->inputq2.lock);
		spin_lock_bh(&be->inputq1.lock);
		skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
		spin_unlock_bh(&be->inputq1.lock);
		spin_unlock_bh(&be->inputq2.lock);
		tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
	}
	tipc_node_put(n);
}

/**
 * tipc_node_check_state - check and if necessary update node state
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * Returns true if state is ok, otherwise consumes buffer and returns false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);


	/* Fast path: steady state, no tunnelling in progress */
	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && tipc_link_is_up(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}
		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No synching needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		syncpt = iseqno + exp_pkts - 1;
		if (!tipc_link_is_up(l)) {
			tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
			__tipc_node_link_up(n, bearer_id, xmitq);
		}
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		/* After this block 'pl' is the parallel (old) link and
		 * 'tnl' the tunnel (new) one, regardless of which bearer
		 * this packet arrived on.
		 */
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_node *n;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int bearer_id = b->identity;
	struct tipc_link_entry *le;
	u16 bc_ack = msg_bcast_ack(hdr);
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	/* Ensure message is well-formed */
	if (unlikely(!tipc_msg_validate(skb)))
		goto discard;

	/* Handle arrival of discovery or broadcast packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (unlikely(usr == LINK_CONFIG))
			return tipc_disc_rcv(net, skb, b);
		else
			return tipc_node_bc_rcv(net, skb, bearer_id);
	}

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	/* Ensure broadcast reception is in synch with peer's send state */
	if (unlikely(usr == LINK_PROTOCOL))
		tipc_bcast_sync_rcv(net, n->bc_entry.link, hdr);
	else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
		tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);

	/* Receive packet directly if conditions permit */
	tipc_node_read_lock(n);
	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
		spin_lock_bh(&le->lock);
		if (le->link) {
			rc = tipc_link_rcv(le->link, skb, &xmitq);
			skb = NULL;
		}
		spin_unlock_bh(&le->lock);
	}
	tipc_node_read_unlock(n);

	/* Check/update node state before receiving */
	if (unlikely(skb)) {
		tipc_node_write_lock(n);
		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
			if (le->link) {
				rc = tipc_link_rcv(le->link, skb, &xmitq);
				skb = NULL;
			}
		}
		tipc_node_write_unlock(n);
	}

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id, false);

	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
		tipc_named_rcv(net, &n->bc_entry.namedq);

	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);
discard:
	/* skb is NULL here whenever ownership was passed to the link layer */
	kfree_skb(skb);
}

/* tipc_nl_node_dump - netlink dump callback listing all known nodes.
 * Resumable: cb->args[0] is the 'done' flag, cb->args[1] the address of
 * the node to resume from.
 */
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		/* Skip everything up to and including the resume point */
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}

/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or 0 if no matching link is found.
 * NOTE(review): no node reference is taken here - callers rely on the node
 * staying alive; confirm against the node lifetime rules.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
						const char *link_name,
						unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_read_lock(n);
		for (i = 0; i < MAX_BEARERS; i++) {
			l = n->links[i].link;
			if (l && !strcmp(tipc_link_name(l), link_name)) {
				*bearer_id = i;
				found_node = n;
				break;
			}
		}
		tipc_node_read_unlock(n);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

/* tipc_nl_node_set_link - netlink handler setting link properties
 * (tolerance, priority, window) by link name.
 */
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	/* The broadcast link is handled by the bcast module */
	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if
(props[TIPC_NLA_PROP_TOL]) { 1683 u32 tol; 1684 1685 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); 1686 tipc_link_set_tolerance(link, tol); 1687 } 1688 if (props[TIPC_NLA_PROP_PRIO]) { 1689 u32 prio; 1690 1691 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); 1692 tipc_link_set_prio(link, prio); 1693 } 1694 if (props[TIPC_NLA_PROP_WIN]) { 1695 u32 win; 1696 1697 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); 1698 tipc_link_set_queue_limits(link, win); 1699 } 1700 } 1701 1702 out: 1703 tipc_node_read_unlock(node); 1704 1705 return res; 1706 } 1707 1708 int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info) 1709 { 1710 struct net *net = genl_info_net(info); 1711 struct tipc_nl_msg msg; 1712 char *name; 1713 int err; 1714 1715 msg.portid = info->snd_portid; 1716 msg.seq = info->snd_seq; 1717 1718 if (!info->attrs[TIPC_NLA_LINK_NAME]) 1719 return -EINVAL; 1720 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); 1721 1722 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1723 if (!msg.skb) 1724 return -ENOMEM; 1725 1726 if (strcmp(name, tipc_bclink_name) == 0) { 1727 err = tipc_nl_add_bc_link(net, &msg); 1728 if (err) { 1729 nlmsg_free(msg.skb); 1730 return err; 1731 } 1732 } else { 1733 int bearer_id; 1734 struct tipc_node *node; 1735 struct tipc_link *link; 1736 1737 node = tipc_node_find_by_name(net, name, &bearer_id); 1738 if (!node) 1739 return -EINVAL; 1740 1741 tipc_node_read_lock(node); 1742 link = node->links[bearer_id].link; 1743 if (!link) { 1744 tipc_node_read_unlock(node); 1745 nlmsg_free(msg.skb); 1746 return -EINVAL; 1747 } 1748 1749 err = __tipc_nl_add_link(net, &msg, link, 0); 1750 tipc_node_read_unlock(node); 1751 if (err) { 1752 nlmsg_free(msg.skb); 1753 return err; 1754 } 1755 } 1756 1757 return genlmsg_reply(msg.skb, info); 1758 } 1759 1760 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info) 1761 { 1762 int err; 1763 char *link_name; 1764 unsigned int bearer_id; 1765 struct tipc_link *link; 1766 struct tipc_node 
*node; 1767 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; 1768 struct net *net = sock_net(skb->sk); 1769 struct tipc_link_entry *le; 1770 1771 if (!info->attrs[TIPC_NLA_LINK]) 1772 return -EINVAL; 1773 1774 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, 1775 info->attrs[TIPC_NLA_LINK], 1776 tipc_nl_link_policy); 1777 if (err) 1778 return err; 1779 1780 if (!attrs[TIPC_NLA_LINK_NAME]) 1781 return -EINVAL; 1782 1783 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]); 1784 1785 if (strcmp(link_name, tipc_bclink_name) == 0) { 1786 err = tipc_bclink_reset_stats(net); 1787 if (err) 1788 return err; 1789 return 0; 1790 } 1791 1792 node = tipc_node_find_by_name(net, link_name, &bearer_id); 1793 if (!node) 1794 return -EINVAL; 1795 1796 le = &node->links[bearer_id]; 1797 tipc_node_read_lock(node); 1798 spin_lock_bh(&le->lock); 1799 link = node->links[bearer_id].link; 1800 if (!link) { 1801 spin_unlock_bh(&le->lock); 1802 tipc_node_read_unlock(node); 1803 return -EINVAL; 1804 } 1805 tipc_link_reset_stats(link); 1806 spin_unlock_bh(&le->lock); 1807 tipc_node_read_unlock(node); 1808 return 0; 1809 } 1810 1811 /* Caller should hold node lock */ 1812 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg, 1813 struct tipc_node *node, u32 *prev_link) 1814 { 1815 u32 i; 1816 int err; 1817 1818 for (i = *prev_link; i < MAX_BEARERS; i++) { 1819 *prev_link = i; 1820 1821 if (!node->links[i].link) 1822 continue; 1823 1824 err = __tipc_nl_add_link(net, msg, 1825 node->links[i].link, NLM_F_MULTI); 1826 if (err) 1827 return err; 1828 } 1829 *prev_link = 0; 1830 1831 return 0; 1832 } 1833 1834 int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb) 1835 { 1836 struct net *net = sock_net(skb->sk); 1837 struct tipc_net *tn = net_generic(net, tipc_net_id); 1838 struct tipc_node *node; 1839 struct tipc_nl_msg msg; 1840 u32 prev_node = cb->args[0]; 1841 u32 prev_link = cb->args[1]; 1842 int done = cb->args[2]; 1843 int err; 1844 1845 if (done) 1846 
return 0; 1847 1848 msg.skb = skb; 1849 msg.portid = NETLINK_CB(cb->skb).portid; 1850 msg.seq = cb->nlh->nlmsg_seq; 1851 1852 rcu_read_lock(); 1853 if (prev_node) { 1854 node = tipc_node_find(net, prev_node); 1855 if (!node) { 1856 /* We never set seq or call nl_dump_check_consistent() 1857 * this means that setting prev_seq here will cause the 1858 * consistence check to fail in the netlink callback 1859 * handler. Resulting in the last NLMSG_DONE message 1860 * having the NLM_F_DUMP_INTR flag set. 1861 */ 1862 cb->prev_seq = 1; 1863 goto out; 1864 } 1865 tipc_node_put(node); 1866 1867 list_for_each_entry_continue_rcu(node, &tn->node_list, 1868 list) { 1869 tipc_node_read_lock(node); 1870 err = __tipc_nl_add_node_links(net, &msg, node, 1871 &prev_link); 1872 tipc_node_read_unlock(node); 1873 if (err) 1874 goto out; 1875 1876 prev_node = node->addr; 1877 } 1878 } else { 1879 err = tipc_nl_add_bc_link(net, &msg); 1880 if (err) 1881 goto out; 1882 1883 list_for_each_entry_rcu(node, &tn->node_list, list) { 1884 tipc_node_read_lock(node); 1885 err = __tipc_nl_add_node_links(net, &msg, node, 1886 &prev_link); 1887 tipc_node_read_unlock(node); 1888 if (err) 1889 goto out; 1890 1891 prev_node = node->addr; 1892 } 1893 } 1894 done = 1; 1895 out: 1896 rcu_read_unlock(); 1897 1898 cb->args[0] = prev_node; 1899 cb->args[1] = prev_link; 1900 cb->args[2] = done; 1901 1902 return skb->len; 1903 } 1904