1 /* 2 * net/tipc/node.c: TIPC node management routines 3 * 4 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB 5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
35 */ 36 37 #include "core.h" 38 #include "link.h" 39 #include "node.h" 40 #include "name_distr.h" 41 #include "socket.h" 42 #include "bcast.h" 43 #include "monitor.h" 44 #include "discover.h" 45 #include "netlink.h" 46 #include "trace.h" 47 #include "crypto.h" 48 49 #define INVALID_NODE_SIG 0x10000 50 #define NODE_CLEANUP_AFTER 300000 51 52 /* Flags used to take different actions according to flag type 53 * TIPC_NOTIFY_NODE_DOWN: notify node is down 54 * TIPC_NOTIFY_NODE_UP: notify node is up 55 * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type 56 */ 57 enum { 58 TIPC_NOTIFY_NODE_DOWN = (1 << 3), 59 TIPC_NOTIFY_NODE_UP = (1 << 4), 60 TIPC_NOTIFY_LINK_UP = (1 << 6), 61 TIPC_NOTIFY_LINK_DOWN = (1 << 7) 62 }; 63 64 struct tipc_link_entry { 65 struct tipc_link *link; 66 spinlock_t lock; /* per link */ 67 u32 mtu; 68 struct sk_buff_head inputq; 69 struct tipc_media_addr maddr; 70 }; 71 72 struct tipc_bclink_entry { 73 struct tipc_link *link; 74 struct sk_buff_head inputq1; 75 struct sk_buff_head arrvq; 76 struct sk_buff_head inputq2; 77 struct sk_buff_head namedq; 78 u16 named_rcv_nxt; 79 bool named_open; 80 }; 81 82 /** 83 * struct tipc_node - TIPC node structure 84 * @addr: network address of node 85 * @ref: reference counter to node object 86 * @lock: rwlock governing access to structure 87 * @net: the applicable net namespace 88 * @hash: links to adjacent nodes in unsorted hash chain 89 * @inputq: pointer to input queue containing messages for msg event 90 * @namedq: pointer to name table input queue with name table messages 91 * @active_links: bearer ids of active links, used as index into links[] array 92 * @links: array containing references to all links to node 93 * @action_flags: bit mask of different types of node actions 94 * @state: connectivity state vs peer node 95 * @preliminary: a preliminary node or not 96 * @sync_point: sequence number where synch/failover is finished 97 * @list: links to adjacent nodes in sorted list of cluster's nodes 98 * @working_links: number of working links to node (both active and standby) 99 * @link_cnt: number of links to node 100 * @capabilities: bitmap, indicating peer node's functional capabilities 101 * @signature: node instance identifier 102 * @link_id: local and remote bearer ids of changing link, if any 103 * @publ_list: list of publications 104 * @rcu: rcu struct for tipc_node 105 * @delete_at: indicates the time for deleting a down node 106 * @crypto_rx: RX crypto handler 107 */ 108 struct tipc_node { 109 u32 addr; 110 struct kref kref; 111 rwlock_t lock; 112 struct net *net; 113 struct hlist_node hash; 114 int active_links[2]; 115 struct tipc_link_entry links[MAX_BEARERS]; 116 struct tipc_bclink_entry bc_entry; 117 int action_flags; 118 struct list_head list; 119 int state; 120 bool preliminary; 121 bool failover_sent; 122 u16 sync_point; 123 int link_cnt; 124 u16 working_links; 125 u16 capabilities; 126 u32 signature; 127 u32 link_id; 128 u8 peer_id[16]; 129 char peer_id_string[NODE_ID_STR_LEN]; 130 struct list_head publ_list; 131 struct list_head conn_sks; 132 unsigned long keepalive_intv; 133 struct timer_list timer; 134 struct rcu_head rcu; 135 unsigned long delete_at; 136 struct net *peer_net; 137 u32 peer_hash_mix; 138 #ifdef CONFIG_TIPC_CRYPTO 139 struct tipc_crypto *crypto_rx; 140 #endif 141 }; 142 143 /* Node FSM states and events: 144 */ 145 enum { 146 SELF_DOWN_PEER_DOWN = 0xdd, 147 SELF_UP_PEER_UP = 0xaa, 148 SELF_DOWN_PEER_LEAVING = 0xd1, 149 SELF_UP_PEER_COMING = 0xac, 150 SELF_COMING_PEER_UP = 0xca, 151 
SELF_LEAVING_PEER_DOWN = 0x1d, 152 NODE_FAILINGOVER = 0xf0, 153 NODE_SYNCHING = 0xcc 154 }; 155 156 enum { 157 SELF_ESTABL_CONTACT_EVT = 0xece, 158 SELF_LOST_CONTACT_EVT = 0x1ce, 159 PEER_ESTABL_CONTACT_EVT = 0x9ece, 160 PEER_LOST_CONTACT_EVT = 0x91ce, 161 NODE_FAILOVER_BEGIN_EVT = 0xfbe, 162 NODE_FAILOVER_END_EVT = 0xfee, 163 NODE_SYNCH_BEGIN_EVT = 0xcbe, 164 NODE_SYNCH_END_EVT = 0xcee 165 }; 166 167 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, 168 struct sk_buff_head *xmitq, 169 struct tipc_media_addr **maddr); 170 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, 171 bool delete); 172 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq); 173 static void tipc_node_delete(struct tipc_node *node); 174 static void tipc_node_timeout(struct timer_list *t); 175 static void tipc_node_fsm_evt(struct tipc_node *n, int evt); 176 static struct tipc_node *tipc_node_find(struct net *net, u32 addr); 177 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id); 178 static bool node_is_up(struct tipc_node *n); 179 static void tipc_node_delete_from_list(struct tipc_node *node); 180 181 struct tipc_sock_conn { 182 u32 port; 183 u32 peer_port; 184 u32 peer_node; 185 struct list_head list; 186 }; 187 188 static struct tipc_link *node_active_link(struct tipc_node *n, int sel) 189 { 190 int bearer_id = n->active_links[sel & 1]; 191 192 if (unlikely(bearer_id == INVALID_BEARER_ID)) 193 return NULL; 194 195 return n->links[bearer_id].link; 196 } 197 198 int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected) 199 { 200 struct tipc_node *n; 201 int bearer_id; 202 unsigned int mtu = MAX_MSG_SIZE; 203 204 n = tipc_node_find(net, addr); 205 if (unlikely(!n)) 206 return mtu; 207 208 /* Allow MAX_MSG_SIZE when building connection oriented message 209 * if they are in the same core network 210 */ 211 if (n->peer_net && connected) { 212 tipc_node_put(n); 213 return mtu; 214 } 215 216 bearer_id = n->active_links[sel & 1]; 217 if (likely(bearer_id != INVALID_BEARER_ID)) 218 mtu = n->links[bearer_id].mtu; 219 tipc_node_put(n); 220 return mtu; 221 } 222 223 bool tipc_node_get_id(struct net *net, u32 addr, u8 *id) 224 { 225 u8 *own_id = tipc_own_id(net); 226 struct tipc_node *n; 227 228 if (!own_id) 229 return true; 230 231 if (addr == tipc_own_addr(net)) { 232 memcpy(id, own_id, TIPC_NODEID_LEN); 233 return true; 234 } 235 n = tipc_node_find(net, addr); 236 if (!n) 237 return false; 238 239 memcpy(id, &n->peer_id, TIPC_NODEID_LEN); 240 tipc_node_put(n); 241 return true; 242 } 243 244 u16 tipc_node_get_capabilities(struct net *net, u32 addr) 245 { 246 struct tipc_node *n; 247 u16 caps; 248 249 n = tipc_node_find(net, addr); 250 if (unlikely(!n)) 251 return TIPC_NODE_CAPABILITIES; 252 caps = n->capabilities; 253 tipc_node_put(n); 254 return caps; 255 } 256 257 u32 tipc_node_get_addr(struct tipc_node *node) 258 { 259 return (node) ? node->addr : 0; 260 } 261 262 char *tipc_node_get_id_str(struct tipc_node *node) 263 { 264 return node->peer_id_string; 265 } 266 267 #ifdef CONFIG_TIPC_CRYPTO 268 /** 269 * tipc_node_crypto_rx - Retrieve crypto RX handle from node 270 * Note: node ref counter must be held first! 271 */ 272 struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n) 273 { 274 return (__n) ? 
__n->crypto_rx : NULL; 275 } 276 277 struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos) 278 { 279 return container_of(pos, struct tipc_node, list)->crypto_rx; 280 } 281 282 struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr) 283 { 284 struct tipc_node *n; 285 286 n = tipc_node_find(net, addr); 287 return (n) ? n->crypto_rx : NULL; 288 } 289 #endif 290 291 static void tipc_node_free(struct rcu_head *rp) 292 { 293 struct tipc_node *n = container_of(rp, struct tipc_node, rcu); 294 295 #ifdef CONFIG_TIPC_CRYPTO 296 tipc_crypto_stop(&n->crypto_rx); 297 #endif 298 kfree(n); 299 } 300 301 static void tipc_node_kref_release(struct kref *kref) 302 { 303 struct tipc_node *n = container_of(kref, struct tipc_node, kref); 304 305 kfree(n->bc_entry.link); 306 call_rcu(&n->rcu, tipc_node_free); 307 } 308 309 void tipc_node_put(struct tipc_node *node) 310 { 311 kref_put(&node->kref, tipc_node_kref_release); 312 } 313 314 void tipc_node_get(struct tipc_node *node) 315 { 316 kref_get(&node->kref); 317 } 318 319 /* 320 * tipc_node_find - locate specified node object, if it exists 321 */ 322 static struct tipc_node *tipc_node_find(struct net *net, u32 addr) 323 { 324 struct tipc_net *tn = tipc_net(net); 325 struct tipc_node *node; 326 unsigned int thash = tipc_hashfn(addr); 327 328 rcu_read_lock(); 329 hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) { 330 if (node->addr != addr || node->preliminary) 331 continue; 332 if (!kref_get_unless_zero(&node->kref)) 333 node = NULL; 334 break; 335 } 336 rcu_read_unlock(); 337 return node; 338 } 339 340 /* tipc_node_find_by_id - locate specified node object by its 128-bit id 341 * Note: this function is called only when a discovery request failed 342 * to find the node by its 32-bit id, and is not time critical 343 */ 344 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id) 345 { 346 struct tipc_net *tn = tipc_net(net); 347 struct tipc_node *n; 348 bool found = false; 349 350 rcu_read_lock(); 351 list_for_each_entry_rcu(n, &tn->node_list, list) { 352 read_lock_bh(&n->lock); 353 if (!memcmp(id, n->peer_id, 16) && 354 kref_get_unless_zero(&n->kref)) 355 found = true; 356 read_unlock_bh(&n->lock); 357 if (found) 358 break; 359 } 360 rcu_read_unlock(); 361 return found ? 
n : NULL; 362 } 363 364 static void tipc_node_read_lock(struct tipc_node *n) 365 { 366 read_lock_bh(&n->lock); 367 } 368 369 static void tipc_node_read_unlock(struct tipc_node *n) 370 { 371 read_unlock_bh(&n->lock); 372 } 373 374 static void tipc_node_write_lock(struct tipc_node *n) 375 { 376 write_lock_bh(&n->lock); 377 } 378 379 static void tipc_node_write_unlock_fast(struct tipc_node *n) 380 { 381 write_unlock_bh(&n->lock); 382 } 383 384 static void tipc_node_write_unlock(struct tipc_node *n) 385 { 386 struct net *net = n->net; 387 u32 addr = 0; 388 u32 flags = n->action_flags; 389 u32 link_id = 0; 390 u32 bearer_id; 391 struct list_head *publ_list; 392 393 if (likely(!flags)) { 394 write_unlock_bh(&n->lock); 395 return; 396 } 397 398 addr = n->addr; 399 link_id = n->link_id; 400 bearer_id = link_id & 0xffff; 401 publ_list = &n->publ_list; 402 403 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP | 404 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP); 405 406 write_unlock_bh(&n->lock); 407 408 if (flags & TIPC_NOTIFY_NODE_DOWN) 409 tipc_publ_notify(net, publ_list, addr, n->capabilities); 410 411 if (flags & TIPC_NOTIFY_NODE_UP) 412 tipc_named_node_up(net, addr, n->capabilities); 413 414 if (flags & TIPC_NOTIFY_LINK_UP) { 415 tipc_mon_peer_up(net, addr, bearer_id); 416 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr, 417 TIPC_NODE_SCOPE, link_id, link_id); 418 } 419 if (flags & TIPC_NOTIFY_LINK_DOWN) { 420 tipc_mon_peer_down(net, addr, bearer_id); 421 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr, 422 addr, link_id); 423 } 424 } 425 426 static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes) 427 { 428 int net_id = tipc_netid(n->net); 429 struct tipc_net *tn_peer; 430 struct net *tmp; 431 u32 hash_chk; 432 433 if (n->peer_net) 434 return; 435 436 for_each_net_rcu(tmp) { 437 tn_peer = tipc_net(tmp); 438 if (!tn_peer) 439 continue; 440 /* Integrity checking whether node exists in namespace or not */ 441 if (tn_peer->net_id != net_id) 442 continue; 443 if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN)) 444 continue; 445 hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random); 446 if (hash_mixes ^ hash_chk) 447 continue; 448 n->peer_net = tmp; 449 n->peer_hash_mix = hash_mixes; 450 break; 451 } 452 } 453 454 struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id, 455 u16 capabilities, u32 hash_mixes, 456 bool preliminary) 457 { 458 struct tipc_net *tn = net_generic(net, tipc_net_id); 459 struct tipc_node *n, *temp_node; 460 struct tipc_link *l; 461 unsigned long intv; 462 int bearer_id; 463 int i; 464 465 spin_lock_bh(&tn->node_list_lock); 466 n = tipc_node_find(net, addr) ?: 467 tipc_node_find_by_id(net, peer_id); 468 if (n) { 469 if (!n->preliminary) 470 goto update; 471 if (preliminary) 472 goto exit; 473 /* A preliminary node becomes "real" now, refresh its data */ 474 tipc_node_write_lock(n); 475 n->preliminary = false; 476 n->addr = addr; 477 hlist_del_rcu(&n->hash); 478 hlist_add_head_rcu(&n->hash, 479 &tn->node_htable[tipc_hashfn(addr)]); 480 list_del_rcu(&n->list); 481 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 482 if (n->addr < temp_node->addr) 483 break; 484 } 485 list_add_tail_rcu(&n->list, &temp_node->list); 486 tipc_node_write_unlock_fast(n); 487 488 update: 489 if (n->peer_hash_mix ^ hash_mixes) 490 tipc_node_assign_peer_net(n, hash_mixes); 491 if (n->capabilities == capabilities) 492 goto exit; 493 /* Same node may come back with new capabilities */ 494 tipc_node_write_lock(n); 495 n->capabilities = 
capabilities; 496 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { 497 l = n->links[bearer_id].link; 498 if (l) 499 tipc_link_update_caps(l, capabilities); 500 } 501 tipc_node_write_unlock_fast(n); 502 503 /* Calculate cluster capabilities */ 504 tn->capabilities = TIPC_NODE_CAPABILITIES; 505 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 506 tn->capabilities &= temp_node->capabilities; 507 } 508 509 tipc_bcast_toggle_rcast(net, 510 (tn->capabilities & TIPC_BCAST_RCAST)); 511 512 goto exit; 513 } 514 n = kzalloc(sizeof(*n), GFP_ATOMIC); 515 if (!n) { 516 pr_warn("Node creation failed, no memory\n"); 517 goto exit; 518 } 519 tipc_nodeid2string(n->peer_id_string, peer_id); 520 #ifdef CONFIG_TIPC_CRYPTO 521 if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) { 522 pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string); 523 kfree(n); 524 n = NULL; 525 goto exit; 526 } 527 #endif 528 n->addr = addr; 529 n->preliminary = preliminary; 530 memcpy(&n->peer_id, peer_id, 16); 531 n->net = net; 532 n->peer_net = NULL; 533 n->peer_hash_mix = 0; 534 /* Assign kernel local namespace if exists */ 535 tipc_node_assign_peer_net(n, hash_mixes); 536 n->capabilities = capabilities; 537 kref_init(&n->kref); 538 rwlock_init(&n->lock); 539 INIT_HLIST_NODE(&n->hash); 540 INIT_LIST_HEAD(&n->list); 541 INIT_LIST_HEAD(&n->publ_list); 542 INIT_LIST_HEAD(&n->conn_sks); 543 skb_queue_head_init(&n->bc_entry.namedq); 544 skb_queue_head_init(&n->bc_entry.inputq1); 545 __skb_queue_head_init(&n->bc_entry.arrvq); 546 skb_queue_head_init(&n->bc_entry.inputq2); 547 for (i = 0; i < MAX_BEARERS; i++) 548 spin_lock_init(&n->links[i].lock); 549 n->state = SELF_DOWN_PEER_LEAVING; 550 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER); 551 n->signature = INVALID_NODE_SIG; 552 n->active_links[0] = INVALID_BEARER_ID; 553 n->active_links[1] = INVALID_BEARER_ID; 554 n->bc_entry.link = NULL; 555 tipc_node_get(n); 556 timer_setup(&n->timer, tipc_node_timeout, 0); 557 /* Start a slow timer anyway, crypto needs it */ 558 n->keepalive_intv = 10000; 559 intv = jiffies + msecs_to_jiffies(n->keepalive_intv); 560 if (!mod_timer(&n->timer, intv)) 561 tipc_node_get(n); 562 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]); 563 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 564 if (n->addr < temp_node->addr) 565 break; 566 } 567 list_add_tail_rcu(&n->list, &temp_node->list); 568 /* Calculate cluster capabilities */ 569 tn->capabilities = TIPC_NODE_CAPABILITIES; 570 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 571 tn->capabilities &= temp_node->capabilities; 572 } 573 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST)); 574 trace_tipc_node_create(n, true, " "); 575 exit: 576 spin_unlock_bh(&tn->node_list_lock); 577 return n; 578 } 579 580 static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l) 581 { 582 unsigned long tol = tipc_link_tolerance(l); 583 unsigned long intv = ((tol / 4) > 500) ? 
500 : tol / 4; 584 585 /* Link with lowest tolerance determines timer interval */ 586 if (intv < n->keepalive_intv) 587 n->keepalive_intv = intv; 588 589 /* Ensure link's abort limit corresponds to current tolerance */ 590 tipc_link_set_abort_limit(l, tol / n->keepalive_intv); 591 } 592 593 static void tipc_node_delete_from_list(struct tipc_node *node) 594 { 595 #ifdef CONFIG_TIPC_CRYPTO 596 tipc_crypto_key_flush(node->crypto_rx); 597 #endif 598 list_del_rcu(&node->list); 599 hlist_del_rcu(&node->hash); 600 tipc_node_put(node); 601 } 602 603 static void tipc_node_delete(struct tipc_node *node) 604 { 605 trace_tipc_node_delete(node, true, " "); 606 tipc_node_delete_from_list(node); 607 608 del_timer_sync(&node->timer); 609 tipc_node_put(node); 610 } 611 612 void tipc_node_stop(struct net *net) 613 { 614 struct tipc_net *tn = tipc_net(net); 615 struct tipc_node *node, *t_node; 616 617 spin_lock_bh(&tn->node_list_lock); 618 list_for_each_entry_safe(node, t_node, &tn->node_list, list) 619 tipc_node_delete(node); 620 spin_unlock_bh(&tn->node_list_lock); 621 } 622 623 void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr) 624 { 625 struct tipc_node *n; 626 627 if (in_own_node(net, addr)) 628 return; 629 630 n = tipc_node_find(net, addr); 631 if (!n) { 632 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr); 633 return; 634 } 635 tipc_node_write_lock(n); 636 list_add_tail(subscr, &n->publ_list); 637 tipc_node_write_unlock_fast(n); 638 tipc_node_put(n); 639 } 640 641 void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr) 642 { 643 struct tipc_node *n; 644 645 if (in_own_node(net, addr)) 646 return; 647 648 n = tipc_node_find(net, addr); 649 if (!n) { 650 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr); 651 return; 652 } 653 tipc_node_write_lock(n); 654 list_del_init(subscr); 655 tipc_node_write_unlock_fast(n); 656 tipc_node_put(n); 657 } 658 659 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port) 660 { 661 struct tipc_node *node; 662 struct tipc_sock_conn *conn; 663 int err = 0; 664 665 if (in_own_node(net, dnode)) 666 return 0; 667 668 node = tipc_node_find(net, dnode); 669 if (!node) { 670 pr_warn("Connecting sock to node 0x%x failed\n", dnode); 671 return -EHOSTUNREACH; 672 } 673 conn = kmalloc(sizeof(*conn), GFP_ATOMIC); 674 if (!conn) { 675 err = -EHOSTUNREACH; 676 goto exit; 677 } 678 conn->peer_node = dnode; 679 conn->port = port; 680 conn->peer_port = peer_port; 681 682 tipc_node_write_lock(node); 683 list_add_tail(&conn->list, &node->conn_sks); 684 tipc_node_write_unlock(node); 685 exit: 686 tipc_node_put(node); 687 return err; 688 } 689 690 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port) 691 { 692 struct tipc_node *node; 693 struct tipc_sock_conn *conn, *safe; 694 695 if (in_own_node(net, dnode)) 696 return; 697 698 node = tipc_node_find(net, dnode); 699 if (!node) 700 return; 701 702 tipc_node_write_lock(node); 703 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) { 704 if (port != conn->port) 705 continue; 706 list_del(&conn->list); 707 kfree(conn); 708 } 709 tipc_node_write_unlock(node); 710 tipc_node_put(node); 711 } 712 713 static void tipc_node_clear_links(struct tipc_node *node) 714 { 715 int i; 716 717 for (i = 0; i < MAX_BEARERS; i++) { 718 struct tipc_link_entry *le = &node->links[i]; 719 720 if (le->link) { 721 kfree(le->link); 722 le->link = NULL; 723 node->link_cnt--; 724 } 725 } 726 } 727 728 /* tipc_node_cleanup - delete nodes that does not 729 * 
have active links for NODE_CLEANUP_AFTER time
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_node *temp_node;
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* If lock held by tipc_node_stop() the node will be deleted anyway */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);

	if (!deleted) {
		spin_unlock_bh(&tn->node_list_lock);
		return deleted;
	}

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(peer->net,
				(tn->capabilities & TIPC_BCAST_RCAST));
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	trace_tipc_node_timeout(n, false, " ");
	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Node was deleted; drop the reference held by the timer */
		tipc_node_put(n);
		return;
	}

#ifdef CONFIG_TIPC_CRYPTO
	/* Take any crypto key related actions first */
	tipc_crypto_timeout(n->crypto_rx);
#endif
	__skb_queue_head_init(&xmitq);

	/* Start from a large keepalive interval (10 seconds); it is then
	 * recalculated below from the lowest link tolerance
	 */
	tipc_node_read_lock(n);
	n->keepalive_intv = 10000;
	tipc_node_read_unlock(n);
	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}

/**
 * __tipc_node_link_up - handle addition of link
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
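 *
 * The two active_links[] entries act as load-sharing slots: the first link
 * to come up takes both slots, a later link of higher priority takes both
 * slots over, a link of equal priority shares the load by taking only the
 * second slot, and a lower-priority link remains standby.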
819 */ 820 static void __tipc_node_link_up(struct tipc_node *n, int bearer_id, 821 struct sk_buff_head *xmitq) 822 { 823 int *slot0 = &n->active_links[0]; 824 int *slot1 = &n->active_links[1]; 825 struct tipc_link *ol = node_active_link(n, 0); 826 struct tipc_link *nl = n->links[bearer_id].link; 827 828 if (!nl || tipc_link_is_up(nl)) 829 return; 830 831 tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT); 832 if (!tipc_link_is_up(nl)) 833 return; 834 835 n->working_links++; 836 n->action_flags |= TIPC_NOTIFY_LINK_UP; 837 n->link_id = tipc_link_id(nl); 838 839 /* Leave room for tunnel header when returning 'mtu' to users: */ 840 n->links[bearer_id].mtu = tipc_link_mss(nl); 841 842 tipc_bearer_add_dest(n->net, bearer_id, n->addr); 843 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id); 844 845 pr_debug("Established link <%s> on network plane %c\n", 846 tipc_link_name(nl), tipc_link_plane(nl)); 847 trace_tipc_node_link_up(n, true, " "); 848 849 /* Ensure that a STATE message goes first */ 850 tipc_link_build_state_msg(nl, xmitq); 851 852 /* First link? => give it both slots */ 853 if (!ol) { 854 *slot0 = bearer_id; 855 *slot1 = bearer_id; 856 tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT); 857 n->action_flags |= TIPC_NOTIFY_NODE_UP; 858 tipc_link_set_active(nl, true); 859 tipc_bcast_add_peer(n->net, nl, xmitq); 860 return; 861 } 862 863 /* Second link => redistribute slots */ 864 if (tipc_link_prio(nl) > tipc_link_prio(ol)) { 865 pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol)); 866 *slot0 = bearer_id; 867 *slot1 = bearer_id; 868 tipc_link_set_active(nl, true); 869 tipc_link_set_active(ol, false); 870 } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) { 871 tipc_link_set_active(nl, true); 872 *slot1 = bearer_id; 873 } else { 874 pr_debug("New link <%s> is standby\n", tipc_link_name(nl)); 875 } 876 877 /* Prepare synchronization with first link */ 878 tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq); 879 } 880 881 /** 882 * tipc_node_link_up - handle addition of link 883 * 884 * Link becomes active (alone or shared) or standby, depending on its priority. 885 */ 886 static void tipc_node_link_up(struct tipc_node *n, int bearer_id, 887 struct sk_buff_head *xmitq) 888 { 889 struct tipc_media_addr *maddr; 890 891 tipc_node_write_lock(n); 892 __tipc_node_link_up(n, bearer_id, xmitq); 893 maddr = &n->links[bearer_id].maddr; 894 tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n); 895 tipc_node_write_unlock(n); 896 } 897 898 /** 899 * tipc_node_link_failover() - start failover in case "half-failover" 900 * 901 * This function is only called in a very special situation where link 902 * failover can be already started on peer node but not on this node. 903 * This can happen when e.g. 904 * 1. Both links <1A-2A>, <1B-2B> down 905 * 2. Link endpoint 2A up, but 1A still down (e.g. due to network 906 * disturbance, wrong session, etc.) 907 * 3. Link <1B-2B> up 908 * 4. Link endpoint 2A down (e.g. due to link tolerance timeout) 909 * 5. Node 2 starts failover onto link <1B-2B> 910 * 911 * ==> Node 1 does never start link/node failover! 
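 * This function lets the local node join a failover that the peer has
 * already begun: it queues a FAILOVER_MSG on the tunnel link and moves the
 * node FSM into NODE_FAILINGOVER, so that tunnelled packets from the peer
 * are accepted.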
912 * 913 * @n: tipc node structure 914 * @l: link peer endpoint failingover (- can be NULL) 915 * @tnl: tunnel link 916 * @xmitq: queue for messages to be xmited on tnl link later 917 */ 918 static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l, 919 struct tipc_link *tnl, 920 struct sk_buff_head *xmitq) 921 { 922 /* Avoid to be "self-failover" that can never end */ 923 if (!tipc_link_is_up(tnl)) 924 return; 925 926 /* Don't rush, failure link may be in the process of resetting */ 927 if (l && !tipc_link_is_reset(l)) 928 return; 929 930 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT); 931 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); 932 933 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1); 934 tipc_link_failover_prepare(l, tnl, xmitq); 935 936 if (l) 937 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); 938 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT); 939 } 940 941 /** 942 * __tipc_node_link_down - handle loss of link 943 */ 944 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, 945 struct sk_buff_head *xmitq, 946 struct tipc_media_addr **maddr) 947 { 948 struct tipc_link_entry *le = &n->links[*bearer_id]; 949 int *slot0 = &n->active_links[0]; 950 int *slot1 = &n->active_links[1]; 951 int i, highest = 0, prio; 952 struct tipc_link *l, *_l, *tnl; 953 954 l = n->links[*bearer_id].link; 955 if (!l || tipc_link_is_reset(l)) 956 return; 957 958 n->working_links--; 959 n->action_flags |= TIPC_NOTIFY_LINK_DOWN; 960 n->link_id = tipc_link_id(l); 961 962 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr); 963 964 pr_debug("Lost link <%s> on network plane %c\n", 965 tipc_link_name(l), tipc_link_plane(l)); 966 967 /* Select new active link if any available */ 968 *slot0 = INVALID_BEARER_ID; 969 *slot1 = INVALID_BEARER_ID; 970 for (i = 0; i < MAX_BEARERS; i++) { 971 _l = n->links[i].link; 972 if (!_l || !tipc_link_is_up(_l)) 973 continue; 974 if (_l == l) 975 continue; 976 prio = tipc_link_prio(_l); 977 if (prio < highest) 978 continue; 979 if (prio > highest) { 980 highest = prio; 981 *slot0 = i; 982 *slot1 = i; 983 continue; 984 } 985 *slot1 = i; 986 } 987 988 if (!node_is_up(n)) { 989 if (tipc_link_peer_is_down(l)) 990 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT); 991 tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT); 992 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!"); 993 tipc_link_fsm_evt(l, LINK_RESET_EVT); 994 tipc_link_reset(l); 995 tipc_link_build_reset_msg(l, xmitq); 996 *maddr = &n->links[*bearer_id].maddr; 997 node_lost_contact(n, &le->inputq); 998 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id); 999 return; 1000 } 1001 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id); 1002 1003 /* There is still a working link => initiate failover */ 1004 *bearer_id = n->active_links[0]; 1005 tnl = n->links[*bearer_id].link; 1006 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT); 1007 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); 1008 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1); 1009 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq); 1010 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!"); 1011 tipc_link_reset(l); 1012 tipc_link_fsm_evt(l, LINK_RESET_EVT); 1013 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); 1014 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT); 1015 *maddr = &n->links[*bearer_id].maddr; 1016 } 1017 1018 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) 1019 { 1020 struct tipc_link_entry *le = &n->links[bearer_id]; 1021 struct tipc_media_addr *maddr = NULL; 1022 struct tipc_link *l = le->link; 
1023 int old_bearer_id = bearer_id; 1024 struct sk_buff_head xmitq; 1025 1026 if (!l) 1027 return; 1028 1029 __skb_queue_head_init(&xmitq); 1030 1031 tipc_node_write_lock(n); 1032 if (!tipc_link_is_establishing(l)) { 1033 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); 1034 } else { 1035 /* Defuse pending tipc_node_link_up() */ 1036 tipc_link_reset(l); 1037 tipc_link_fsm_evt(l, LINK_RESET_EVT); 1038 } 1039 if (delete) { 1040 kfree(l); 1041 le->link = NULL; 1042 n->link_cnt--; 1043 } 1044 trace_tipc_node_link_down(n, true, "node link down or deleted!"); 1045 tipc_node_write_unlock(n); 1046 if (delete) 1047 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id); 1048 if (!skb_queue_empty(&xmitq)) 1049 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n); 1050 tipc_sk_rcv(n->net, &le->inputq); 1051 } 1052 1053 static bool node_is_up(struct tipc_node *n) 1054 { 1055 return n->active_links[0] != INVALID_BEARER_ID; 1056 } 1057 1058 bool tipc_node_is_up(struct net *net, u32 addr) 1059 { 1060 struct tipc_node *n; 1061 bool retval = false; 1062 1063 if (in_own_node(net, addr)) 1064 return true; 1065 1066 n = tipc_node_find(net, addr); 1067 if (!n) 1068 return false; 1069 retval = node_is_up(n); 1070 tipc_node_put(n); 1071 return retval; 1072 } 1073 1074 static u32 tipc_node_suggest_addr(struct net *net, u32 addr) 1075 { 1076 struct tipc_node *n; 1077 1078 addr ^= tipc_net(net)->random; 1079 while ((n = tipc_node_find(net, addr))) { 1080 tipc_node_put(n); 1081 addr++; 1082 } 1083 return addr; 1084 } 1085 1086 /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not 1087 * Returns suggested address if any, otherwise 0 1088 */ 1089 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr) 1090 { 1091 struct tipc_net *tn = tipc_net(net); 1092 struct tipc_node *n; 1093 bool preliminary; 1094 u32 sugg_addr; 1095 1096 /* Suggest new address if some other peer is using this one */ 1097 n = tipc_node_find(net, addr); 1098 if (n) { 1099 if (!memcmp(n->peer_id, id, NODE_ID_LEN)) 1100 addr = 0; 1101 tipc_node_put(n); 1102 if (!addr) 1103 return 0; 1104 return tipc_node_suggest_addr(net, addr); 1105 } 1106 1107 /* Suggest previously used address if peer is known */ 1108 n = tipc_node_find_by_id(net, id); 1109 if (n) { 1110 sugg_addr = n->addr; 1111 preliminary = n->preliminary; 1112 tipc_node_put(n); 1113 if (!preliminary) 1114 return sugg_addr; 1115 } 1116 1117 /* Even this node may be in conflict */ 1118 if (tn->trial_addr == addr) 1119 return tipc_node_suggest_addr(net, addr); 1120 1121 return 0; 1122 } 1123 1124 void tipc_node_check_dest(struct net *net, u32 addr, 1125 u8 *peer_id, struct tipc_bearer *b, 1126 u16 capabilities, u32 signature, u32 hash_mixes, 1127 struct tipc_media_addr *maddr, 1128 bool *respond, bool *dupl_addr) 1129 { 1130 struct tipc_node *n; 1131 struct tipc_link *l, *snd_l; 1132 struct tipc_link_entry *le; 1133 bool addr_match = false; 1134 bool sign_match = false; 1135 bool link_up = false; 1136 bool accept_addr = false; 1137 bool reset = true; 1138 char *if_name; 1139 unsigned long intv; 1140 u16 session; 1141 1142 *dupl_addr = false; 1143 *respond = false; 1144 1145 n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes, 1146 false); 1147 if (!n) 1148 return; 1149 1150 tipc_node_write_lock(n); 1151 if (unlikely(!n->bc_entry.link)) { 1152 snd_l = tipc_bc_sndlink(net); 1153 if (!tipc_link_bc_create(net, tipc_own_addr(net), 1154 addr, peer_id, U16_MAX, 1155 tipc_link_min_win(snd_l), 1156 tipc_link_max_win(snd_l), 1157 n->capabilities, 1158 
				 &n->bc_entry.inputq1,
				 &n->bc_entry.namedq, snd_l,
				 &n->bc_entry.link)) {
			pr_warn("Broadcast rcv link creation failed, no mem\n");
			tipc_node_write_unlock_fast(n);
			tipc_node_put(n);
			return;
		}
	}

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Do nothing. */
		reset = false;
		/* Peer node is not a container/local namespace */
		if (!n->peer_hash_mix)
			n->peer_hash_mix = hash_mixes;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 * Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
1232 */ 1233 n->signature = signature; 1234 accept_addr = true; 1235 *respond = true; 1236 } 1237 1238 if (!accept_addr) 1239 goto exit; 1240 1241 /* Now create new link if not already existing */ 1242 if (!l) { 1243 if (n->link_cnt == 2) 1244 goto exit; 1245 1246 if_name = strchr(b->name, ':') + 1; 1247 get_random_bytes(&session, sizeof(u16)); 1248 if (!tipc_link_create(net, if_name, b->identity, b->tolerance, 1249 b->net_plane, b->mtu, b->priority, 1250 b->min_win, b->max_win, session, 1251 tipc_own_addr(net), addr, peer_id, 1252 n->capabilities, 1253 tipc_bc_sndlink(n->net), n->bc_entry.link, 1254 &le->inputq, 1255 &n->bc_entry.namedq, &l)) { 1256 *respond = false; 1257 goto exit; 1258 } 1259 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!"); 1260 tipc_link_reset(l); 1261 tipc_link_fsm_evt(l, LINK_RESET_EVT); 1262 if (n->state == NODE_FAILINGOVER) 1263 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); 1264 le->link = l; 1265 n->link_cnt++; 1266 tipc_node_calculate_timer(n, l); 1267 if (n->link_cnt == 1) { 1268 intv = jiffies + msecs_to_jiffies(n->keepalive_intv); 1269 if (!mod_timer(&n->timer, intv)) 1270 tipc_node_get(n); 1271 } 1272 } 1273 memcpy(&le->maddr, maddr, sizeof(*maddr)); 1274 exit: 1275 tipc_node_write_unlock(n); 1276 if (reset && l && !tipc_link_is_reset(l)) 1277 tipc_node_link_down(n, b->identity, false); 1278 tipc_node_put(n); 1279 } 1280 1281 void tipc_node_delete_links(struct net *net, int bearer_id) 1282 { 1283 struct tipc_net *tn = net_generic(net, tipc_net_id); 1284 struct tipc_node *n; 1285 1286 rcu_read_lock(); 1287 list_for_each_entry_rcu(n, &tn->node_list, list) { 1288 tipc_node_link_down(n, bearer_id, true); 1289 } 1290 rcu_read_unlock(); 1291 } 1292 1293 static void tipc_node_reset_links(struct tipc_node *n) 1294 { 1295 int i; 1296 1297 pr_warn("Resetting all links to %x\n", n->addr); 1298 1299 trace_tipc_node_reset_links(n, true, " "); 1300 for (i = 0; i < MAX_BEARERS; i++) { 1301 tipc_node_link_down(n, i, false); 1302 } 1303 } 1304 1305 /* tipc_node_fsm_evt - node finite state machine 1306 * Determines when contact is allowed with peer node 1307 */ 1308 static void tipc_node_fsm_evt(struct tipc_node *n, int evt) 1309 { 1310 int state = n->state; 1311 1312 switch (state) { 1313 case SELF_DOWN_PEER_DOWN: 1314 switch (evt) { 1315 case SELF_ESTABL_CONTACT_EVT: 1316 state = SELF_UP_PEER_COMING; 1317 break; 1318 case PEER_ESTABL_CONTACT_EVT: 1319 state = SELF_COMING_PEER_UP; 1320 break; 1321 case SELF_LOST_CONTACT_EVT: 1322 case PEER_LOST_CONTACT_EVT: 1323 break; 1324 case NODE_SYNCH_END_EVT: 1325 case NODE_SYNCH_BEGIN_EVT: 1326 case NODE_FAILOVER_BEGIN_EVT: 1327 case NODE_FAILOVER_END_EVT: 1328 default: 1329 goto illegal_evt; 1330 } 1331 break; 1332 case SELF_UP_PEER_UP: 1333 switch (evt) { 1334 case SELF_LOST_CONTACT_EVT: 1335 state = SELF_DOWN_PEER_LEAVING; 1336 break; 1337 case PEER_LOST_CONTACT_EVT: 1338 state = SELF_LEAVING_PEER_DOWN; 1339 break; 1340 case NODE_SYNCH_BEGIN_EVT: 1341 state = NODE_SYNCHING; 1342 break; 1343 case NODE_FAILOVER_BEGIN_EVT: 1344 state = NODE_FAILINGOVER; 1345 break; 1346 case SELF_ESTABL_CONTACT_EVT: 1347 case PEER_ESTABL_CONTACT_EVT: 1348 case NODE_SYNCH_END_EVT: 1349 case NODE_FAILOVER_END_EVT: 1350 break; 1351 default: 1352 goto illegal_evt; 1353 } 1354 break; 1355 case SELF_DOWN_PEER_LEAVING: 1356 switch (evt) { 1357 case PEER_LOST_CONTACT_EVT: 1358 state = SELF_DOWN_PEER_DOWN; 1359 break; 1360 case SELF_ESTABL_CONTACT_EVT: 1361 case PEER_ESTABL_CONTACT_EVT: 1362 case SELF_LOST_CONTACT_EVT: 1363 break; 1364 case 
NODE_SYNCH_END_EVT: 1365 case NODE_SYNCH_BEGIN_EVT: 1366 case NODE_FAILOVER_BEGIN_EVT: 1367 case NODE_FAILOVER_END_EVT: 1368 default: 1369 goto illegal_evt; 1370 } 1371 break; 1372 case SELF_UP_PEER_COMING: 1373 switch (evt) { 1374 case PEER_ESTABL_CONTACT_EVT: 1375 state = SELF_UP_PEER_UP; 1376 break; 1377 case SELF_LOST_CONTACT_EVT: 1378 state = SELF_DOWN_PEER_DOWN; 1379 break; 1380 case SELF_ESTABL_CONTACT_EVT: 1381 case PEER_LOST_CONTACT_EVT: 1382 case NODE_SYNCH_END_EVT: 1383 case NODE_FAILOVER_BEGIN_EVT: 1384 break; 1385 case NODE_SYNCH_BEGIN_EVT: 1386 case NODE_FAILOVER_END_EVT: 1387 default: 1388 goto illegal_evt; 1389 } 1390 break; 1391 case SELF_COMING_PEER_UP: 1392 switch (evt) { 1393 case SELF_ESTABL_CONTACT_EVT: 1394 state = SELF_UP_PEER_UP; 1395 break; 1396 case PEER_LOST_CONTACT_EVT: 1397 state = SELF_DOWN_PEER_DOWN; 1398 break; 1399 case SELF_LOST_CONTACT_EVT: 1400 case PEER_ESTABL_CONTACT_EVT: 1401 break; 1402 case NODE_SYNCH_END_EVT: 1403 case NODE_SYNCH_BEGIN_EVT: 1404 case NODE_FAILOVER_BEGIN_EVT: 1405 case NODE_FAILOVER_END_EVT: 1406 default: 1407 goto illegal_evt; 1408 } 1409 break; 1410 case SELF_LEAVING_PEER_DOWN: 1411 switch (evt) { 1412 case SELF_LOST_CONTACT_EVT: 1413 state = SELF_DOWN_PEER_DOWN; 1414 break; 1415 case SELF_ESTABL_CONTACT_EVT: 1416 case PEER_ESTABL_CONTACT_EVT: 1417 case PEER_LOST_CONTACT_EVT: 1418 break; 1419 case NODE_SYNCH_END_EVT: 1420 case NODE_SYNCH_BEGIN_EVT: 1421 case NODE_FAILOVER_BEGIN_EVT: 1422 case NODE_FAILOVER_END_EVT: 1423 default: 1424 goto illegal_evt; 1425 } 1426 break; 1427 case NODE_FAILINGOVER: 1428 switch (evt) { 1429 case SELF_LOST_CONTACT_EVT: 1430 state = SELF_DOWN_PEER_LEAVING; 1431 break; 1432 case PEER_LOST_CONTACT_EVT: 1433 state = SELF_LEAVING_PEER_DOWN; 1434 break; 1435 case NODE_FAILOVER_END_EVT: 1436 state = SELF_UP_PEER_UP; 1437 break; 1438 case NODE_FAILOVER_BEGIN_EVT: 1439 case SELF_ESTABL_CONTACT_EVT: 1440 case PEER_ESTABL_CONTACT_EVT: 1441 break; 1442 case NODE_SYNCH_BEGIN_EVT: 1443 case NODE_SYNCH_END_EVT: 1444 default: 1445 goto illegal_evt; 1446 } 1447 break; 1448 case NODE_SYNCHING: 1449 switch (evt) { 1450 case SELF_LOST_CONTACT_EVT: 1451 state = SELF_DOWN_PEER_LEAVING; 1452 break; 1453 case PEER_LOST_CONTACT_EVT: 1454 state = SELF_LEAVING_PEER_DOWN; 1455 break; 1456 case NODE_SYNCH_END_EVT: 1457 state = SELF_UP_PEER_UP; 1458 break; 1459 case NODE_FAILOVER_BEGIN_EVT: 1460 state = NODE_FAILINGOVER; 1461 break; 1462 case NODE_SYNCH_BEGIN_EVT: 1463 case SELF_ESTABL_CONTACT_EVT: 1464 case PEER_ESTABL_CONTACT_EVT: 1465 break; 1466 case NODE_FAILOVER_END_EVT: 1467 default: 1468 goto illegal_evt; 1469 } 1470 break; 1471 default: 1472 pr_err("Unknown node fsm state %x\n", state); 1473 break; 1474 } 1475 trace_tipc_node_fsm(n->peer_id, n->state, state, evt); 1476 n->state = state; 1477 return; 1478 1479 illegal_evt: 1480 pr_err("Illegal node fsm evt %x in state %x\n", evt, state); 1481 trace_tipc_node_fsm(n->peer_id, n->state, state, evt); 1482 } 1483 1484 static void node_lost_contact(struct tipc_node *n, 1485 struct sk_buff_head *inputq) 1486 { 1487 struct tipc_sock_conn *conn, *safe; 1488 struct tipc_link *l; 1489 struct list_head *conns = &n->conn_sks; 1490 struct sk_buff *skb; 1491 uint i; 1492 1493 pr_debug("Lost contact with %x\n", n->addr); 1494 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER); 1495 trace_tipc_node_lost_contact(n, true, " "); 1496 1497 /* Clean up broadcast state */ 1498 tipc_bcast_remove_peer(n->net, n->bc_entry.link); 1499 skb_queue_purge(&n->bc_entry.namedq); 1500 1501 /* 
Abort any ongoing link failover */ 1502 for (i = 0; i < MAX_BEARERS; i++) { 1503 l = n->links[i].link; 1504 if (l) 1505 tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT); 1506 } 1507 1508 /* Notify publications from this node */ 1509 n->action_flags |= TIPC_NOTIFY_NODE_DOWN; 1510 n->peer_net = NULL; 1511 n->peer_hash_mix = 0; 1512 /* Notify sockets connected to node */ 1513 list_for_each_entry_safe(conn, safe, conns, list) { 1514 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, 1515 SHORT_H_SIZE, 0, tipc_own_addr(n->net), 1516 conn->peer_node, conn->port, 1517 conn->peer_port, TIPC_ERR_NO_NODE); 1518 if (likely(skb)) 1519 skb_queue_tail(inputq, skb); 1520 list_del(&conn->list); 1521 kfree(conn); 1522 } 1523 } 1524 1525 /** 1526 * tipc_node_get_linkname - get the name of a link 1527 * 1528 * @bearer_id: id of the bearer 1529 * @addr: peer node address 1530 * @linkname: link name output buffer 1531 * 1532 * Returns 0 on success 1533 */ 1534 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr, 1535 char *linkname, size_t len) 1536 { 1537 struct tipc_link *link; 1538 int err = -EINVAL; 1539 struct tipc_node *node = tipc_node_find(net, addr); 1540 1541 if (!node) 1542 return err; 1543 1544 if (bearer_id >= MAX_BEARERS) 1545 goto exit; 1546 1547 tipc_node_read_lock(node); 1548 link = node->links[bearer_id].link; 1549 if (link) { 1550 strncpy(linkname, tipc_link_name(link), len); 1551 err = 0; 1552 } 1553 tipc_node_read_unlock(node); 1554 exit: 1555 tipc_node_put(node); 1556 return err; 1557 } 1558 1559 /* Caller should hold node lock for the passed node */ 1560 static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node) 1561 { 1562 void *hdr; 1563 struct nlattr *attrs; 1564 1565 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 1566 NLM_F_MULTI, TIPC_NL_NODE_GET); 1567 if (!hdr) 1568 return -EMSGSIZE; 1569 1570 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE); 1571 if (!attrs) 1572 goto msg_full; 1573 1574 if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr)) 1575 goto attr_msg_full; 1576 if (node_is_up(node)) 1577 if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP)) 1578 goto attr_msg_full; 1579 1580 nla_nest_end(msg->skb, attrs); 1581 genlmsg_end(msg->skb, hdr); 1582 1583 return 0; 1584 1585 attr_msg_full: 1586 nla_nest_cancel(msg->skb, attrs); 1587 msg_full: 1588 genlmsg_cancel(msg->skb, hdr); 1589 1590 return -EMSGSIZE; 1591 } 1592 1593 static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list) 1594 { 1595 struct tipc_msg *hdr = buf_msg(skb_peek(list)); 1596 struct sk_buff_head inputq; 1597 1598 switch (msg_user(hdr)) { 1599 case TIPC_LOW_IMPORTANCE: 1600 case TIPC_MEDIUM_IMPORTANCE: 1601 case TIPC_HIGH_IMPORTANCE: 1602 case TIPC_CRITICAL_IMPORTANCE: 1603 if (msg_connected(hdr) || msg_named(hdr) || 1604 msg_direct(hdr)) { 1605 tipc_loopback_trace(peer_net, list); 1606 spin_lock_init(&list->lock); 1607 tipc_sk_rcv(peer_net, list); 1608 return; 1609 } 1610 if (msg_mcast(hdr)) { 1611 tipc_loopback_trace(peer_net, list); 1612 skb_queue_head_init(&inputq); 1613 tipc_sk_mcast_rcv(peer_net, list, &inputq); 1614 __skb_queue_purge(list); 1615 skb_queue_purge(&inputq); 1616 return; 1617 } 1618 return; 1619 case MSG_FRAGMENTER: 1620 if (tipc_msg_assemble(list)) { 1621 tipc_loopback_trace(peer_net, list); 1622 skb_queue_head_init(&inputq); 1623 tipc_sk_mcast_rcv(peer_net, list, &inputq); 1624 __skb_queue_purge(list); 1625 skb_queue_purge(&inputq); 1626 } 1627 return; 1628 case GROUP_PROTOCOL: 1629 case CONN_MANAGER: 1630 
		tipc_loopback_trace(peer_net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(peer_net, list);
		return;
	case LINK_PROTOCOL:
	case NAME_DISTRIBUTOR:
	case TUNNEL_PROTOCOL:
	case BCAST_PROTOCOL:
		return;
	default:
		return;
	}
}

/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE, -ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	bool node_up = false;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_loopback_trace(net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	node_up = node_is_up(n);
	if (node_up && n->peer_net && check_net(n->peer_net)) {
		/* xmit inner linux container */
		tipc_lxc_xmit(n->peer_net, list);
		if (likely(skb_queue_empty(list))) {
			tipc_node_read_unlock(n);
			tipc_node_put(n);
			return 0;
		}
	}

	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	tipc_node_put(n);

	return rc;
}

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
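 * The return value of tipc_node_xmit() is therefore deliberately ignored
 * here, and this function always returns 0.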
1718 */ 1719 int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode, 1720 u32 selector) 1721 { 1722 struct sk_buff_head head; 1723 1724 __skb_queue_head_init(&head); 1725 __skb_queue_tail(&head, skb); 1726 tipc_node_xmit(net, &head, dnode, selector); 1727 return 0; 1728 } 1729 1730 /* tipc_node_distr_xmit(): send single buffer msgs to individual destinations 1731 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected 1732 */ 1733 int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq) 1734 { 1735 struct sk_buff *skb; 1736 u32 selector, dnode; 1737 1738 while ((skb = __skb_dequeue(xmitq))) { 1739 selector = msg_origport(buf_msg(skb)); 1740 dnode = msg_destnode(buf_msg(skb)); 1741 tipc_node_xmit_skb(net, skb, dnode, selector); 1742 } 1743 return 0; 1744 } 1745 1746 void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) 1747 { 1748 struct sk_buff_head xmitq; 1749 struct sk_buff *txskb; 1750 struct tipc_node *n; 1751 u16 dummy; 1752 u32 dst; 1753 1754 /* Use broadcast if all nodes support it */ 1755 if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { 1756 __skb_queue_head_init(&xmitq); 1757 __skb_queue_tail(&xmitq, skb); 1758 tipc_bcast_xmit(net, &xmitq, &dummy); 1759 return; 1760 } 1761 1762 /* Otherwise use legacy replicast method */ 1763 rcu_read_lock(); 1764 list_for_each_entry_rcu(n, tipc_nodes(net), list) { 1765 dst = n->addr; 1766 if (in_own_node(net, dst)) 1767 continue; 1768 if (!node_is_up(n)) 1769 continue; 1770 txskb = pskb_copy(skb, GFP_ATOMIC); 1771 if (!txskb) 1772 break; 1773 msg_set_destnode(buf_msg(txskb), dst); 1774 tipc_node_xmit_skb(net, txskb, dst, 0); 1775 } 1776 rcu_read_unlock(); 1777 kfree_skb(skb); 1778 } 1779 1780 static void tipc_node_mcast_rcv(struct tipc_node *n) 1781 { 1782 struct tipc_bclink_entry *be = &n->bc_entry; 1783 1784 /* 'arrvq' is under inputq2's lock protection */ 1785 spin_lock_bh(&be->inputq2.lock); 1786 spin_lock_bh(&be->inputq1.lock); 1787 skb_queue_splice_tail_init(&be->inputq1, &be->arrvq); 1788 spin_unlock_bh(&be->inputq1.lock); 1789 spin_unlock_bh(&be->inputq2.lock); 1790 tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2); 1791 } 1792 1793 static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr, 1794 int bearer_id, struct sk_buff_head *xmitq) 1795 { 1796 struct tipc_link *ucl; 1797 int rc; 1798 1799 rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq); 1800 1801 if (rc & TIPC_LINK_DOWN_EVT) { 1802 tipc_node_reset_links(n); 1803 return; 1804 } 1805 1806 if (!(rc & TIPC_LINK_SND_STATE)) 1807 return; 1808 1809 /* If probe message, a STATE response will be sent anyway */ 1810 if (msg_probe(hdr)) 1811 return; 1812 1813 /* Produce a STATE message carrying broadcast NACK */ 1814 tipc_node_read_lock(n); 1815 ucl = n->links[bearer_id].link; 1816 if (ucl) 1817 tipc_link_build_state_msg(ucl, xmitq); 1818 tipc_node_read_unlock(n); 1819 } 1820 1821 /** 1822 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node 1823 * @net: the applicable net namespace 1824 * @skb: TIPC packet 1825 * @bearer_id: id of bearer message arrived on 1826 * 1827 * Invoked with no locks held. 
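 * The sending node is looked up from the message's previous-node field (or
 * from the destination node for a broadcast NACK addressed to another node),
 * the buffer is fed to that node's broadcast receive link, and any resulting
 * ACK/STATE messages are sent on the unicast link the packet arrived on.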
1828 */ 1829 static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id) 1830 { 1831 int rc; 1832 struct sk_buff_head xmitq; 1833 struct tipc_bclink_entry *be; 1834 struct tipc_link_entry *le; 1835 struct tipc_msg *hdr = buf_msg(skb); 1836 int usr = msg_user(hdr); 1837 u32 dnode = msg_destnode(hdr); 1838 struct tipc_node *n; 1839 1840 __skb_queue_head_init(&xmitq); 1841 1842 /* If NACK for other node, let rcv link for that node peek into it */ 1843 if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net))) 1844 n = tipc_node_find(net, dnode); 1845 else 1846 n = tipc_node_find(net, msg_prevnode(hdr)); 1847 if (!n) { 1848 kfree_skb(skb); 1849 return; 1850 } 1851 be = &n->bc_entry; 1852 le = &n->links[bearer_id]; 1853 1854 rc = tipc_bcast_rcv(net, be->link, skb); 1855 1856 /* Broadcast ACKs are sent on a unicast link */ 1857 if (rc & TIPC_LINK_SND_STATE) { 1858 tipc_node_read_lock(n); 1859 tipc_link_build_state_msg(le->link, &xmitq); 1860 tipc_node_read_unlock(n); 1861 } 1862 1863 if (!skb_queue_empty(&xmitq)) 1864 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n); 1865 1866 if (!skb_queue_empty(&be->inputq1)) 1867 tipc_node_mcast_rcv(n); 1868 1869 /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ 1870 if (!skb_queue_empty(&n->bc_entry.namedq)) 1871 tipc_named_rcv(net, &n->bc_entry.namedq, 1872 &n->bc_entry.named_rcv_nxt, 1873 &n->bc_entry.named_open); 1874 1875 /* If reassembly or retransmission failure => reset all links to peer */ 1876 if (rc & TIPC_LINK_DOWN_EVT) 1877 tipc_node_reset_links(n); 1878 1879 tipc_node_put(n); 1880 } 1881 1882 /** 1883 * tipc_node_check_state - check and if necessary update node state 1884 * @skb: TIPC packet 1885 * @bearer_id: identity of bearer delivering the packet 1886 * Returns true if state and msg are ok, otherwise false 1887 */ 1888 static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, 1889 int bearer_id, struct sk_buff_head *xmitq) 1890 { 1891 struct tipc_msg *hdr = buf_msg(skb); 1892 int usr = msg_user(hdr); 1893 int mtyp = msg_type(hdr); 1894 u16 oseqno = msg_seqno(hdr); 1895 u16 exp_pkts = msg_msgcnt(hdr); 1896 u16 rcv_nxt, syncpt, dlv_nxt, inputq_len; 1897 int state = n->state; 1898 struct tipc_link *l, *tnl, *pl = NULL; 1899 struct tipc_media_addr *maddr; 1900 int pb_id; 1901 1902 if (trace_tipc_node_check_state_enabled()) { 1903 trace_tipc_skb_dump(skb, false, "skb for node state check"); 1904 trace_tipc_node_check_state(n, true, " "); 1905 } 1906 l = n->links[bearer_id].link; 1907 if (!l) 1908 return false; 1909 rcv_nxt = tipc_link_rcv_nxt(l); 1910 1911 1912 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) 1913 return true; 1914 1915 /* Find parallel link, if any */ 1916 for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) { 1917 if ((pb_id != bearer_id) && n->links[pb_id].link) { 1918 pl = n->links[pb_id].link; 1919 break; 1920 } 1921 } 1922 1923 if (!tipc_link_validate_msg(l, hdr)) { 1924 trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!"); 1925 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!"); 1926 return false; 1927 } 1928 1929 /* Check and update node accesibility if applicable */ 1930 if (state == SELF_UP_PEER_COMING) { 1931 if (!tipc_link_is_up(l)) 1932 return true; 1933 if (!msg_peer_link_is_up(hdr)) 1934 return true; 1935 tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT); 1936 } 1937 1938 if (state == SELF_DOWN_PEER_LEAVING) { 1939 if (msg_peer_node_is_up(hdr)) 1940 return false; 1941 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT); 1942 return true; 1943 } 
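	/* While the local endpoint is still tearing down contact
	 * (SELF_LEAVING_PEER_DOWN), no packets from the peer are accepted.
	 */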
	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && !tipc_link_is_reset(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			trace_tipc_node_link_down(n, true,
						  "node link down <- failover!");
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}

		/* If parallel link was already down, and this happened before
		 * the tunnel link came up, node failover was never started.
		 * Ensure that a FAILOVER_MSG is sent to get peer out of
		 * NODE_FAILINGOVER state, also this node must accept
		 * TUNNEL_MSGs from peer.
		 */
		if (n->state != NODE_FAILINGOVER)
			tipc_node_link_failover(n, pl, l, xmitq);

		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No synching needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		if (n->capabilities & TIPC_TUNNEL_ENHANCED)
			syncpt = msg_syncpt(hdr);
		else
			syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
		if (!tipc_link_is_up(l))
			__tipc_node_link_up(n, bearer_id, xmitq);
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
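 *
 * The buffer is consumed: it is either handed on to the link layer or the
 * broadcast/discovery receive paths, or freed before return.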
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr;
	struct tipc_node *n;
	int bearer_id = b->identity;
	u32 self = tipc_own_addr(net);
	int usr, rc = 0;
	u16 bc_ack;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_ehdr *ehdr;

	/* Check if message must be decrypted first */
	if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
		goto rcv;

	ehdr = (struct tipc_ehdr *)skb->data;
	if (likely(ehdr->user != LINK_CONFIG)) {
		n = tipc_node_find(net, ntohl(ehdr->addr));
		if (unlikely(!n))
			goto discard;
	} else {
		n = tipc_node_find_by_id(net, ehdr->id);
	}
	tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
	if (!skb)
		return;

rcv:
#endif
	/* Ensure message is well-formed before touching the header */
	if (unlikely(!tipc_msg_validate(&skb)))
		goto discard;
	__skb_queue_head_init(&xmitq);
	hdr = buf_msg(skb);
	usr = msg_user(hdr);
	bc_ack = msg_bcast_ack(hdr);

	/* Handle arrival of discovery or broadcast packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (unlikely(usr == LINK_CONFIG))
			return tipc_disc_rcv(net, skb, b);
		else
			return tipc_node_bc_rcv(net, skb, bearer_id);
	}

	/* Discard unicast link messages destined for another node */
	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
		goto discard;

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	/* Ensure broadcast reception is in synch with peer's send state */
	if (unlikely(usr == LINK_PROTOCOL)) {
		if (unlikely(skb_linearize(skb))) {
			tipc_node_put(n);
			goto discard;
		}
		hdr = buf_msg(skb);
		tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
	} else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
		tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
	}

	/* Receive packet directly if conditions permit */
	tipc_node_read_lock(n);
	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
		spin_lock_bh(&le->lock);
		if (le->link) {
			rc = tipc_link_rcv(le->link, skb, &xmitq);
			skb = NULL;
		}
		spin_unlock_bh(&le->lock);
	}
	tipc_node_read_unlock(n);

	/* Check/update node state before receiving */
	if (unlikely(skb)) {
		if (unlikely(skb_linearize(skb)))
			goto out_node_put;
		tipc_node_write_lock(n);
		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
			if (le->link) {
				rc = tipc_link_rcv(le->link, skb, &xmitq);
				skb = NULL;
			}
		}
		tipc_node_write_unlock(n);
	}

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id, false);

	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
		tipc_named_rcv(net, &n->bc_entry.namedq,
			       &n->bc_entry.named_rcv_nxt,
			       &n->bc_entry.named_open);

	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
		tipc_node_mcast_rcv(n);

	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

out_node_put:
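	/* Drop the reference taken by tipc_node_find() above */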
	tipc_node_put(n);
discard:
	kfree_skb(skb);
}

void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
			      int prop)
{
	struct tipc_net *tn = tipc_net(net);
	int bearer_id = b->identity;
	struct sk_buff_head xmitq;
	struct tipc_link_entry *e;
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	rcu_read_lock();

	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_write_lock(n);
		e = &n->links[bearer_id];
		if (e->link) {
			if (prop == TIPC_NLA_PROP_TOL)
				tipc_link_set_tolerance(e->link, b->tolerance,
							&xmitq);
			else if (prop == TIPC_NLA_PROP_MTU)
				tipc_link_set_mtu(e->link, b->mtu);
		}
		tipc_node_write_unlock(n);
		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
	}

	rcu_read_unlock();
}

int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct tipc_node *peer, *temp_node;
	u32 addr;
	int err;

	/* We identify the peer by its net */
	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
					  info->attrs[TIPC_NLA_NET],
					  tipc_nl_net_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_NET_ADDR])
		return -EINVAL;

	addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);

	if (in_own_node(net, addr))
		return -ENOTSUPP;

	spin_lock_bh(&tn->node_list_lock);
	peer = tipc_node_find(net, addr);
	if (!peer) {
		spin_unlock_bh(&tn->node_list_lock);
		return -ENXIO;
	}

	tipc_node_write_lock(peer);
	if (peer->state != SELF_DOWN_PEER_DOWN &&
	    peer->state != SELF_DOWN_PEER_LEAVING) {
		tipc_node_write_unlock(peer);
		err = -EBUSY;
		goto err_out;
	}

	tipc_node_clear_links(peer);
	tipc_node_write_unlock(peer);
	tipc_node_delete(peer);

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
	err = 0;
err_out:
	tipc_node_put(peer);
	spin_unlock_bh(&tn->node_list_lock);

	return err;
}

int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent(),
			 * which means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler, resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
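			 * User space can detect this and restart the dump.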
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (node->preliminary)
			continue;
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}

/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
						const char *link_name,
						unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_read_lock(n);
		for (i = 0; i < MAX_BEARERS; i++) {
			l = n->links[i].link;
			if (l && !strcmp(tipc_link_name(l), link_name)) {
				*bearer_id = i;
				found_node = n;
				break;
			}
		}
		tipc_node_read_unlock(n);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 max_win;

			max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link,
						   tipc_link_min_win(link),
						   max_win);
		}
	}

out:
	tipc_node_read_unlock(node);
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
			 NULL);
	return res;
}

int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
		if (err)
			goto err_free;
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}

int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	err = -EINVAL;
	if (!strcmp(link_name, tipc_bclink_name)) {
		err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
		if (err)
			return err;
		return 0;
	} else if (strstr(link_name, tipc_bclink_name)) {
		rcu_read_lock();
		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			link = node->bc_entry.link;
			if (link && !strcmp(link_name, tipc_link_name(link))) {
				err = tipc_bclink_reset_stats(net, link);
				tipc_node_read_unlock(node);
				break;
			}
			tipc_node_read_unlock(node);
		}
		rcu_read_unlock();
		return err;
	}

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
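	/* Clear the counters while holding the link entry lock so the
	 * receive path cannot update them concurrently.
	 */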
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link,
				    bool bc_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}

	if (bc_link) {
		*prev_link = i;
		err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
		if (err)
			return err;
	}

	*prev_link = 0;

	return 0;
}

int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	bool bc_link = cb->args[3];
	int err;

	if (done)
		return 0;

	if (!prev_node) {
		/* Check if broadcast-receiver links dumping is needed */
		if (attrs && attrs[TIPC_NLA_LINK]) {
			err = nla_parse_nested_deprecated(link,
							  TIPC_NLA_LINK_MAX,
							  attrs[TIPC_NLA_LINK],
							  tipc_nl_link_policy,
							  NULL);
			if (unlikely(err))
				return err;
			if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
				return -EINVAL;
			bc_link = true;
		}
	}

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent(),
			 * which means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler, resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;
	cb->args[3] = bc_link;

	return skb->len;
}

int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
	struct net *net = sock_net(skb->sk);
	int err;

	if (!info->attrs[TIPC_NLA_MON])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
					  info->attrs[TIPC_NLA_MON],
					  tipc_nl_monitor_policy,
					  info->extack);
	if (err)
		return err;

	if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
		u32 val;

		val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
		err = tipc_nl_monitor_set_threshold(net, val);
		if (err)
			return err;
	}

	return 0;
}

static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
{
	struct nlattr *attrs;
	void *hdr;
	u32 val;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  0, TIPC_NL_MON_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
	if (!attrs)
		goto msg_full;

	val = tipc_nl_monitor_get_threshold(net);

	if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;
	int err;

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	err = __tipc_nl_add_monitor_prop(net, &msg);
	if (err) {
		nlmsg_free(msg.skb);
		return err;
	}

	return genlmsg_reply(msg.skb, info);
}

int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_bearer = cb->args[0];
	struct tipc_nl_msg msg;
	int bearer_id;
	int err;

	if (prev_bearer == MAX_BEARERS)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
		if (err)
			break;
	}
	rtnl_unlock();
	cb->args[0] = bearer_id;

	return skb->len;
}

int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_node = cb->args[1];
	u32 bearer_id = cb->args[2];
	int done = cb->args[0];
	struct tipc_nl_msg msg;
	int err;

	if (!prev_node) {
		struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

		if (!attrs[TIPC_NLA_MON])
			return -EINVAL;

		err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
						  attrs[TIPC_NLA_MON],
						  tipc_nl_monitor_policy,
						  NULL);
		if (err)
			return err;

		if (!mon[TIPC_NLA_MON_REF])
			return -EINVAL;

		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

		if (bearer_id >= MAX_BEARERS)
			return -EINVAL;
	}

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
	if (!err)
		done = 1;

	rtnl_unlock();
	cb->args[0] = done;
	cb->args[1] = prev_node;
	cb->args[2] = bearer_id;

	return skb->len;
}

#ifdef CONFIG_TIPC_CRYPTO
static int tipc_nl_retrieve_key(struct nlattr **attrs,
				struct tipc_aead_key **key)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];

	if (!attr)
		return -ENODATA;

	*key = (struct tipc_aead_key *)nla_data(attr);
	if (nla_len(attr) < tipc_aead_key_size(*key))
		return -EINVAL;

	return 0;
}

static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];

	if (!attr)
		return -ENODATA;

	if (nla_len(attr) < TIPC_NODEID_LEN)
		return -EINVAL;

	*node_id = (u8 *)nla_data(attr);
	return 0;
}

static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];

	if (!attr)
		return -ENODATA;

	*intv = nla_get_u32(attr);
	return 0;
}

static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
	struct tipc_node *n = NULL;
	struct tipc_aead_key *ukey;
	bool rekeying = true, master_key = false;
	u8 *id, *own_id, mode;
	u32 intv = 0;
	int rc = 0;

	if (!info->attrs[TIPC_NLA_NODE])
		return -EINVAL;

	rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
			      info->attrs[TIPC_NLA_NODE],
			      tipc_nl_node_policy, info->extack);
	if (rc)
		return rc;

	own_id = tipc_own_id(net);
	if (!own_id) {
		GENL_SET_ERR_MSG(info, "not found own node identity (set id?)");
		return -EPERM;
	}

	rc = tipc_nl_retrieve_rekeying(attrs, &intv);
	if (rc == -ENODATA)
		rekeying = false;

	rc = tipc_nl_retrieve_key(attrs, &ukey);
	if (rc == -ENODATA && rekeying)
		goto rekeying;
	else if (rc)
		return rc;

	rc = tipc_aead_key_validate(ukey, info);
	if (rc)
		return rc;

	rc = tipc_nl_retrieve_nodeid(attrs, &id);
	switch (rc) {
	case -ENODATA:
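		/* No node identity given: the key is a cluster key, and it
		 * becomes the master key only if explicitly flagged as such.
		 */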
		mode = CLUSTER_KEY;
		master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
		break;
	case 0:
		mode = PER_NODE_KEY;
		if (memcmp(id, own_id, NODE_ID_LEN)) {
			n = tipc_node_find_by_id(net, id) ?:
				tipc_node_create(net, 0, id, 0xffffu, 0, true);
			if (unlikely(!n))
				return -ENOMEM;
			c = n->crypto_rx;
		}
		break;
	default:
		return rc;
	}

	/* Initiate the TX/RX key */
	rc = tipc_crypto_key_init(c, ukey, mode, master_key);
	if (n)
		tipc_node_put(n);

	if (unlikely(rc < 0)) {
		GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
		return rc;
	} else if (c == tx) {
		/* Distribute TX key but not master one */
		if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
			GENL_SET_ERR_MSG(info, "failed to replicate new key");
rekeying:
		/* Schedule TX rekeying if needed */
		tipc_crypto_rekeying_sched(tx, rekeying, intv);
	}

	return 0;
}

int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_node_set_key(skb, info);
	rtnl_unlock();

	return err;
}

static int __tipc_nl_node_flush_key(struct sk_buff *skb,
				    struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;

	tipc_crypto_key_flush(tn->crypto_tx);
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list)
		tipc_crypto_key_flush(n->crypto_rx);
	rcu_read_unlock();

	return 0;
}

int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_node_flush_key(skb, info);
	rtnl_unlock();

	return err;
}
#endif

/**
 * tipc_node_dump - dump TIPC node data
 * @n: tipc node to be dumped
 * @more: dump more?
 *        - false: dump only tipc node data
 *        - true: dump node link data as well
 * @buf: buffer where the dump data is returned
 */
int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
{
	int i = 0;
	size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
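
	/* Use the smaller buffer budget when only the node summary is
	 * printed, the larger one when link data is dumped as well.
	 */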
	if (!n) {
		i += scnprintf(buf, sz, "node data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "node data: %x", n->addr);
	i += scnprintf(buf + i, sz - i, " %x", n->state);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
	i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
	i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
	i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
	i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
	i += scnprintf(buf + i, sz - i, " %u", n->working_links);
	i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
	i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);

	if (!more)
		return i;

	i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[0].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[1].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "bclink:\n ");
	i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);

	return i;
}

void tipc_node_pre_cleanup_net(struct net *exit_net)
{
	struct tipc_node *n;
	struct tipc_net *tn;
	struct net *tmp;

	rcu_read_lock();
	for_each_net_rcu(tmp) {
		if (tmp == exit_net)
			continue;
		tn = tipc_net(tmp);
		if (!tn)
			continue;
		spin_lock_bh(&tn->node_list_lock);
		list_for_each_entry_rcu(n, &tn->node_list, list) {
			if (!n->peer_net)
				continue;
			if (n->peer_net != exit_net)
				continue;
			tipc_node_write_lock(n);
			n->peer_net = NULL;
			n->peer_hash_mix = 0;
			tipc_node_write_unlock_fast(n);
			break;
		}
		spin_unlock_bh(&tn->node_list_lock);
	}
	rcu_read_unlock();
}