/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"
#include "trace.h"
#include "crypto.h"

#define INVALID_NODE_SIG	0x10000
#define NODE_CLEANUP_AFTER	300000

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
	TIPC_NOTIFY_NODE_DOWN	= (1 << 3),
	TIPC_NOTIFY_NODE_UP	= (1 << 4),
	TIPC_NOTIFY_LINK_UP	= (1 << 6),
	TIPC_NOTIFY_LINK_DOWN	= (1 << 7)
};

struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock; /* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
	u16 named_rcv_nxt;
	bool named_open;
};

/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @inputq: pointer to input queue containing messages for msg event
 * @namedq: pointer to name table input queue with name table messages
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @action_flags: bit mask of different types of node actions
 * @state: connectivity state vs peer node
 * @preliminary: a preliminary node or not
 * @sync_point: sequence number where synch/failover is finished
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @publ_list: list of publications
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 * @crypto_rx: RX crypto handler
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool preliminary;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	char peer_id_string[NODE_ID_STR_LEN];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
	struct net *peer_net;
	u32 peer_hash_mix;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_crypto *crypto_rx;
#endif
};

/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};

static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	/* Allow MAX_MSG_SIZE when building connection oriented message
	 * if they are in the same core network
	 */
	if (n->peer_net && connected) {
		tipc_node_put(n);
		return mtu;
	}

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}

bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (!own_id)
		return true;

	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}

u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}

u32 tipc_node_get_addr(struct tipc_node *node)
{
	return (node) ? node->addr : 0;
}

char *tipc_node_get_id_str(struct tipc_node *node)
{
	return node->peer_id_string;
}

#ifdef CONFIG_TIPC_CRYPTO
/**
 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
 * Note: node ref counter must be held first!
 */
struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
{
	return (__n) ? __n->crypto_rx : NULL;
}

struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
{
	return container_of(pos, struct tipc_node, list)->crypto_rx;
}
#endif

static void tipc_node_free(struct rcu_head *rp)
{
	struct tipc_node *n = container_of(rp, struct tipc_node, rcu);

#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_stop(&n->crypto_rx);
#endif
	kfree(n);
}

static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	call_rcu(&n->rcu, tipc_node_free);
}

void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr || node->preliminary)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}

/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}

static void tipc_node_read_lock(struct tipc_node *n)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
{
	write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
{
	write_unlock_bh(&n->lock);
}

static void tipc_node_write_unlock(struct tipc_node *n)
{
	struct net *net = n->net;
	u32 addr = 0;
	u32 flags = n->action_flags;
	u32 link_id = 0;
	u32 bearer_id;
	struct list_head *publ_list;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	addr = n->addr;
	link_id = n->link_id;
	bearer_id = link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr, n->capabilities);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr, n->capabilities);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, addr, bearer_id);
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, link_id);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, addr, bearer_id);
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      addr, link_id);
	}
}

static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
{
	int net_id = tipc_netid(n->net);
	struct tipc_net *tn_peer;
	struct net *tmp;
	u32 hash_chk;

	if (n->peer_net)
		return;

	for_each_net_rcu(tmp) {
		tn_peer = tipc_net(tmp);
		if (!tn_peer)
			continue;
		/* Integrity checking whether node exists in namespace or not */
		if (tn_peer->net_id != net_id)
			continue;
		if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
			continue;
		hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
		if (hash_mixes ^ hash_chk)
			continue;
		n->peer_net = tmp;
		n->peer_hash_mix = hash_mixes;
		break;
	}
}

struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
				   u16 capabilities, u32 hash_mixes,
				   bool preliminary)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n, *temp_node;
	struct tipc_link *l;
	unsigned long intv;
	int bearer_id;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr) ?:
		tipc_node_find_by_id(net, peer_id);
	if (n) {
		if (!n->preliminary)
			goto update;
		if (preliminary)
			goto exit;
		/* A preliminary node becomes "real" now, refresh its data */
		tipc_node_write_lock(n);
		n->preliminary = false;
		n->addr = addr;
		hlist_del_rcu(&n->hash);
		hlist_add_head_rcu(&n->hash,
				   &tn->node_htable[tipc_hashfn(addr)]);
		list_del_rcu(&n->list);
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			if (n->addr < temp_node->addr)
				break;
		}
		list_add_tail_rcu(&n->list, &temp_node->list);
		tipc_node_write_unlock_fast(n);

update:
		if (n->peer_hash_mix ^ hash_mixes)
			tipc_node_assign_peer_net(n, hash_mixes);
		if (n->capabilities == capabilities)
			goto exit;
		/* Same node may come back with new capabilities */
		tipc_node_write_lock(n);
		n->capabilities = capabilities;
		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
			l = n->links[bearer_id].link;
			if (l)
				tipc_link_update_caps(l, capabilities);
		}
		tipc_node_write_unlock_fast(n);

		/* Calculate cluster capabilities */
		tn->capabilities = TIPC_NODE_CAPABILITIES;
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			tn->capabilities &= temp_node->capabilities;
		}

		tipc_bcast_toggle_rcast(net,
					(tn->capabilities & TIPC_BCAST_RCAST));

		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	tipc_nodeid2string(n->peer_id_string, peer_id);
#ifdef CONFIG_TIPC_CRYPTO
	if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
		pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
		kfree(n);
		n = NULL;
		goto exit;
	}
#endif
	n->addr = addr;
	n->preliminary = preliminary;
	memcpy(&n->peer_id, peer_id, 16);
	n->net = net;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Assign kernel local namespace if exists */
	tipc_node_assign_peer_net(n, hash_mixes);
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	n->bc_entry.link = NULL;
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	/* Start a slow timer anyway, crypto needs it */
	n->keepalive_intv = 10000;
	intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
	if (!mod_timer(&n->timer, intv))
		tipc_node_get(n);
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
	trace_tipc_node_create(n, true, " ");
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}

static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}

static void tipc_node_delete_from_list(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
	trace_tipc_node_delete(node, true, " ");
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}

static void tipc_node_clear_links(struct tipc_node *node)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &node->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			node->link_cnt--;
		}
	}
}

/* tipc_node_cleanup - delete nodes that do not
 * have active links for NODE_CLEANUP_AFTER time
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_node *temp_node;
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* If lock held by tipc_node_stop() the node will be deleted anyway */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);

	if (!deleted) {
		spin_unlock_bh(&tn->node_list_lock);
		return deleted;
	}

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(peer->net,
				(tn->capabilities & TIPC_BCAST_RCAST));
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	trace_tipc_node_timeout(n, false, " ");
	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Drop the reference taken for the timer */
		tipc_node_put(n);
		return;
	}

#ifdef CONFIG_TIPC_CRYPTO
	/* Take any crypto key related actions first */
	tipc_crypto_timeout(n->crypto_rx);
#endif
	__skb_queue_head_init(&xmitq);

	/* Reset the node interval to a large value (10 seconds); it will be
	 * recalculated below from the lowest link tolerance
	 */
	tipc_node_read_lock(n);
	n->keepalive_intv = 10000;
	tipc_node_read_unlock(n);
	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}

/**
 * __tipc_node_link_up - handle addition of link
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mss(nl);

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));
	trace_tipc_node_link_up(n, true, " ");

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
	tipc_node_write_unlock(n);
}

/**
 * tipc_node_link_failover() - start failover in case of "half-failover"
 *
 * This function is only called in a very special situation where link
 * failover can be already started on peer node but not on this node.
 * This can happen when e.g.
 *	1. Both links <1A-2A>, <1B-2B> down
 *	2. Link endpoint 2A up, but 1A still down (e.g. due to network
 *	   disturbance, wrong session, etc.)
 *	3. Link <1B-2B> up
 *	4. Link endpoint 2A down (e.g. due to link tolerance timeout)
 *	5. Node 2 starts failover onto link <1B-2B>
 *
 *	==> Node 1 never starts link/node failover!
 *
 * @n: tipc node structure
 * @l: link peer endpoint failing over (can be NULL)
 * @tnl: tunnel link
 * @xmitq: queue for messages to be xmited on tnl link later
 */
static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
				    struct tipc_link *tnl,
				    struct sk_buff_head *xmitq)
{
	/* Avoid turning this into a "self-failover" that can never end */
	if (!tipc_link_is_up(tnl))
		return;

	/* Don't rush, failure link may be in the process of resetting */
	if (l && !tipc_link_is_reset(l))
		return;

	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);

	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_failover_prepare(l, tnl, xmitq);

	if (l)
		tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
}

/**
 * __tipc_node_link_down - handle loss of link
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}

static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_media_addr *maddr = NULL;
	struct tipc_link *l = le->link;
	int old_bearer_id = bearer_id;
	struct sk_buff_head xmitq;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	if (delete) {
		kfree(l);
		le->link = NULL;
		n->link_cnt--;
	}
	trace_tipc_node_link_down(n, true, "node link down or deleted!");
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
	tipc_sk_rcv(n->net, &le->inputq);
}

static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}

static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}

/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool preliminary;
	u32 sugg_addr;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		sugg_addr = n->addr;
		preliminary = n->preliminary;
		tipc_node_put(n);
		if (!preliminary)
			return sugg_addr;
	}

	/* Even this node may be in conflict */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}
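
/* tipc_node_check_dest - process address/signature info from a discovery
 * message. Validates the peer's claimed address, signature and media address
 * against what is already known about the node, creates the link endpoint if
 * it does not yet exist, and tells the caller whether to respond to the
 * discovery message (*respond) and whether the address appears to be a
 * duplicate (*dupl_addr).
 */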
void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature, u32 hash_mixes,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l, *snd_l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool accept_addr = false;
	bool reset = true;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
			     false);
	if (!n)
		return;

	tipc_node_write_lock(n);
	if (unlikely(!n->bc_entry.link)) {
		snd_l = tipc_bc_sndlink(net);
		if (!tipc_link_bc_create(net, tipc_own_addr(net),
					 addr, peer_id, U16_MAX,
					 tipc_link_min_win(snd_l),
					 tipc_link_max_win(snd_l),
					 n->capabilities,
					 &n->bc_entry.inputq1,
					 &n->bc_entry.namedq, snd_l,
					 &n->bc_entry.link)) {
			pr_warn("Broadcast rcv link creation failed, no mem\n");
			tipc_node_write_unlock_fast(n);
			tipc_node_put(n);
			return;
		}
	}

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Do nothing. */
		reset = false;
		/* Peer node is not a container/local namespace */
		if (!n->peer_hash_mix)
			n->peer_hash_mix = hash_mixes;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *    Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->min_win, b->max_win, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && l && !tipc_link_is_reset(l))
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}

void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	trace_tipc_node_reset_links(n, true, " ");
	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}

/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}

static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	trace_tipc_node_lost_contact(n, true, " ");

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);
	__skb_queue_purge(&n->bc_entry.namedq);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
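
/* tipc_lxc_xmit - deliver messages directly to a peer node that lives in
 * another network namespace on the same host, bypassing the bearers.
 * Messages that cannot be delivered this way are left on the list so the
 * caller can transmit them over a regular bearer instead.
 */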
static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head inputq;

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (msg_connected(hdr) || msg_named(hdr) ||
		    msg_direct(hdr)) {
			tipc_loopback_trace(peer_net, list);
			spin_lock_init(&list->lock);
			tipc_sk_rcv(peer_net, list);
			return;
		}
		if (msg_mcast(hdr)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
			return;
		}
		return;
	case MSG_FRAGMENTER:
		if (tipc_msg_assemble(list)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
		}
		return;
	case GROUP_PROTOCOL:
	case CONN_MANAGER:
		tipc_loopback_trace(peer_net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(peer_net, list);
		return;
	case LINK_PROTOCOL:
	case NAME_DISTRIBUTOR:
	case TUNNEL_PROTOCOL:
	case BCAST_PROTOCOL:
		return;
	default:
		return;
	}
}

/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	bool node_up = false;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_loopback_trace(net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	node_up = node_is_up(n);
	if (node_up && n->peer_net && check_net(n->peer_net)) {
		/* xmit inner linux container */
		tipc_lxc_xmit(n->peer_net, list);
		if (likely(skb_queue_empty(list))) {
			tipc_node_read_unlock(n);
			tipc_node_put(n);
			return 0;
		}
	}

	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	tipc_node_put(n);

	return rc;
}

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	__skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}

/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}

void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
{
	struct sk_buff_head xmitq;
	struct sk_buff *txskb;
	struct tipc_node *n;
	u16 dummy;
	u32 dst;

	/* Use broadcast if all nodes support it */
	if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
		__skb_queue_head_init(&xmitq);
		__skb_queue_tail(&xmitq, skb);
		tipc_bcast_xmit(net, &xmitq, &dummy);
		return;
	}

	/* Otherwise use legacy replicast method */
	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();
	kfree_skb(skb);
}

static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}

static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}

/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
	if (!skb_queue_empty(&n->bc_entry.namedq))
		tipc_named_rcv(net, &n->bc_entry.namedq,
			       &n->bc_entry.named_rcv_nxt,
			       &n->bc_entry.named_open);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}

/**
 * tipc_node_check_state - check and if necessary update node state
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * Returns true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	if (trace_tipc_node_check_state_enabled()) {
		trace_tipc_skb_dump(skb, false, "skb for node state check");
		trace_tipc_node_check_state(n, true, " ");
	}
	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
		return false;
	}

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}
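
	/* Self is leaving; don't accept any packets until the peer has
	 * gone down as well
	 */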
	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && !tipc_link_is_reset(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			trace_tipc_node_link_down(n, true,
						  "node link down <- failover!");
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}

		/* If parallel link was already down, and this happened before
		 * the tunnel link came up, node failover was never started.
		 * Ensure that a FAILOVER_MSG is sent to get peer out of
		 * NODE_FAILINGOVER state, also this node must accept
		 * TUNNEL_MSGs from peer.
		 */
		if (n->state != NODE_FAILINGOVER)
			tipc_node_link_failover(n, pl, l, xmitq);

		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No synching needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		if (n->capabilities & TIPC_TUNNEL_ENHANCED)
			syncpt = msg_syncpt(hdr);
		else
			syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
		if (!tipc_link_is_up(l))
			__tipc_node_link_up(n, bearer_id, xmitq);
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
2029 */ 2030 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) 2031 { 2032 struct sk_buff_head xmitq; 2033 struct tipc_link_entry *le; 2034 struct tipc_msg *hdr; 2035 struct tipc_node *n; 2036 int bearer_id = b->identity; 2037 u32 self = tipc_own_addr(net); 2038 int usr, rc = 0; 2039 u16 bc_ack; 2040 #ifdef CONFIG_TIPC_CRYPTO 2041 struct tipc_ehdr *ehdr; 2042 2043 /* Check if message must be decrypted first */ 2044 if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb)) 2045 goto rcv; 2046 2047 ehdr = (struct tipc_ehdr *)skb->data; 2048 if (likely(ehdr->user != LINK_CONFIG)) { 2049 n = tipc_node_find(net, ntohl(ehdr->addr)); 2050 if (unlikely(!n)) 2051 goto discard; 2052 } else { 2053 n = tipc_node_find_by_id(net, ehdr->id); 2054 } 2055 tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b); 2056 if (!skb) 2057 return; 2058 2059 rcv: 2060 #endif 2061 /* Ensure message is well-formed before touching the header */ 2062 if (unlikely(!tipc_msg_validate(&skb))) 2063 goto discard; 2064 __skb_queue_head_init(&xmitq); 2065 hdr = buf_msg(skb); 2066 usr = msg_user(hdr); 2067 bc_ack = msg_bcast_ack(hdr); 2068 2069 /* Handle arrival of discovery or broadcast packet */ 2070 if (unlikely(msg_non_seq(hdr))) { 2071 if (unlikely(usr == LINK_CONFIG)) 2072 return tipc_disc_rcv(net, skb, b); 2073 else 2074 return tipc_node_bc_rcv(net, skb, bearer_id); 2075 } 2076 2077 /* Discard unicast link messages destined for another node */ 2078 if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self))) 2079 goto discard; 2080 2081 /* Locate neighboring node that sent packet */ 2082 n = tipc_node_find(net, msg_prevnode(hdr)); 2083 if (unlikely(!n)) 2084 goto discard; 2085 le = &n->links[bearer_id]; 2086 2087 /* Ensure broadcast reception is in synch with peer's send state */ 2088 if (unlikely(usr == LINK_PROTOCOL)) { 2089 if (unlikely(skb_linearize(skb))) { 2090 tipc_node_put(n); 2091 goto discard; 2092 } 2093 hdr = buf_msg(skb); 2094 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq); 2095 } else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) { 2096 tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr); 2097 } 2098 2099 /* Receive packet directly if conditions permit */ 2100 tipc_node_read_lock(n); 2101 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) { 2102 spin_lock_bh(&le->lock); 2103 if (le->link) { 2104 rc = tipc_link_rcv(le->link, skb, &xmitq); 2105 skb = NULL; 2106 } 2107 spin_unlock_bh(&le->lock); 2108 } 2109 tipc_node_read_unlock(n); 2110 2111 /* Check/update node state before receiving */ 2112 if (unlikely(skb)) { 2113 if (unlikely(skb_linearize(skb))) 2114 goto out_node_put; 2115 tipc_node_write_lock(n); 2116 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) { 2117 if (le->link) { 2118 rc = tipc_link_rcv(le->link, skb, &xmitq); 2119 skb = NULL; 2120 } 2121 } 2122 tipc_node_write_unlock(n); 2123 } 2124 2125 if (unlikely(rc & TIPC_LINK_UP_EVT)) 2126 tipc_node_link_up(n, bearer_id, &xmitq); 2127 2128 if (unlikely(rc & TIPC_LINK_DOWN_EVT)) 2129 tipc_node_link_down(n, bearer_id, false); 2130 2131 if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) 2132 tipc_named_rcv(net, &n->bc_entry.namedq, 2133 &n->bc_entry.named_rcv_nxt, 2134 &n->bc_entry.named_open); 2135 2136 if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) 2137 tipc_node_mcast_rcv(n); 2138 2139 if (!skb_queue_empty(&le->inputq)) 2140 tipc_sk_rcv(net, &le->inputq); 2141 2142 if (!skb_queue_empty(&xmitq)) 2143 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n); 2144 2145 out_node_put: 
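/* Release the reference taken by tipc_node_find() above */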
tipc_node_put(n); 2147 discard: 2148 kfree_skb(skb); 2149 } 2150 2151 void tipc_node_apply_property(struct net *net, struct tipc_bearer *b, 2152 int prop) 2153 { 2154 struct tipc_net *tn = tipc_net(net); 2155 int bearer_id = b->identity; 2156 struct sk_buff_head xmitq; 2157 struct tipc_link_entry *e; 2158 struct tipc_node *n; 2159 2160 __skb_queue_head_init(&xmitq); 2161 2162 rcu_read_lock(); 2163 2164 list_for_each_entry_rcu(n, &tn->node_list, list) { 2165 tipc_node_write_lock(n); 2166 e = &n->links[bearer_id]; 2167 if (e->link) { 2168 if (prop == TIPC_NLA_PROP_TOL) 2169 tipc_link_set_tolerance(e->link, b->tolerance, 2170 &xmitq); 2171 else if (prop == TIPC_NLA_PROP_MTU) 2172 tipc_link_set_mtu(e->link, b->mtu); 2173 } 2174 tipc_node_write_unlock(n); 2175 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL); 2176 } 2177 2178 rcu_read_unlock(); 2179 } 2180 2181 int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info) 2182 { 2183 struct net *net = sock_net(skb->sk); 2184 struct tipc_net *tn = net_generic(net, tipc_net_id); 2185 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1]; 2186 struct tipc_node *peer, *temp_node; 2187 u32 addr; 2188 int err; 2189 2190 /* We identify the peer by its net */ 2191 if (!info->attrs[TIPC_NLA_NET]) 2192 return -EINVAL; 2193 2194 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX, 2195 info->attrs[TIPC_NLA_NET], 2196 tipc_nl_net_policy, info->extack); 2197 if (err) 2198 return err; 2199 2200 if (!attrs[TIPC_NLA_NET_ADDR]) 2201 return -EINVAL; 2202 2203 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]); 2204 2205 if (in_own_node(net, addr)) 2206 return -ENOTSUPP; 2207 2208 spin_lock_bh(&tn->node_list_lock); 2209 peer = tipc_node_find(net, addr); 2210 if (!peer) { 2211 spin_unlock_bh(&tn->node_list_lock); 2212 return -ENXIO; 2213 } 2214 2215 tipc_node_write_lock(peer); 2216 if (peer->state != SELF_DOWN_PEER_DOWN && 2217 peer->state != SELF_DOWN_PEER_LEAVING) { 2218 tipc_node_write_unlock(peer); 2219 err = -EBUSY; 2220 goto err_out; 2221 } 2222 2223 tipc_node_clear_links(peer); 2224 tipc_node_write_unlock(peer); 2225 tipc_node_delete(peer); 2226 2227 /* Calculate cluster capabilities */ 2228 tn->capabilities = TIPC_NODE_CAPABILITIES; 2229 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 2230 tn->capabilities &= temp_node->capabilities; 2231 } 2232 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST)); 2233 err = 0; 2234 err_out: 2235 tipc_node_put(peer); 2236 spin_unlock_bh(&tn->node_list_lock); 2237 2238 return err; 2239 } 2240 2241 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) 2242 { 2243 int err; 2244 struct net *net = sock_net(skb->sk); 2245 struct tipc_net *tn = net_generic(net, tipc_net_id); 2246 int done = cb->args[0]; 2247 int last_addr = cb->args[1]; 2248 struct tipc_node *node; 2249 struct tipc_nl_msg msg; 2250 2251 if (done) 2252 return 0; 2253 2254 msg.skb = skb; 2255 msg.portid = NETLINK_CB(cb->skb).portid; 2256 msg.seq = cb->nlh->nlmsg_seq; 2257 2258 rcu_read_lock(); 2259 if (last_addr) { 2260 node = tipc_node_find(net, last_addr); 2261 if (!node) { 2262 rcu_read_unlock(); 2263 /* We never set seq or call nl_dump_check_consistent(), 2264 * which means that setting prev_seq here will cause the 2265 * consistency check to fail in the netlink callback 2266 * handler, resulting in the NLMSG_DONE message having 2267 * the NLM_F_DUMP_INTR flag set if the node state 2268 * changed while we released the lock.
2269 */ 2270 cb->prev_seq = 1; 2271 return -EPIPE; 2272 } 2273 tipc_node_put(node); 2274 } 2275 2276 list_for_each_entry_rcu(node, &tn->node_list, list) { 2277 if (node->preliminary) 2278 continue; 2279 if (last_addr) { 2280 if (node->addr == last_addr) 2281 last_addr = 0; 2282 else 2283 continue; 2284 } 2285 2286 tipc_node_read_lock(node); 2287 err = __tipc_nl_add_node(&msg, node); 2288 if (err) { 2289 last_addr = node->addr; 2290 tipc_node_read_unlock(node); 2291 goto out; 2292 } 2293 2294 tipc_node_read_unlock(node); 2295 } 2296 done = 1; 2297 out: 2298 cb->args[0] = done; 2299 cb->args[1] = last_addr; 2300 rcu_read_unlock(); 2301 2302 return skb->len; 2303 } 2304 2305 /* tipc_node_find_by_name - locate owner node of link by link's name 2306 * @net: the applicable net namespace 2307 * @name: pointer to link name string 2308 * @bearer_id: pointer to index in 'node->links' array where the link was found. 2309 * 2310 * Returns pointer to node owning the link, or 0 if no matching link is found. 2311 */ 2312 static struct tipc_node *tipc_node_find_by_name(struct net *net, 2313 const char *link_name, 2314 unsigned int *bearer_id) 2315 { 2316 struct tipc_net *tn = net_generic(net, tipc_net_id); 2317 struct tipc_link *l; 2318 struct tipc_node *n; 2319 struct tipc_node *found_node = NULL; 2320 int i; 2321 2322 *bearer_id = 0; 2323 rcu_read_lock(); 2324 list_for_each_entry_rcu(n, &tn->node_list, list) { 2325 tipc_node_read_lock(n); 2326 for (i = 0; i < MAX_BEARERS; i++) { 2327 l = n->links[i].link; 2328 if (l && !strcmp(tipc_link_name(l), link_name)) { 2329 *bearer_id = i; 2330 found_node = n; 2331 break; 2332 } 2333 } 2334 tipc_node_read_unlock(n); 2335 if (found_node) 2336 break; 2337 } 2338 rcu_read_unlock(); 2339 2340 return found_node; 2341 } 2342 2343 int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info) 2344 { 2345 int err; 2346 int res = 0; 2347 int bearer_id; 2348 char *name; 2349 struct tipc_link *link; 2350 struct tipc_node *node; 2351 struct sk_buff_head xmitq; 2352 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; 2353 struct net *net = sock_net(skb->sk); 2354 2355 __skb_queue_head_init(&xmitq); 2356 2357 if (!info->attrs[TIPC_NLA_LINK]) 2358 return -EINVAL; 2359 2360 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX, 2361 info->attrs[TIPC_NLA_LINK], 2362 tipc_nl_link_policy, info->extack); 2363 if (err) 2364 return err; 2365 2366 if (!attrs[TIPC_NLA_LINK_NAME]) 2367 return -EINVAL; 2368 2369 name = nla_data(attrs[TIPC_NLA_LINK_NAME]); 2370 2371 if (strcmp(name, tipc_bclink_name) == 0) 2372 return tipc_nl_bc_link_set(net, attrs); 2373 2374 node = tipc_node_find_by_name(net, name, &bearer_id); 2375 if (!node) 2376 return -EINVAL; 2377 2378 tipc_node_read_lock(node); 2379 2380 link = node->links[bearer_id].link; 2381 if (!link) { 2382 res = -EINVAL; 2383 goto out; 2384 } 2385 2386 if (attrs[TIPC_NLA_LINK_PROP]) { 2387 struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; 2388 2389 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props); 2390 if (err) { 2391 res = err; 2392 goto out; 2393 } 2394 2395 if (props[TIPC_NLA_PROP_TOL]) { 2396 u32 tol; 2397 2398 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); 2399 tipc_link_set_tolerance(link, tol, &xmitq); 2400 } 2401 if (props[TIPC_NLA_PROP_PRIO]) { 2402 u32 prio; 2403 2404 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); 2405 tipc_link_set_prio(link, prio, &xmitq); 2406 } 2407 if (props[TIPC_NLA_PROP_WIN]) { 2408 u32 max_win; 2409 2410 max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); 2411 tipc_link_set_queue_limits(link, 2412 
tipc_link_min_win(link), 2413 max_win); 2414 } 2415 } 2416 2417 out: 2418 tipc_node_read_unlock(node); 2419 tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr, 2420 NULL); 2421 return res; 2422 } 2423 2424 int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info) 2425 { 2426 struct net *net = genl_info_net(info); 2427 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; 2428 struct tipc_nl_msg msg; 2429 char *name; 2430 int err; 2431 2432 msg.portid = info->snd_portid; 2433 msg.seq = info->snd_seq; 2434 2435 if (!info->attrs[TIPC_NLA_LINK]) 2436 return -EINVAL; 2437 2438 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX, 2439 info->attrs[TIPC_NLA_LINK], 2440 tipc_nl_link_policy, info->extack); 2441 if (err) 2442 return err; 2443 2444 if (!attrs[TIPC_NLA_LINK_NAME]) 2445 return -EINVAL; 2446 2447 name = nla_data(attrs[TIPC_NLA_LINK_NAME]); 2448 2449 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2450 if (!msg.skb) 2451 return -ENOMEM; 2452 2453 if (strcmp(name, tipc_bclink_name) == 0) { 2454 err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl); 2455 if (err) 2456 goto err_free; 2457 } else { 2458 int bearer_id; 2459 struct tipc_node *node; 2460 struct tipc_link *link; 2461 2462 node = tipc_node_find_by_name(net, name, &bearer_id); 2463 if (!node) { 2464 err = -EINVAL; 2465 goto err_free; 2466 } 2467 2468 tipc_node_read_lock(node); 2469 link = node->links[bearer_id].link; 2470 if (!link) { 2471 tipc_node_read_unlock(node); 2472 err = -EINVAL; 2473 goto err_free; 2474 } 2475 2476 err = __tipc_nl_add_link(net, &msg, link, 0); 2477 tipc_node_read_unlock(node); 2478 if (err) 2479 goto err_free; 2480 } 2481 2482 return genlmsg_reply(msg.skb, info); 2483 2484 err_free: 2485 nlmsg_free(msg.skb); 2486 return err; 2487 } 2488 2489 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info) 2490 { 2491 int err; 2492 char *link_name; 2493 unsigned int bearer_id; 2494 struct tipc_link *link; 2495 struct tipc_node *node; 2496 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; 2497 struct net *net = sock_net(skb->sk); 2498 struct tipc_net *tn = tipc_net(net); 2499 struct tipc_link_entry *le; 2500 2501 if (!info->attrs[TIPC_NLA_LINK]) 2502 return -EINVAL; 2503 2504 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX, 2505 info->attrs[TIPC_NLA_LINK], 2506 tipc_nl_link_policy, info->extack); 2507 if (err) 2508 return err; 2509 2510 if (!attrs[TIPC_NLA_LINK_NAME]) 2511 return -EINVAL; 2512 2513 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]); 2514 2515 err = -EINVAL; 2516 if (!strcmp(link_name, tipc_bclink_name)) { 2517 err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net)); 2518 if (err) 2519 return err; 2520 return 0; 2521 } else if (strstr(link_name, tipc_bclink_name)) { 2522 rcu_read_lock(); 2523 list_for_each_entry_rcu(node, &tn->node_list, list) { 2524 tipc_node_read_lock(node); 2525 link = node->bc_entry.link; 2526 if (link && !strcmp(link_name, tipc_link_name(link))) { 2527 err = tipc_bclink_reset_stats(net, link); 2528 tipc_node_read_unlock(node); 2529 break; 2530 } 2531 tipc_node_read_unlock(node); 2532 } 2533 rcu_read_unlock(); 2534 return err; 2535 } 2536 2537 node = tipc_node_find_by_name(net, link_name, &bearer_id); 2538 if (!node) 2539 return -EINVAL; 2540 2541 le = &node->links[bearer_id]; 2542 tipc_node_read_lock(node); 2543 spin_lock_bh(&le->lock); 2544 link = node->links[bearer_id].link; 2545 if (!link) { 2546 spin_unlock_bh(&le->lock); 2547 tipc_node_read_unlock(node); 2548 return -EINVAL; 2549 } 2550 
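/* Clear the counters under the per-link spinlock, the same lock held around tipc_link_rcv() on the receive path */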
tipc_link_reset_stats(link); 2551 spin_unlock_bh(&le->lock); 2552 tipc_node_read_unlock(node); 2553 return 0; 2554 } 2555 2556 /* Caller should hold node lock */ 2557 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg, 2558 struct tipc_node *node, u32 *prev_link, 2559 bool bc_link) 2560 { 2561 u32 i; 2562 int err; 2563 2564 for (i = *prev_link; i < MAX_BEARERS; i++) { 2565 *prev_link = i; 2566 2567 if (!node->links[i].link) 2568 continue; 2569 2570 err = __tipc_nl_add_link(net, msg, 2571 node->links[i].link, NLM_F_MULTI); 2572 if (err) 2573 return err; 2574 } 2575 2576 if (bc_link) { 2577 *prev_link = i; 2578 err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link); 2579 if (err) 2580 return err; 2581 } 2582 2583 *prev_link = 0; 2584 2585 return 0; 2586 } 2587 2588 int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb) 2589 { 2590 struct net *net = sock_net(skb->sk); 2591 struct nlattr **attrs = genl_dumpit_info(cb)->attrs; 2592 struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; 2593 struct tipc_net *tn = net_generic(net, tipc_net_id); 2594 struct tipc_node *node; 2595 struct tipc_nl_msg msg; 2596 u32 prev_node = cb->args[0]; 2597 u32 prev_link = cb->args[1]; 2598 int done = cb->args[2]; 2599 bool bc_link = cb->args[3]; 2600 int err; 2601 2602 if (done) 2603 return 0; 2604 2605 if (!prev_node) { 2606 /* Check if broadcast-receiver links dumping is needed */ 2607 if (attrs && attrs[TIPC_NLA_LINK]) { 2608 err = nla_parse_nested_deprecated(link, 2609 TIPC_NLA_LINK_MAX, 2610 attrs[TIPC_NLA_LINK], 2611 tipc_nl_link_policy, 2612 NULL); 2613 if (unlikely(err)) 2614 return err; 2615 if (unlikely(!link[TIPC_NLA_LINK_BROADCAST])) 2616 return -EINVAL; 2617 bc_link = true; 2618 } 2619 } 2620 2621 msg.skb = skb; 2622 msg.portid = NETLINK_CB(cb->skb).portid; 2623 msg.seq = cb->nlh->nlmsg_seq; 2624 2625 rcu_read_lock(); 2626 if (prev_node) { 2627 node = tipc_node_find(net, prev_node); 2628 if (!node) { 2629 /* We never set seq or call nl_dump_check_consistent(), 2630 * which means that setting prev_seq here will cause the 2631 * consistency check to fail in the netlink callback 2632 * handler, resulting in the last NLMSG_DONE message 2633 * having the NLM_F_DUMP_INTR flag set.
2634 */ 2635 cb->prev_seq = 1; 2636 goto out; 2637 } 2638 tipc_node_put(node); 2639 2640 list_for_each_entry_continue_rcu(node, &tn->node_list, 2641 list) { 2642 tipc_node_read_lock(node); 2643 err = __tipc_nl_add_node_links(net, &msg, node, 2644 &prev_link, bc_link); 2645 tipc_node_read_unlock(node); 2646 if (err) 2647 goto out; 2648 2649 prev_node = node->addr; 2650 } 2651 } else { 2652 err = tipc_nl_add_bc_link(net, &msg, tn->bcl); 2653 if (err) 2654 goto out; 2655 2656 list_for_each_entry_rcu(node, &tn->node_list, list) { 2657 tipc_node_read_lock(node); 2658 err = __tipc_nl_add_node_links(net, &msg, node, 2659 &prev_link, bc_link); 2660 tipc_node_read_unlock(node); 2661 if (err) 2662 goto out; 2663 2664 prev_node = node->addr; 2665 } 2666 } 2667 done = 1; 2668 out: 2669 rcu_read_unlock(); 2670 2671 cb->args[0] = prev_node; 2672 cb->args[1] = prev_link; 2673 cb->args[2] = done; 2674 cb->args[3] = bc_link; 2675 2676 return skb->len; 2677 } 2678 2679 int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info) 2680 { 2681 struct nlattr *attrs[TIPC_NLA_MON_MAX + 1]; 2682 struct net *net = sock_net(skb->sk); 2683 int err; 2684 2685 if (!info->attrs[TIPC_NLA_MON]) 2686 return -EINVAL; 2687 2688 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX, 2689 info->attrs[TIPC_NLA_MON], 2690 tipc_nl_monitor_policy, 2691 info->extack); 2692 if (err) 2693 return err; 2694 2695 if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) { 2696 u32 val; 2697 2698 val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]); 2699 err = tipc_nl_monitor_set_threshold(net, val); 2700 if (err) 2701 return err; 2702 } 2703 2704 return 0; 2705 } 2706 2707 static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg) 2708 { 2709 struct nlattr *attrs; 2710 void *hdr; 2711 u32 val; 2712 2713 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 2714 0, TIPC_NL_MON_GET); 2715 if (!hdr) 2716 return -EMSGSIZE; 2717 2718 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON); 2719 if (!attrs) 2720 goto msg_full; 2721 2722 val = tipc_nl_monitor_get_threshold(net); 2723 2724 if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val)) 2725 goto attr_msg_full; 2726 2727 nla_nest_end(msg->skb, attrs); 2728 genlmsg_end(msg->skb, hdr); 2729 2730 return 0; 2731 2732 attr_msg_full: 2733 nla_nest_cancel(msg->skb, attrs); 2734 msg_full: 2735 genlmsg_cancel(msg->skb, hdr); 2736 2737 return -EMSGSIZE; 2738 } 2739 2740 int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info) 2741 { 2742 struct net *net = sock_net(skb->sk); 2743 struct tipc_nl_msg msg; 2744 int err; 2745 2746 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2747 if (!msg.skb) 2748 return -ENOMEM; 2749 msg.portid = info->snd_portid; 2750 msg.seq = info->snd_seq; 2751 2752 err = __tipc_nl_add_monitor_prop(net, &msg); 2753 if (err) { 2754 nlmsg_free(msg.skb); 2755 return err; 2756 } 2757 2758 return genlmsg_reply(msg.skb, info); 2759 } 2760 2761 int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb) 2762 { 2763 struct net *net = sock_net(skb->sk); 2764 u32 prev_bearer = cb->args[0]; 2765 struct tipc_nl_msg msg; 2766 int bearer_id; 2767 int err; 2768 2769 if (prev_bearer == MAX_BEARERS) 2770 return 0; 2771 2772 msg.skb = skb; 2773 msg.portid = NETLINK_CB(cb->skb).portid; 2774 msg.seq = cb->nlh->nlmsg_seq; 2775 2776 rtnl_lock(); 2777 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { 2778 err = __tipc_nl_add_monitor(net, &msg, bearer_id); 2779 if (err) 2780 break; 
2781 } 2782 rtnl_unlock(); 2783 cb->args[0] = bearer_id; 2784 2785 return skb->len; 2786 } 2787 2788 int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb, 2789 struct netlink_callback *cb) 2790 { 2791 struct net *net = sock_net(skb->sk); 2792 u32 prev_node = cb->args[1]; 2793 u32 bearer_id = cb->args[2]; 2794 int done = cb->args[0]; 2795 struct tipc_nl_msg msg; 2796 int err; 2797 2798 if (!prev_node) { 2799 struct nlattr **attrs = genl_dumpit_info(cb)->attrs; 2800 struct nlattr *mon[TIPC_NLA_MON_MAX + 1]; 2801 2802 if (!attrs[TIPC_NLA_MON]) 2803 return -EINVAL; 2804 2805 err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX, 2806 attrs[TIPC_NLA_MON], 2807 tipc_nl_monitor_policy, 2808 NULL); 2809 if (err) 2810 return err; 2811 2812 if (!mon[TIPC_NLA_MON_REF]) 2813 return -EINVAL; 2814 2815 bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]); 2816 2817 if (bearer_id >= MAX_BEARERS) 2818 return -EINVAL; 2819 } 2820 2821 if (done) 2822 return 0; 2823 2824 msg.skb = skb; 2825 msg.portid = NETLINK_CB(cb->skb).portid; 2826 msg.seq = cb->nlh->nlmsg_seq; 2827 2828 rtnl_lock(); 2829 err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node); 2830 if (!err) 2831 done = 1; 2832 2833 rtnl_unlock(); 2834 cb->args[0] = done; 2835 cb->args[1] = prev_node; 2836 cb->args[2] = bearer_id; 2837 2838 return skb->len; 2839 } 2840 2841 #ifdef CONFIG_TIPC_CRYPTO 2842 static int tipc_nl_retrieve_key(struct nlattr **attrs, 2843 struct tipc_aead_key **key) 2844 { 2845 struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY]; 2846 2847 if (!attr) 2848 return -ENODATA; 2849 2850 *key = (struct tipc_aead_key *)nla_data(attr); 2851 if (nla_len(attr) < tipc_aead_key_size(*key)) 2852 return -EINVAL; 2853 2854 return 0; 2855 } 2856 2857 static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id) 2858 { 2859 struct nlattr *attr = attrs[TIPC_NLA_NODE_ID]; 2860 2861 if (!attr) 2862 return -ENODATA; 2863 2864 if (nla_len(attr) < TIPC_NODEID_LEN) 2865 return -EINVAL; 2866 2867 *node_id = (u8 *)nla_data(attr); 2868 return 0; 2869 } 2870 2871 static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info) 2872 { 2873 struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1]; 2874 struct net *net = sock_net(skb->sk); 2875 struct tipc_net *tn = tipc_net(net); 2876 struct tipc_node *n = NULL; 2877 struct tipc_aead_key *ukey; 2878 struct tipc_crypto *c; 2879 u8 *id, *own_id; 2880 int rc = 0; 2881 2882 if (!info->attrs[TIPC_NLA_NODE]) 2883 return -EINVAL; 2884 2885 rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX, 2886 info->attrs[TIPC_NLA_NODE], 2887 tipc_nl_node_policy, info->extack); 2888 if (rc) 2889 goto exit; 2890 2891 own_id = tipc_own_id(net); 2892 if (!own_id) { 2893 rc = -EPERM; 2894 goto exit; 2895 } 2896 2897 rc = tipc_nl_retrieve_key(attrs, &ukey); 2898 if (rc) 2899 goto exit; 2900 2901 rc = tipc_aead_key_validate(ukey); 2902 if (rc) 2903 goto exit; 2904 2905 rc = tipc_nl_retrieve_nodeid(attrs, &id); 2906 switch (rc) { 2907 case -ENODATA: 2908 /* Cluster key mode */ 2909 rc = tipc_crypto_key_init(tn->crypto_tx, ukey, CLUSTER_KEY); 2910 break; 2911 case 0: 2912 /* Per-node key mode */ 2913 if (!memcmp(id, own_id, NODE_ID_LEN)) { 2914 c = tn->crypto_tx; 2915 } else { 2916 n = tipc_node_find_by_id(net, id) ?: 2917 tipc_node_create(net, 0, id, 0xffffu, 0, true); 2918 if (unlikely(!n)) { 2919 rc = -ENOMEM; 2920 break; 2921 } 2922 c = n->crypto_rx; 2923 } 2924 2925 rc = tipc_crypto_key_init(c, ukey, PER_NODE_KEY); 2926 if (n) 2927 tipc_node_put(n); 2928 break; 2929 default: 2930 break; 2931 } 2932 2933 exit: 2934 return 
(rc < 0) ? rc : 0; 2935 } 2936 2937 int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info) 2938 { 2939 int err; 2940 2941 rtnl_lock(); 2942 err = __tipc_nl_node_set_key(skb, info); 2943 rtnl_unlock(); 2944 2945 return err; 2946 } 2947 2948 static int __tipc_nl_node_flush_key(struct sk_buff *skb, 2949 struct genl_info *info) 2950 { 2951 struct net *net = sock_net(skb->sk); 2952 struct tipc_net *tn = tipc_net(net); 2953 struct tipc_node *n; 2954 2955 tipc_crypto_key_flush(tn->crypto_tx); 2956 rcu_read_lock(); 2957 list_for_each_entry_rcu(n, &tn->node_list, list) 2958 tipc_crypto_key_flush(n->crypto_rx); 2959 rcu_read_unlock(); 2960 2961 pr_info("All keys are flushed!\n"); 2962 return 0; 2963 } 2964 2965 int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info) 2966 { 2967 int err; 2968 2969 rtnl_lock(); 2970 err = __tipc_nl_node_flush_key(skb, info); 2971 rtnl_unlock(); 2972 2973 return err; 2974 } 2975 #endif 2976 2977 /** 2978 * tipc_node_dump - dump TIPC node data 2979 * @n: tipc node to be dumped 2980 * @more: dump more? 2981 * - false: dump only tipc node data 2982 * - true: dump node link data as well 2983 * @buf: returned buffer of dump data in format 2984 */ 2985 int tipc_node_dump(struct tipc_node *n, bool more, char *buf) 2986 { 2987 int i = 0; 2988 size_t sz = (more) ? NODE_LMAX : NODE_LMIN; 2989 2990 if (!n) { 2991 i += scnprintf(buf, sz, "node data: (null)\n"); 2992 return i; 2993 } 2994 2995 i += scnprintf(buf, sz, "node data: %x", n->addr); 2996 i += scnprintf(buf + i, sz - i, " %x", n->state); 2997 i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]); 2998 i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]); 2999 i += scnprintf(buf + i, sz - i, " %x", n->action_flags); 3000 i += scnprintf(buf + i, sz - i, " %u", n->failover_sent); 3001 i += scnprintf(buf + i, sz - i, " %u", n->sync_point); 3002 i += scnprintf(buf + i, sz - i, " %d", n->link_cnt); 3003 i += scnprintf(buf + i, sz - i, " %u", n->working_links); 3004 i += scnprintf(buf + i, sz - i, " %x", n->capabilities); 3005 i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv); 3006 3007 if (!more) 3008 return i; 3009 3010 i += scnprintf(buf + i, sz - i, "link_entry[0]:\n"); 3011 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu); 3012 i += scnprintf(buf + i, sz - i, " media: "); 3013 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr); 3014 i += scnprintf(buf + i, sz - i, "\n"); 3015 i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i); 3016 i += scnprintf(buf + i, sz - i, " inputq: "); 3017 i += tipc_list_dump(&n->links[0].inputq, false, buf + i); 3018 3019 i += scnprintf(buf + i, sz - i, "link_entry[1]:\n"); 3020 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu); 3021 i += scnprintf(buf + i, sz - i, " media: "); 3022 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr); 3023 i += scnprintf(buf + i, sz - i, "\n"); 3024 i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i); 3025 i += scnprintf(buf + i, sz - i, " inputq: "); 3026 i += tipc_list_dump(&n->links[1].inputq, false, buf + i); 3027 3028 i += scnprintf(buf + i, sz - i, "bclink:\n "); 3029 i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i); 3030 3031 return i; 3032 } 3033 3034 void tipc_node_pre_cleanup_net(struct net *exit_net) 3035 { 3036 struct tipc_node *n; 3037 struct tipc_net *tn; 3038 struct net *tmp; 3039 3040 rcu_read_lock(); 3041 for_each_net_rcu(tmp) { 3042 if (tmp == exit_net) 3043 continue; 3044 tn = tipc_net(tmp); 3045 if 
(!tn) 3046 continue; 3047 spin_lock_bh(&tn->node_list_lock); 3048 list_for_each_entry_rcu(n, &tn->node_list, list) { 3049 if (!n->peer_net) 3050 continue; 3051 if (n->peer_net != exit_net) 3052 continue; 3053 tipc_node_write_lock(n); 3054 n->peer_net = NULL; 3055 n->peer_hash_mix = 0; 3056 tipc_node_write_unlock_fast(n); 3057 break; 3058 } 3059 spin_unlock_bh(&tn->node_list_lock); 3060 } 3061 rcu_read_unlock(); 3062 } 3063