/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"

#define INVALID_NODE_SIG 0x10000
#define NODE_CLEANUP_AFTER 300000

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
	TIPC_NOTIFY_NODE_DOWN = (1 << 3),
	TIPC_NOTIFY_NODE_UP   = (1 << 4),
	TIPC_NOTIFY_LINK_UP   = (1 << 6),
	TIPC_NOTIFY_LINK_DOWN = (1 << 7)
};

struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock; /* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
};

/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @state: connectivity state vs peer node
 * @failover_sent: failover sent or not
 * @sync_point: sequence number where synch/failover is finished
 * @link_cnt: number of links to node
 * @working_links: number of working links to node (both active and standby)
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @peer_id: 128-bit ID of peer
 * @publ_list: list of publications
 * @conn_sks: list of connections to be notified at lost contact
 * @keepalive_intv: keepalive interval in milliseconds
 * @timer: node's keepalive timer
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
};

/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};

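/* Note: the state and event values above appear to be chosen as hex
 * mnemonics rather than as bit masks ('d' = down, 'a' = up/active,
 * 'c' = coming, '1' = leaving), so e.g. SELF_UP_PEER_COMING = 0xac
 * reads "self active, peer coming". Only equality comparisons are
 * made on them, in tipc_node_fsm_evt() below.
 */
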
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static void tipc_node_put(struct tipc_node *node);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}

bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (!own_id)
		return true;

	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}

u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}

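/* Illustration: the two active_links[] slots implement load sharing
 * across at most two equal-priority links. Callers hash any stable
 * per-flow value down to one bit via 'sel & 1', so all traffic of one
 * flow stays on one slot. A minimal sketch, assuming a hypothetical
 * caller that uses its port id as selector:
 *
 *	struct tipc_link *l = node_active_link(n, portid);
 *	unsigned int mtu = tipc_node_get_mtu(net, dnode, portid);
 */
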
static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	kfree_rcu(n, rcu);
}

static void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}

/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}

static void tipc_node_read_lock(struct tipc_node *n)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
{
	write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
{
	write_unlock_bh(&n->lock);
}

static void tipc_node_write_unlock(struct tipc_node *n)
{
	struct net *net = n->net;
	u32 addr = 0;
	u32 flags = n->action_flags;
	u32 link_id = 0;
	u32 bearer_id;
	struct list_head *publ_list;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	addr = n->addr;
	link_id = n->link_id;
	bearer_id = link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, addr, bearer_id);
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, link_id);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, addr, bearer_id);
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      addr, link_id);
	}
}

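/* Note: tipc_node_write_unlock() implements a deferred-notification
 * pattern. State changes made under the write lock only set bits in
 * n->action_flags; the resulting callouts to the name table and the
 * link monitor run after the lock has been dropped, so those
 * subsystems are never entered with the node lock held. Writers that
 * cannot have set any flags use tipc_node_write_unlock_fast() and
 * skip the flag check entirely.
 */
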
static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
					  u8 *peer_id, u16 capabilities)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n, *temp_node;
	struct tipc_link *l;
	int bearer_id;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr);
	if (n) {
		if (n->capabilities == capabilities)
			goto exit;
		/* Same node may come back with new capabilities */
		write_lock_bh(&n->lock);
		n->capabilities = capabilities;
		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
			l = n->links[bearer_id].link;
			if (l)
				tipc_link_update_caps(l, capabilities);
		}
		write_unlock_bh(&n->lock);
		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	n->addr = addr;
	memcpy(&n->peer_id, peer_id, 16);
	n->net = net;
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	if (!tipc_link_bc_create(net, tipc_own_addr(net),
				 addr, U16_MAX,
				 tipc_link_window(tipc_bc_sndlink(net)),
				 n->capabilities,
				 &n->bc_entry.inputq1,
				 &n->bc_entry.namedq,
				 tipc_bc_sndlink(net),
				 &n->bc_entry.link)) {
		pr_warn("Broadcast rcv link creation failed, no memory\n");
		kfree(n);
		n = NULL;
		goto exit;
	}
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	n->keepalive_intv = U32_MAX;
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}

static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}

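/* Worked example: assuming the default link tolerance of 1500 ms,
 * tol / 4 = 375 ms, which is below the 500 ms cap, so keepalive_intv
 * becomes 375 ms and the abort limit is 1500 / 375 = 4 unanswered
 * probes. With a tolerance of 4000 ms the interval is capped at
 * 500 ms and the limit becomes 4000 / 500 = 8. Since keepalive_intv
 * only ever shrinks, the lowest-tolerance link drives the node timer.
 */
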
static void tipc_node_delete_from_list(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}

static void tipc_node_clear_links(struct tipc_node *node)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &node->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			node->link_cnt--;
		}
	}
}

/* tipc_node_cleanup - delete nodes that do not
 * have active links for NODE_CLEANUP_AFTER time
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* If lock held by tipc_node_stop() the node will be deleted anyway */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}

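/* Note: a node's kref accounts for one reference held by the
 * hash/list (dropped in tipc_node_delete_from_list()) and one held
 * by the armed keepalive timer (taken when the timer is first armed,
 * dropped by tipc_node_timeout() after a successful cleanup, or by
 * tipc_node_delete() after del_timer_sync()). Callers of
 * tipc_node_find() own an additional, temporary reference that they
 * must release with tipc_node_put().
 */
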
/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Drop the timer's reference to the node */
		tipc_node_put(n);
		return;
	}

	__skb_queue_head_init(&xmitq);

	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}

/**
 * __tipc_node_link_up - handle addition of link
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->failover_sent = false;
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}

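/* Summary of the slot outcomes above when a second link comes up:
 *
 *	prio(new) >  prio(old): slot0 = slot1 = new, old goes standby
 *	prio(new) == prio(old): slot0 = old, slot1 = new, load sharing
 *	prio(new) <  prio(old): slots unchanged, new link is standby
 *
 * With equal priorities node_active_link() then alternates between
 * the two links, depending on bit 0 of the caller's selector.
 */
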
/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
	tipc_node_write_unlock(n);
}

/**
 * __tipc_node_link_down - handle loss of link
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}

static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_link *l = le->link;
	struct tipc_media_addr *maddr;
	struct sk_buff_head xmitq;
	int old_bearer_id = bearer_id;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
		if (delete) {
			kfree(l);
			le->link = NULL;
			n->link_cnt--;
		}
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
	tipc_sk_rcv(n->net, &le->inputq);
}

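/* Note: when failover is initiated above, the sync point is placed
 * almost half the 16-bit sequence space ahead of the tunnel link's
 * rcv_nxt. Sequence numbers wrap modulo 2^16 and less()/more()
 * compare by signed 16-bit distance, so this acts as "infinitely far
 * ahead" until a real FAILOVER_MSG, whose syncpt can only lower the
 * value, is received (see tipc_node_check_state()).
 */
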
static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}

static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}

/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		addr = n->addr;
		tipc_node_put(n);
		return addr;
	}

	/* Even this node may be in conflict */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}

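/* Note: address suggestion is a randomized linear probe. The
 * candidate is first XOR-ed with this node's per-namespace random
 * value, so that two nodes proposing from the same base do not keep
 * generating identical alternatives, and is then incremented until
 * an unowned address is found. A return of 0 from
 * tipc_node_try_addr() means "no objection, keep the address you
 * asked for".
 */
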
void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool accept_addr = false;
	bool reset = true;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Do nothing. */
		reset = false;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 * Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->window, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && l && !tipc_link_is_reset(l))
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}

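/* Summary of the eight permutations handled in tipc_node_check_dest()
 * above:
 *
 *	sign  addr  link |  action
 *	 ok    ok    up  |  nothing (no reset)
 *	 ok    ok   down |  respond
 *	 ok   bad    up  |  dupl_addr (ignore until link down)
 *	 ok   bad   down |  accept addr + respond
 *	bad    ok    up  |  adopt signature
 *	bad    ok   down |  adopt signature + respond
 *	bad   bad    up  |  dupl_addr (ignore until link down)
 *	bad   bad   down |  adopt signature + accept addr + respond
 */
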
void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}

/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
}

static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;

	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);

	return rc;
}

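/* Usage sketch: a typical sender builds a packet chain and lets
 * tipc_node_xmit() consume it. 'hdr', 'm', 'dlen', 'mtu' and
 * 'portid' stand in for the caller's own state:
 *
 *	struct sk_buff_head pkts;
 *	int rc;
 *
 *	__skb_queue_head_init(&pkts);
 *	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
 *	if (rc == dlen)
 *		rc = tipc_node_xmit(net, &pkts, dnode, portid);
 *
 * The chain is purged on -EHOSTUNREACH, so the caller must not touch
 * it again after this call.
 */
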
/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}

/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}

void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
{
	struct sk_buff *txskb;
	struct tipc_node *n;
	u32 dst;

	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();

	kfree_skb(skb);
}

static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}

static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}

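/* Note: multicast reception is a two-stage pipeline. Broadcast-link
 * packets land in inputq1; tipc_node_mcast_rcv() splices them into
 * arrvq under inputq2's lock, and tipc_sk_mcast_rcv() then
 * replicates each message to the matching sockets via inputq2. The
 * intermediate arrvq keeps delivery order stable while the
 * potentially slow per-socket lookup runs without holding inputq1's
 * lock.
 */
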
/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}

/**
 * tipc_node_check_state - check and if necessary update node state
 * @n: target tipc_node
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * @xmitq: queue for messages to be xmited on
 * Returns true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	if (!tipc_link_validate_msg(l, hdr))
		return false;

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && tipc_link_is_up(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}
		/* If parallel link was already down, and this happened before
		 * the tunnel link came up, FAILOVER was never sent. Ensure that
		 * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
		 */
		if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
			tipc_link_create_dummy_tnl_msg(l, xmitq);
			n->failover_sent = true;
		}
		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No synching needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		syncpt = iseqno + exp_pkts - 1;
		if (!tipc_link_is_up(l))
			__tipc_node_link_up(n, bearer_id, xmitq);
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}

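/* Note: a tunnel message carries the original packet wrapped inside
 * it, so two sequence numbers are in play above. For FAILOVER_MSG
 * the sync point is derived from the outer number
 * (oseqno + msg_msgcnt() - 1, i.e. the last packet the failed link
 * ever sent), while for SYNCH_MSG it comes from the wrapped inner
 * number (iseqno), since the parallel link keeps running during
 * synchronization. All comparisons use the wrap-safe less()/more()
 * helpers.
 */
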
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_node *n;
	struct tipc_msg *hdr;
	int bearer_id = b->identity;
	struct tipc_link_entry *le;
	u32 self = tipc_own_addr(net);
	int usr, rc = 0;
	u16 bc_ack;

	__skb_queue_head_init(&xmitq);

	/* Ensure message is well-formed before touching the header */
	if (unlikely(!tipc_msg_validate(&skb)))
		goto discard;
	hdr = buf_msg(skb);
	usr = msg_user(hdr);
	bc_ack = msg_bcast_ack(hdr);

	/* Handle arrival of discovery or broadcast packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (unlikely(usr == LINK_CONFIG))
			return tipc_disc_rcv(net, skb, b);
		else
			return tipc_node_bc_rcv(net, skb, bearer_id);
	}

	/* Discard unicast link messages destined for another node */
	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
		goto discard;

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	/* Ensure broadcast reception is in synch with peer's send state */
	if (unlikely(usr == LINK_PROTOCOL))
		tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
	else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
		tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);

	/* Receive packet directly if conditions permit */
	tipc_node_read_lock(n);
	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
		spin_lock_bh(&le->lock);
		if (le->link) {
			rc = tipc_link_rcv(le->link, skb, &xmitq);
			skb = NULL;
		}
		spin_unlock_bh(&le->lock);
	}
	tipc_node_read_unlock(n);

	/* Check/update node state before receiving */
	if (unlikely(skb)) {
		if (unlikely(skb_linearize(skb)))
			goto discard;
		tipc_node_write_lock(n);
		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
			if (le->link) {
				rc = tipc_link_rcv(le->link, skb, &xmitq);
				skb = NULL;
			}
		}
		tipc_node_write_unlock(n);
	}

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id, false);

	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
		tipc_named_rcv(net, &n->bc_entry.namedq);

	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
		tipc_node_mcast_rcv(n);

	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);
discard:
	kfree_skb(skb);
}

void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
			      int prop)
{
	struct tipc_net *tn = tipc_net(net);
	int bearer_id = b->identity;
	struct sk_buff_head xmitq;
	struct tipc_link_entry *e;
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	rcu_read_lock();

	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_write_lock(n);
		e = &n->links[bearer_id];
		if (e->link) {
			if (prop == TIPC_NLA_PROP_TOL)
				tipc_link_set_tolerance(e->link, b->tolerance,
							&xmitq);
			else if (prop == TIPC_NLA_PROP_MTU)
				tipc_link_set_mtu(e->link, b->mtu);
		}
		tipc_node_write_unlock(n);
		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr);
	}

	rcu_read_unlock();
}

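/* Note: tipc_rcv() deliberately has two delivery paths. The likely
 * case (SELF_UP_PEER_UP and no tunnel protocol) runs under the
 * node's read lock plus the per-link spinlock, so packets on
 * different links to the same node can be processed in parallel.
 * Only packets that may change node state (tunnel/synch traffic, or
 * packets arriving in unusual FSM states) fall back to the exclusive
 * write-lock path through tipc_node_check_state().
 */
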
int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct tipc_node *peer;
	u32 addr;
	int err;

	/* We identify the peer by its net */
	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
			       info->attrs[TIPC_NLA_NET], tipc_nl_net_policy,
			       info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_NET_ADDR])
		return -EINVAL;

	addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);

	if (in_own_node(net, addr))
		return -ENOTSUPP;

	spin_lock_bh(&tn->node_list_lock);
	peer = tipc_node_find(net, addr);
	if (!peer) {
		spin_unlock_bh(&tn->node_list_lock);
		return -ENXIO;
	}

	tipc_node_write_lock(peer);
	if (peer->state != SELF_DOWN_PEER_DOWN &&
	    peer->state != SELF_DOWN_PEER_LEAVING) {
		tipc_node_write_unlock(peer);
		err = -EBUSY;
		goto err_out;
	}

	tipc_node_clear_links(peer);
	tipc_node_write_unlock(peer);
	tipc_node_delete(peer);

	err = 0;
err_out:
	tipc_node_put(peer);
	spin_unlock_bh(&tn->node_list_lock);

	return err;
}

int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}

/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
						const char *link_name,
						unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_read_lock(n);
		for (i = 0; i < MAX_BEARERS; i++) {
			l = n->links[i].link;
			if (l && !strcmp(tipc_link_name(l), link_name)) {
				*bearer_id = i;
				found_node = n;
				break;
			}
		}
		tipc_node_read_unlock(n);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_read_unlock(node);
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
	return res;
}

int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto err_free;
	} else {
		unsigned int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}

int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0)
		return tipc_bclink_reset_stats(net);

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}
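/* Editorial sketch, not part of the original file: __tipc_nl_add_node_links()
 * stores the loop index into '*prev_link' before each attempt, so a node
 * whose links overflow the dump skb can be resumed at exactly the link that
 * failed; the cursor is reset to 0 only after the whole links[] array has
 * been emitted.  With hypothetical values:
 *
 *	call 1: *prev_link == 0, link 2 returns -EMSGSIZE,
 *	        *prev_link is left at 2
 *	call 2: *prev_link == 2, link 2 is retried first,
 *	        then *prev_link is reset to 0 on success
 */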
2221 */ 2222 cb->prev_seq = 1; 2223 goto out; 2224 } 2225 tipc_node_put(node); 2226 2227 list_for_each_entry_continue_rcu(node, &tn->node_list, 2228 list) { 2229 tipc_node_read_lock(node); 2230 err = __tipc_nl_add_node_links(net, &msg, node, 2231 &prev_link); 2232 tipc_node_read_unlock(node); 2233 if (err) 2234 goto out; 2235 2236 prev_node = node->addr; 2237 } 2238 } else { 2239 err = tipc_nl_add_bc_link(net, &msg); 2240 if (err) 2241 goto out; 2242 2243 list_for_each_entry_rcu(node, &tn->node_list, list) { 2244 tipc_node_read_lock(node); 2245 err = __tipc_nl_add_node_links(net, &msg, node, 2246 &prev_link); 2247 tipc_node_read_unlock(node); 2248 if (err) 2249 goto out; 2250 2251 prev_node = node->addr; 2252 } 2253 } 2254 done = 1; 2255 out: 2256 rcu_read_unlock(); 2257 2258 cb->args[0] = prev_node; 2259 cb->args[1] = prev_link; 2260 cb->args[2] = done; 2261 2262 return skb->len; 2263 } 2264 2265 int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info) 2266 { 2267 struct nlattr *attrs[TIPC_NLA_MON_MAX + 1]; 2268 struct net *net = sock_net(skb->sk); 2269 int err; 2270 2271 if (!info->attrs[TIPC_NLA_MON]) 2272 return -EINVAL; 2273 2274 err = nla_parse_nested(attrs, TIPC_NLA_MON_MAX, 2275 info->attrs[TIPC_NLA_MON], 2276 tipc_nl_monitor_policy, info->extack); 2277 if (err) 2278 return err; 2279 2280 if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) { 2281 u32 val; 2282 2283 val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]); 2284 err = tipc_nl_monitor_set_threshold(net, val); 2285 if (err) 2286 return err; 2287 } 2288 2289 return 0; 2290 } 2291 2292 static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg) 2293 { 2294 struct nlattr *attrs; 2295 void *hdr; 2296 u32 val; 2297 2298 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 2299 0, TIPC_NL_MON_GET); 2300 if (!hdr) 2301 return -EMSGSIZE; 2302 2303 attrs = nla_nest_start(msg->skb, TIPC_NLA_MON); 2304 if (!attrs) 2305 goto msg_full; 2306 2307 val = tipc_nl_monitor_get_threshold(net); 2308 2309 if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val)) 2310 goto attr_msg_full; 2311 2312 nla_nest_end(msg->skb, attrs); 2313 genlmsg_end(msg->skb, hdr); 2314 2315 return 0; 2316 2317 attr_msg_full: 2318 nla_nest_cancel(msg->skb, attrs); 2319 msg_full: 2320 genlmsg_cancel(msg->skb, hdr); 2321 2322 return -EMSGSIZE; 2323 } 2324 2325 int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info) 2326 { 2327 struct net *net = sock_net(skb->sk); 2328 struct tipc_nl_msg msg; 2329 int err; 2330 2331 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2332 if (!msg.skb) 2333 return -ENOMEM; 2334 msg.portid = info->snd_portid; 2335 msg.seq = info->snd_seq; 2336 2337 err = __tipc_nl_add_monitor_prop(net, &msg); 2338 if (err) { 2339 nlmsg_free(msg.skb); 2340 return err; 2341 } 2342 2343 return genlmsg_reply(msg.skb, info); 2344 } 2345 2346 int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb) 2347 { 2348 struct net *net = sock_net(skb->sk); 2349 u32 prev_bearer = cb->args[0]; 2350 struct tipc_nl_msg msg; 2351 int bearer_id; 2352 int err; 2353 2354 if (prev_bearer == MAX_BEARERS) 2355 return 0; 2356 2357 msg.skb = skb; 2358 msg.portid = NETLINK_CB(cb->skb).portid; 2359 msg.seq = cb->nlh->nlmsg_seq; 2360 2361 rtnl_lock(); 2362 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { 2363 err = __tipc_nl_add_monitor(net, &msg, bearer_id); 2364 if (err) 2365 break; 2366 } 2367 rtnl_unlock(); 2368 cb->args[0] = bearer_id; 2369 2370 return 
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_node = cb->args[1];
	u32 bearer_id = cb->args[2];
	int done = cb->args[0];
	struct tipc_nl_msg msg;
	int err;

	if (!prev_node) {
		struct nlattr **attrs;
		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_MON])
			return -EINVAL;

		err = nla_parse_nested(mon, TIPC_NLA_MON_MAX,
				       attrs[TIPC_NLA_MON],
				       tipc_nl_monitor_policy, NULL);
		if (err)
			return err;

		if (!mon[TIPC_NLA_MON_REF])
			return -EINVAL;

		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

		if (bearer_id >= MAX_BEARERS)
			return -EINVAL;
	}

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
	if (!err)
		done = 1;

	rtnl_unlock();
	cb->args[0] = done;
	cb->args[1] = prev_node;
	cb->args[2] = bearer_id;

	return skb->len;
}
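/* Editorial note, not part of the original file: unlike the single-cursor
 * dumps above, tipc_nl_node_dump_monitor_peer() spreads its resume state
 * over three cb->args[] slots:
 *
 *	cb->args[0]  done       - set once the peer list is exhausted
 *	cb->args[1]  prev_node  - last peer emitted; 0 means "first call"
 *	cb->args[2]  bearer_id  - monitor instance being walked
 *
 * The TIPC_NLA_MON_REF attribute is parsed only on the first invocation
 * (prev_node == 0); subsequent invocations reuse the cached bearer_id, so
 * the request is validated exactly once per dump.
 */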