/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"

#define INVALID_NODE_SIG 0x10000
#define NODE_CLEANUP_AFTER 300000

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
	TIPC_NOTIFY_NODE_DOWN	= (1 << 3),
	TIPC_NOTIFY_NODE_UP	= (1 << 4),
	TIPC_NOTIFY_LINK_UP	= (1 << 6),
	TIPC_NOTIFY_LINK_DOWN	= (1 << 7)
};

struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock; /* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
};

/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @inputq: pointer to input queue containing messages for msg event
 * @namedq: pointer to name table input queue with name table messages
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @state: connectivity state vs peer node
 * @failover_sent: failover sent or not
 * @sync_point: sequence number where synch/failover is finished
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @peer_id: 128-bit ID of peer
 * @publ_list: list of publications
 * @conn_sks: list of connected sockets
 * @keepalive_intv: keepalive interval in milliseconds
 * @timer: node's keepalive timer
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
};

/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};
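
/* Editorial note on the constants above: the hex values double as visual
 * mnemonics rather than arbitrary numbers. Reading the nibbles as letters,
 * 0xdd is "self down / peer down", 0xaa "self up / peer up", 0xece
 * "establish contact event", 0x1ce "lost contact event", and the 0x9...
 * prefix marks the peer-originated variants of the contact events. So,
 * for example, SELF_ESTABL_CONTACT_EVT (0xece) takes SELF_DOWN_PEER_DOWN
 * (0xdd) to SELF_UP_PEER_COMING (0xac) in tipc_node_fsm_evt() below.
 * This reading is an observation, not documented elsewhere in the file.
 */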

static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static void tipc_node_put(struct tipc_node *node);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}

bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (!own_id)
		return true;

	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}

u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}

static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	kfree_rcu(n, rcu);
}

static void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}
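
/* Illustrative (non-compiled) sketch of the lookup/refcount contract
 * implemented by tipc_node_find()/tipc_node_put(): the hash walk runs
 * under RCU, and kref_get_unless_zero() guarantees that a node already
 * on its way to destruction is never handed out. A typical caller does:
 *
 *	struct tipc_node *n = tipc_node_find(net, addr);
 *
 *	if (n) {
 *		... read fields under n->lock as appropriate ...
 *		tipc_node_put(n);
 *	}
 *
 * Every successful find must be paired with exactly one put.
 */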

/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}

static void tipc_node_read_lock(struct tipc_node *n)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
{
	write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
{
	write_unlock_bh(&n->lock);
}

static void tipc_node_write_unlock(struct tipc_node *n)
{
	struct net *net = n->net;
	u32 addr = 0;
	u32 flags = n->action_flags;
	u32 link_id = 0;
	u32 bearer_id;
	struct list_head *publ_list;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	addr = n->addr;
	link_id = n->link_id;
	bearer_id = link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, addr, bearer_id);
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, link_id);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, addr, bearer_id);
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      addr, link_id);
	}
}

static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
					  u8 *peer_id, u16 capabilities)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n, *temp_node;
	struct tipc_link *l;
	int bearer_id;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr);
	if (n) {
		if (n->capabilities == capabilities)
			goto exit;
		/* Same node may come back with new capabilities */
		write_lock_bh(&n->lock);
		n->capabilities = capabilities;
		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
			l = n->links[bearer_id].link;
			if (l)
				tipc_link_update_caps(l, capabilities);
		}
		write_unlock_bh(&n->lock);
		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	n->addr = addr;
	memcpy(&n->peer_id, peer_id, 16);
	n->net = net;
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	if (!tipc_link_bc_create(net, tipc_own_addr(net),
				 addr, U16_MAX,
				 tipc_link_window(tipc_bc_sndlink(net)),
				 n->capabilities,
				 &n->bc_entry.inputq1,
				 &n->bc_entry.namedq,
				 tipc_bc_sndlink(net),
				 &n->bc_entry.link)) {
		pr_warn("Broadcast rcv link creation failed, no memory\n");
		kfree(n);
		n = NULL;
		goto exit;
	}
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	n->keepalive_intv = U32_MAX;
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}

static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
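
/* Worked example for tipc_node_calculate_timer() above: with the usual
 * TIPC default link tolerance of 1500 ms (an assumption about the
 * default, not stated in this file), tol / 4 = 375 < 500, so the
 * keepalive interval becomes 375 ms and the abort limit is set to
 * 1500 / 375 = 4 unanswered probes. A tolerance raised to 15000 ms
 * would cap the interval at 500 ms and give an abort limit of 30.
 */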

static void tipc_node_delete_from_list(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}

static void tipc_node_clear_links(struct tipc_node *node)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &node->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			node->link_cnt--;
		}
	}
}

/* tipc_node_cleanup - delete nodes that do not
 * have active links for NODE_CLEANUP_AFTER time
 */
static int tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	spin_lock_bh(&tn->node_list_lock);
	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Drop the reference held for the timer */
		tipc_node_put(n);
		return;
	}

	__skb_queue_head_init(&xmitq);

	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}

/**
 * __tipc_node_link_up - handle addition of link
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->failover_sent = false;
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}
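
/* A short note on the active_links[] slots used above: slot 0 and
 * slot 1 each hold a bearer id, and unicast senders pick one of them
 * with 'selector & 1' (see tipc_node_xmit()). When two links of equal
 * priority are up, each slot points to a different bearer, so traffic
 * is load-shared per selector; with a single (or higher-priority)
 * link, both slots point to the same bearer and all traffic uses it.
 */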

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
	tipc_node_write_unlock(n);
}

/**
 * __tipc_node_link_down - handle loss of link
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}

static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_link *l = le->link;
	struct tipc_media_addr *maddr;
	struct sk_buff_head xmitq;
	int old_bearer_id = bearer_id;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
		if (delete) {
			kfree(l);
			le->link = NULL;
			n->link_cnt--;
		}
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
	tipc_sk_rcv(n->net, &le->inputq);
}
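
/* A note on the sync_point arithmetic in __tipc_node_link_down() above:
 * link sequence numbers are u16 and compared modulo 2^16, so
 * rcv_nxt + (U16_MAX / 2 - 1) is (nearly) the farthest value still
 * considered to be "after" rcv_nxt by the less()/more() helpers. The
 * reading here, offered as an interpretation rather than documented
 * fact, is that this parks the synch point at a worst-case placeholder
 * which an incoming FAILOVER_MSG can only lower: tipc_node_check_state()
 * below keeps the lowest calculated syncpt, so failover cannot end
 * prematurely.
 */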

static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}

static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}

/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		addr = n->addr;
		tipc_node_put(n);
		return addr;
	}

	/* Even this node may be in conflict */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}

void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool accept_addr = false;
	bool reset = true;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Do nothing. */
		reset = false;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *    Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->window, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && l && !tipc_link_is_reset(l))
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}

void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}

/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
}

static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;

	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE or
 * -ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);

	return rc;
}

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}

/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}

void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
{
	struct sk_buff *txskb;
	struct tipc_node *n;
	u32 dst;

	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();

	kfree_skb(skb);
}

static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}

static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}

/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}

/**
 * tipc_node_check_state - check and if necessary update node state
 * @n: node to which the packet belongs
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * @xmitq: queue for messages to be transmitted
 * Returns true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	if (!tipc_link_validate_msg(l, hdr))
		return false;

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && tipc_link_is_up(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}
		/* If parallel link was already down, and this happened before
		 * the tunnel link came up, FAILOVER was never sent. Ensure that
		 * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
		 */
		if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
			tipc_link_create_dummy_tnl_msg(l, xmitq);
			n->failover_sent = true;
		}
		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No synching needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		syncpt = iseqno + exp_pkts - 1;
		if (!tipc_link_is_up(l))
			__tipc_node_link_up(n, bearer_id, xmitq);
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}
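
/* Example of the syncpt calculation in tipc_node_check_state() above,
 * using small numbers for clarity: if a FAILOVER_MSG arrives with
 * oseqno = 100 and exp_pkts (msg_msgcnt) = 13, then
 * syncpt = 100 + 13 - 1 = 112 is the sequence number of the last
 * packet tunneled from the failed link. All arithmetic and the
 * less()/more() comparisons are modulo 2^16, so this also holds when
 * the window straddles a wraparound, e.g. oseqno = 65530 giving
 * syncpt = 6.
 */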

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_node *n;
	struct tipc_msg *hdr;
	int bearer_id = b->identity;
	struct tipc_link_entry *le;
	u32 self = tipc_own_addr(net);
	int usr, rc = 0;
	u16 bc_ack;

	__skb_queue_head_init(&xmitq);

	/* Ensure message is well-formed before touching the header */
	if (unlikely(!tipc_msg_validate(&skb)))
		goto discard;
	hdr = buf_msg(skb);
	usr = msg_user(hdr);
	bc_ack = msg_bcast_ack(hdr);

	/* Handle arrival of discovery or broadcast packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (unlikely(usr == LINK_CONFIG))
			return tipc_disc_rcv(net, skb, b);
		else
			return tipc_node_bc_rcv(net, skb, bearer_id);
	}

	/* Discard unicast link messages destined for another node */
	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
		goto discard;

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	/* Ensure broadcast reception is in synch with peer's send state */
	if (unlikely(usr == LINK_PROTOCOL))
		tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
	else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
		tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);

	/* Receive packet directly if conditions permit */
	tipc_node_read_lock(n);
	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
		spin_lock_bh(&le->lock);
		if (le->link) {
			rc = tipc_link_rcv(le->link, skb, &xmitq);
			skb = NULL;
		}
		spin_unlock_bh(&le->lock);
	}
	tipc_node_read_unlock(n);

	/* Check/update node state before receiving */
	if (unlikely(skb)) {
		if (unlikely(skb_linearize(skb)))
			goto discard;
		tipc_node_write_lock(n);
		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
			if (le->link) {
				rc = tipc_link_rcv(le->link, skb, &xmitq);
				skb = NULL;
			}
		}
		tipc_node_write_unlock(n);
	}

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id, false);

	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
		tipc_named_rcv(net, &n->bc_entry.namedq);

	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
		tipc_node_mcast_rcv(n);

	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);
discard:
	kfree_skb(skb);
}

void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
			      int prop)
{
	struct tipc_net *tn = tipc_net(net);
	int bearer_id = b->identity;
	struct sk_buff_head xmitq;
	struct tipc_link_entry *e;
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	rcu_read_lock();

	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_write_lock(n);
		e = &n->links[bearer_id];
		if (e->link) {
			if (prop == TIPC_NLA_PROP_TOL)
				tipc_link_set_tolerance(e->link, b->tolerance,
							&xmitq);
			else if (prop == TIPC_NLA_PROP_MTU)
				tipc_link_set_mtu(e->link, b->mtu);
		}
		tipc_node_write_unlock(n);
		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr);
	}

	rcu_read_unlock();
}

int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct tipc_node *peer;
	u32 addr;
	int err;

	/* We identify the peer by its net */
	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
			       info->attrs[TIPC_NLA_NET], tipc_nl_net_policy,
			       info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_NET_ADDR])
		return -EINVAL;

	addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);

	if (in_own_node(net, addr))
		return -ENOTSUPP;

	spin_lock_bh(&tn->node_list_lock);
	peer = tipc_node_find(net, addr);
	if (!peer) {
		spin_unlock_bh(&tn->node_list_lock);
		return -ENXIO;
	}

	tipc_node_write_lock(peer);
	if (peer->state != SELF_DOWN_PEER_DOWN &&
	    peer->state != SELF_DOWN_PEER_LEAVING) {
		tipc_node_write_unlock(peer);
		err = -EBUSY;
		goto err_out;
	}

	tipc_node_clear_links(peer);
	tipc_node_write_unlock(peer);
	tipc_node_delete(peer);

	err = 0;
err_out:
	tipc_node_put(peer);
	spin_unlock_bh(&tn->node_list_lock);

	return err;
}

int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler. Resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}
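
/* A brief sketch of the resume protocol used by the dump functions in
 * the rest of this file: netlink dumps are delivered in multiple skbs,
 * and the kernel calls the dump callback repeatedly until it returns 0.
 * State is carried between calls in cb->args[]; tipc_nl_node_dump()
 * above stores a 'done' flag and the last dumped address, so a resumed
 * call skips forward to that node. If the node has vanished in the
 * meantime, the callback deliberately trips the consistency check
 * (cb->prev_seq = 1) so userspace sees NLM_F_DUMP_INTR and knows the
 * dump may be incomplete.
 */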

/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
						const char *link_name,
						unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_read_lock(n);
		for (i = 0; i < MAX_BEARERS; i++) {
			l = n->links[i].link;
			if (l && !strcmp(tipc_link_name(l), link_name)) {
				*bearer_id = i;
				found_node = n;
				break;
			}
		}
		tipc_node_read_unlock(n);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_read_unlock(node);
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
	return res;
}
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto err_free;
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}

int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}
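/* Note: tipc_nl_node_dump_link() below resumes with a two-level
 * cursor: cb->args[0] records the last node whose links were fully
 * dumped, while cb->args[1] (kept up to date via *prev_link in the
 * helper above) records the bearer id to resume from within the next
 * node. A node whose links straddle a buffer boundary is thus
 * re-entered at the correct link instead of restarting at bearer 0.
 */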
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent();
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler, resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
	struct net *net = sock_net(skb->sk);
	int err;

	if (!info->attrs[TIPC_NLA_MON])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_MON_MAX,
			       info->attrs[TIPC_NLA_MON],
			       tipc_nl_monitor_policy, info->extack);
	if (err)
		return err;

	if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
		u32 val;

		val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
		err = tipc_nl_monitor_set_threshold(net, val);
		if (err)
			return err;
	}

	return 0;
}

static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
{
	struct nlattr *attrs;
	void *hdr;
	u32 val;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  0, TIPC_NL_MON_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_MON);
	if (!attrs)
		goto msg_full;

	val = tipc_nl_monitor_get_threshold(net);

	if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;
	int err;

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	err = __tipc_nl_add_monitor_prop(net, &msg);
	if (err) {
		nlmsg_free(msg.skb);
		return err;
	}

	return genlmsg_reply(msg.skb, info);
}
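/* The TIPC_NL_MON_GET reply assembled by __tipc_nl_add_monitor_prop()
 * above carries a single nested attribute:
 *
 *	TIPC_NLA_MON
 *	  `- TIPC_NLA_MON_ACTIVATION_THRESHOLD (u32)
 *
 * i.e. the cluster size above which TIPC switches from full-mesh
 * neighbor supervision to the overlapping-ring monitoring algorithm
 * (see monitor.c).
 */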
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_bearer = cb->args[0];
	struct tipc_nl_msg msg;
	int bearer_id;
	int err;

	if (prev_bearer == MAX_BEARERS)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
		if (err)
			break;
	}
	rtnl_unlock();
	cb->args[0] = bearer_id;

	return skb->len;
}

int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_node = cb->args[1];
	u32 bearer_id = cb->args[2];
	int done = cb->args[0];
	struct tipc_nl_msg msg;
	int err;

	if (!prev_node) {
		struct nlattr **attrs;
		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_MON])
			return -EINVAL;

		err = nla_parse_nested(mon, TIPC_NLA_MON_MAX,
				       attrs[TIPC_NLA_MON],
				       tipc_nl_monitor_policy, NULL);
		if (err)
			return err;

		if (!mon[TIPC_NLA_MON_REF])
			return -EINVAL;

		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

		if (bearer_id >= MAX_BEARERS)
			return -EINVAL;
	}

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
	if (!err)
		done = 1;

	rtnl_unlock();
	cb->args[0] = done;
	cb->args[1] = prev_node;
	cb->args[2] = bearer_id;

	return skb->len;
}
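/* tipc_nl_node_dump_monitor_peer() expects the request itself to name
 * the bearer of interest (TIPC_NLA_MON nesting a TIPC_NLA_MON_REF
 * bearer id); the peer list of that bearer is then walked across as
 * many send buffers as needed, with cb->args[0..2] carrying the done
 * flag, the last dumped peer and the bearer id between calls.
 */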