/*
 * net/tipc/name_table.c: TIPC name table code
 *
 * Copyright (c) 2000-2006, 2014-2018, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include <linux/list_sort.h>
#include <linux/rbtree_augmented.h>
#include "core.h"
#include "netlink.h"
#include "name_table.h"
#include "name_distr.h"
#include "subscr.h"
#include "bcast.h"
#include "addr.h"
#include "node.h"
#include "group.h"

/**
 * struct service_range - container for all bindings of a service range
 * @lower: service range lower bound
 * @upper: service range upper bound
 * @tree_node: member of service range RB tree
 * @max: largest 'upper' in this node's subtree
 * @local_publ: list of identical publications made from this node
 *   Used by closest_first lookup and multicast lookup algorithm
 * @all_publ: all publications identical to this one, whatever node and scope
 *   Used by round-robin lookup algorithm
 */
struct service_range {
	u32 lower;
	u32 upper;
	struct rb_node tree_node;
	u32 max;
	struct list_head local_publ;
	struct list_head all_publ;
};

/**
 * struct tipc_service - container for all published instances of a service type
 * @type: 32 bit 'type' value for service
 * @publ_cnt: increasing counter for publications in this service
 * @ranges: rb tree containing all service ranges for this service
 * @service_list: links to adjacent name ranges in hash chain
 * @subscriptions: list of subscriptions for this service type
 * @lock: spinlock controlling access to pertaining service ranges/publications
 * @rcu: RCU callback head used for deferred freeing
 */
struct tipc_service {
	u32 type;
	u32 publ_cnt;
	struct rb_root ranges;
	struct hlist_node service_list;
	struct list_head subscriptions;
	spinlock_t lock; /* Covers service range list */
	struct rcu_head rcu;
};

#define service_range_upper(sr) ((sr)->upper)
RB_DECLARE_CALLBACKS_MAX(static, sr_callbacks,
			 struct service_range, tree_node, u32, max,
			 service_range_upper)

#define service_range_entry(rbtree_node) \
	(container_of(rbtree_node, struct service_range, tree_node))

#define service_range_overlap(sr, start, end) \
	((sr)->lower <= (end) && (sr)->upper >= (start))

/**
 * service_range_foreach_match - iterate over tipc service rbtree for each
 * range match
 * @sr: the service range pointer as a loop cursor
 * @sc: the pointer to tipc service which holds the service range rbtree
 * @start, end: the range (end >= start) for matching
 */
#define service_range_foreach_match(sr, sc, start, end)			\
	for (sr = service_range_match_first((sc)->ranges.rb_node,	\
					    start,			\
					    end);			\
	     sr;							\
	     sr = service_range_match_next(&(sr)->tree_node,		\
					   start,			\
					   end))

/**
 * service_range_match_first - find first service range matching a range
 * @n: the root node of service range rbtree for searching
 * @start, end: the range (end >= start) for matching
 *
 * Return: the leftmost service range node in the rbtree that overlaps the
 * specific range if any. Otherwise, returns NULL.
 */
static struct service_range *service_range_match_first(struct rb_node *n,
							u32 start, u32 end)
{
	struct service_range *sr;
	struct rb_node *l, *r;

	/* No overlaps in the tree at all? */
	if (!n || service_range_entry(n)->max < start)
		return NULL;

	while (n) {
		l = n->rb_left;
		if (l && service_range_entry(l)->max >= start) {
			/* A leftmost overlap range node must be one in the left
			 * subtree. If not, it has lower > end, and then nodes
			 * on the right side cannot satisfy the condition
			 * either.
			 */
			n = l;
			continue;
		}

		/* No one in the left subtree can match, return if this node is
		 * an overlap i.e. leftmost.
		 */
		sr = service_range_entry(n);
		if (service_range_overlap(sr, start, end))
			return sr;

		/* Ok, try to lookup on the right side */
		r = n->rb_right;
		if (sr->lower <= end &&
		    r && service_range_entry(r)->max >= start) {
			n = r;
			continue;
		}
		break;
	}

	return NULL;
}

/**
 * service_range_match_next - find next service range matching a range
 * @n: a node in service range rbtree from which the searching starts
 * @start, end: the range (end >= start) for matching
 *
 * Return: the next service range node to the given node in the rbtree that
 * overlaps the specific range if any. Otherwise, returns NULL.
 */
static struct service_range *service_range_match_next(struct rb_node *n,
						       u32 start, u32 end)
{
	struct service_range *sr;
	struct rb_node *p, *r;

	while (n) {
		r = n->rb_right;
		if (r && service_range_entry(r)->max >= start)
			/* A next overlap range node must be one in the right
			 * subtree. If not, it has lower > end, and then any
			 * next successor (i.e. an ancestor) of this node
			 * cannot satisfy the condition either.
			 */
			return service_range_match_first(r, start, end);

		/* No one in the right subtree can match, go up to find an
		 * ancestor of this node which is parent of a left-hand child.
		 */
		while ((p = rb_parent(n)) && n == p->rb_right)
			n = p;
		if (!p)
			break;

		/* Return if this ancestor is an overlap */
		sr = service_range_entry(p);
		if (service_range_overlap(sr, start, end))
			return sr;

		/* Ok, try to lookup more from this ancestor */
		if (sr->lower <= end) {
			n = p;
			continue;
		}
		break;
	}

	return NULL;
}

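/* Small worked example of the overlap search above (the values are
 * illustrative only, not taken from any real workload): with the ranges
 * {10,20}, {15,40} and {50,60} in the tree,
 * service_range_foreach_match(sr, sc, 18, 52) visits all three ranges in
 * ascending 'lower' order, while a lookup for {45,45} visits none of them.
 * The augmented 'max' field (largest 'upper' in a subtree) is what allows
 * whole subtrees to be skipped when their 'max' is below the start of the
 * searched range.
 */
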
static int hash(int x)
{
	return x & (TIPC_NAMETBL_SIZE - 1);
}

/**
 * tipc_publ_create - create a publication structure
 */
static struct publication *tipc_publ_create(u32 type, u32 lower, u32 upper,
					    u32 scope, u32 node, u32 port,
					    u32 key)
{
	struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);

	if (!publ)
		return NULL;

	publ->type = type;
	publ->lower = lower;
	publ->upper = upper;
	publ->scope = scope;
	publ->node = node;
	publ->port = port;
	publ->key = key;
	INIT_LIST_HEAD(&publ->binding_sock);
	INIT_LIST_HEAD(&publ->binding_node);
	INIT_LIST_HEAD(&publ->local_publ);
	INIT_LIST_HEAD(&publ->all_publ);
	INIT_LIST_HEAD(&publ->list);
	return publ;
}

/**
 * tipc_service_create - create a service structure for the specified 'type'
 *
 * Allocates a single range structure and sets it to all 0's.
 */
static struct tipc_service *tipc_service_create(u32 type, struct hlist_head *hd)
{
	struct tipc_service *service = kzalloc(sizeof(*service), GFP_ATOMIC);

	if (!service) {
		pr_warn("Service creation failed, no memory\n");
		return NULL;
	}

	spin_lock_init(&service->lock);
	service->type = type;
	service->ranges = RB_ROOT;
	INIT_HLIST_NODE(&service->service_list);
	INIT_LIST_HEAD(&service->subscriptions);
	hlist_add_head_rcu(&service->service_list, hd);
	return service;
}

/* tipc_service_find_range - find service range matching publication parameters
 */
static struct service_range *tipc_service_find_range(struct tipc_service *sc,
						     u32 lower, u32 upper)
{
	struct service_range *sr;

	service_range_foreach_match(sr, sc, lower, upper) {
		/* Look for exact match */
		if (sr->lower == lower && sr->upper == upper)
			return sr;
	}

	return NULL;
}

static struct service_range *tipc_service_create_range(struct tipc_service *sc,
							u32 lower, u32 upper)
{
	struct rb_node **n, *parent = NULL;
	struct service_range *sr;

	n = &sc->ranges.rb_node;
	while (*n) {
		parent = *n;
		sr = service_range_entry(parent);
		if (lower == sr->lower && upper == sr->upper)
			return sr;
		if (sr->max < upper)
			sr->max = upper;
		if (lower <= sr->lower)
			n = &parent->rb_left;
		else
			n = &parent->rb_right;
	}
	sr = kzalloc(sizeof(*sr), GFP_ATOMIC);
	if (!sr)
		return NULL;
	sr->lower = lower;
	sr->upper = upper;
	sr->max = upper;
	INIT_LIST_HEAD(&sr->local_publ);
	INIT_LIST_HEAD(&sr->all_publ);
	rb_link_node(&sr->tree_node, parent, n);
	rb_insert_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
	return sr;
}

static struct publication *tipc_service_insert_publ(struct net *net,
						    struct tipc_service *sc,
						    u32 type, u32 lower,
						    u32 upper, u32 scope,
						    u32 node, u32 port,
						    u32 key)
{
	struct tipc_subscription *sub, *tmp;
	struct service_range *sr;
	struct publication *p;
	bool first = false;

	sr = tipc_service_create_range(sc, lower, upper);
	if (!sr)
		goto err;

	first = list_empty(&sr->all_publ);

	/* Return if the publication already exists */
	list_for_each_entry(p, &sr->all_publ, all_publ) {
		if (p->key == key && (!p->node || p->node == node))
			return NULL;
	}

	/* Create and insert publication */
	p = tipc_publ_create(type, lower, upper, scope, node, port, key);
	if (!p)
		goto err;
	/* Assume the gap between publication ids never exceeds INT_MAX */
	p->id = sc->publ_cnt++;
	if (in_own_node(net, node))
		list_add(&p->local_publ, &sr->local_publ);
	list_add(&p->all_publ, &sr->all_publ);

	/* Any subscriptions waiting for notification? */
	list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) {
		tipc_sub_report_overlap(sub, p->lower, p->upper, TIPC_PUBLISHED,
					p->port, p->node, p->scope, first);
	}
	return p;
err:
	pr_warn("Failed to bind to %u,%u,%u, no memory\n", type, lower, upper);
	return NULL;
}

/**
 * tipc_service_remove_publ - remove a publication from a service
 */
static struct publication *tipc_service_remove_publ(struct service_range *sr,
						    u32 node, u32 key)
{
	struct publication *p;

	list_for_each_entry(p, &sr->all_publ, all_publ) {
		if (p->key != key || (node && node != p->node))
			continue;
		list_del(&p->all_publ);
		list_del(&p->local_publ);
		return p;
	}
	return NULL;
}

/* Reuse time_after32() for wrap-safe comparison of publication ids */
#define publication_after(pa, pb) time_after32((pa)->id, (pb)->id)
static int tipc_publ_sort(void *priv, struct list_head *a,
			  struct list_head *b)
{
	struct publication *pa, *pb;

	pa = container_of(a, struct publication, list);
	pb = container_of(b, struct publication, list);
	return publication_after(pa, pb);
}

/**
 * tipc_service_subscribe - attach a subscription, and optionally
 * issue the prescribed number of events if there is any service
 * range overlapping with the requested range
 */
static void tipc_service_subscribe(struct tipc_service *service,
				   struct tipc_subscription *sub)
{
	struct tipc_subscr *sb = &sub->evt.s;
	struct publication *p, *first, *tmp;
	struct list_head publ_list;
	struct service_range *sr;
	struct tipc_name_seq ns;
	u32 filter;

	ns.type = tipc_sub_read(sb, seq.type);
	ns.lower = tipc_sub_read(sb, seq.lower);
	ns.upper = tipc_sub_read(sb, seq.upper);
	filter = tipc_sub_read(sb, filter);

	tipc_sub_get(sub);
	list_add(&sub->service_list, &service->subscriptions);

	if (filter & TIPC_SUB_NO_STATUS)
		return;

	INIT_LIST_HEAD(&publ_list);
	service_range_foreach_match(sr, service, ns.lower, ns.upper) {
		first = NULL;
		list_for_each_entry(p, &sr->all_publ, all_publ) {
			if (filter & TIPC_SUB_PORTS)
				list_add_tail(&p->list, &publ_list);
			else if (!first || publication_after(first, p))
				/* Pick this range's *first* publication */
				first = p;
		}
		if (first)
			list_add_tail(&first->list, &publ_list);
	}

	/* Sort the publications before reporting */
	list_sort(NULL, &publ_list, tipc_publ_sort);
	list_for_each_entry_safe(p, tmp, &publ_list, list) {
		tipc_sub_report_overlap(sub, p->lower, p->upper,
					TIPC_PUBLISHED, p->port, p->node,
					p->scope, true);
		list_del_init(&p->list);
	}
}

static struct tipc_service *tipc_service_find(struct net *net, u32 type)
{
	struct name_table *nt = tipc_name_table(net);
	struct hlist_head *service_head;
	struct tipc_service *service;

	service_head = &nt->services[hash(type)];
	hlist_for_each_entry_rcu(service, service_head, service_list) {
		if (service->type == type)
			return service;
	}
	return NULL;
}

struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
					     u32 lower, u32 upper,
					     u32 scope, u32 node,
					     u32 port, u32 key)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_service *sc;
	struct publication *p;

	if (scope > TIPC_NODE_SCOPE || lower > upper) {
		pr_debug("Failed to bind illegal {%u,%u,%u} with scope %u\n",
			 type, lower, upper, scope);
		return NULL;
	}
	sc = tipc_service_find(net, type);
	if (!sc)
		sc = tipc_service_create(type, &nt->services[hash(type)]);
	if (!sc)
		return NULL;

	spin_lock_bh(&sc->lock);
	p = tipc_service_insert_publ(net, sc, type, lower, upper,
				     scope, node, port, key);
	spin_unlock_bh(&sc->lock);
	return p;
}

struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
					     u32 lower, u32 upper,
					     u32 node, u32 key)
{
	struct tipc_service *sc = tipc_service_find(net, type);
	struct tipc_subscription *sub, *tmp;
	struct service_range *sr = NULL;
	struct publication *p = NULL;
	bool last;

	if (!sc)
		return NULL;

	spin_lock_bh(&sc->lock);
	sr = tipc_service_find_range(sc, lower, upper);
	if (!sr)
		goto exit;
	p = tipc_service_remove_publ(sr, node, key);
	if (!p)
		goto exit;

	/* Notify any waiting subscriptions */
	last = list_empty(&sr->all_publ);
	list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) {
		tipc_sub_report_overlap(sub, lower, upper, TIPC_WITHDRAWN,
					p->port, node, p->scope, last);
	}

	/* Remove service range item if this was its last publication */
	if (list_empty(&sr->all_publ)) {
		rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
		kfree(sr);
	}

	/* Delete service item if there are no more publications and
	 * subscriptions
	 */
	if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) {
		hlist_del_init_rcu(&sc->service_list);
		kfree_rcu(sc, rcu);
	}
exit:
	spin_unlock_bh(&sc->lock);
	return p;
}

/**
 * tipc_nametbl_translate - perform service instance to socket translation
 *
 * On entry, 'dnode' is the search domain used during translation.
 *
 * On exit:
 * - if translation is deferred to another node, leave 'dnode' unchanged and
 *   return 0
 * - if translation is attempted and succeeds, set 'dnode' to the publishing
 *   node and return the published (non-zero) port number
 * - if translation is attempted and fails, set 'dnode' to 0 and return 0
 *
 * Note that for legacy users (node configured with Z.C.N address format) the
 * 'closest-first' lookup algorithm must be maintained, i.e., if dnode is 0
 * we must look in the local binding list first
 */
u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *dnode)
{
	struct tipc_net *tn = tipc_net(net);
	bool legacy = tn->legacy_addr_format;
	u32 self = tipc_own_addr(net);
	struct service_range *sr;
	struct tipc_service *sc;
	struct list_head *list;
	struct publication *p;
	u32 port = 0;
	u32 node = 0;

	if (!tipc_in_scope(legacy, *dnode, self))
		return 0;

	rcu_read_lock();
	sc = tipc_service_find(net, type);
	if (unlikely(!sc))
		goto exit;

	spin_lock_bh(&sc->lock);
	service_range_foreach_match(sr, sc, instance, instance) {
		/* Select lookup algo: local, closest-first or round-robin */
		if (*dnode == self) {
			list = &sr->local_publ;
			if (list_empty(list))
				continue;
			p = list_first_entry(list, struct publication,
					     local_publ);
			list_move_tail(&p->local_publ, &sr->local_publ);
		} else if (legacy && !*dnode && !list_empty(&sr->local_publ)) {
			list = &sr->local_publ;
			p = list_first_entry(list, struct publication,
					     local_publ);
			list_move_tail(&p->local_publ, &sr->local_publ);
		} else {
			list = &sr->all_publ;
			p = list_first_entry(list, struct publication,
					     all_publ);
			list_move_tail(&p->all_publ, &sr->all_publ);
		}
		port = p->port;
		node = p->node;
		/* Todo: as for legacy, pick the first matching range only, a
		 * "true" round-robin will be performed as needed.
		 */
		break;
	}
	spin_unlock_bh(&sc->lock);

exit:
	rcu_read_unlock();
	*dnode = node;
	return port;
}

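/* Sketch of typical caller usage, following the contract documented above
 * (illustrative only; 'type', 'instance' and 'domain' stand for the caller's
 * own values, and the error handling is the caller's choice):
 *
 *	u32 dnode = domain;	// 0 means the lookup is not restricted
 *	u32 dport = tipc_nametbl_translate(net, type, instance, &dnode);
 *
 *	if (!dport && !dnode)
 *		return -EHOSTUNREACH;	// no matching publication found
 *	// otherwise send to (dnode, dport); dport == 0 with dnode != 0
 *	// means translation is deferred to the node given by dnode
 */
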
bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 scope,
			 struct list_head *dsts, int *dstcnt, u32 exclude,
			 bool all)
{
	u32 self = tipc_own_addr(net);
	struct service_range *sr;
	struct tipc_service *sc;
	struct publication *p;

	*dstcnt = 0;
	rcu_read_lock();
	sc = tipc_service_find(net, type);
	if (unlikely(!sc))
		goto exit;

	spin_lock_bh(&sc->lock);

	/* Todo: a full search i.e. service_range_foreach_match() instead? */
	sr = service_range_match_first(sc->ranges.rb_node, instance, instance);
	if (!sr)
		goto no_match;

	list_for_each_entry(p, &sr->all_publ, all_publ) {
		if (p->scope != scope)
			continue;
		if (p->port == exclude && p->node == self)
			continue;
		tipc_dest_push(dsts, p->node, p->port);
		(*dstcnt)++;
		if (all)
			continue;
		list_move_tail(&p->all_publ, &sr->all_publ);
		break;
	}
no_match:
	spin_unlock_bh(&sc->lock);
exit:
	rcu_read_unlock();
	return !list_empty(dsts);
}

void tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper,
			    u32 scope, bool exact, struct list_head *dports)
{
	struct service_range *sr;
	struct tipc_service *sc;
	struct publication *p;

	rcu_read_lock();
	sc = tipc_service_find(net, type);
	if (!sc)
		goto exit;

	spin_lock_bh(&sc->lock);
	service_range_foreach_match(sr, sc, lower, upper) {
		list_for_each_entry(p, &sr->local_publ, local_publ) {
			if (p->scope == scope || (!exact && p->scope < scope))
				tipc_dest_push(dports, 0, p->port);
		}
	}
	spin_unlock_bh(&sc->lock);
exit:
	rcu_read_unlock();
}

/* tipc_nametbl_lookup_dst_nodes - find broadcast destination nodes
 * - Creates list of nodes that overlap the given multicast address
 * - Determines if any node local destinations overlap
 */
void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
				   u32 upper, struct tipc_nlist *nodes)
{
	struct service_range *sr;
	struct tipc_service *sc;
	struct publication *p;

	rcu_read_lock();
	sc = tipc_service_find(net, type);
	if (!sc)
		goto exit;

	spin_lock_bh(&sc->lock);
	service_range_foreach_match(sr, sc, lower, upper) {
		list_for_each_entry(p, &sr->all_publ, all_publ) {
			tipc_nlist_add(nodes, p->node);
		}
	}
	spin_unlock_bh(&sc->lock);
exit:
	rcu_read_unlock();
}

/* tipc_nametbl_build_group - build list of communication group members
 */
void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
			      u32 type, u32 scope)
{
	struct service_range *sr;
	struct tipc_service *sc;
	struct publication *p;
	struct rb_node *n;

	rcu_read_lock();
	sc = tipc_service_find(net, type);
	if (!sc)
		goto exit;

	spin_lock_bh(&sc->lock);
	for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
		sr = container_of(n, struct service_range, tree_node);
		list_for_each_entry(p, &sr->all_publ, all_publ) {
			if (p->scope != scope)
				continue;
			tipc_group_add_member(grp, p->node, p->port, p->lower);
		}
	}
	spin_unlock_bh(&sc->lock);
exit:
	rcu_read_unlock();
}

/* tipc_nametbl_publish - add service binding to name table
 */
struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
					 u32 upper, u32 scope, u32 port,
					 u32 key)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct publication *p = NULL;
	struct sk_buff *skb = NULL;
	u32 rc_dests;

	spin_lock_bh(&tn->nametbl_lock);

	if (nt->local_publ_count >= TIPC_MAX_PUBL) {
		pr_warn("Bind failed, max limit %u reached\n", TIPC_MAX_PUBL);
		goto exit;
	}

	p = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
				     tipc_own_addr(net), port, key);
	if (p) {
		nt->local_publ_count++;
		skb = tipc_named_publish(net, p);
	}
	rc_dests = nt->rc_dests;
exit:
	spin_unlock_bh(&tn->nametbl_lock);

	if (skb)
		tipc_node_broadcast(net, skb, rc_dests);
	return p;
}

/**
 * tipc_nametbl_withdraw - withdraw a service binding
 */
int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower,
			  u32 upper, u32 key)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	u32 self = tipc_own_addr(net);
	struct sk_buff *skb = NULL;
	struct publication *p;
	u32 rc_dests;

	spin_lock_bh(&tn->nametbl_lock);

	p = tipc_nametbl_remove_publ(net, type, lower, upper, self, key);
	if (p) {
		nt->local_publ_count--;
		skb = tipc_named_withdraw(net, p);
		list_del_init(&p->binding_sock);
		kfree_rcu(p, rcu);
	} else {
		pr_err("Failed to remove local publication {%u,%u,%u}/%u\n",
		       type, lower, upper, key);
	}
	rc_dests = nt->rc_dests;
	spin_unlock_bh(&tn->nametbl_lock);

	if (skb) {
		tipc_node_broadcast(net, skb, rc_dests);
		return 1;
	}
	return 0;
}

/**
 * tipc_nametbl_subscribe - add a subscription object to the name table
 */
bool tipc_nametbl_subscribe(struct tipc_subscription *sub)
{
	struct name_table *nt = tipc_name_table(sub->net);
	struct tipc_net *tn = tipc_net(sub->net);
	struct tipc_subscr *s = &sub->evt.s;
	u32 type = tipc_sub_read(s, seq.type);
	struct tipc_service *sc;
	bool res = true;

	spin_lock_bh(&tn->nametbl_lock);
	sc = tipc_service_find(sub->net, type);
	if (!sc)
		sc = tipc_service_create(type, &nt->services[hash(type)]);
	if (sc) {
		spin_lock_bh(&sc->lock);
		tipc_service_subscribe(sc, sub);
		spin_unlock_bh(&sc->lock);
	} else {
		pr_warn("Failed to subscribe for {%u,%u,%u}\n", type,
			tipc_sub_read(s, seq.lower),
			tipc_sub_read(s, seq.upper));
		res = false;
	}
	spin_unlock_bh(&tn->nametbl_lock);
	return res;
}

/**
 * tipc_nametbl_unsubscribe - remove a subscription object from name table
 */
void tipc_nametbl_unsubscribe(struct tipc_subscription *sub)
{
	struct tipc_net *tn = tipc_net(sub->net);
	struct tipc_subscr *s = &sub->evt.s;
	u32 type = tipc_sub_read(s, seq.type);
	struct tipc_service *sc;

	spin_lock_bh(&tn->nametbl_lock);
	sc = tipc_service_find(sub->net, type);
	if (!sc)
		goto exit;

	spin_lock_bh(&sc->lock);
	list_del_init(&sub->service_list);
	tipc_sub_put(sub);

	/* Delete service item if no more publications and subscriptions */
	if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) {
		hlist_del_init_rcu(&sc->service_list);
		kfree_rcu(sc, rcu);
	}
	spin_unlock_bh(&sc->lock);
exit:
	spin_unlock_bh(&tn->nametbl_lock);
}

int tipc_nametbl_init(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct name_table *nt;
	int i;

	nt = kzalloc(sizeof(*nt), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	for (i = 0; i < TIPC_NAMETBL_SIZE; i++)
		INIT_HLIST_HEAD(&nt->services[i]);

	INIT_LIST_HEAD(&nt->node_scope);
	INIT_LIST_HEAD(&nt->cluster_scope);
	rwlock_init(&nt->cluster_scope_lock);
	tn->nametbl = nt;
	spin_lock_init(&tn->nametbl_lock);
	return 0;
}

/**
 * tipc_service_delete - purge all publications for a service and delete it
 */
static void tipc_service_delete(struct net *net, struct tipc_service *sc)
{
	struct service_range *sr, *tmpr;
	struct publication *p, *tmp;

	spin_lock_bh(&sc->lock);
	rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) {
		list_for_each_entry_safe(p, tmp, &sr->all_publ, all_publ) {
			tipc_service_remove_publ(sr, p->node, p->key);
			kfree_rcu(p, rcu);
		}
		rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
		kfree(sr);
	}
	hlist_del_init_rcu(&sc->service_list);
	spin_unlock_bh(&sc->lock);
	kfree_rcu(sc, rcu);
}

void tipc_nametbl_stop(struct net *net)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct hlist_head *service_head;
	struct tipc_service *service;
	u32 i;

	/* Verify name table is empty and purge any lingering
	 * publications, then release the name table
	 */
	spin_lock_bh(&tn->nametbl_lock);
	for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
		if (hlist_empty(&nt->services[i]))
			continue;
		service_head = &nt->services[i];
		hlist_for_each_entry_rcu(service, service_head, service_list) {
			tipc_service_delete(net, service);
		}
	}
	spin_unlock_bh(&tn->nametbl_lock);

	synchronize_net();
	kfree(nt);
}

static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
					struct tipc_service *service,
					struct service_range *sr,
					u32 *last_key)
{
	struct publication *p;
	struct nlattr *attrs;
	struct nlattr *b;
	void *hdr;

	if (*last_key) {
		list_for_each_entry(p, &sr->all_publ, all_publ)
			if (p->key == *last_key)
				break;
		if (p->key != *last_key)
			return -EPIPE;
	} else {
		p = list_first_entry(&sr->all_publ,
				     struct publication,
				     all_publ);
	}

	list_for_each_entry_from(p, &sr->all_publ, all_publ) {
		*last_key = p->key;

		hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
				  &tipc_genl_family, NLM_F_MULTI,
				  TIPC_NL_NAME_TABLE_GET);
		if (!hdr)
			return -EMSGSIZE;

		attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NAME_TABLE);
		if (!attrs)
			goto msg_full;

		b = nla_nest_start_noflag(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
		if (!b)
			goto attr_msg_full;

		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, service->type))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sr->lower))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sr->upper))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->port))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
			goto publ_msg_full;

		nla_nest_end(msg->skb, b);
		nla_nest_end(msg->skb, attrs);
		genlmsg_end(msg->skb, hdr);
	}
	*last_key = 0;

	return 0;

publ_msg_full:
	nla_nest_cancel(msg->skb, b);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static int __tipc_nl_service_range_list(struct tipc_nl_msg *msg,
					struct tipc_service *sc,
					u32 *last_lower, u32 *last_key)
{
	struct service_range *sr;
	struct rb_node *n;
	int err;

	for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
		sr = container_of(n, struct service_range, tree_node);
		if (sr->lower < *last_lower)
			continue;
		err = __tipc_nl_add_nametable_publ(msg, sc, sr, last_key);
		if (err) {
			*last_lower = sr->lower;
			return err;
		}
	}
	*last_lower = 0;
	return 0;
}

static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
				u32 *last_type, u32 *last_lower, u32 *last_key)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_service *service = NULL;
	struct hlist_head *head;
	int err;
	int i;

	if (*last_type)
		i = hash(*last_type);
	else
		i = 0;

	for (; i < TIPC_NAMETBL_SIZE; i++) {
		head = &tn->nametbl->services[i];

		if (*last_type ||
		    (!i && *last_key && (*last_lower == *last_key))) {
			service = tipc_service_find(net, *last_type);
			if (!service)
				return -EPIPE;
		} else {
			hlist_for_each_entry_rcu(service, head, service_list)
				break;
			if (!service)
				continue;
		}

		hlist_for_each_entry_from_rcu(service, service_list) {
			spin_lock_bh(&service->lock);
			err = __tipc_nl_service_range_list(msg, service,
							   last_lower,
							   last_key);

			if (err) {
				*last_type = service->type;
				spin_unlock_bh(&service->lock);
				return err;
			}
			spin_unlock_bh(&service->lock);
		}
		*last_type = 0;
	}
	return 0;
}

int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 last_type = cb->args[0];
	u32 last_lower = cb->args[1];
	u32 last_key = cb->args[2];
	int done = cb->args[3];
	struct tipc_nl_msg msg;
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	err = tipc_nl_service_list(net, &msg, &last_type,
				   &last_lower, &last_key);
	if (!err) {
		done = 1;
	} else if (err != -EMSGSIZE) {
		/* We never set seq or call nl_dump_check_consistent(), which
		 * means that setting prev_seq here will cause the consistency
		 * check to fail in the netlink callback handler. This results
		 * in the NLMSG_DONE message having the NLM_F_DUMP_INTR flag
		 * set if we got an error.
		 */
		cb->prev_seq = 1;
	}
	rcu_read_unlock();

	cb->args[0] = last_type;
	cb->args[1] = last_lower;
	cb->args[2] = last_key;
	cb->args[3] = done;

	return skb->len;
}

struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
{
	struct tipc_dest *dst;

	list_for_each_entry(dst, l, list) {
		if (dst->node == node && dst->port == port)
			return dst;
	}
	return NULL;
}

bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
{
	struct tipc_dest *dst;

	if (tipc_dest_find(l, node, port))
		return false;

	dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
	if (unlikely(!dst))
		return false;
	dst->node = node;
	dst->port = port;
	list_add(&dst->list, l);
	return true;
}

bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port)
{
	struct tipc_dest *dst;

	if (list_empty(l))
		return false;
	dst = list_first_entry(l, typeof(*dst), list);
	if (port)
		*port = dst->port;
	if (node)
		*node = dst->node;
	list_del(&dst->list);
	kfree(dst);
	return true;
}

bool tipc_dest_del(struct list_head *l, u32 node, u32 port)
{
	struct tipc_dest *dst;

	dst = tipc_dest_find(l, node, port);
	if (!dst)
		return false;
	list_del(&dst->list);
	kfree(dst);
	return true;
}

void tipc_dest_list_purge(struct list_head *l)
{
	struct tipc_dest *dst, *tmp;

	list_for_each_entry_safe(dst, tmp, l, list) {
		list_del(&dst->list);
		kfree(dst);
	}
}

int tipc_dest_list_len(struct list_head *l)
{
	struct tipc_dest *dst;
	int i = 0;

	list_for_each_entry(dst, l, list) {
		i++;
	}
	return i;
}
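
/* Usage sketch for the tipc_dest list helpers above (illustrative only;
 * 'onode' and 'oport' stand for whatever node/port values the caller has):
 *
 *	LIST_HEAD(dsts);
 *	u32 node, port;
 *
 *	tipc_dest_push(&dsts, onode, oport);	// added to the list
 *	tipc_dest_push(&dsts, onode, oport);	// duplicate, returns false
 *	while (tipc_dest_pop(&dsts, &node, &port))
 *		;	// each unique destination is handed out exactly once
 *	tipc_dest_list_purge(&dsts);		// drop anything left over
 */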