1 /* 2 * net/tipc/name_table.c: TIPC name table code 3 * 4 * Copyright (c) 2000-2006, 2014-2018, Ericsson AB 5 * Copyright (c) 2004-2008, 2010-2014, Wind River Systems 6 * Copyright (c) 2020, Red Hat Inc 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. Neither the names of the copyright holders nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * Alternatively, this software may be distributed under the terms of the 22 * GNU General Public License ("GPL") version 2 as published by the Free 23 * Software Foundation. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <net/sock.h>
#include <linux/list_sort.h>
#include <linux/rbtree_augmented.h>
#include "core.h"
#include "netlink.h"
#include "name_table.h"
#include "name_distr.h"
#include "subscr.h"
#include "bcast.h"
#include "addr.h"
#include "node.h"
#include "group.h"

/**
 * struct service_range - container for all bindings of a service range
 * @lower: service range lower bound
 * @upper: service range upper bound
 * @tree_node: member of service range RB tree
 * @max: largest 'upper' in this node subtree
 *       (augmented-rbtree metadata maintained by sr_callbacks below;
 *       it is what makes the interval searches in this file possible)
 * @local_publ: list of identical publications made from this node
 *   Used by closest_first lookup and multicast lookup algorithm
 * @all_publ: all publications identical to this one, whatever node and scope
 *   Used by round-robin lookup algorithm
 */
struct service_range {
	u32 lower;
	u32 upper;
	struct rb_node tree_node;
	u32 max;
	struct list_head local_publ;
	struct list_head all_publ;
};

/**
 * struct tipc_service - container for all published instances of a service type
 * @type: 32 bit 'type' value for service
 * @publ_cnt: increasing counter for publications in this service
 * @ranges: rb tree containing all service ranges for this service
 * @service_list: links to adjacent name ranges in hash chain
 * @subscriptions: list of subscriptions for this service type
 * @lock: spinlock controlling access to pertaining service ranges/publications
 * @rcu: RCU callback head used for deferred freeing
 */
struct tipc_service {
	u32 type;
	u32 publ_cnt;
	struct rb_root ranges;
	struct hlist_node service_list;
	struct list_head subscriptions;
	spinlock_t lock; /* Covers service range list */
	struct rcu_head rcu;
};

/* Accessor the augmented rbtree callbacks use to read a node's 'upper',
 * from which the subtree 'max' field is derived.
 */
#define service_range_upper(sr) ((sr)->upper)
RB_DECLARE_CALLBACKS_MAX(static, sr_callbacks,
			 struct service_range, tree_node, u32, max,
			 service_range_upper)

#define service_range_entry(rbtree_node) \
	(container_of(rbtree_node, struct service_range, tree_node))

/* True if range [start, end] intersects [sr->lower, sr->upper] */
#define service_range_overlap(sr, start, end) \
	((sr)->lower <= (end) && (sr)->upper >= (start))

/**
 * service_range_foreach_match - iterate over tipc service rbtree for each
 *                               range match
 * @sr: the service range pointer as a loop cursor
 * @sc: the pointer to tipc service which holds the service range rbtree
 * @start, end: the range (end >= start) for matching
 *
 * NOTE(review): callers are expected to hold sc->lock while iterating;
 * every caller in this file does so.
 */
#define service_range_foreach_match(sr, sc, start, end)			\
	for (sr = service_range_match_first((sc)->ranges.rb_node,	\
					    start,			\
					    end);			\
	     sr;							\
	     sr = service_range_match_next(&(sr)->tree_node,		\
					   start,			\
					   end))

/**
 * service_range_match_first - find first service range matching a range
 * @n: the root node of service range rbtree for searching
 * @start, end: the range (end >= start) for matching
 *
 * Return: the leftmost service range node in the rbtree that overlaps the
 * specific range if any. Otherwise, returns NULL.
 */
static struct service_range *service_range_match_first(struct rb_node *n,
						       u32 start, u32 end)
{
	struct service_range *sr;
	struct rb_node *l, *r;

	/* Non overlaps in tree at all? The subtree 'max' gives an O(1)
	 * rejection: no node below 'n' has an 'upper' reaching 'start'.
	 */
	if (!n || service_range_entry(n)->max < start)
		return NULL;

	while (n) {
		l = n->rb_left;
		if (l && service_range_entry(l)->max >= start) {
			/* A leftmost overlap range node must be one in the left
			 * subtree. If not, it has lower > end, then nodes on
			 * the right side cannot satisfy the condition either.
			 */
			n = l;
			continue;
		}

		/* No one in the left subtree can match, return if this node is
		 * an overlap i.e. leftmost.
		 */
		sr = service_range_entry(n);
		if (service_range_overlap(sr, start, end))
			return sr;

		/* Ok, try to lookup on the right side */
		r = n->rb_right;
		if (sr->lower <= end &&
		    r && service_range_entry(r)->max >= start) {
			n = r;
			continue;
		}
		break;
	}

	return NULL;
}

/**
 * service_range_match_next - find next service range matching a range
 * @n: a node in service range rbtree from which the searching starts
 * @start, end: the range (end >= start) for matching
 *
 * Return: the next service range node to the given node in the rbtree that
 * overlaps the specific range if any. Otherwise, returns NULL.
 */
static struct service_range *service_range_match_next(struct rb_node *n,
						      u32 start, u32 end)
{
	struct service_range *sr;
	struct rb_node *p, *r;

	while (n) {
		r = n->rb_right;
		if (r && service_range_entry(r)->max >= start)
			/* A next overlap range node must be one in the right
			 * subtree. If not, it has lower > end, then any next
			 * successor (- an ancestor) of this node cannot
			 * satisfy the condition either.
			 */
			return service_range_match_first(r, start, end);

		/* No one in the right subtree can match, go up to find an
		 * ancestor of this node which is parent of a left-hand child.
		 */
		while ((p = rb_parent(n)) && n == p->rb_right)
			n = p;
		if (!p)
			break;

		/* Return if this ancestor is an overlap */
		sr = service_range_entry(p);
		if (service_range_overlap(sr, start, end))
			return sr;

		/* Ok, try to lookup more from this ancestor */
		if (sr->lower <= end) {
			n = p;
			continue;
		}
		break;
	}

	return NULL;
}

/* Map a service type onto a name table hash-chain index.
 * The mask only spreads correctly if TIPC_NAMETBL_SIZE is a power of two.
 */
static int hash(int x)
{
	return x & (TIPC_NAMETBL_SIZE - 1);
}

/**
 * tipc_publ_create - create a publication structure
 *
 * Allocated with GF_ATOMIC since callers hold spinlocks.
 * All list heads are self-initialized so the publication can be unlinked
 * unconditionally later.
 *
 * Return: the new publication, or NULL if allocation fails.
 */
static struct publication *tipc_publ_create(u32 type, u32 lower, u32 upper,
					    u32 scope, u32 node, u32 port,
					    u32 key)
{
	struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);

	if (!publ)
		return NULL;

	publ->type = type;
	publ->lower = lower;
	publ->upper = upper;
	publ->scope = scope;
	publ->node = node;
	publ->port = port;
	publ->key = key;
	INIT_LIST_HEAD(&publ->binding_sock);
	INIT_LIST_HEAD(&publ->binding_node);
	INIT_LIST_HEAD(&publ->local_publ);
	INIT_LIST_HEAD(&publ->all_publ);
	INIT_LIST_HEAD(&publ->list);
	return publ;
}

/**
 * tipc_service_create - create a service structure for the specified 'type'
 *
 * Allocates a single range structure and sets it to all 0's.
 *
 * The new service is immediately added (RCU) to the hash chain @hd.
 * Return: the new service, or NULL if allocation fails.
 */
static struct tipc_service *tipc_service_create(u32 type, struct hlist_head *hd)
{
	struct tipc_service *service = kzalloc(sizeof(*service), GFP_ATOMIC);

	if (!service) {
		pr_warn("Service creation failed, no memory\n");
		return NULL;
	}

	spin_lock_init(&service->lock);
	service->type = type;
	service->ranges = RB_ROOT;
	INIT_HLIST_NODE(&service->service_list);
	INIT_LIST_HEAD(&service->subscriptions);
	hlist_add_head_rcu(&service->service_list, hd);
	return service;
}

/* tipc_service_find_range - find service range matching publication parameters
 *
 * Only an exact [lower, upper] match counts. Caller must hold sc->lock.
 */
static struct service_range *tipc_service_find_range(struct tipc_service *sc,
						     u32 lower, u32 upper)
{
	struct service_range *sr;

	service_range_foreach_match(sr, sc, lower, upper) {
		/* Look for exact match */
		if (sr->lower == lower && sr->upper == upper)
			return sr;
	}

	return NULL;
}

/* tipc_service_create_range - get or insert the range [lower, upper]
 *
 * Returns the existing node on exact match; otherwise allocates and links
 * a new one, updating the subtree 'max' of every node on the descent path.
 * Caller must hold sc->lock. Returns NULL on allocation failure.
 */
static struct service_range *tipc_service_create_range(struct tipc_service *sc,
						       u32 lower, u32 upper)
{
	struct rb_node **n, *parent = NULL;
	struct service_range *sr;

	n = &sc->ranges.rb_node;
	while (*n) {
		parent = *n;
		sr = service_range_entry(parent);
		if (lower == sr->lower && upper == sr->upper)
			return sr;
		if (sr->max < upper)
			sr->max = upper;
		if (lower <= sr->lower)
			n = &parent->rb_left;
		else
			n = &parent->rb_right;
	}
	sr = kzalloc(sizeof(*sr), GFP_ATOMIC);
	if (!sr)
		return NULL;
	sr->lower = lower;
	sr->upper = upper;
	sr->max = upper;
	INIT_LIST_HEAD(&sr->local_publ);
	INIT_LIST_HEAD(&sr->all_publ);
	rb_link_node(&sr->tree_node, parent, n);
	rb_insert_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
	return sr;
}

static struct publication *tipc_service_insert_publ(struct net *net,
						    struct tipc_service *sc,
						    u32 type, u32 lower,
						    u32 upper, u32 scope,
						    u32 node, u32 port,
						    u32 key)
{
	struct tipc_subscription *sub, *tmp;
	struct service_range *sr;
	struct publication *p;
	bool first = false;

	sr = tipc_service_create_range(sc, lower, upper);
	if (!sr)
		goto err;

	/* Remember whether the range was empty before we add to it, so
	 * subscribers can be told this is the first matching publication.
	 */
	first = list_empty(&sr->all_publ);

	/* Return if the publication already exists */
	list_for_each_entry(p, &sr->all_publ, all_publ) {
		if (p->key == key && (!p->node || p->node == node))
			return NULL;
	}

	/* Create and insert publication */
	p = tipc_publ_create(type, lower, upper, scope, node, port, key);
	if (!p)
		goto err;
	/* Suppose there shouldn't be a huge gap btw publs i.e. >INT_MAX */
	p->id = sc->publ_cnt++;
	if (in_own_node(net, node))
		list_add(&p->local_publ, &sr->local_publ);
	list_add(&p->all_publ, &sr->all_publ);

	/* Any subscriptions waiting for notification? */
	list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) {
		tipc_sub_report_overlap(sub, p->lower, p->upper, TIPC_PUBLISHED,
					p->port, p->node, p->scope, first);
	}
	return p;
err:
	pr_warn("Failed to bind to %u,%u,%u, no memory\n", type, lower, upper);
	return NULL;
}

/**
 * tipc_service_remove_publ - remove a publication from a service
 *
 * Matches on @key; a zero @node acts as a wildcard. The publication is
 * unlinked from both per-range lists but NOT freed - ownership passes to
 * the caller. Return: the unlinked publication, or NULL if none matched.
 */
static struct publication *tipc_service_remove_publ(struct service_range *sr,
						    u32 node, u32 key)
{
	struct publication *p;

	list_for_each_entry(p, &sr->all_publ, all_publ) {
		if (p->key != key || (node && node != p->node))
			continue;
		list_del(&p->all_publ);
		list_del(&p->local_publ);
		return p;
	}
	return NULL;
}

/**
 * Code reused: time_after32() for the same purpose
 *
 * Orders publications by their wrapping 32-bit 'id' counters.
 */
#define publication_after(pa, pb) time_after32((pa)->id, (pb)->id)

/* list_sort() comparator: sort publications into ascending 'id' order */
static int tipc_publ_sort(void *priv, struct list_head *a,
			  struct list_head *b)
{
	struct publication *pa, *pb;

	pa = container_of(a, struct publication, list);
	pb = container_of(b, struct publication, list);
	return publication_after(pa, pb);
}

/**
 * tipc_service_subscribe - attach a subscription, and optionally
 * issue the prescribed number of events if there is any service
 * range overlapping with the requested range
 *
 * Takes a reference on @sub and links it to the service. If the
 * subscriber asked for no status (TIPC_SUB_NO_STATUS) we stop there;
 * otherwise every currently matching publication (all of them for
 * TIPC_SUB_PORTS, else the oldest per range) is reported, oldest first.
 */
static void tipc_service_subscribe(struct tipc_service *service,
				   struct tipc_subscription *sub)
{
	struct tipc_subscr *sb = &sub->evt.s;
	struct publication *p, *first, *tmp;
	struct list_head publ_list;
	struct service_range *sr;
	struct tipc_service_range r;
	u32 filter;

	r.type = tipc_sub_read(sb, seq.type);
	r.lower = tipc_sub_read(sb, seq.lower);
	r.upper = tipc_sub_read(sb, seq.upper);
	filter = tipc_sub_read(sb, filter);

	tipc_sub_get(sub);
	list_add(&sub->service_list, &service->subscriptions);

	if (filter & TIPC_SUB_NO_STATUS)
		return;

	INIT_LIST_HEAD(&publ_list);
	service_range_foreach_match(sr, service, r.lower, r.upper) {
		first = NULL;
		list_for_each_entry(p, &sr->all_publ, all_publ) {
			if (filter & TIPC_SUB_PORTS)
				list_add_tail(&p->list, &publ_list);
			else if (!first || publication_after(first, p))
				/* Pick this range's *first* publication */
				first = p;
		}
		if (first)
			list_add_tail(&first->list, &publ_list);
	}

	/* Sort the publications before reporting */
	list_sort(NULL, &publ_list, tipc_publ_sort);
	list_for_each_entry_safe(p, tmp, &publ_list, list) {
		tipc_sub_report_overlap(sub, p->lower, p->upper,
					TIPC_PUBLISHED, p->port, p->node,
					p->scope, true);
		list_del_init(&p->list);
	}
}

/* Look up the service struct for 'type' in its hash chain.
 * Caller must be in an RCU read-side section or hold nametbl_lock
 * (the traversal uses hlist_for_each_entry_rcu).
 */
static struct tipc_service *tipc_service_find(struct net *net, u32 type)
{
	struct name_table *nt = tipc_name_table(net);
	struct hlist_head *service_head;
	struct tipc_service *service;

	service_head = &nt->services[hash(type)];
	hlist_for_each_entry_rcu(service, service_head, service_list) {
		if (service->type == type)
			return
			       service;
	}
	return NULL;
};

/* tipc_nametbl_insert_publ - add a publication for <type,lower,upper>.
 * Validates scope and range ordering, creating the service struct on
 * demand. Return: the new publication, or NULL on bad input, duplicate
 * key or allocation failure.
 */
struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
					     u32 lower, u32 upper,
					     u32 scope, u32 node,
					     u32 port, u32 key)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_service *sc;
	struct publication *p;

	if (scope > TIPC_NODE_SCOPE || lower > upper) {
		pr_debug("Failed to bind illegal {%u,%u,%u} with scope %u\n",
			 type, lower, upper, scope);
		return NULL;
	}
	sc = tipc_service_find(net, type);
	if (!sc)
		sc = tipc_service_create(type, &nt->services[hash(type)]);
	if (!sc)
		return NULL;

	spin_lock_bh(&sc->lock);
	p = tipc_service_insert_publ(net, sc, type, lower, upper,
				     scope, node, port, key);
	spin_unlock_bh(&sc->lock);
	return p;
}

/* tipc_nametbl_remove_publ - withdraw a publication and notify subscribers.
 * Also garbage-collects the now-empty service range and, if nothing is
 * left at all, the service struct itself (freed via RCU).
 * Return: the unlinked publication (caller owns/frees it), or NULL.
 */
struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
					     u32 lower, u32 upper,
					     u32 node, u32 key)
{
	struct tipc_service *sc = tipc_service_find(net, type);
	struct tipc_subscription *sub, *tmp;
	struct service_range *sr = NULL;
	struct publication *p = NULL;
	bool last;

	if (!sc)
		return NULL;

	spin_lock_bh(&sc->lock);
	sr = tipc_service_find_range(sc, lower, upper);
	if (!sr)
		goto exit;
	p = tipc_service_remove_publ(sr, node, key);
	if (!p)
		goto exit;

	/* Notify any waiting subscriptions */
	last = list_empty(&sr->all_publ);
	list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) {
		tipc_sub_report_overlap(sub, lower, upper, TIPC_WITHDRAWN,
					p->port, node, p->scope, last);
	}

	/* Remove service range item if this was its last publication */
	if (list_empty(&sr->all_publ)) {
		rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
		kfree(sr);
	}

	/* Delete service item if this no more publications and subscriptions */
	if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) {
		hlist_del_init_rcu(&sc->service_list);
		kfree_rcu(sc, rcu);
	}
exit:
	spin_unlock_bh(&sc->lock);
	return p;
}

/**
 * tipc_nametbl_translate - perform service instance to socket translation
 *
 * On entry, 'dnode' is the search domain used during translation.
 *
 * On exit:
 * - if translation is deferred to another node, leave 'dnode' unchanged and
 *   return 0
 * - if translation is attempted and succeeds, set 'dnode' to the publishing
 *   node and return the published (non-zero) port number
 * - if translation is attempted and fails, set 'dnode' to 0 and return 0
 *
 * Note that for legacy users (node configured with Z.C.N address format) the
 * 'closest-first' lookup algorithm must be maintained, i.e., if dnode is 0
 * we must look in the local binding list first
 */
u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *dnode)
{
	struct tipc_net *tn = tipc_net(net);
	bool legacy = tn->legacy_addr_format;
	u32 self = tipc_own_addr(net);
	struct service_range *sr;
	struct tipc_service *sc;
	struct list_head *list;
	struct publication *p;
	u32 port = 0;
	u32 node = 0;

	if (!tipc_in_scope(legacy, *dnode, self))
		return 0;

	rcu_read_lock();
	sc = tipc_service_find(net, type);
	if (unlikely(!sc))
		goto exit;

	spin_lock_bh(&sc->lock);
	service_range_foreach_match(sr, sc, instance, instance) {
		/* Select lookup algo: local, closest-first or round-robin.
		 * The list_move_tail() rotations below are what implement
		 * round-robin across repeated translations.
		 */
		if (*dnode == self) {
			list = &sr->local_publ;
			if (list_empty(list))
				continue;
			p = list_first_entry(list, struct publication,
					     local_publ);
			list_move_tail(&p->local_publ, &sr->local_publ);
		} else if (legacy && !*dnode && !list_empty(&sr->local_publ)) {
			list = &sr->local_publ;
			p = list_first_entry(list, struct publication,
					     local_publ);
			list_move_tail(&p->local_publ, &sr->local_publ);
		} else {
			list =
			       &sr->all_publ;
			p = list_first_entry(list, struct publication,
					     all_publ);
			list_move_tail(&p->all_publ, &sr->all_publ);
		}
		port = p->port;
		node = p->node;
		/* Todo: as for legacy, pick the first matching range only, a
		 * "true" round-robin will be performed as needed.
		 */
		break;
	}
	spin_unlock_bh(&sc->lock);

exit:
	rcu_read_unlock();
	*dnode = node;
	return port;
}

/* tipc_nametbl_lookup - collect destinations bound to <type,instance,scope>.
 * Pushes matches onto @dsts and counts them in @dstcnt; with @all false
 * only one destination is taken, and the list is rotated for round-robin.
 * An own-node socket matching @exclude is skipped.
 * Return: true if @dsts is non-empty on exit.
 */
bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 scope,
			 struct list_head *dsts, int *dstcnt, u32 exclude,
			 bool all)
{
	u32 self = tipc_own_addr(net);
	struct service_range *sr;
	struct tipc_service *sc;
	struct publication *p;

	*dstcnt = 0;
	rcu_read_lock();
	sc = tipc_service_find(net, type);
	if (unlikely(!sc))
		goto exit;

	spin_lock_bh(&sc->lock);

	/* Todo: a full search i.e. service_range_foreach_match() instead? */
	sr = service_range_match_first(sc->ranges.rb_node, instance, instance);
	if (!sr)
		goto no_match;

	list_for_each_entry(p, &sr->all_publ, all_publ) {
		if (p->scope != scope)
			continue;
		if (p->port == exclude && p->node == self)
			continue;
		tipc_dest_push(dsts, p->node, p->port);
		(*dstcnt)++;
		if (all)
			continue;
		list_move_tail(&p->all_publ, &sr->all_publ);
		break;
	}
no_match:
	spin_unlock_bh(&sc->lock);
exit:
	rcu_read_unlock();
	return !list_empty(dsts);
}

/* tipc_nametbl_mc_lookup - find all node-local ports matching a multicast
 * range. With @exact false, publications with a narrower (smaller-valued)
 * scope than requested are also accepted.
 */
void tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper,
			    u32 scope, bool exact, struct list_head *dports)
{
	struct service_range *sr;
	struct tipc_service *sc;
	struct publication *p;

	rcu_read_lock();
	sc = tipc_service_find(net, type);
	if (!sc)
		goto exit;

	spin_lock_bh(&sc->lock);
	service_range_foreach_match(sr, sc, lower, upper) {
		list_for_each_entry(p, &sr->local_publ, local_publ) {
			if (p->scope == scope || (!exact && p->scope < scope))
				tipc_dest_push(dports, 0, p->port);
		}
	}
	spin_unlock_bh(&sc->lock);
exit:
	rcu_read_unlock();
}

/* tipc_nametbl_lookup_dst_nodes - find broadcast destination nodes
 * - Creates list of nodes that overlap the given multicast address
 * - Determines if any node local destinations overlap
 */
void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
				   u32 upper, struct tipc_nlist *nodes)
{
	struct service_range *sr;
	struct tipc_service *sc;
	struct publication *p;

	rcu_read_lock();
	sc = tipc_service_find(net, type);
	if (!sc)
		goto exit;

	spin_lock_bh(&sc->lock);
	service_range_foreach_match(sr, sc, lower, upper) {
		list_for_each_entry(p, &sr->all_publ, all_publ) {
			tipc_nlist_add(nodes, p->node);
		}
	}
	spin_unlock_bh(&sc->lock);
exit:
	rcu_read_unlock();
}

/* tipc_nametbl_build_group - build list of communication group members
 *
 * Walks every range of the service (full in-order rbtree traversal, not
 * an interval match) and adds all publications of the given scope.
 */
void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
			      u32 type, u32 scope)
{
	struct service_range *sr;
	struct tipc_service *sc;
	struct publication *p;
	struct rb_node *n;

	rcu_read_lock();
	sc = tipc_service_find(net, type);
	if (!sc)
		goto exit;

	spin_lock_bh(&sc->lock);
	for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
		sr = container_of(n, struct service_range, tree_node);
		list_for_each_entry(p, &sr->all_publ, all_publ) {
			if (p->scope != scope)
				continue;
			tipc_group_add_member(grp, p->node, p->port, p->lower);
		}
	}
	spin_unlock_bh(&sc->lock);
exit:
	rcu_read_unlock();
}

/* tipc_nametbl_publish - add service binding to name table
 *
 * Local-node publish: enforces the TIPC_MAX_PUBL limit, inserts the
 * publication, then broadcasts it to peer nodes. Returns the new
 * publication or NULL on failure.
 */
struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
					 u32 upper, u32 scope, u32 port,
					 u32 key)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct
	       publication *p = NULL;
	struct sk_buff *skb = NULL;
	u32 rc_dests;

	spin_lock_bh(&tn->nametbl_lock);

	if (nt->local_publ_count >= TIPC_MAX_PUBL) {
		pr_warn("Bind failed, max limit %u reached\n", TIPC_MAX_PUBL);
		goto exit;
	}

	p = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
				     tipc_own_addr(net), port, key);
	if (p) {
		nt->local_publ_count++;
		skb = tipc_named_publish(net, p);
	}
	/* Snapshot replicast destinations while still under the lock */
	rc_dests = nt->rc_dests;
exit:
	spin_unlock_bh(&tn->nametbl_lock);

	/* Broadcast outside the lock */
	if (skb)
		tipc_node_broadcast(net, skb, rc_dests);
	return p;

}

/**
 * tipc_nametbl_withdraw - withdraw a service binding
 *
 * Removes the local publication matching <type,lower,upper,key>, frees it
 * via RCU, and broadcasts the withdrawal to peer nodes.
 * Return: 1 if a withdrawal message was broadcast, otherwise 0.
 */
int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower,
			  u32 upper, u32 key)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	u32 self = tipc_own_addr(net);
	struct sk_buff *skb = NULL;
	struct publication *p;
	u32 rc_dests;

	spin_lock_bh(&tn->nametbl_lock);

	p = tipc_nametbl_remove_publ(net, type, lower, upper, self, key);
	if (p) {
		nt->local_publ_count--;
		skb = tipc_named_withdraw(net, p);
		list_del_init(&p->binding_sock);
		kfree_rcu(p, rcu);
	} else {
		pr_err("Failed to remove local publication {%u,%u,%u}/%u\n",
		       type, lower, upper, key);
	}
	rc_dests = nt->rc_dests;
	spin_unlock_bh(&tn->nametbl_lock);

	if (skb) {
		tipc_node_broadcast(net, skb, rc_dests);
		return 1;
	}
	return 0;
}

/**
 * tipc_nametbl_subscribe - add a subscription object to the name table
 *
 * Creates the service struct on demand.
 * Return: true on success, false if the service could not be created.
 */
bool tipc_nametbl_subscribe(struct tipc_subscription *sub)
{
	struct name_table *nt = tipc_name_table(sub->net);
	struct tipc_net *tn = tipc_net(sub->net);
	struct tipc_subscr *s = &sub->evt.s;
	u32 type = tipc_sub_read(s, seq.type);
	struct tipc_service *sc;
	bool res = true;

	spin_lock_bh(&tn->nametbl_lock);
	sc = tipc_service_find(sub->net, type);
	if (!sc)
		sc = tipc_service_create(type, &nt->services[hash(type)]);
	if (sc) {
		spin_lock_bh(&sc->lock);
		tipc_service_subscribe(sc, sub);
		spin_unlock_bh(&sc->lock);
	} else {
		pr_warn("Failed to subscribe for {%u,%u,%u}\n", type,
			tipc_sub_read(s, seq.lower),
			tipc_sub_read(s, seq.upper));
		res = false;
	}
	spin_unlock_bh(&tn->nametbl_lock);
	return res;
}

/**
 * tipc_nametbl_unsubscribe - remove a subscription object from name table
 *
 * Drops the reference taken by tipc_service_subscribe() and deletes the
 * service struct if it is now completely unused.
 */
void tipc_nametbl_unsubscribe(struct tipc_subscription *sub)
{
	struct tipc_net *tn = tipc_net(sub->net);
	struct tipc_subscr *s = &sub->evt.s;
	u32 type = tipc_sub_read(s, seq.type);
	struct tipc_service *sc;

	spin_lock_bh(&tn->nametbl_lock);
	sc = tipc_service_find(sub->net, type);
	if (!sc)
		goto exit;

	spin_lock_bh(&sc->lock);
	list_del_init(&sub->service_list);
	tipc_sub_put(sub);

	/* Delete service item if no more publications and subscriptions */
	if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) {
		hlist_del_init_rcu(&sc->service_list);
		kfree_rcu(sc, rcu);
	}
	spin_unlock_bh(&sc->lock);
exit:
	spin_unlock_bh(&tn->nametbl_lock);
}

/* tipc_nametbl_init - allocate and initialize this net namespace's name
 * table. Return: 0 on success, -ENOMEM on allocation failure.
 */
int tipc_nametbl_init(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct name_table *nt;
	int i;

	nt = kzalloc(sizeof(*nt), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	for (i = 0; i < TIPC_NAMETBL_SIZE; i++)
		INIT_HLIST_HEAD(&nt->services[i]);

	INIT_LIST_HEAD(&nt->node_scope);
	INIT_LIST_HEAD(&nt->cluster_scope);
	rwlock_init(&nt->cluster_scope_lock);
	tn->nametbl = nt;
	spin_lock_init(&tn->nametbl_lock);
	return 0;
}

/**
 * tipc_service_delete - purge all publications for a service and delete it
 */
static void tipc_service_delete(struct net *net, struct tipc_service *sc)
{
	struct service_range
			    *sr, *tmpr;
	struct publication *p, *tmp;

	spin_lock_bh(&sc->lock);
	/* Post-order walk so each range node can be erased and freed after
	 * its publications; publications and the service go via RCU.
	 */
	rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) {
		list_for_each_entry_safe(p, tmp, &sr->all_publ, all_publ) {
			tipc_service_remove_publ(sr, p->node, p->key);
			kfree_rcu(p, rcu);
		}
		rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
		kfree(sr);
	}
	hlist_del_init_rcu(&sc->service_list);
	spin_unlock_bh(&sc->lock);
	kfree_rcu(sc, rcu);
}

/* tipc_nametbl_stop - purge the name table at namespace teardown */
void tipc_nametbl_stop(struct net *net)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct hlist_head *service_head;
	struct tipc_service *service;
	u32 i;

	/* Verify name table is empty and purge any lingering
	 * publications, then release the name table
	 */
	spin_lock_bh(&tn->nametbl_lock);
	for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
		if (hlist_empty(&nt->services[i]))
			continue;
		service_head = &nt->services[i];
		hlist_for_each_entry_rcu(service, service_head, service_list) {
			tipc_service_delete(net, service);
		}
	}
	spin_unlock_bh(&tn->nametbl_lock);

	/* Wait for RCU readers before freeing the table itself */
	synchronize_net();
	kfree(nt);
}

/* Dump one service range's publications into a netlink message.
 * @last_key carries resume state between dump passes: non-zero means
 * "continue after this key". Return: 0 when the range is fully dumped,
 * -EPIPE if the resume key vanished, -EMSGSIZE if the skb filled up.
 */
static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
					struct tipc_service *service,
					struct service_range *sr,
					u32 *last_key)
{
	struct publication *p;
	struct nlattr *attrs;
	struct nlattr *b;
	void *hdr;

	if (*last_key) {
		list_for_each_entry(p, &sr->all_publ, all_publ)
			if (p->key == *last_key)
				break;
		if (p->key != *last_key)
			return -EPIPE;
	} else {
		p = list_first_entry(&sr->all_publ,
				     struct publication,
				     all_publ);
	}

	list_for_each_entry_from(p, &sr->all_publ, all_publ) {
		*last_key = p->key;

		hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
				  &tipc_genl_family, NLM_F_MULTI,
				  TIPC_NL_NAME_TABLE_GET);
		if (!hdr)
			return -EMSGSIZE;

		attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NAME_TABLE);
		if (!attrs)
			goto msg_full;

		b = nla_nest_start_noflag(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
		if (!b)
			goto attr_msg_full;

		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, service->type))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sr->lower))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sr->upper))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->port))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
			goto publ_msg_full;

		nla_nest_end(msg->skb, b);
		nla_nest_end(msg->skb, attrs);
		genlmsg_end(msg->skb, hdr);
	}
	*last_key = 0;

	return 0;

publ_msg_full:
	nla_nest_cancel(msg->skb, b);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Dump all ranges of one service, resuming at *last_lower/*last_key.
 * On error the resume point is recorded in *last_lower for the caller.
 */
static int __tipc_nl_service_range_list(struct tipc_nl_msg *msg,
					struct tipc_service *sc,
					u32 *last_lower, u32 *last_key)
{
	struct service_range *sr;
	struct rb_node *n;
	int err;

	for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
		sr = container_of(n, struct service_range, tree_node);
		if (sr->lower < *last_lower)
			continue;
		err = __tipc_nl_add_nametable_publ(msg, sc, sr, last_key);
		if (err) {
			*last_lower = sr->lower;
			return err;
		}
	}
	*last_lower = 0;
	return 0;
}

/* Walk every hash chain and dump each service, resuming from the
 * *last_type/*last_lower/*last_key cursor of the previous dump pass.
 */
static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
				u32 *last_type, u32 *last_lower, u32 *last_key)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_service *service = NULL;
	struct hlist_head *head;
	int err;
	int i;

	if (*last_type)
		i = hash(*last_type);
	else
		i = 0;

	for (; i < TIPC_NAMETBL_SIZE; i++) {
		head = &tn->nametbl->services[i];

		if (*last_type ||
		    (!i && *last_key && (*last_lower == *last_key))) {
			/* Resuming: the service we stopped at must still
			 * exist, otherwise the dump is inconsistent.
			 */
			service = tipc_service_find(net, *last_type);
			if (!service)
				return -EPIPE;
		} else {
			hlist_for_each_entry_rcu(service, head, service_list)
				break;
			if (!service)
				continue;
		}

		hlist_for_each_entry_from_rcu(service, service_list) {
			spin_lock_bh(&service->lock);
			err = __tipc_nl_service_range_list(msg, service,
							   last_lower,
							   last_key);

			if (err) {
				*last_type = service->type;
				spin_unlock_bh(&service->lock);
				return err;
			}
			spin_unlock_bh(&service->lock);
		}
		*last_type = 0;
	}
	return 0;
}

/* Netlink dump handler for TIPC_NL_NAME_TABLE_GET.
 * The resume cursor lives in cb->args[0..2]; cb->args[3] flags completion.
 */
int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 last_type = cb->args[0];
	u32 last_lower = cb->args[1];
	u32 last_key = cb->args[2];
	int done = cb->args[3];
	struct tipc_nl_msg msg;
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	err = tipc_nl_service_list(net, &msg, &last_type,
				   &last_lower, &last_key);
	if (!err) {
		done = 1;
	} else if (err != -EMSGSIZE) {
		/* We never set seq or call nl_dump_check_consistent() this
		 * means that setting prev_seq here will cause the consistence
		 * check to fail in the netlink callback handler. Resulting in
		 * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if
		 * we got an error.
		 */
		cb->prev_seq = 1;
	}
	rcu_read_unlock();

	cb->args[0] = last_type;
	cb->args[1] = last_lower;
	cb->args[2] = last_key;
	cb->args[3] = done;

	return skb->len;
}

/* Find the destination entry matching <node, port>, or NULL.
 * NOTE(review): callers appear responsible for serializing access to the
 * destination list - there is no locking at this level.
 */
struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
{
	struct tipc_dest *dst;

	list_for_each_entry(dst, l, list) {
		if (dst->node == node && dst->port == port)
			return dst;
	}
	return NULL;
}

/* Add <node, port> to the destination list unless already present.
 * Return: true if a new entry was added, false on duplicate or OOM.
 */
bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
{
	struct tipc_dest *dst;

	if (tipc_dest_find(l, node, port))
		return false;

	dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
	if (unlikely(!dst))
		return false;
	dst->node = node;
	dst->port = port;
	list_add(&dst->list, l);
	return true;
}

/* Remove the first destination entry, returning its fields through the
 * optional @node/@port pointers. Return: false if the list was empty.
 */
bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port)
{
	struct tipc_dest *dst;

	if (list_empty(l))
		return false;
	dst = list_first_entry(l, typeof(*dst), list);
	if (port)
		*port = dst->port;
	if (node)
		*node = dst->node;
	list_del(&dst->list);
	kfree(dst);
	return true;
}

/* Remove and free the entry matching <node, port>, if any.
 * Return: true if an entry was removed.
 */
bool tipc_dest_del(struct list_head *l, u32 node, u32 port)
{
	struct tipc_dest *dst;

	dst = tipc_dest_find(l, node, port);
	if (!dst)
		return false;
	list_del(&dst->list);
	kfree(dst);
	return true;
}

/* Free every entry on the destination list */
void tipc_dest_list_purge(struct list_head *l)
{
	struct tipc_dest *dst, *tmp;

	list_for_each_entry_safe(dst, tmp, l, list) {
		list_del(&dst->list);
		kfree(dst);
	}
}

/* Return the number of entries on the destination list (O(n) walk) */
int tipc_dest_list_len(struct list_head *l)
{
	struct tipc_dest *dst;
	int i = 0;

	list_for_each_entry(dst, l, list) {
		i++;
	}
	return i;
}