1 /* 2 * net/tipc/name_table.c: TIPC name table code 3 * 4 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB 5 * Copyright (c) 2004-2008, 2010-2014, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <net/sock.h>
#include "core.h"
#include "netlink.h"
#include "name_table.h"
#include "name_distr.h"
#include "subscr.h"
#include "bcast.h"
#include "addr.h"
#include "node.h"
#include "group.h"
#include <net/genetlink.h>

#define TIPC_NAMETBL_SIZE 1024	/* must be a power of 2 */

/**
 * struct name_info - name sequence publication info
 * @node_list: list of publications on own node of this <type,lower,upper>
 * @cluster_list: list of all publications of this <type,lower,upper>
 */
struct name_info {
	struct list_head node_list;
	struct list_head cluster_list;
};

/**
 * struct sub_seq - container for all published instances of a name sequence
 * @lower: name sequence lower bound
 * @upper: name sequence upper bound
 * @info: pointer to name sequence publication info
 */
struct sub_seq {
	u32 lower;
	u32 upper;
	struct name_info *info;
};

/**
 * struct name_seq - container for all published instances of a name type
 * @type: 32 bit 'type' value for name sequence
 * @sseqs: pointer to dynamically-sized array of sub-sequences of this 'type';
 *         sub-sequences are sorted in ascending order
 * @alloc: number of sub-sequences currently in array
 * @first_free: array index of first unused sub-sequence entry
 * @ns_list: links to adjacent name sequences in hash chain
 * @subscriptions: list of subscriptions for this 'type'
 * @lock: spinlock controlling access to publication lists of all sub-sequences
 * @rcu: RCU callback head used for deferred freeing
 */
struct name_seq {
	u32 type;
	struct sub_seq *sseqs;
	u32 alloc;
	u32 first_free;
	struct hlist_node ns_list;
	struct list_head subscriptions;
	spinlock_t lock;
	struct rcu_head rcu;
};

/* Map a name 'type' value to a bucket index in the name table hash array;
 * relies on TIPC_NAMETBL_SIZE being a power of 2.
 */
static int hash(int x)
{
	return x & (TIPC_NAMETBL_SIZE - 1);
}

/**
 * publ_create - create a publication structure
 *
 * Allocated GFP_ATOMIC since callers hold spinlocks; returns NULL (after a
 * warning) on allocation failure.  Caller owns the returned structure.
 */
static struct publication *publ_create(u32 type, u32 lower, u32 upper,
				       u32 scope, u32 node, u32 port_ref,
				       u32 key)
{
	struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
	if (publ == NULL) {
		pr_warn("Publication creation failure, no memory\n");
		return NULL;
	}

	publ->type = type;
	publ->lower = lower;
	publ->upper = upper;
	publ->scope = scope;
	publ->node = node;
	publ->ref = port_ref;
	publ->key = key;
	INIT_LIST_HEAD(&publ->pport_list);
	return publ;
}

/**
 * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
 */
static struct sub_seq *tipc_subseq_alloc(u32 cnt)
{
	return kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
}

/**
 * tipc_nameseq_create - create a name sequence structure for the specified 'type'
 *
 * Allocates a single sub-sequence structure and sets it to all 0's.
 * The new sequence is linked into @seq_head; returns NULL on memory failure.
 */
static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
{
	struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
	struct sub_seq *sseq = tipc_subseq_alloc(1);

	if (!nseq || !sseq) {
		pr_warn("Name sequence creation failed, no memory\n");
		kfree(nseq);
		kfree(sseq);
		return NULL;
	}

	spin_lock_init(&nseq->lock);
	nseq->type = type;
	nseq->sseqs = sseq;
	nseq->alloc = 1;
	INIT_HLIST_NODE(&nseq->ns_list);
	INIT_LIST_HEAD(&nseq->subscriptions);
	hlist_add_head_rcu(&nseq->ns_list, seq_head);
	return nseq;
}

/**
 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
 *
 * Very time-critical, so binary searches through sub-sequence array.
 */
static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
					   u32 instance)
{
	struct sub_seq *sseqs = nseq->sseqs;
	int low = 0;
	int high = nseq->first_free - 1;
	int mid;

	while (low <= high) {
		mid = (low + high) / 2;
		if (instance < sseqs[mid].lower)
			high = mid - 1;
		else if (instance > sseqs[mid].upper)
			low = mid + 1;
		else
			return &sseqs[mid];
	}
	return NULL;
}

/**
 * nameseq_locate_subseq - determine position of name instance in sub-sequence
 *
 * Returns index in sub-sequence array of the entry that contains the specified
 * instance value; if no entry contains that value, returns the position
 * where a new entry for it would be inserted in the array.
 *
 * Note: Similar to binary search code for locating a sub-sequence.
 */
static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
{
	struct sub_seq *sseqs = nseq->sseqs;
	int low = 0;
	int high = nseq->first_free - 1;
	int mid;

	while (low <= high) {
		mid = (low + high) / 2;
		if (instance < sseqs[mid].lower)
			high = mid - 1;
		else if (instance > sseqs[mid].upper)
			low = mid + 1;
		else
			return mid;
	}
	return low;
}

/**
 * tipc_nameseq_insert_publ - insert a publication into a name sequence
 *
 * Inserts a <type,lower,upper> publication into @nseq, creating a new
 * sub-sequence entry when the range does not match an existing one (the
 * sub-sequence array is grown by doubling when full).  Returns NULL on
 * range overlap, duplicate publication, or memory failure.  Notifies all
 * attached subscriptions of the new publication before returning.
 * Callers hold nseq->lock (see tipc_nametbl_insert_publ).
 */
static struct publication *tipc_nameseq_insert_publ(struct net *net,
						    struct name_seq *nseq,
						    u32 type, u32 lower,
						    u32 upper, u32 scope,
						    u32 node, u32 port, u32 key)
{
	struct tipc_subscription *s;
	struct tipc_subscription *st;
	struct publication *publ;
	struct sub_seq *sseq;
	struct name_info *info;
	int created_subseq = 0;

	sseq = nameseq_find_subseq(nseq, lower);
	if (sseq) {

		/* Lower end overlaps existing entry => need an exact match */
		if ((sseq->lower != lower) || (sseq->upper != upper)) {
			return NULL;
		}

		info = sseq->info;

		/* Check if an identical publication already exists */
		list_for_each_entry(publ, &info->cluster_list, cluster_list) {
			if ((publ->ref == port) && (publ->key == key) &&
			    (!publ->node || (publ->node == node)))
				return NULL;
		}
	} else {
		u32 inspos;
		struct sub_seq *freesseq;

		/* Find where lower end should be inserted */
		inspos = nameseq_locate_subseq(nseq, lower);

		/* Fail if upper end overlaps into an existing entry */
		if ((inspos < nseq->first_free) &&
		    (upper >= nseq->sseqs[inspos].lower)) {
			return NULL;
		}

		/* Ensure there is space for new sub-sequence */
		if (nseq->first_free == nseq->alloc) {
			struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);

			if (!sseqs) {
				pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
					type, lower, upper);
				return NULL;
			}
			memcpy(sseqs, nseq->sseqs,
			       nseq->alloc * sizeof(struct sub_seq));
			kfree(nseq->sseqs);
			nseq->sseqs = sseqs;
			nseq->alloc *= 2;
		}

		info = kzalloc(sizeof(*info), GFP_ATOMIC);
		if (!info) {
			pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
				type, lower, upper);
			return NULL;
		}

		INIT_LIST_HEAD(&info->node_list);
		INIT_LIST_HEAD(&info->cluster_list);

		/* Insert new sub-sequence */
		sseq = &nseq->sseqs[inspos];
		freesseq = &nseq->sseqs[nseq->first_free];
		memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
		memset(sseq, 0, sizeof(*sseq));
		nseq->first_free++;
		sseq->lower = lower;
		sseq->upper = upper;
		sseq->info = info;
		created_subseq = 1;
	}

	/* Insert a publication */
	publ = publ_create(type, lower, upper, scope, node, port, key);
	if (!publ)
		return NULL;

	list_add(&publ->cluster_list, &info->cluster_list);

	if (in_own_node(net, node))
		list_add(&publ->node_list, &info->node_list);

	/* Any subscriptions waiting for notification? */
	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
		tipc_sub_report_overlap(s, publ->lower, publ->upper,
					TIPC_PUBLISHED, publ->ref,
					publ->node, publ->scope,
					created_subseq);
	}
	return publ;
}

/**
 * tipc_nameseq_remove_publ - remove a publication from a name sequence
 *
 * Unlinks the matching publication (by instance, node, ref, key), frees the
 * containing sub-sequence entry when its publication list becomes empty, and
 * notifies attached subscriptions of the withdrawal.  Returns the unlinked
 * publication (caller frees it) or NULL if no match was found.
 * Callers hold nseq->lock (see tipc_nametbl_remove_publ).
 *
 * NOTE: There may be cases where TIPC is asked to remove a publication
 * that is not in the name table. For example, if another node issues a
 * publication for a name sequence that overlaps an existing name sequence
 * the publication will not be recorded, which means the publication won't
 * be found when the name sequence is later withdrawn by that node.
 * A failed withdraw request simply returns a failure indication and lets the
 * caller issue any error or warning messages associated with such a problem.
 */
static struct publication *tipc_nameseq_remove_publ(struct net *net,
						    struct name_seq *nseq,
						    u32 inst, u32 node,
						    u32 ref, u32 key)
{
	struct publication *publ;
	struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
	struct name_info *info;
	struct sub_seq *free;
	struct tipc_subscription *s, *st;
	int removed_subseq = 0;

	if (!sseq)
		return NULL;

	info = sseq->info;

	/* Locate publication, if it exists */
	list_for_each_entry(publ, &info->cluster_list, cluster_list) {
		if ((publ->key == key) && (publ->ref == ref) &&
		    (!publ->node || (publ->node == node)))
			goto found;
	}
	return NULL;

found:
	list_del(&publ->cluster_list);
	if (in_own_node(net, node))
		list_del(&publ->node_list);

	/* Contract subseq list if no more publications for that subseq */
	if (list_empty(&info->cluster_list)) {
		kfree(info);
		free = &nseq->sseqs[nseq->first_free--];
		memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq));
		removed_subseq = 1;
	}

	/* Notify any waiting subscriptions */
	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
		tipc_sub_report_overlap(s, publ->lower, publ->upper,
					TIPC_WITHDRAWN, publ->ref, publ->node,
					publ->scope, removed_subseq);
	}

	return publ;
}

/**
 * tipc_nameseq_subscribe - attach a subscription, and optionally
 * issue the prescribed number of events if there is any sub-
 * sequence overlapping with the requested sequence
 */
static void tipc_nameseq_subscribe(struct name_seq *nseq,
				   struct tipc_subscription *sub)
{
	struct sub_seq *sseq = nseq->sseqs;
	struct tipc_name_seq ns;
	struct tipc_subscr *s = &sub->evt.s;
	bool no_status;

	ns.type = tipc_sub_read(s, seq.type);
	ns.lower = tipc_sub_read(s, seq.lower);
	ns.upper = tipc_sub_read(s, seq.upper);
	no_status = tipc_sub_read(s, filter) & TIPC_SUB_NO_STATUS;

	tipc_sub_get(sub);
	list_add(&sub->nameseq_list, &nseq->subscriptions);

	/* TIPC_SUB_NO_STATUS subscribers get no initial snapshot of
	 * existing publications
	 */
	if (no_status || !sseq)
		return;

	while (sseq != &nseq->sseqs[nseq->first_free]) {
		if (tipc_sub_check_overlap(&ns, sseq->lower, sseq->upper)) {
			struct publication *crs;
			struct name_info *info = sseq->info;
			int must_report = 1;

			list_for_each_entry(crs, &info->cluster_list,
					    cluster_list) {
				tipc_sub_report_overlap(sub, sseq->lower,
							sseq->upper,
							TIPC_PUBLISHED,
							crs->ref, crs->node,
							crs->scope,
							must_report);
				must_report = 0;
			}
		}
		sseq++;
	}
}

/* Look up the name sequence for 'type' in the per-netns hash table;
 * returns NULL if none exists.  RCU-safe hash chain walk.
 */
static struct name_seq *nametbl_find_seq(struct net *net, u32 type)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct hlist_head *seq_head;
	struct name_seq *ns;

	seq_head = &tn->nametbl->seq_hlist[hash(type)];
	hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
		if (ns->type == type)
			return ns;
	}

	return NULL;
};

/* Validate the publication parameters, create the name sequence for 'type'
 * if it does not yet exist, and insert the publication under seq->lock.
 */
struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
					     u32 lower, u32 upper, u32 scope,
					     u32 node, u32 port, u32 key)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct publication *publ;
	struct name_seq *seq = nametbl_find_seq(net, type);
	int index = hash(type);

	if (scope > TIPC_NODE_SCOPE || lower > upper) {
		pr_debug("Failed to publish illegal {%u,%u,%u} with scope %u\n",
			 type, lower, upper, scope);
		return NULL;
	}

	if (!seq)
		seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
	if (!seq)
		return NULL;

	spin_lock_bh(&seq->lock);
	publ = tipc_nameseq_insert_publ(net, seq, type, lower, upper,
					scope, node, port, key);
	spin_unlock_bh(&seq->lock);
	return publ;
}

/* Remove a publication under seq->lock; when the sequence has no remaining
 * sub-sequences and no subscriptions, unlink it from the hash chain and
 * free it via RCU.  Returns the removed publication (caller frees) or NULL.
 */
struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
					     u32 lower, u32 node, u32 ref,
					     u32 key)
{
	struct publication *publ;
	struct name_seq *seq = nametbl_find_seq(net, type);

	if (!seq)
		return NULL;

	spin_lock_bh(&seq->lock);
	publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key);
	if (!seq->first_free && list_empty(&seq->subscriptions)) {
		hlist_del_init_rcu(&seq->ns_list);
		kfree(seq->sseqs);
		spin_unlock_bh(&seq->lock);
		kfree_rcu(seq, rcu);
		return publ;
	}
	spin_unlock_bh(&seq->lock);
	return publ;
}

/**
 * tipc_nametbl_translate - perform name translation
 *
 * On entry, 'destnode' is the search domain used during translation.
 *
 * On exit:
 * - if name translation is deferred to another node/cluster/zone,
 *   leaves 'destnode' unchanged (will be non-zero) and returns 0
 * - if name translation is attempted and succeeds, sets 'destnode'
 *   to publishing node and returns port reference (will be non-zero)
 * - if name translation is attempted and fails, sets 'destnode' to 0
 *   and returns 0
 */
u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance,
			   u32 *destnode)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sub_seq *sseq;
	struct name_info *info;
	struct publication *publ;
	struct name_seq *seq;
	u32 ref = 0;
	u32 node = 0;

	if (!tipc_in_scope(*destnode, tn->own_addr))
		return 0;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (unlikely(!seq))
		goto not_found;
	spin_lock_bh(&seq->lock);
	sseq = nameseq_find_subseq(seq, instance);
	if (unlikely(!sseq))
		goto no_match;
	info = sseq->info;

	/* Closest-First Algorithm */
	if (likely(!*destnode)) {
		/* Prefer a local publication when one exists; the chosen
		 * entry is rotated to the list tail for load spreading
		 */
		if (!list_empty(&info->node_list)) {
			publ = list_first_entry(&info->node_list,
						struct publication,
						node_list);
			list_move_tail(&publ->node_list,
				       &info->node_list);
		} else {
			publ = list_first_entry(&info->cluster_list,
						struct publication,
						cluster_list);
			list_move_tail(&publ->cluster_list,
				       &info->cluster_list);
		}
	}

	/* Round-Robin Algorithm */
	else if (*destnode == tn->own_addr) {
		if (list_empty(&info->node_list))
			goto no_match;
		publ = list_first_entry(&info->node_list, struct publication,
					node_list);
		list_move_tail(&publ->node_list, &info->node_list);
	} else {
		publ = list_first_entry(&info->cluster_list, struct publication,
					cluster_list);
		list_move_tail(&publ->cluster_list, &info->cluster_list);
	}

	ref = publ->ref;
	node = publ->node;
no_match:
	spin_unlock_bh(&seq->lock);
not_found:
	rcu_read_unlock();
	*destnode = node;
	return ref;
}

/* Collect the destinations publishing <type,instance> with the given scope
 * into 'dsts' (all of them when 'all' is set, otherwise just one, rotated
 * round-robin); entries matching <exclude, own node> are skipped.
 * '*dstcnt' is set to the number of destinations added; returns true if
 * 'dsts' is non-empty on exit.
 */
bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 scope,
			 struct list_head *dsts, int *dstcnt, u32 exclude,
			 bool all)
{
	u32 self = tipc_own_addr(net);
	struct publication *publ;
	struct name_info *info;
	struct name_seq *seq;
	struct sub_seq *sseq;

	*dstcnt = 0;
	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (unlikely(!seq))
		goto exit;
	spin_lock_bh(&seq->lock);
	sseq = nameseq_find_subseq(seq, instance);
	if (likely(sseq)) {
		info = sseq->info;
		list_for_each_entry(publ, &info->cluster_list, cluster_list) {
			if (publ->scope != scope)
				continue;
			if (publ->ref == exclude && publ->node == self)
				continue;
			tipc_dest_push(dsts, publ->node, publ->ref);
			(*dstcnt)++;
			if (all)
				continue;
			/* Rotate the chosen entry for round-robin selection */
			list_move_tail(&publ->cluster_list,
				       &info->cluster_list);
			break;
		}
	}
	spin_unlock_bh(&seq->lock);
exit:
	rcu_read_unlock();
	return !list_empty(dsts);
}

/* Collect the ports of all own-node publications overlapping the range
 * <type,lower..upper> into 'dports'.  With 'exact' false, publications
 * with a narrower (smaller-valued) scope also match.
 */
void tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper,
			    u32 scope, bool exact, struct list_head *dports)
{
	struct sub_seq *sseq_stop;
	struct name_info *info;
	struct publication *p;
	struct name_seq *seq;
	struct sub_seq *sseq;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (!seq)
		goto exit;

	spin_lock_bh(&seq->lock);
	sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
	sseq_stop = seq->sseqs + seq->first_free;
	for (; sseq != sseq_stop; sseq++) {
		if (sseq->lower > upper)
			break;
		info = sseq->info;
		list_for_each_entry(p, &info->node_list, node_list) {
			if (p->scope == scope || (!exact && p->scope < scope))
				tipc_dest_push(dports, 0, p->ref);
		}
	}
	spin_unlock_bh(&seq->lock);
exit:
	rcu_read_unlock();
}

/* tipc_nametbl_lookup_dst_nodes - find broadcast destination nodes
 * - Creates list of nodes that overlap the given multicast address
 * - Determines if any node local ports overlap
 */
void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
				   u32 upper, struct tipc_nlist *nodes)
{
	struct sub_seq *sseq, *stop;
	struct publication *publ;
	struct name_info *info;
	struct name_seq *seq;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (!seq)
		goto exit;

	spin_lock_bh(&seq->lock);
	sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
	stop = seq->sseqs + seq->first_free;
	for (; sseq != stop && sseq->lower <= upper; sseq++) {
		info = sseq->info;
		list_for_each_entry(publ, &info->cluster_list, cluster_list) {
			tipc_nlist_add(nodes, publ->node);
		}
	}
	spin_unlock_bh(&seq->lock);
exit:
	rcu_read_unlock();
}

/* tipc_nametbl_build_group - build list of communication group members
 */
void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
			      u32 type, u32 scope)
{
	struct sub_seq *sseq, *stop;
	struct name_info *info;
	struct publication *p;
	struct name_seq *seq;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (!seq)
		goto exit;

	spin_lock_bh(&seq->lock);
	sseq = seq->sseqs;
	stop = seq->sseqs + seq->first_free;
	for (; sseq != stop; sseq++) {
		info = sseq->info;
		list_for_each_entry(p, &info->cluster_list, cluster_list) {
			if (p->scope != scope)
				continue;
			tipc_group_add_member(grp, p->node, p->ref, p->lower);
		}
	}
	spin_unlock_bh(&seq->lock);
exit:
	rcu_read_unlock();
}

/*
 * tipc_nametbl_publish - add name publication to network name tables
 */
struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
					 u32 upper, u32 scope, u32 port_ref,
					 u32 key)
{
	struct publication *publ;
	struct sk_buff *buf = NULL;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->nametbl_lock);
	/* Enforce the per-node publication limit before inserting */
	if (tn->nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
		pr_warn("Publication failed, local publication limit reached (%u)\n",
			TIPC_MAX_PUBLICATIONS);
		spin_unlock_bh(&tn->nametbl_lock);
		return NULL;
	}

	publ = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
					tn->own_addr, port_ref, key);
	if (likely(publ)) {
		tn->nametbl->local_publ_count++;
		buf = tipc_named_publish(net, publ);
		/* Any pending external events? */
		tipc_named_process_backlog(net);
	}
	spin_unlock_bh(&tn->nametbl_lock);

	/* Broadcast outside nametbl_lock */
	if (buf)
		tipc_node_broadcast(net, buf);
	return publ;
}

/**
 * tipc_nametbl_withdraw - withdraw name publication from network name tables
 */
int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
			  u32 key)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->nametbl_lock);
	publ = tipc_nametbl_remove_publ(net, type, lower, tn->own_addr,
					ref, key);
	if (likely(publ)) {
		tn->nametbl->local_publ_count--;
		skb = tipc_named_withdraw(net, publ);
		/* Any pending external events? */
		tipc_named_process_backlog(net);
		list_del_init(&publ->pport_list);
		kfree_rcu(publ, rcu);
	} else {
		pr_err("Unable to remove local publication\n"
		       "(type=%u, lower=%u, ref=%u, key=%u)\n",
		       type, lower, ref, key);
	}
	spin_unlock_bh(&tn->nametbl_lock);

	/* Broadcast outside nametbl_lock; non-zero return means a withdraw
	 * message was sent
	 */
	if (skb) {
		tipc_node_broadcast(net, skb);
		return 1;
	}
	return 0;
}

/**
 * tipc_nametbl_subscribe - add a subscription object to the name table
 */
void tipc_nametbl_subscribe(struct tipc_subscription *sub)
{
	struct tipc_net *tn = tipc_net(sub->net);
	struct tipc_subscr *s = &sub->evt.s;
	u32 type = tipc_sub_read(s, seq.type);
	int index = hash(type);
	struct name_seq *seq;
	struct tipc_name_seq ns;

	spin_lock_bh(&tn->nametbl_lock);
	seq = nametbl_find_seq(sub->net, type);
	if (!seq)
		seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
	if (seq) {
		spin_lock_bh(&seq->lock);
		tipc_nameseq_subscribe(seq, sub);
		spin_unlock_bh(&seq->lock);
	} else {
		ns.type = tipc_sub_read(s, seq.type);
		ns.lower = tipc_sub_read(s, seq.lower);
		ns.upper = tipc_sub_read(s, seq.upper);
		pr_warn("Failed to create subscription for {%u,%u,%u}\n",
			ns.type, ns.lower, ns.upper);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_nametbl_unsubscribe - remove a subscription object from name table
 */
void tipc_nametbl_unsubscribe(struct tipc_subscription *sub)
{
	struct tipc_subscr *s = &sub->evt.s;
	struct tipc_net *tn = tipc_net(sub->net);
	struct name_seq *seq;
	u32 type = tipc_sub_read(s, seq.type);

	spin_lock_bh(&tn->nametbl_lock);
	seq = nametbl_find_seq(sub->net, type);
	if (seq != NULL) {
		spin_lock_bh(&seq->lock);
		list_del_init(&sub->nameseq_list);
		tipc_sub_put(sub);
		/* Free the sequence if it now has neither publications nor
		 * subscriptions (mirrors tipc_nametbl_remove_publ)
		 */
		if (!seq->first_free && list_empty(&seq->subscriptions)) {
			hlist_del_init_rcu(&seq->ns_list);
			kfree(seq->sseqs);
			spin_unlock_bh(&seq->lock);
			kfree_rcu(seq, rcu);
		} else {
			spin_unlock_bh(&seq->lock);
		}
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

/* Allocate and initialize the per-netns name table; returns 0 or -ENOMEM */
int tipc_nametbl_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct name_table *tipc_nametbl;
	int i;

	tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
	if (!tipc_nametbl)
		return -ENOMEM;

	for (i = 0; i < TIPC_NAMETBL_SIZE; i++)
		INIT_HLIST_HEAD(&tipc_nametbl->seq_hlist[i]);

	INIT_LIST_HEAD(&tipc_nametbl->node_scope);
	INIT_LIST_HEAD(&tipc_nametbl->cluster_scope);
	tn->nametbl = tipc_nametbl;
	spin_lock_init(&tn->nametbl_lock);
	return 0;
}

/**
 * tipc_purge_publications - remove all publications for a given type
 *
 * tipc_nametbl_lock must be held when calling this function
 */
static void tipc_purge_publications(struct net *net, struct name_seq *seq)
{
	struct publication *publ, *safe;
	struct sub_seq *sseq;
	struct name_info *info;

	spin_lock_bh(&seq->lock);
	/* NOTE(review): only the first sub-sequence's publication list is
	 * walked here — presumably removal keeps contracting the array so
	 * that index 0 always holds the next remaining sub-sequence; verify
	 */
	sseq = seq->sseqs;
	info = sseq->info;
	list_for_each_entry_safe(publ, safe, &info->cluster_list,
				 cluster_list) {
		tipc_nameseq_remove_publ(net, seq, publ->lower, publ->node,
					 publ->ref, publ->key);
		kfree_rcu(publ, rcu);
	}
	hlist_del_init_rcu(&seq->ns_list);
	kfree(seq->sseqs);
	spin_unlock_bh(&seq->lock);

	kfree_rcu(seq, rcu);
}

void tipc_nametbl_stop(struct net *net)
{
	u32 i;
	struct name_seq *seq;
	struct hlist_head *seq_head;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct name_table *tipc_nametbl = tn->nametbl;

	/* Verify name table is empty and purge any lingering
	 * publications, then release the name table
	 */
	spin_lock_bh(&tn->nametbl_lock);
	for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
		if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
			continue;
		seq_head = &tipc_nametbl->seq_hlist[i];
		hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
			tipc_purge_publications(net, seq);
		}
	}
	spin_unlock_bh(&tn->nametbl_lock);

	/* Wait for pending kfree_rcu() callbacks before freeing the table */
	synchronize_net();
	kfree(tipc_nametbl);

}

/* Emit one netlink message per publication of @sseq, resuming after
 * '*last_publ' (a publication key) when non-zero.  Returns 0 on
 * completion, -EPIPE if the resume point vanished, -EMSGSIZE when the
 * skb filled up (with '*last_publ' set for the next pass).
 */
static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
					struct name_seq *seq,
					struct sub_seq *sseq, u32 *last_publ)
{
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *publ;
	struct publication *p;

	if (*last_publ) {
		/* Resume from the publication recorded in the previous dump
		 * pass; it may have disappeared in the meantime
		 */
		list_for_each_entry(p, &sseq->info->cluster_list,
				    cluster_list)
			if (p->key == *last_publ)
				break;
		if (p->key != *last_publ)
			return -EPIPE;
	} else {
		p = list_first_entry(&sseq->info->cluster_list,
				     struct publication,
				     cluster_list);
	}

	list_for_each_entry_from(p, &sseq->info->cluster_list, cluster_list) {
		*last_publ = p->key;

		hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
				  &tipc_genl_family, NLM_F_MULTI,
				  TIPC_NL_NAME_TABLE_GET);
		if (!hdr)
			return -EMSGSIZE;

		attrs = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE);
		if (!attrs)
			goto msg_full;

		publ = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
		if (!publ)
			goto attr_msg_full;

		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, seq->type))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sseq->lower))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sseq->upper))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->ref))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
			goto publ_msg_full;

		nla_nest_end(msg->skb, publ);
		nla_nest_end(msg->skb, attrs);
		genlmsg_end(msg->skb, hdr);
	}
	*last_publ = 0;

	return 0;

publ_msg_full:
	nla_nest_cancel(msg->skb, publ);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Dump the sub-sequences of @seq, resuming at '*last_lower' when non-zero.
 * Returns 0 on completion, -EPIPE if the resume point vanished, or the
 * error from __tipc_nl_add_nametable_publ (with '*last_lower' updated).
 */
static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
				 u32 *last_lower, u32 *last_publ)
{
	struct sub_seq *sseq;
	struct sub_seq *sseq_start;
	int err;

	if (*last_lower) {
		sseq_start = nameseq_find_subseq(seq, *last_lower);
		if (!sseq_start)
			return -EPIPE;
	} else {
		sseq_start = seq->sseqs;
	}

	for (sseq = sseq_start; sseq != &seq->sseqs[seq->first_free]; sseq++) {
		err = __tipc_nl_add_nametable_publ(msg, seq, sseq, last_publ);
		if (err) {
			*last_lower = sseq->lower;
			return err;
		}
	}
	*last_lower = 0;

	return 0;
}

/* Walk all hash buckets and dump every name sequence, resuming from
 * <*last_type, *last_lower, *last_publ> when a previous pass stopped early.
 * Returns 0 on completion or an error with the resume state updated.
 */
static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
			    u32 *last_type, u32 *last_lower, u32 *last_publ)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct hlist_head *seq_head;
	struct name_seq *seq = NULL;
	int err;
	int i;

	if (*last_type)
		i = hash(*last_type);
	else
		i = 0;

	for (; i < TIPC_NAMETBL_SIZE; i++) {
		seq_head = &tn->nametbl->seq_hlist[i];

		if (*last_type) {
			seq = nametbl_find_seq(net, *last_type);
			if (!seq)
				return -EPIPE;
		} else {
			/* Pick the first entry of the chain, if any */
			hlist_for_each_entry_rcu(seq, seq_head, ns_list)
				break;
			if (!seq)
				continue;
		}

		hlist_for_each_entry_from_rcu(seq, ns_list) {
			spin_lock_bh(&seq->lock);
			err = __tipc_nl_subseq_list(msg, seq, last_lower,
						    last_publ);

			if (err) {
				*last_type = seq->type;
				spin_unlock_bh(&seq->lock);
				return err;
			}
			spin_unlock_bh(&seq->lock);
		}
		*last_type = 0;
	}
	return 0;
}

/* Netlink dump callback for TIPC_NL_NAME_TABLE_GET; dump position is kept
 * across calls in cb->args[0..3]
 */
int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	int done = cb->args[3];
	u32 last_type = cb->args[0];
	u32 last_lower = cb->args[1];
	u32 last_publ = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	err = tipc_nl_seq_list(net, &msg, &last_type, &last_lower, &last_publ);
	if (!err) {
		done = 1;
	} else if (err != -EMSGSIZE) {
		/* We never set seq or call nl_dump_check_consistent() this
		 * means that setting prev_seq here will cause the consistence
		 * check to fail in the netlink callback handler. Resulting in
		 * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if
		 * we got an error.
		 */
		cb->prev_seq = 1;
	}
	rcu_read_unlock();

	cb->args[0] = last_type;
	cb->args[1] = last_lower;
	cb->args[2] = last_publ;
	cb->args[3] = done;

	return skb->len;
}

/* Find the destination entry matching <node,port> in list 'l', or NULL */
struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
{
	u64 value = (u64)node << 32 | port;
	struct tipc_dest *dst;

	list_for_each_entry(dst, l, list) {
		if (dst->value != value)
			continue;
		return dst;
	}
	return NULL;
}

/* Add <node,port> to list 'l' unless already present; returns true if added */
bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
{
	u64 value = (u64)node << 32 | port;
	struct tipc_dest *dst;

	if (tipc_dest_find(l, node, port))
		return false;

	dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
	if (unlikely(!dst))
		return false;
	dst->value = value;
	list_add(&dst->list, l);
	return true;
}

/* Remove the first entry of list 'l', returning its node/port through the
 * (optional) out parameters; returns false if the list is empty
 */
bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port)
{
	struct tipc_dest *dst;

	if (list_empty(l))
		return false;
	dst = list_first_entry(l, typeof(*dst), list);
	if (port)
		*port = dst->port;
	if (node)
		*node = dst->node;
	list_del(&dst->list);
	kfree(dst);
	return true;
}

/* Remove and free the entry matching <node,port>; returns true if found */
bool tipc_dest_del(struct list_head *l, u32 node, u32 port)
{
	struct tipc_dest *dst;

	dst = tipc_dest_find(l, node, port);
	if (!dst)
		return false;
	list_del(&dst->list);
	kfree(dst);
	return true;
}

/* Free all entries of destination list 'l' */
void tipc_dest_list_purge(struct list_head *l)
{
	struct tipc_dest *dst, *tmp;

	list_for_each_entry_safe(dst, tmp, l, list) {
		list_del(&dst->list);
		kfree(dst);
	}
}

/* Count the entries of destination list 'l' */
int tipc_dest_list_len(struct list_head *l)
{
	struct tipc_dest *dst;
	int i = 0;

	list_for_each_entry(dst, l, list) {
		i++;
	}
	return i;
}