/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
};

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED  = 0xe,
	LINK_ESTABLISHING = 0xe << 4,
	LINK_RESET        = 0x1 << 8,
	LINK_RESETTING    = 0x2 << 12,
	LINK_PEER_RESET   = 0xd << 16,
	LINK_FAILINGOVER  = 0xf << 20,
	LINK_SYNCHING     = 0xc << 24
};

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
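/* Each FSM state above occupies its own nibble of l->state, so membership
 * in a set of states can be tested with a single bitwise AND, as done by
 * link_is_up() above and tipc_link_is_blocked() below.
 */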
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

int tipc_link_is_active(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
}

static u32 link_own_addr(struct tipc_link *l)
{
	return msg_prevnode(l->pmsg);
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @b: pointer to associated bearer
 * @ownnode: identity of own node
 * @peer: identity of peer node
 * @maddr: media address to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
		      u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
		      struct sk_buff_head *inputq, struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;
	struct tipc_msg *hdr;
	char *if_name;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;

	/* Note: peer i/f name is completed by reset/activate message */
	if_name = strchr(b->name, ':') + 1;
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));

	l->addr = peer;
	l->media_addr = maddr;
	l->owner = n;
	l->peer_session = WILDCARD_SESSION;
	l->bearer_id = b->identity;
	l->tolerance = b->tolerance;
	l->net_plane = b->net_plane;
	l->advertised_mtu = b->mtu;
	l->mtu = b->mtu;
	l->priority = b->priority;
	tipc_link_set_queue_limits(l, b->window);
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	l->pmsg = (struct tipc_msg *)&l->proto_msg;
	hdr = l->pmsg;
	tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
	msg_set_size(hdr, sizeof(l->proto_msg));
	msg_set_session(hdr, session);
	msg_set_bearer_id(hdr, l->bearer_id);
	strcpy((char *)msg_data(hdr), if_name);
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
221 */ 222 void tipc_link_build_bcast_sync_msg(struct tipc_link *l, 223 struct sk_buff_head *xmitq) 224 { 225 struct sk_buff *skb; 226 struct sk_buff_head list; 227 u16 last_sent; 228 229 skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, 230 0, l->addr, link_own_addr(l), 0, 0, 0); 231 if (!skb) 232 return; 233 last_sent = tipc_bclink_get_last_sent(l->owner->net); 234 msg_set_last_bcast(buf_msg(skb), last_sent); 235 __skb_queue_head_init(&list); 236 __skb_queue_tail(&list, skb); 237 tipc_link_xmit(l, &list, xmitq); 238 } 239 240 /** 241 * tipc_link_fsm_evt - link finite state machine 242 * @l: pointer to link 243 * @evt: state machine event to be processed 244 */ 245 int tipc_link_fsm_evt(struct tipc_link *l, int evt) 246 { 247 int rc = 0; 248 249 switch (l->state) { 250 case LINK_RESETTING: 251 switch (evt) { 252 case LINK_PEER_RESET_EVT: 253 l->state = LINK_PEER_RESET; 254 break; 255 case LINK_RESET_EVT: 256 l->state = LINK_RESET; 257 break; 258 case LINK_FAILURE_EVT: 259 case LINK_FAILOVER_BEGIN_EVT: 260 case LINK_ESTABLISH_EVT: 261 case LINK_FAILOVER_END_EVT: 262 case LINK_SYNCH_BEGIN_EVT: 263 case LINK_SYNCH_END_EVT: 264 default: 265 goto illegal_evt; 266 } 267 break; 268 case LINK_RESET: 269 switch (evt) { 270 case LINK_PEER_RESET_EVT: 271 l->state = LINK_ESTABLISHING; 272 break; 273 case LINK_FAILOVER_BEGIN_EVT: 274 l->state = LINK_FAILINGOVER; 275 case LINK_FAILURE_EVT: 276 case LINK_RESET_EVT: 277 case LINK_ESTABLISH_EVT: 278 case LINK_FAILOVER_END_EVT: 279 break; 280 case LINK_SYNCH_BEGIN_EVT: 281 case LINK_SYNCH_END_EVT: 282 default: 283 goto illegal_evt; 284 } 285 break; 286 case LINK_PEER_RESET: 287 switch (evt) { 288 case LINK_RESET_EVT: 289 l->state = LINK_ESTABLISHING; 290 break; 291 case LINK_PEER_RESET_EVT: 292 case LINK_ESTABLISH_EVT: 293 case LINK_FAILURE_EVT: 294 break; 295 case LINK_SYNCH_BEGIN_EVT: 296 case LINK_SYNCH_END_EVT: 297 case LINK_FAILOVER_BEGIN_EVT: 298 case LINK_FAILOVER_END_EVT: 299 default: 300 goto illegal_evt; 301 } 302 break; 303 case LINK_FAILINGOVER: 304 switch (evt) { 305 case LINK_FAILOVER_END_EVT: 306 l->state = LINK_RESET; 307 break; 308 case LINK_PEER_RESET_EVT: 309 case LINK_RESET_EVT: 310 case LINK_ESTABLISH_EVT: 311 case LINK_FAILURE_EVT: 312 break; 313 case LINK_FAILOVER_BEGIN_EVT: 314 case LINK_SYNCH_BEGIN_EVT: 315 case LINK_SYNCH_END_EVT: 316 default: 317 goto illegal_evt; 318 } 319 break; 320 case LINK_ESTABLISHING: 321 switch (evt) { 322 case LINK_ESTABLISH_EVT: 323 l->state = LINK_ESTABLISHED; 324 rc |= TIPC_LINK_UP_EVT; 325 break; 326 case LINK_FAILOVER_BEGIN_EVT: 327 l->state = LINK_FAILINGOVER; 328 break; 329 case LINK_PEER_RESET_EVT: 330 case LINK_RESET_EVT: 331 case LINK_FAILURE_EVT: 332 case LINK_SYNCH_BEGIN_EVT: 333 case LINK_FAILOVER_END_EVT: 334 break; 335 case LINK_SYNCH_END_EVT: 336 default: 337 goto illegal_evt; 338 } 339 break; 340 case LINK_ESTABLISHED: 341 switch (evt) { 342 case LINK_PEER_RESET_EVT: 343 l->state = LINK_PEER_RESET; 344 rc |= TIPC_LINK_DOWN_EVT; 345 break; 346 case LINK_FAILURE_EVT: 347 l->state = LINK_RESETTING; 348 rc |= TIPC_LINK_DOWN_EVT; 349 break; 350 case LINK_RESET_EVT: 351 l->state = LINK_RESET; 352 break; 353 case LINK_ESTABLISH_EVT: 354 case LINK_SYNCH_END_EVT: 355 break; 356 case LINK_SYNCH_BEGIN_EVT: 357 l->state = LINK_SYNCHING; 358 break; 359 case LINK_FAILOVER_BEGIN_EVT: 360 case LINK_FAILOVER_END_EVT: 361 default: 362 goto illegal_evt; 363 } 364 break; 365 case LINK_SYNCHING: 366 switch (evt) { 367 case LINK_PEER_RESET_EVT: 368 l->state = LINK_PEER_RESET; 369 rc |= 
TIPC_LINK_DOWN_EVT; 370 break; 371 case LINK_FAILURE_EVT: 372 l->state = LINK_RESETTING; 373 rc |= TIPC_LINK_DOWN_EVT; 374 break; 375 case LINK_RESET_EVT: 376 l->state = LINK_RESET; 377 break; 378 case LINK_ESTABLISH_EVT: 379 case LINK_SYNCH_BEGIN_EVT: 380 break; 381 case LINK_SYNCH_END_EVT: 382 l->state = LINK_ESTABLISHED; 383 break; 384 case LINK_FAILOVER_BEGIN_EVT: 385 case LINK_FAILOVER_END_EVT: 386 default: 387 goto illegal_evt; 388 } 389 break; 390 default: 391 pr_err("Unknown FSM state %x in %s\n", l->state, l->name); 392 } 393 return rc; 394 illegal_evt: 395 pr_err("Illegal FSM event %x in state %x on link %s\n", 396 evt, l->state, l->name); 397 return rc; 398 } 399 400 /* link_profile_stats - update statistical profiling of traffic 401 */ 402 static void link_profile_stats(struct tipc_link *l) 403 { 404 struct sk_buff *skb; 405 struct tipc_msg *msg; 406 int length; 407 408 /* Update counters used in statistical profiling of send traffic */ 409 l->stats.accu_queue_sz += skb_queue_len(&l->transmq); 410 l->stats.queue_sz_counts++; 411 412 skb = skb_peek(&l->transmq); 413 if (!skb) 414 return; 415 msg = buf_msg(skb); 416 length = msg_size(msg); 417 418 if (msg_user(msg) == MSG_FRAGMENTER) { 419 if (msg_type(msg) != FIRST_FRAGMENT) 420 return; 421 length = msg_size(msg_get_wrapped(msg)); 422 } 423 l->stats.msg_lengths_total += length; 424 l->stats.msg_length_counts++; 425 if (length <= 64) 426 l->stats.msg_length_profile[0]++; 427 else if (length <= 256) 428 l->stats.msg_length_profile[1]++; 429 else if (length <= 1024) 430 l->stats.msg_length_profile[2]++; 431 else if (length <= 4096) 432 l->stats.msg_length_profile[3]++; 433 else if (length <= 16384) 434 l->stats.msg_length_profile[4]++; 435 else if (length <= 32768) 436 l->stats.msg_length_profile[5]++; 437 else 438 l->stats.msg_length_profile[6]++; 439 } 440 441 /* tipc_link_timeout - perform periodic task as instructed from node timeout 442 */ 443 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) 444 { 445 int rc = 0; 446 int mtyp = STATE_MSG; 447 bool xmit = false; 448 bool prb = false; 449 450 link_profile_stats(l); 451 452 switch (l->state) { 453 case LINK_ESTABLISHED: 454 case LINK_SYNCHING: 455 if (!l->silent_intv_cnt) { 456 if (tipc_bclink_acks_missing(l->owner)) 457 xmit = true; 458 } else if (l->silent_intv_cnt <= l->abort_limit) { 459 xmit = true; 460 prb = true; 461 } else { 462 rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 463 } 464 l->silent_intv_cnt++; 465 break; 466 case LINK_RESET: 467 xmit = true; 468 mtyp = RESET_MSG; 469 break; 470 case LINK_ESTABLISHING: 471 xmit = true; 472 mtyp = ACTIVATE_MSG; 473 break; 474 case LINK_PEER_RESET: 475 case LINK_RESETTING: 476 case LINK_FAILINGOVER: 477 break; 478 default: 479 break; 480 } 481 482 if (xmit) 483 tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq); 484 485 return rc; 486 } 487 488 /** 489 * link_schedule_user - schedule a message sender for wakeup after congestion 490 * @link: congested link 491 * @list: message that was attempted sent 492 * Create pseudo msg to send back to user when congestion abates 493 * Does not consume buffer list 494 */ 495 static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list) 496 { 497 struct tipc_msg *msg = buf_msg(skb_peek(list)); 498 int imp = msg_importance(msg); 499 u32 oport = msg_origport(msg); 500 u32 addr = link_own_addr(link); 501 struct sk_buff *skb; 502 503 /* This really cannot happen... 
*/ 504 if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) { 505 pr_warn("%s<%s>, send queue full", link_rst_msg, link->name); 506 return -ENOBUFS; 507 } 508 /* Non-blocking sender: */ 509 if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending) 510 return -ELINKCONG; 511 512 /* Create and schedule wakeup pseudo message */ 513 skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, 514 addr, addr, oport, 0, 0); 515 if (!skb) 516 return -ENOBUFS; 517 TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list); 518 TIPC_SKB_CB(skb)->chain_imp = imp; 519 skb_queue_tail(&link->wakeupq, skb); 520 link->stats.link_congs++; 521 return -ELINKCONG; 522 } 523 524 /** 525 * link_prepare_wakeup - prepare users for wakeup after congestion 526 * @link: congested link 527 * Move a number of waiting users, as permitted by available space in 528 * the send queue, from link wait queue to node wait queue for wakeup 529 */ 530 void link_prepare_wakeup(struct tipc_link *l) 531 { 532 int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,}; 533 int imp, lim; 534 struct sk_buff *skb, *tmp; 535 536 skb_queue_walk_safe(&l->wakeupq, skb, tmp) { 537 imp = TIPC_SKB_CB(skb)->chain_imp; 538 lim = l->window + l->backlog[imp].limit; 539 pnd[imp] += TIPC_SKB_CB(skb)->chain_sz; 540 if ((pnd[imp] + l->backlog[imp].len) >= lim) 541 break; 542 skb_unlink(skb, &l->wakeupq); 543 skb_queue_tail(l->inputq, skb); 544 } 545 } 546 547 /** 548 * tipc_link_reset_fragments - purge link's inbound message fragments queue 549 * @l_ptr: pointer to link 550 */ 551 void tipc_link_reset_fragments(struct tipc_link *l_ptr) 552 { 553 kfree_skb(l_ptr->reasm_buf); 554 l_ptr->reasm_buf = NULL; 555 } 556 557 void tipc_link_purge_backlog(struct tipc_link *l) 558 { 559 __skb_queue_purge(&l->backlogq); 560 l->backlog[TIPC_LOW_IMPORTANCE].len = 0; 561 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0; 562 l->backlog[TIPC_HIGH_IMPORTANCE].len = 0; 563 l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0; 564 l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0; 565 } 566 567 /** 568 * tipc_link_purge_queues - purge all pkt queues associated with link 569 * @l_ptr: pointer to link 570 */ 571 void tipc_link_purge_queues(struct tipc_link *l_ptr) 572 { 573 __skb_queue_purge(&l_ptr->deferdq); 574 __skb_queue_purge(&l_ptr->transmq); 575 tipc_link_purge_backlog(l_ptr); 576 tipc_link_reset_fragments(l_ptr); 577 } 578 579 void tipc_link_reset(struct tipc_link *l) 580 { 581 tipc_link_fsm_evt(l, LINK_RESET_EVT); 582 583 /* Link is down, accept any session */ 584 l->peer_session = WILDCARD_SESSION; 585 586 /* If peer is up, it only accepts an incremented session number */ 587 msg_set_session(l->pmsg, msg_session(l->pmsg) + 1); 588 589 /* Prepare for renewed mtu size negotiation */ 590 l->mtu = l->advertised_mtu; 591 592 /* Clean up all queues: */ 593 __skb_queue_purge(&l->transmq); 594 __skb_queue_purge(&l->deferdq); 595 skb_queue_splice_init(&l->wakeupq, l->inputq); 596 597 tipc_link_purge_backlog(l); 598 kfree_skb(l->reasm_buf); 599 kfree_skb(l->failover_reasm_skb); 600 l->reasm_buf = NULL; 601 l->failover_reasm_skb = NULL; 602 l->rcv_unacked = 0; 603 l->snd_nxt = 1; 604 l->rcv_nxt = 1; 605 l->silent_intv_cnt = 0; 606 l->stats.recv_info = 0; 607 l->stale_count = 0; 608 link_reset_statistics(l); 609 } 610 611 /** 612 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked 613 * @link: link to use 614 * @list: chain of buffers containing message 615 * 616 * Consumes the buffer chain, except when returning an error code, 617 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS 618 * Messages at 
TIPC_SYSTEM_IMPORTANCE are always accepted 619 */ 620 int __tipc_link_xmit(struct net *net, struct tipc_link *link, 621 struct sk_buff_head *list) 622 { 623 struct tipc_msg *msg = buf_msg(skb_peek(list)); 624 unsigned int maxwin = link->window; 625 unsigned int i, imp = msg_importance(msg); 626 uint mtu = link->mtu; 627 u16 ack = mod(link->rcv_nxt - 1); 628 u16 seqno = link->snd_nxt; 629 u16 bc_last_in = link->owner->bclink.last_in; 630 struct tipc_media_addr *addr = link->media_addr; 631 struct sk_buff_head *transmq = &link->transmq; 632 struct sk_buff_head *backlogq = &link->backlogq; 633 struct sk_buff *skb, *bskb; 634 635 /* Match msg importance against this and all higher backlog limits: */ 636 for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) { 637 if (unlikely(link->backlog[i].len >= link->backlog[i].limit)) 638 return link_schedule_user(link, list); 639 } 640 if (unlikely(msg_size(msg) > mtu)) 641 return -EMSGSIZE; 642 643 /* Prepare each packet for sending, and add to relevant queue: */ 644 while (skb_queue_len(list)) { 645 skb = skb_peek(list); 646 msg = buf_msg(skb); 647 msg_set_seqno(msg, seqno); 648 msg_set_ack(msg, ack); 649 msg_set_bcast_ack(msg, bc_last_in); 650 651 if (likely(skb_queue_len(transmq) < maxwin)) { 652 __skb_dequeue(list); 653 __skb_queue_tail(transmq, skb); 654 tipc_bearer_send(net, link->bearer_id, skb, addr); 655 link->rcv_unacked = 0; 656 seqno++; 657 continue; 658 } 659 if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) { 660 kfree_skb(__skb_dequeue(list)); 661 link->stats.sent_bundled++; 662 continue; 663 } 664 if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) { 665 kfree_skb(__skb_dequeue(list)); 666 __skb_queue_tail(backlogq, bskb); 667 link->backlog[msg_importance(buf_msg(bskb))].len++; 668 link->stats.sent_bundled++; 669 link->stats.sent_bundles++; 670 continue; 671 } 672 link->backlog[imp].len += skb_queue_len(list); 673 skb_queue_splice_tail_init(list, backlogq); 674 } 675 link->snd_nxt = seqno; 676 return 0; 677 } 678 679 /** 680 * tipc_link_xmit(): enqueue buffer list according to queue situation 681 * @link: link to use 682 * @list: chain of buffers containing message 683 * @xmitq: returned list of packets to be sent by caller 684 * 685 * Consumes the buffer chain, except when returning -ELINKCONG, 686 * since the caller then may want to make more send attempts. 
687 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS 688 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted 689 */ 690 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, 691 struct sk_buff_head *xmitq) 692 { 693 struct tipc_msg *hdr = buf_msg(skb_peek(list)); 694 unsigned int maxwin = l->window; 695 unsigned int i, imp = msg_importance(hdr); 696 unsigned int mtu = l->mtu; 697 u16 ack = l->rcv_nxt - 1; 698 u16 seqno = l->snd_nxt; 699 u16 bc_last_in = l->owner->bclink.last_in; 700 struct sk_buff_head *transmq = &l->transmq; 701 struct sk_buff_head *backlogq = &l->backlogq; 702 struct sk_buff *skb, *_skb, *bskb; 703 704 /* Match msg importance against this and all higher backlog limits: */ 705 for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) { 706 if (unlikely(l->backlog[i].len >= l->backlog[i].limit)) 707 return link_schedule_user(l, list); 708 } 709 if (unlikely(msg_size(hdr) > mtu)) 710 return -EMSGSIZE; 711 712 /* Prepare each packet for sending, and add to relevant queue: */ 713 while (skb_queue_len(list)) { 714 skb = skb_peek(list); 715 hdr = buf_msg(skb); 716 msg_set_seqno(hdr, seqno); 717 msg_set_ack(hdr, ack); 718 msg_set_bcast_ack(hdr, bc_last_in); 719 720 if (likely(skb_queue_len(transmq) < maxwin)) { 721 _skb = skb_clone(skb, GFP_ATOMIC); 722 if (!_skb) 723 return -ENOBUFS; 724 __skb_dequeue(list); 725 __skb_queue_tail(transmq, skb); 726 __skb_queue_tail(xmitq, _skb); 727 l->rcv_unacked = 0; 728 seqno++; 729 continue; 730 } 731 if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) { 732 kfree_skb(__skb_dequeue(list)); 733 l->stats.sent_bundled++; 734 continue; 735 } 736 if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) { 737 kfree_skb(__skb_dequeue(list)); 738 __skb_queue_tail(backlogq, bskb); 739 l->backlog[msg_importance(buf_msg(bskb))].len++; 740 l->stats.sent_bundled++; 741 l->stats.sent_bundles++; 742 continue; 743 } 744 l->backlog[imp].len += skb_queue_len(list); 745 skb_queue_splice_tail_init(list, backlogq); 746 } 747 l->snd_nxt = seqno; 748 return 0; 749 } 750 751 /* 752 * tipc_link_sync_rcv - synchronize broadcast link endpoints. 753 * Receive the sequence number where we should start receiving and 754 * acking broadcast packets from a newly added peer node, and open 755 * up for reception of such packets. 756 * 757 * Called with node locked 758 */ 759 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf) 760 { 761 struct tipc_msg *msg = buf_msg(buf); 762 763 n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg); 764 n->bclink.recv_permitted = true; 765 kfree_skb(buf); 766 } 767 768 /* 769 * tipc_link_push_packets - push unsent packets to bearer 770 * 771 * Push out the unsent messages of a link where congestion 772 * has abated. Node is locked. 
773 * 774 * Called with node locked 775 */ 776 void tipc_link_push_packets(struct tipc_link *link) 777 { 778 struct sk_buff *skb; 779 struct tipc_msg *msg; 780 u16 seqno = link->snd_nxt; 781 u16 ack = mod(link->rcv_nxt - 1); 782 783 while (skb_queue_len(&link->transmq) < link->window) { 784 skb = __skb_dequeue(&link->backlogq); 785 if (!skb) 786 break; 787 msg = buf_msg(skb); 788 link->backlog[msg_importance(msg)].len--; 789 msg_set_ack(msg, ack); 790 msg_set_seqno(msg, seqno); 791 seqno = mod(seqno + 1); 792 msg_set_bcast_ack(msg, link->owner->bclink.last_in); 793 link->rcv_unacked = 0; 794 __skb_queue_tail(&link->transmq, skb); 795 tipc_bearer_send(link->owner->net, link->bearer_id, 796 skb, link->media_addr); 797 } 798 link->snd_nxt = seqno; 799 } 800 801 void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq) 802 { 803 struct sk_buff *skb, *_skb; 804 struct tipc_msg *hdr; 805 u16 seqno = l->snd_nxt; 806 u16 ack = l->rcv_nxt - 1; 807 808 while (skb_queue_len(&l->transmq) < l->window) { 809 skb = skb_peek(&l->backlogq); 810 if (!skb) 811 break; 812 _skb = skb_clone(skb, GFP_ATOMIC); 813 if (!_skb) 814 break; 815 __skb_dequeue(&l->backlogq); 816 hdr = buf_msg(skb); 817 l->backlog[msg_importance(hdr)].len--; 818 __skb_queue_tail(&l->transmq, skb); 819 __skb_queue_tail(xmitq, _skb); 820 msg_set_ack(hdr, ack); 821 msg_set_seqno(hdr, seqno); 822 msg_set_bcast_ack(hdr, l->owner->bclink.last_in); 823 l->rcv_unacked = 0; 824 seqno++; 825 } 826 l->snd_nxt = seqno; 827 } 828 829 static void link_retransmit_failure(struct tipc_link *l_ptr, 830 struct sk_buff *buf) 831 { 832 struct tipc_msg *msg = buf_msg(buf); 833 struct net *net = l_ptr->owner->net; 834 835 pr_warn("Retransmission failure on link <%s>\n", l_ptr->name); 836 837 if (l_ptr->addr) { 838 /* Handle failure on standard link */ 839 link_print(l_ptr, "Resetting link "); 840 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n", 841 msg_user(msg), msg_type(msg), msg_size(msg), 842 msg_errcode(msg)); 843 pr_info("sqno %u, prev: %x, src: %x\n", 844 msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg)); 845 } else { 846 /* Handle failure on broadcast link */ 847 struct tipc_node *n_ptr; 848 char addr_string[16]; 849 850 pr_info("Msg seq number: %u, ", msg_seqno(msg)); 851 pr_cont("Outstanding acks: %lu\n", 852 (unsigned long) TIPC_SKB_CB(buf)->handle); 853 854 n_ptr = tipc_bclink_retransmit_to(net); 855 856 tipc_addr_string_fill(addr_string, n_ptr->addr); 857 pr_info("Broadcast link info for %s\n", addr_string); 858 pr_info("Reception permitted: %d, Acked: %u\n", 859 n_ptr->bclink.recv_permitted, 860 n_ptr->bclink.acked); 861 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n", 862 n_ptr->bclink.last_in, 863 n_ptr->bclink.oos_state, 864 n_ptr->bclink.last_sent); 865 866 n_ptr->action_flags |= TIPC_BCAST_RESET; 867 l_ptr->stale_count = 0; 868 } 869 } 870 871 void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb, 872 u32 retransmits) 873 { 874 struct tipc_msg *msg; 875 876 if (!skb) 877 return; 878 879 msg = buf_msg(skb); 880 881 /* Detect repeated retransmit failures */ 882 if (l_ptr->last_retransm == msg_seqno(msg)) { 883 if (++l_ptr->stale_count > 100) { 884 link_retransmit_failure(l_ptr, skb); 885 return; 886 } 887 } else { 888 l_ptr->last_retransm = msg_seqno(msg); 889 l_ptr->stale_count = 1; 890 } 891 892 skb_queue_walk_from(&l_ptr->transmq, skb) { 893 if (!retransmits) 894 break; 895 msg = buf_msg(skb); 896 msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1)); 897 msg_set_bcast_ack(msg, 
l_ptr->owner->bclink.last_in); 898 tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb, 899 l_ptr->media_addr); 900 retransmits--; 901 l_ptr->stats.retransmitted++; 902 } 903 } 904 905 static int tipc_link_retransm(struct tipc_link *l, int retransm, 906 struct sk_buff_head *xmitq) 907 { 908 struct sk_buff *_skb, *skb = skb_peek(&l->transmq); 909 struct tipc_msg *hdr; 910 911 if (!skb) 912 return 0; 913 914 /* Detect repeated retransmit failures on same packet */ 915 if (likely(l->last_retransm != buf_seqno(skb))) { 916 l->last_retransm = buf_seqno(skb); 917 l->stale_count = 1; 918 } else if (++l->stale_count > 100) { 919 link_retransmit_failure(l, skb); 920 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 921 } 922 skb_queue_walk(&l->transmq, skb) { 923 if (!retransm) 924 return 0; 925 hdr = buf_msg(skb); 926 _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC); 927 if (!_skb) 928 return 0; 929 hdr = buf_msg(_skb); 930 msg_set_ack(hdr, l->rcv_nxt - 1); 931 msg_set_bcast_ack(hdr, l->owner->bclink.last_in); 932 _skb->priority = TC_PRIO_CONTROL; 933 __skb_queue_tail(xmitq, _skb); 934 retransm--; 935 l->stats.retransmitted++; 936 } 937 return 0; 938 } 939 940 /* tipc_data_input - deliver data and name distr msgs to upper layer 941 * 942 * Consumes buffer if message is of right type 943 * Node lock must be held 944 */ 945 static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb, 946 struct sk_buff_head *inputq) 947 { 948 struct tipc_node *node = link->owner; 949 950 switch (msg_user(buf_msg(skb))) { 951 case TIPC_LOW_IMPORTANCE: 952 case TIPC_MEDIUM_IMPORTANCE: 953 case TIPC_HIGH_IMPORTANCE: 954 case TIPC_CRITICAL_IMPORTANCE: 955 case CONN_MANAGER: 956 __skb_queue_tail(inputq, skb); 957 return true; 958 case NAME_DISTRIBUTOR: 959 node->bclink.recv_permitted = true; 960 skb_queue_tail(link->namedq, skb); 961 return true; 962 case MSG_BUNDLER: 963 case TUNNEL_PROTOCOL: 964 case MSG_FRAGMENTER: 965 case BCAST_PROTOCOL: 966 return false; 967 default: 968 pr_warn("Dropping received illegal msg type\n"); 969 kfree_skb(skb); 970 return false; 971 }; 972 } 973 974 /* tipc_link_input - process packet that has passed link protocol check 975 * 976 * Consumes buffer 977 */ 978 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb, 979 struct sk_buff_head *inputq) 980 { 981 struct tipc_node *node = l->owner; 982 struct tipc_msg *hdr = buf_msg(skb); 983 struct sk_buff **reasm_skb = &l->reasm_buf; 984 struct sk_buff *iskb; 985 int usr = msg_user(hdr); 986 int rc = 0; 987 int pos = 0; 988 int ipos = 0; 989 990 if (unlikely(usr == TUNNEL_PROTOCOL)) { 991 if (msg_type(hdr) == SYNCH_MSG) { 992 __skb_queue_purge(&l->deferdq); 993 goto drop; 994 } 995 if (!tipc_msg_extract(skb, &iskb, &ipos)) 996 return rc; 997 kfree_skb(skb); 998 skb = iskb; 999 hdr = buf_msg(skb); 1000 if (less(msg_seqno(hdr), l->drop_point)) 1001 goto drop; 1002 if (tipc_data_input(l, skb, inputq)) 1003 return rc; 1004 usr = msg_user(hdr); 1005 reasm_skb = &l->failover_reasm_skb; 1006 } 1007 1008 if (usr == MSG_BUNDLER) { 1009 l->stats.recv_bundles++; 1010 l->stats.recv_bundled += msg_msgcnt(hdr); 1011 while (tipc_msg_extract(skb, &iskb, &pos)) 1012 tipc_data_input(l, iskb, inputq); 1013 return 0; 1014 } else if (usr == MSG_FRAGMENTER) { 1015 l->stats.recv_fragments++; 1016 if (tipc_buf_append(reasm_skb, &skb)) { 1017 l->stats.recv_fragmented++; 1018 tipc_data_input(l, skb, inputq); 1019 } else if (!*reasm_skb) { 1020 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1021 } 1022 return 0; 1023 } else if (usr == 
BCAST_PROTOCOL) { 1024 tipc_link_sync_rcv(node, skb); 1025 return 0; 1026 } 1027 drop: 1028 kfree_skb(skb); 1029 return 0; 1030 } 1031 1032 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked) 1033 { 1034 bool released = false; 1035 struct sk_buff *skb, *tmp; 1036 1037 skb_queue_walk_safe(&l->transmq, skb, tmp) { 1038 if (more(buf_seqno(skb), acked)) 1039 break; 1040 __skb_unlink(skb, &l->transmq); 1041 kfree_skb(skb); 1042 released = true; 1043 } 1044 return released; 1045 } 1046 1047 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node 1048 * @link: the link that should handle the message 1049 * @skb: TIPC packet 1050 * @xmitq: queue to place packets to be sent after this call 1051 */ 1052 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, 1053 struct sk_buff_head *xmitq) 1054 { 1055 struct sk_buff_head *arrvq = &l->deferdq; 1056 struct sk_buff_head tmpq; 1057 struct tipc_msg *hdr; 1058 u16 seqno, rcv_nxt; 1059 int rc = 0; 1060 1061 __skb_queue_head_init(&tmpq); 1062 1063 if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) { 1064 if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV)) 1065 tipc_link_build_proto_msg(l, STATE_MSG, 0, 1066 0, 0, 0, xmitq); 1067 return rc; 1068 } 1069 1070 while ((skb = skb_peek(arrvq))) { 1071 hdr = buf_msg(skb); 1072 1073 /* Verify and update link state */ 1074 if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) { 1075 __skb_dequeue(arrvq); 1076 rc = tipc_link_proto_rcv(l, skb, xmitq); 1077 continue; 1078 } 1079 1080 if (unlikely(!link_is_up(l))) { 1081 rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); 1082 if (!link_is_up(l)) { 1083 kfree_skb(__skb_dequeue(arrvq)); 1084 goto exit; 1085 } 1086 } 1087 1088 l->silent_intv_cnt = 0; 1089 1090 /* Forward queues and wake up waiting users */ 1091 if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) { 1092 tipc_link_advance_backlog(l, xmitq); 1093 if (unlikely(!skb_queue_empty(&l->wakeupq))) 1094 link_prepare_wakeup(l); 1095 } 1096 1097 /* Defer reception if there is a gap in the sequence */ 1098 seqno = msg_seqno(hdr); 1099 rcv_nxt = l->rcv_nxt; 1100 if (unlikely(less(rcv_nxt, seqno))) { 1101 l->stats.deferred_recv++; 1102 goto exit; 1103 } 1104 1105 __skb_dequeue(arrvq); 1106 1107 /* Drop if packet already received */ 1108 if (unlikely(more(rcv_nxt, seqno))) { 1109 l->stats.duplicates++; 1110 kfree_skb(skb); 1111 goto exit; 1112 } 1113 1114 /* Packet can be delivered */ 1115 l->rcv_nxt++; 1116 l->stats.recv_info++; 1117 if (unlikely(!tipc_data_input(l, skb, &tmpq))) 1118 rc = tipc_link_input(l, skb, &tmpq); 1119 1120 /* Ack at regular intervals */ 1121 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) { 1122 l->rcv_unacked = 0; 1123 l->stats.sent_acks++; 1124 tipc_link_build_proto_msg(l, STATE_MSG, 1125 0, 0, 0, 0, xmitq); 1126 } 1127 } 1128 exit: 1129 tipc_skb_queue_splice_tail(&tmpq, l->inputq); 1130 return rc; 1131 } 1132 1133 /** 1134 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue 1135 * 1136 * Returns increase in queue length (i.e. 0 or 1) 1137 */ 1138 u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb) 1139 { 1140 struct sk_buff *skb1; 1141 u16 seq_no = buf_seqno(skb); 1142 1143 /* Empty queue ? */ 1144 if (skb_queue_empty(list)) { 1145 __skb_queue_tail(list, skb); 1146 return 1; 1147 } 1148 1149 /* Last ? 
 */
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u16 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *skb = NULL;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
				  tolerance, priority, &xmitq);
	skb = __skb_dequeue(&xmitq);
	if (!skb)
		return;
	tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
	l->rcv_unacked = 0;
	kfree_skb(skb);
}

/* tipc_link_build_proto_msg: prepare link protocol message for transmission
 */
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	u16 snd_nxt = l->snd_nxt;
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcv_last = rcv_nxt - 1;
	int node_up = l->owner->bclink.recv_permitted;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;
		msg_set_next_sent(hdr, snd_nxt);

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_ack(hdr, rcv_last);
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_next_sent(hdr, 1);
	}
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}
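/* A link emits three kinds of protocol messages: RESET_MSG while the link
 * is down, ACTIVATE_MSG while it is being established, and STATE_MSG for
 * the periodic probes, acknowledgments and NACK gap reports once it is up
 * (see tipc_link_timeout() above).
 */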
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}

/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time.
The node with lowest numerical id determines 1322 * network plane 1323 */ 1324 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, 1325 struct sk_buff_head *xmitq) 1326 { 1327 struct tipc_msg *hdr = buf_msg(skb); 1328 u16 rcvgap = 0; 1329 u16 nacked_gap = msg_seq_gap(hdr); 1330 u16 peers_snd_nxt = msg_next_sent(hdr); 1331 u16 peers_tol = msg_link_tolerance(hdr); 1332 u16 peers_prio = msg_linkprio(hdr); 1333 u16 rcv_nxt = l->rcv_nxt; 1334 char *if_name; 1335 int rc = 0; 1336 1337 if (tipc_link_is_blocked(l)) 1338 goto exit; 1339 1340 if (link_own_addr(l) > msg_prevnode(hdr)) 1341 l->net_plane = msg_net_plane(hdr); 1342 1343 switch (msg_type(hdr)) { 1344 case RESET_MSG: 1345 1346 /* Ignore duplicate RESET with old session number */ 1347 if ((less_eq(msg_session(hdr), l->peer_session)) && 1348 (l->peer_session != WILDCARD_SESSION)) 1349 break; 1350 /* fall thru' */ 1351 1352 case ACTIVATE_MSG: 1353 1354 /* Complete own link name with peer's interface name */ 1355 if_name = strrchr(l->name, ':') + 1; 1356 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME) 1357 break; 1358 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME) 1359 break; 1360 strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME); 1361 1362 /* Update own tolerance if peer indicates a non-zero value */ 1363 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) 1364 l->tolerance = peers_tol; 1365 1366 /* Update own priority if peer's priority is higher */ 1367 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI)) 1368 l->priority = peers_prio; 1369 1370 if (msg_type(hdr) == RESET_MSG) { 1371 rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); 1372 } else if (!link_is_up(l)) { 1373 tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); 1374 rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); 1375 } 1376 l->peer_session = msg_session(hdr); 1377 l->peer_bearer_id = msg_bearer_id(hdr); 1378 if (l->mtu > msg_max_pkt(hdr)) 1379 l->mtu = msg_max_pkt(hdr); 1380 break; 1381 1382 case STATE_MSG: 1383 1384 /* Update own tolerance if peer indicates a non-zero value */ 1385 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) 1386 l->tolerance = peers_tol; 1387 1388 l->silent_intv_cnt = 0; 1389 l->stats.recv_states++; 1390 if (msg_probe(hdr)) 1391 l->stats.recv_probes++; 1392 rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); 1393 if (!link_is_up(l)) 1394 break; 1395 1396 /* Send NACK if peer has sent pkts we haven't received yet */ 1397 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l)) 1398 rcvgap = peers_snd_nxt - l->rcv_nxt; 1399 if (rcvgap || (msg_probe(hdr))) 1400 tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap, 1401 0, 0, xmitq); 1402 tipc_link_release_pkts(l, msg_ack(hdr)); 1403 1404 /* If NACK, retransmit will now start at right position */ 1405 if (nacked_gap) { 1406 rc = tipc_link_retransm(l, nacked_gap, xmitq); 1407 l->stats.recv_nacks++; 1408 } 1409 1410 tipc_link_advance_backlog(l, xmitq); 1411 if (unlikely(!skb_queue_empty(&l->wakeupq))) 1412 link_prepare_wakeup(l); 1413 } 1414 exit: 1415 kfree_skb(skb); 1416 return rc; 1417 } 1418 1419 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) 1420 { 1421 int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE); 1422 1423 l->window = win; 1424 l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2; 1425 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win; 1426 l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3; 1427 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2; 1428 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; 1429 } 1430 1431 /* 
tipc_link_find_owner - locate owner node of link by link's name 1432 * @net: the applicable net namespace 1433 * @name: pointer to link name string 1434 * @bearer_id: pointer to index in 'node->links' array where the link was found. 1435 * 1436 * Returns pointer to node owning the link, or 0 if no matching link is found. 1437 */ 1438 static struct tipc_node *tipc_link_find_owner(struct net *net, 1439 const char *link_name, 1440 unsigned int *bearer_id) 1441 { 1442 struct tipc_net *tn = net_generic(net, tipc_net_id); 1443 struct tipc_link *l_ptr; 1444 struct tipc_node *n_ptr; 1445 struct tipc_node *found_node = NULL; 1446 int i; 1447 1448 *bearer_id = 0; 1449 rcu_read_lock(); 1450 list_for_each_entry_rcu(n_ptr, &tn->node_list, list) { 1451 tipc_node_lock(n_ptr); 1452 for (i = 0; i < MAX_BEARERS; i++) { 1453 l_ptr = n_ptr->links[i].link; 1454 if (l_ptr && !strcmp(l_ptr->name, link_name)) { 1455 *bearer_id = i; 1456 found_node = n_ptr; 1457 break; 1458 } 1459 } 1460 tipc_node_unlock(n_ptr); 1461 if (found_node) 1462 break; 1463 } 1464 rcu_read_unlock(); 1465 1466 return found_node; 1467 } 1468 1469 /** 1470 * link_reset_statistics - reset link statistics 1471 * @l_ptr: pointer to link 1472 */ 1473 static void link_reset_statistics(struct tipc_link *l_ptr) 1474 { 1475 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); 1476 l_ptr->stats.sent_info = l_ptr->snd_nxt; 1477 l_ptr->stats.recv_info = l_ptr->rcv_nxt; 1478 } 1479 1480 static void link_print(struct tipc_link *l, const char *str) 1481 { 1482 struct sk_buff *hskb = skb_peek(&l->transmq); 1483 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt; 1484 u16 tail = l->snd_nxt - 1; 1485 1486 pr_info("%s Link <%s> state %x\n", str, l->name, l->state); 1487 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n", 1488 skb_queue_len(&l->transmq), head, tail, 1489 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt); 1490 } 1491 1492 /* Parse and validate nested (link) properties valid for media, bearer and link 1493 */ 1494 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]) 1495 { 1496 int err; 1497 1498 err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop, 1499 tipc_nl_prop_policy); 1500 if (err) 1501 return err; 1502 1503 if (props[TIPC_NLA_PROP_PRIO]) { 1504 u32 prio; 1505 1506 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); 1507 if (prio > TIPC_MAX_LINK_PRI) 1508 return -EINVAL; 1509 } 1510 1511 if (props[TIPC_NLA_PROP_TOL]) { 1512 u32 tol; 1513 1514 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); 1515 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL)) 1516 return -EINVAL; 1517 } 1518 1519 if (props[TIPC_NLA_PROP_WIN]) { 1520 u32 win; 1521 1522 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); 1523 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN)) 1524 return -EINVAL; 1525 } 1526 1527 return 0; 1528 } 1529 1530 int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) 1531 { 1532 int err; 1533 int res = 0; 1534 int bearer_id; 1535 char *name; 1536 struct tipc_link *link; 1537 struct tipc_node *node; 1538 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; 1539 struct net *net = sock_net(skb->sk); 1540 1541 if (!info->attrs[TIPC_NLA_LINK]) 1542 return -EINVAL; 1543 1544 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, 1545 info->attrs[TIPC_NLA_LINK], 1546 tipc_nl_link_policy); 1547 if (err) 1548 return err; 1549 1550 if (!attrs[TIPC_NLA_LINK_NAME]) 1551 return -EINVAL; 1552 1553 name = nla_data(attrs[TIPC_NLA_LINK_NAME]); 1554 1555 if (strcmp(name, tipc_bclink_name) == 0) 1556 return 
tipc_nl_bc_link_set(net, attrs); 1557 1558 node = tipc_link_find_owner(net, name, &bearer_id); 1559 if (!node) 1560 return -EINVAL; 1561 1562 tipc_node_lock(node); 1563 1564 link = node->links[bearer_id].link; 1565 if (!link) { 1566 res = -EINVAL; 1567 goto out; 1568 } 1569 1570 if (attrs[TIPC_NLA_LINK_PROP]) { 1571 struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; 1572 1573 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], 1574 props); 1575 if (err) { 1576 res = err; 1577 goto out; 1578 } 1579 1580 if (props[TIPC_NLA_PROP_TOL]) { 1581 u32 tol; 1582 1583 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); 1584 link->tolerance = tol; 1585 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0); 1586 } 1587 if (props[TIPC_NLA_PROP_PRIO]) { 1588 u32 prio; 1589 1590 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); 1591 link->priority = prio; 1592 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio); 1593 } 1594 if (props[TIPC_NLA_PROP_WIN]) { 1595 u32 win; 1596 1597 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); 1598 tipc_link_set_queue_limits(link, win); 1599 } 1600 } 1601 1602 out: 1603 tipc_node_unlock(node); 1604 1605 return res; 1606 } 1607 1608 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s) 1609 { 1610 int i; 1611 struct nlattr *stats; 1612 1613 struct nla_map { 1614 u32 key; 1615 u32 val; 1616 }; 1617 1618 struct nla_map map[] = { 1619 {TIPC_NLA_STATS_RX_INFO, s->recv_info}, 1620 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments}, 1621 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented}, 1622 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles}, 1623 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled}, 1624 {TIPC_NLA_STATS_TX_INFO, s->sent_info}, 1625 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments}, 1626 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented}, 1627 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles}, 1628 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled}, 1629 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ? 1630 s->msg_length_counts : 1}, 1631 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts}, 1632 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total}, 1633 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]}, 1634 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]}, 1635 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]}, 1636 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]}, 1637 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]}, 1638 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]}, 1639 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]}, 1640 {TIPC_NLA_STATS_RX_STATES, s->recv_states}, 1641 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes}, 1642 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks}, 1643 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv}, 1644 {TIPC_NLA_STATS_TX_STATES, s->sent_states}, 1645 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes}, 1646 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks}, 1647 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks}, 1648 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted}, 1649 {TIPC_NLA_STATS_DUPLICATES, s->duplicates}, 1650 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs}, 1651 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz}, 1652 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ? 
1653 (s->accu_queue_sz / s->queue_sz_counts) : 0} 1654 }; 1655 1656 stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS); 1657 if (!stats) 1658 return -EMSGSIZE; 1659 1660 for (i = 0; i < ARRAY_SIZE(map); i++) 1661 if (nla_put_u32(skb, map[i].key, map[i].val)) 1662 goto msg_full; 1663 1664 nla_nest_end(skb, stats); 1665 1666 return 0; 1667 msg_full: 1668 nla_nest_cancel(skb, stats); 1669 1670 return -EMSGSIZE; 1671 } 1672 1673 /* Caller should hold appropriate locks to protect the link */ 1674 static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, 1675 struct tipc_link *link, int nlflags) 1676 { 1677 int err; 1678 void *hdr; 1679 struct nlattr *attrs; 1680 struct nlattr *prop; 1681 struct tipc_net *tn = net_generic(net, tipc_net_id); 1682 1683 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 1684 nlflags, TIPC_NL_LINK_GET); 1685 if (!hdr) 1686 return -EMSGSIZE; 1687 1688 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); 1689 if (!attrs) 1690 goto msg_full; 1691 1692 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name)) 1693 goto attr_msg_full; 1694 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, 1695 tipc_cluster_mask(tn->own_addr))) 1696 goto attr_msg_full; 1697 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) 1698 goto attr_msg_full; 1699 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt)) 1700 goto attr_msg_full; 1701 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt)) 1702 goto attr_msg_full; 1703 1704 if (tipc_link_is_up(link)) 1705 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) 1706 goto attr_msg_full; 1707 if (tipc_link_is_active(link)) 1708 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE)) 1709 goto attr_msg_full; 1710 1711 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); 1712 if (!prop) 1713 goto attr_msg_full; 1714 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) 1715 goto prop_msg_full; 1716 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) 1717 goto prop_msg_full; 1718 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, 1719 link->window)) 1720 goto prop_msg_full; 1721 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) 1722 goto prop_msg_full; 1723 nla_nest_end(msg->skb, prop); 1724 1725 err = __tipc_nl_add_stats(msg->skb, &link->stats); 1726 if (err) 1727 goto attr_msg_full; 1728 1729 nla_nest_end(msg->skb, attrs); 1730 genlmsg_end(msg->skb, hdr); 1731 1732 return 0; 1733 1734 prop_msg_full: 1735 nla_nest_cancel(msg->skb, prop); 1736 attr_msg_full: 1737 nla_nest_cancel(msg->skb, attrs); 1738 msg_full: 1739 genlmsg_cancel(msg->skb, hdr); 1740 1741 return -EMSGSIZE; 1742 } 1743 1744 /* Caller should hold node lock */ 1745 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg, 1746 struct tipc_node *node, u32 *prev_link) 1747 { 1748 u32 i; 1749 int err; 1750 1751 for (i = *prev_link; i < MAX_BEARERS; i++) { 1752 *prev_link = i; 1753 1754 if (!node->links[i].link) 1755 continue; 1756 1757 err = __tipc_nl_add_link(net, msg, 1758 node->links[i].link, NLM_F_MULTI); 1759 if (err) 1760 return err; 1761 } 1762 *prev_link = 0; 1763 1764 return 0; 1765 } 1766 1767 int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) 1768 { 1769 struct net *net = sock_net(skb->sk); 1770 struct tipc_net *tn = net_generic(net, tipc_net_id); 1771 struct tipc_node *node; 1772 struct tipc_nl_msg msg; 1773 u32 prev_node = cb->args[0]; 1774 u32 prev_link = cb->args[1]; 1775 int done = cb->args[2]; 1776 int err; 1777 1778 if (done) 1779 return 0; 1780 1781 msg.skb = skb; 1782 
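	/* The dump is resumed across multiple netlink messages: cb->args[0..2]
	 * carry the last visited node, link index and completion flag, read
	 * as prev_node, prev_link and done above and stored back below.
	 */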
msg.portid = NETLINK_CB(cb->skb).portid; 1783 msg.seq = cb->nlh->nlmsg_seq; 1784 1785 rcu_read_lock(); 1786 if (prev_node) { 1787 node = tipc_node_find(net, prev_node); 1788 if (!node) { 1789 /* We never set seq or call nl_dump_check_consistent() 1790 * this means that setting prev_seq here will cause the 1791 * consistence check to fail in the netlink callback 1792 * handler. Resulting in the last NLMSG_DONE message 1793 * having the NLM_F_DUMP_INTR flag set. 1794 */ 1795 cb->prev_seq = 1; 1796 goto out; 1797 } 1798 tipc_node_put(node); 1799 1800 list_for_each_entry_continue_rcu(node, &tn->node_list, 1801 list) { 1802 tipc_node_lock(node); 1803 err = __tipc_nl_add_node_links(net, &msg, node, 1804 &prev_link); 1805 tipc_node_unlock(node); 1806 if (err) 1807 goto out; 1808 1809 prev_node = node->addr; 1810 } 1811 } else { 1812 err = tipc_nl_add_bc_link(net, &msg); 1813 if (err) 1814 goto out; 1815 1816 list_for_each_entry_rcu(node, &tn->node_list, list) { 1817 tipc_node_lock(node); 1818 err = __tipc_nl_add_node_links(net, &msg, node, 1819 &prev_link); 1820 tipc_node_unlock(node); 1821 if (err) 1822 goto out; 1823 1824 prev_node = node->addr; 1825 } 1826 } 1827 done = 1; 1828 out: 1829 rcu_read_unlock(); 1830 1831 cb->args[0] = prev_node; 1832 cb->args[1] = prev_link; 1833 cb->args[2] = done; 1834 1835 return skb->len; 1836 } 1837 1838 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) 1839 { 1840 struct net *net = genl_info_net(info); 1841 struct tipc_nl_msg msg; 1842 char *name; 1843 int err; 1844 1845 msg.portid = info->snd_portid; 1846 msg.seq = info->snd_seq; 1847 1848 if (!info->attrs[TIPC_NLA_LINK_NAME]) 1849 return -EINVAL; 1850 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); 1851 1852 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1853 if (!msg.skb) 1854 return -ENOMEM; 1855 1856 if (strcmp(name, tipc_bclink_name) == 0) { 1857 err = tipc_nl_add_bc_link(net, &msg); 1858 if (err) { 1859 nlmsg_free(msg.skb); 1860 return err; 1861 } 1862 } else { 1863 int bearer_id; 1864 struct tipc_node *node; 1865 struct tipc_link *link; 1866 1867 node = tipc_link_find_owner(net, name, &bearer_id); 1868 if (!node) 1869 return -EINVAL; 1870 1871 tipc_node_lock(node); 1872 link = node->links[bearer_id].link; 1873 if (!link) { 1874 tipc_node_unlock(node); 1875 nlmsg_free(msg.skb); 1876 return -EINVAL; 1877 } 1878 1879 err = __tipc_nl_add_link(net, &msg, link, 0); 1880 tipc_node_unlock(node); 1881 if (err) { 1882 nlmsg_free(msg.skb); 1883 return err; 1884 } 1885 } 1886 1887 return genlmsg_reply(msg.skb, info); 1888 } 1889 1890 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info) 1891 { 1892 int err; 1893 char *link_name; 1894 unsigned int bearer_id; 1895 struct tipc_link *link; 1896 struct tipc_node *node; 1897 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; 1898 struct net *net = sock_net(skb->sk); 1899 1900 if (!info->attrs[TIPC_NLA_LINK]) 1901 return -EINVAL; 1902 1903 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, 1904 info->attrs[TIPC_NLA_LINK], 1905 tipc_nl_link_policy); 1906 if (err) 1907 return err; 1908 1909 if (!attrs[TIPC_NLA_LINK_NAME]) 1910 return -EINVAL; 1911 1912 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]); 1913 1914 if (strcmp(link_name, tipc_bclink_name) == 0) { 1915 err = tipc_bclink_reset_stats(net); 1916 if (err) 1917 return err; 1918 return 0; 1919 } 1920 1921 node = tipc_link_find_owner(net, link_name, &bearer_id); 1922 if (!node) 1923 return -EINVAL; 1924 1925 tipc_node_lock(node); 1926 1927 link = node->links[bearer_id].link; 1928 if 
(!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}
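/*
 * Illustration of tipc_link_set_queue_limits() above, assuming an example
 * window of 50 packets: the per-importance backlog limits become 25 (LOW),
 * 50 (MEDIUM), 75 (HIGH) and 100 (CRITICAL) packets, while SYSTEM
 * importance is bounded by TIPC_MAX_PUBLICATIONS / (mtu / ITEM_SIZE).
 */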