/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_pkts;
	u32 recv_pkts;
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @prev_from: sequence number of most previous retransmission request
 * @stale_cnt: counter for number of identical retransmit attempts
 * @stale_limit: time when repeated identical retransmits must force link reset
 * @ackers: # of peers that needs to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue saved OOS b'cast message received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u16 peer_session;
	u16 session;
	u16 snd_nxt_state;
	u16 rcv_nxt_state;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool in_session;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
	} backlog[5];
	u16 snd_nxt;
	u16 prev_from;
	u16 window;
	u16 stale_cnt;
	unsigned long stale_limit;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	u8 nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIM msecs_to_jiffies(10)   /* [ms] */

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED  = 0xe,
	LINK_ESTABLISHING = 0xe << 4,
	LINK_RESET        = 0x1 << 8,
	LINK_RESETTING    = 0x2 << 12,
	LINK_PEER_RESET   = 0xd << 16,
	LINK_FAILINGOVER  = 0xf << 20,
	LINK_SYNCHING     = 0xc << 24
};
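
/* Each FSM state above occupies its own nibble of the 32 bit state word, so
 * the values are mutually bit-disjoint. This lets the checking routines below
 * test membership of a whole set of states with a single mask, e.g.
 * "l->state & (LINK_ESTABLISHED | LINK_SYNCHING)" in link_is_up().
 */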

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
	l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}
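
/* Worked example for link_bc_rcv_gap(): with rcv_nxt == 100 and the peer's
 * snd_nxt == 105 the provisional gap is 5; if a deferred packet with seqno
 * 103 is already queued, the reported gap becomes 103 - 100 = 3, i.e. the
 * distance to the first packet actually missing. All sequence arithmetic is
 * modulo 2^16, with more()/less() handling wrap-around.
 */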

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
	return l->state;
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,c..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 self,
		      u32 peer, u8 *peer_id, u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	char peer_str[NODE_ID_STR_LEN] = {0,};
	char self_str[NODE_ID_STR_LEN] = {0,};
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Set link name for unicast links only */
	if (peer_id) {
		tipc_nodeid2string(self_str, tipc_own_id(net));
		if (strlen(self_str) > 16)
			sprintf(self_str, "%x", self);
		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
	}
	/* Peer i/f name will be completed by reset/activate message */
	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
		 self_str, if_name, peer_str);

	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->in_session = false;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	if (bc_rcvlink)
		bc_rcvlink->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used initially if no peers
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, NULL, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_disable_rcast(net);

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}
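
/* A typical life cycle of a link endpoint as driven by the FSM above
 * (sketch; other transitions exist):
 *
 *   LINK_RESET        --LINK_PEER_RESET_EVT--> LINK_ESTABLISHING
 *   LINK_ESTABLISHING --LINK_ESTABLISH_EVT-->  LINK_ESTABLISHED
 *   LINK_ESTABLISHED  --LINK_FAILURE_EVT-->    LINK_RESETTING (+ TIPC_LINK_DOWN_EVT)
 *   LINK_RESETTING    --LINK_RESET_EVT-->      LINK_RESET
 */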

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		state |= !skb_queue_empty(&l->deferdq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

	return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff *skb, *tmp;
	int imp, i = 0;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (l->backlog[imp].len < l->backlog[imp].limit) {
			skb_unlink(skb, &l->wakeupq);
			skb_queue_tail(l->inputq, skb);
		} else if (i++ > 10) {
			break;
		}
	}
}
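
/* Congestion handling in short: when tipc_link_xmit() finds the backlog level
 * for a message's importance full, link_schedule_user() queues a SOCK_WAKEUP
 * pseudo message on l->wakeupq and returns -ELINKCONG to the sender. Once
 * acks have drained the backlog, link_prepare_wakeup() moves those pseudo
 * messages to l->inputq, where the socket layer uses them to wake the
 * blocked senders.
 */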

void tipc_link_reset(struct tipc_link *l)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);

	l->in_session = false;
	l->session++;
	l->mtu = l->advertised_mtu;

	spin_lock_bh(&l->wakeupq.lock);
	skb_queue_splice_init(&l->wakeupq, &list);
	spin_unlock_bh(&l->wakeupq.lock);

	spin_lock_bh(&l->inputq->lock);
	skb_queue_splice_init(&list, l->inputq);
	spin_unlock_bh(&l->inputq->lock);

	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->snd_nxt_state = 1;
	l->rcv_nxt_state = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->stale_cnt = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	int imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;
	int pkt_cnt = skb_queue_len(list);
	int rc = 0;

	if (unlikely(msg_size(hdr) > mtu)) {
		skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return rc;
}
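
/* Send path summary: as long as the transmit queue holds fewer packets than
 * the link window, each packet is cloned, put on transmq (awaiting ack) and
 * on the caller's xmitq (for immediate transmission). Packets beyond the
 * window are bundled into an existing backlog packet when possible, otherwise
 * appended to backlogq and sent later by tipc_link_advance_backlog().
 */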

static void tipc_link_advance_backlog(struct tipc_link *l,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

/* tipc_link_retrans() - retransmit one or more packets
 * @l: the link to transmit on
 * @r: the receiving link ordering the retransmit. Same as l if unicast
 * @from: retransmit from (inclusive) this sequence number
 * @to: retransmit to (inclusive) this sequence number
 * @xmitq: queue for accumulating the retransmitted packets
 */
static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
			     u16 from, u16 to, struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	struct tipc_msg *hdr;

	if (!skb)
		return 0;
	if (less(to, from))
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (r->prev_from != from) {
		r->prev_from = from;
		r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
		r->stale_cnt = 0;
	} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
		link_retransmit_failure(l, skb);
		if (link_is_bc_sndlink(l))
			return TIPC_LINK_DOWN_EVT;
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	skb_queue_walk(&l->transmq, skb) {
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), from))
			continue;
		if (more(msg_seqno(hdr), to))
			break;
		if (link_is_bc_sndlink(l)) {
			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
				continue;
			TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;
		}
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}
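
/* Repeated-retransmit detection: the first request for a given 'from' seqno
 * arms a time window of r->tolerance ms. If more than 99 further requests for
 * the same seqno arrive and that window has expired without the packet being
 * acked, the link (or, for the broadcast send link, the peer) is declared
 * failed.
 */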

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
	struct tipc_msg *hdr = buf_msg(skb);

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
			skb_queue_tail(mc_inputq, skb);
			return true;
		}
		/* else: fall through */
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case GROUP_PROTOCOL:
		skb_queue_tail(mc_inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}
drop:
	kfree_skb(skb);
	return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;

		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
		l->snd_nxt = l->rcv_nxt;
		return TIPC_LINK_SND_STATE;
	}

	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
	return 0;
}
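
/* Broadcast ack coordination example: a node asks for a broadcast ack to be
 * sent only when the low four bits of its own address are the bitwise
 * complement of the low four bits of rcv_nxt, i.e. for a given rcv_nxt only
 * one of the 16 possible low-nibble address values qualifies. As rcv_nxt
 * advances, the acking duty rotates among receivers, which staggers the acks
 * towards the broadcast sender.
 */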

/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;
	int match1, match2;

	if (link_is_bc_rcvlink(l)) {
		match1 = def_cnt & 0xf;
		match2 = tipc_own_addr(l->net) & 0xf;
		if (match1 == match2)
			return TIPC_LINK_SND_STATE;
		return 0;
	}

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
	return 0;
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			l->stale_cnt = 0;
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			rc |= tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_pkts++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}
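
/* Receive path summary: an in-sequence packet is delivered upwards and
 * rcv_nxt advances; any deferred packets that have now become in-sequence are
 * drained from deferdq in the same loop. An out-of-sequence packet is parked
 * in deferdq (sorted by seqno) and may trigger a NACK, while roughly every
 * TIPC_MIN_LINK_WIN delivered packets an explicit acknowledge is generated
 * via tipc_link_build_state_msg().
 */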

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct tipc_link *bcl = l->bc_rcvlink;
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff_head *dfq = &l->deferdq;
	bool node_up = link_is_up(bcl);
	struct tipc_mon_state *mstate = &l->mon_state;
	int dlen = 0;
	void *data;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if (!skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      tipc_max_domain_size, l->addr,
			      tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	data = msg_data(hdr);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_ack_invalid(hdr, !node_up);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
			msg_set_seqno(hdr, l->snd_nxt_state++);
		msg_set_seq_gap(hdr, rcvgap);
		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
		msg_set_probe(hdr, probe);
		msg_set_is_keepalive(hdr, probe || probe_reply);
		tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
		msg_set_size(hdr, INT_H_SIZE + dlen);
		skb_trim(skb, INT_H_SIZE + dlen);
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(data, l->if_name);
		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}

void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 onode = tipc_own_addr(l->net);
	struct tipc_msg *hdr, *ihdr;
	struct sk_buff_head tnlq;
	struct sk_buff *skb;
	u32 dnode = l->addr;

	skb_queue_head_init(&tnlq);
	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
			      INT_H_SIZE, BASIC_H_SIZE,
			      dnode, onode, 0, 0, 0);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}

	hdr = buf_msg(skb);
	msg_set_msgcnt(hdr, 1);
	msg_set_bearer_id(hdr, l->peer_bearer_id);

	ihdr = (struct tipc_msg *)msg_data(hdr);
	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
		      BASIC_H_SIZE, dnode);
	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
	__skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, xmitq);
}

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}
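
/* Failover/synch tunneling in short: every packet still sitting in the old
 * link's transmq and backlogq is wrapped inside a TUNNEL_PROTOCOL message and
 * sent over the parallel (tunnel) link. For FAILOVER_MSG the tunnel link also
 * records drop_point and inherits the partially reassembled buffer, so that
 * packets the old link already delivered are dropped and fragment reassembly
 * can continue on the new link.
 */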

/* tipc_link_validate_msg(): validate message against current link state
 * Returns true if message should be accepted, otherwise false
 */
bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
{
	u16 curr_session = l->peer_session;
	u16 session = msg_session(hdr);
	int mtyp = msg_type(hdr);

	if (msg_user(hdr) != LINK_PROTOCOL)
		return true;

	switch (mtyp) {
	case RESET_MSG:
		if (!l->in_session)
			return true;
		/* Accept only RESET with new session number */
		return more(session, curr_session);
	case ACTIVATE_MSG:
		if (!l->in_session)
			return true;
		/* Accept only ACTIVATE with new or current session number */
		return !less(session, curr_session);
	case STATE_MSG:
		/* Accept only STATE with current session number */
		if (!l->in_session)
			return false;
		if (session != curr_session)
			return false;
		/* Extra sanity check */
		if (!link_is_up(l) && msg_ack(hdr))
			return false;
		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
			return true;
		/* Accept only STATE with new sequence number */
		return !less(msg_seqno(hdr), l->rcv_nxt_state);
	default:
		return false;
	}
}

/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	u16 dlen = msg_data_sz(hdr);
	int mtyp = msg_type(hdr);
	bool reply = msg_probe(hdr);
	void *data;
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	skb_linearize(skb);
	hdr = buf_msg(skb);
	data = msg_data(hdr);

	if (!tipc_link_validate_msg(l, hdr))
		goto exit;

	switch (mtyp) {
	case RESET_MSG:
	case ACTIVATE_MSG:
		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, data, TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
			l->tolerance = peers_tol;
			l->bc_rcvlink->tolerance = peers_tol;
		}
		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* If peer is going down we want full re-establish cycle */
		if (msg_peer_stopping(hdr)) {
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
			break;
		}
		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (mtyp == RESET_MSG || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->in_session = true;
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:
		l->rcv_nxt_state = msg_seqno(hdr) + 1;

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
			l->tolerance = peers_tol;
			l->bc_rcvlink->tolerance = peers_tol;
		}
		/* Update own prio if peer indicates a different value */
		if ((peers_prio != l->priority) &&
		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}
		tipc_mon_rcv(l->net, data, dlen, l->addr,
			     &l->mon_state, l->bearer_id);

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || reply)
			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
						  rcvgap, 0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}

/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
	tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}

/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			  struct sk_buff_head *xmitq)
{
	struct tipc_link *snd_l = l->bc_sndlink;
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
	u16 from = msg_bcast_ack(hdr) + 1;
	u16 to = from + msg_bc_gap(hdr) - 1;
	int rc = 0;

	if (!link_is_up(l))
		return rc;

	if (!msg_peer_node_is_up(hdr))
		return rc;

	/* Open when peer acknowledges our bcast init msg (pkt #1) */
	if (msg_ack(hdr))
		l->bc_peer_is_up = true;

	if (!l->bc_peer_is_up)
		return rc;

	l->stats.recv_nacks++;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return rc;

	rc = tipc_link_retrans(snd_l, l, from, to, xmitq);

	l->snd_nxt = peers_snd_nxt;
	if (link_bc_rcv_gap(l))
		rc |= TIPC_LINK_SND_STATE;

	/* Return now if sender supports nack via STATE messages */
	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
		return rc;

	/* Otherwise, be backwards compatible */

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return 0;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return 0;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return 0;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
	return 0;
}

void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)
		return;

	if (!more(acked, l->acked))
		return;

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))
			break;
	}

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);
			kfree_skb(skb);
		}
	}
	l->acked = acked;
	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
}

/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * This function is here for backwards compatibility, since
 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		tipc_link_bc_ack_rcv(l, acked, xmitq);
		rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
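
/* Example: with win == 50 the limits above become 50 packets for LOW, 100 for
 * MEDIUM, 150 for HIGH and 200 for CRITICAL importance, while SYSTEM
 * importance is bounded by roughly the number of packets needed to carry
 * TIPC_MAX_PUBL name table items of ITEM_SIZE each at the current MTU.
 */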
/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
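/* Usage sketch (assumed caller pattern, not taken from this file): the
 * helper above only range-checks PRIO, TOL and WIN; applying the values is
 * left to the caller, roughly along the lines of:
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *	int err;
 *
 *	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
 *	if (err)
 *		return err;
 *	if (props[TIPC_NLA_PROP_TOL])
 *		tipc_link_set_tolerance(l, nla_get_u32(props[TIPC_NLA_PROP_TOL]),
 *					&xmitq);
 */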
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	u32 self = tipc_own_addr(net);
	struct nlattr *attrs;
	struct nlattr *prop;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
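/* Note on the error handling above (editorial summary): the dump helpers
 * follow the usual netlink convention of unwinding in reverse nesting
 * order. A failed nla_put_*() inside the property nest jumps to
 * prop_msg_full, which cancels that nest before falling through to
 * attr_msg_full and msg_full, so the partially built message is removed
 * from the skb and -EMSGSIZE is returned to signal that the current buffer
 * could not hold the attributes.
 */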
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (l->bc_rcvlink)
		l->bc_rcvlink->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}
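/* Usage sketch (assumed caller pattern, not taken from this file): the
 * setters above only update link state and, for tolerance and priority,
 * queue a STATE message on the caller-supplied xmitq. The caller owns that
 * queue and is expected to transmit it afterwards, with maddr being the
 * peer's media address, roughly:
 *
 *	struct sk_buff_head xmitq;
 *
 *	__skb_queue_head_init(&xmitq);
 *	tipc_link_set_tolerance(l, new_tol, &xmitq);
 *	tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
 */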