/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_info;		/* used in counting # sent packets */
	u32 recv_info;		/* used in counting # recv'd packets */
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};
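/* Note that the queue size and message length fields above are plain
 * accumulators; averages such as TIPC_NLA_STATS_AVG_QUEUE are computed
 * only when the statistics are dumped over netlink, see
 * __tipc_nl_add_stats() below.
 */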
/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @net: pointer to namespace struct
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @keepalive_intv: link keepalive timer interval
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @active: marks whether link is currently an active link
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @drop_point: seq # up to which tunnelled messages are dropped at failover
 * @failover_reasm_skb: saved failover reassembly buffer for later handling
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @backlog: backlog queue lengths and congestion thresholds (per importance)
 * @snd_nxt: next sequence number to use for outbound messages
 * @last_retransm: sequence number of most recently retransmitted message
 * @window: current send window of link
 * @stale_count: # of identical retransmit requests made by peer
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
 * @deferdq: deferred queue for out-of-sequence messages received from peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: last packet acked by a certain peer. Used for broadcast.
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @nack_state: current state of the broadcast NACK send machinery
 * @bc_peer_is_up: peer endpoint of the broadcast link is known to be up
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct tipc_media_addr *media_addr;
	struct net *net;

	/* Management and link supervision data */
	u32 peer_session;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	unsigned long keepalive_intv;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool active;
	u32 silent_intv_cnt;
	struct {
		unchar hdr[INT_H_SIZE];
		unchar body[TIPC_MAX_IF_NAME];
	} proto_msg;
	struct tipc_msg *pmsg;
	u32 priority;
	char net_plane;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
	} backlog[5];
	u16 snd_nxt;
	u16 last_retransm;
	u16 window;
	u32 stale_count;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	int nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]	= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]	= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]	= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]	= { .type = NLA_U32 }
};

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000
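/* The FSM state values below are chosen so that each state occupies its
 * own 4-bit field of the state word. This lets any subset of states be
 * tested with a single bitwise AND, as link_is_up() and
 * tipc_link_is_blocked() do further down.
 */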
/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED     = 0xe,
	LINK_ESTABLISHING    = 0xe  << 4,
	LINK_RESET           = 0x1  << 8,
	LINK_RESETTING       = 0x2  << 12,
	LINK_PEER_RESET      = 0xd  << 16,
	LINK_FAILINGOVER     = 0xf  << 20,
	LINK_SYNCHING        = 0xc  << 24
};

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

int tipc_link_is_active(struct tipc_link *l)
{
	return l->active;
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}
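/* Broadcast peer tracking: the broadcast send link keeps one "ackers"
 * reference per known peer, and each transmitted buffer carries that count
 * in TIPC_SKB_CB(skb)->ackers. A broadcast packet is released only when
 * every peer has acknowledged it, see tipc_link_bc_ack_rcv() below.
 */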
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

static u32 link_own_addr(struct tipc_link *l)
{
	return msg_prevnode(l->pmsg);
}

void tipc_link_reinit(struct tipc_link *l, u32 addr)
{
	msg_set_prevnode(l->pmsg, addr);
}

/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A, B, C, ...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 ownnode, u32 peer,
		      u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;
	struct tipc_msg *hdr;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->pmsg = (struct tipc_msg *)&l->proto_msg;
	hdr = l->pmsg;
	tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
	msg_set_size(hdr, sizeof(l->proto_msg));
	msg_set_session(hdr, session);
	msg_set_bearer_id(hdr, l->bearer_id);

	/* Note: peer i/f name is completed by reset/activate message */
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	strcpy((char *)msg_data(hdr), if_name);

	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->peer_session = WILDCARD_SESSION;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}
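/* A minimal usage sketch, illustrative only: the real caller lives in the
 * node layer and supplies values taken from the bearer and the discovered
 * peer. All variables below (b, session, ownnode, peer, ...) are
 * hypothetical placeholders:
 *
 *	struct tipc_link *l;
 *
 *	if (!tipc_link_create(net, "eth0", b->identity, b->tolerance,
 *			      'A', b->mtu, b->priority, b->window,
 *			      session, ownnode, peer, peer_caps,
 *			      bc_sndlink, bc_rcvlink, inputq, namedq, &l))
 *		return -ENOMEM;
 */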
/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @mtu: mtu to be used
 * @window: send window to be used
 * @peer_caps: bitmap describing peer node capabilities
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			/* fall through */
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout.
 * A probe is sent once the link has been silent for one interval; the link
 * is declared failed after more than abort_limit silent intervals.
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int rc = 0;
	int mtyp = STATE_MSG;
	bool xmit = false;
	bool prb = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	bool bc_up = link_is_up(l->bc_rcvlink);

	link_profile_stats(l);

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		if (!l->silent_intv_cnt) {
			if (bc_up && (bc_acked != bc_snt))
				xmit = true;
		} else if (l->silent_intv_cnt <= l->abort_limit) {
			xmit = true;
			prb = true;
		} else {
			rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		xmit = true;
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		xmit = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (xmit)
		tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);

	return rc;
}
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message whose transmission was attempted
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue (wakeupq) to input queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
	}
}
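/* tipc_link_reset - reset link to initial state
 *
 * Bumps the session number, restores the advertised MTU, purges the
 * transmit, backlog and deferred-receive queues, moves pending wakeup
 * messages to the input queue, and clears reassembly state and counters.
 */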
void tipc_link_reset(struct tipc_link *l)
{
	/* Link is down, accept any session */
	l->peer_session = WILDCARD_SESSION;

	/* If peer is up, it only accepts an incremented session number */
	msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);

	/* Prepare for renewed mtu size negotiation */
	l->mtu = l->advertised_mtu;

	/* Clean up all queues and counters: */
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->stats.recv_info = 0;
	l->stale_count = 0;
	l->bc_peer_is_up = false;
	tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb)
				return -ENOBUFS;
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}
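/* tipc_link_advance_backlog - move packets from the backlog queue to the
 * transmit queue, as far as the send window permits, stamping each with
 * the current ack and sequence numbers. A clone of each moved packet is
 * put on @xmitq for actual transmission by the caller.
 */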
void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "Resetting link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}
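/* tipc_link_retrans - retransmit the range [from, to] from the transmit
 * queue. Repeated (> 100) retransmit requests for the same head-of-queue
 * packet are treated as a link failure, see link_retransmit_failure()
 * above.
 */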
int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
		      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	/* Move forward to where retransmission should start */
	skb_queue_walk(&l->transmq, skb) {
		if (!less(buf_seqno(skb), from))
			break;
	}

	skb_queue_walk_from(&l->transmq, skb) {
		if (more(buf_seqno(skb), to))
			break;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	switch (msg_user(buf_msg(skb))) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	}
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}
drop:
	kfree_skb(skb);
	return 0;
}
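/* tipc_link_release_pkts - release transmitted packets up to and including
 * sequence number @acked. Returns true if at least one packet was freed,
 * in which case the caller may advance the backlog queue.
 */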
static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ link_own_addr(l)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;
		return TIPC_LINK_SND_BC_ACK;
	}

	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	return 0;
}

/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 */
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;

	if (link_is_bc_rcvlink(l))
		return;

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_ack_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}

/*
 * Send protocol message to the other endpoint.
 */
static void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ,
				 int probe_msg, u32 gap, u32 tolerance,
				 u32 priority)
{
	struct sk_buff *skb = NULL;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
				  tolerance, priority, &xmitq);
	skb = __skb_dequeue(&xmitq);
	if (!skb)
		return;
	tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr);
	l->rcv_unacked = 0;
}

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	bool node_up = link_is_up(l->bc_rcvlink);

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - l->rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_next_sent(hdr, 1);
	}
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}
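/* Failover/synch tunneling: each packet from the old link's transmit and
 * backlog queues is wrapped verbatim into a TUNNEL_PROTOCOL message, i.e.
 * an INT_H_SIZE tunnel header immediately followed by the original packet,
 * and sent over the new link. See tipc_link_tnl_prepare() below.
 */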
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}
/* tipc_link_proto_rcv(): receive link level protocol message:
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	int mtyp = msg_type(hdr);
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (link_own_addr(l) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (mtyp) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != WILDCARD_SESSION))
			break;
		/* fall thru' */

	case ACTIVATE_MSG:
		skb_linearize(skb);
		hdr = buf_msg(skb);

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}
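/* Broadcast link handling. A BCAST_PROTOCOL STATE message doubles as both
 * acknowledgment (bcast_ack) and NACK (the bcgap_after/bcgap_to range), and
 * the BC_NACK_SND_* states defined at the top of this file throttle NACK
 * sending, see tipc_link_bc_sync_rcv() below.
 */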
/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, link_own_addr(l), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}

/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}

/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			   struct sk_buff_head *xmitq)
{
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (!link_is_up(l))
		return;

	if (!msg_peer_node_is_up(hdr))
		return;

	l->bc_peer_is_up = true;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return;

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
}
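/* tipc_link_bc_ack_rcv - process acknowledgment from a broadcast receiver.
 * Each packet in the send link's transmit queue carries an "ackers" count;
 * a packet is unlinked and freed once every peer has acknowledged it.
 */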
void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)
		return;

	if (!more(acked, l->acked))
		return;

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))
			break;
	}

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);
			kfree_skb(skb);
		}
	}
	l->acked = acked;
	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
}

/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == link_own_addr(l)) {
		tipc_link_bc_ack_rcv(l, acked, xmitq);
		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}
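/* tipc_link_set_queue_limits - set send window and per-importance backlog
 * limits. The thresholds scale with the window: win/2 for low importance,
 * win for medium, 1.5 * win for high and 2 * win for critical traffic,
 * while system messages get room for a full bulk name-table distribution
 * (TIPC_MAX_PUBLICATIONS / items-per-packet).
 */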
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}

/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
	if (!link_is_bc_sndlink(l)) {
		l->stats.sent_info = l->snd_nxt;
		l->stats.recv_info = l->rcv_nxt;
	}
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
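/* Typical caller pattern for the above (a sketch only; the real callers
 * are the netlink set handlers for bearer, media and link, outside this
 * function, and "attrs" is a hypothetical parsed attribute array):
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *	int err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
 *
 *	if (err)
 *		return err;
 *	if (props[TIPC_NLA_PROP_TOL])
 *		tipc_link_set_tolerance(l, nla_get_u32(props[TIPC_NLA_PROP_TOL]));
 */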
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

void tipc_link_set_tolerance(struct tipc_link *l, u32 tol)
{
	l->tolerance = tol;
	tipc_link_proto_xmit(l, STATE_MSG, 0, 0, tol, 0);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio)
{
	l->priority = prio;
	tipc_link_proto_xmit(l, STATE_MSG, 0, 0, 0, prio);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}