1 /* 2 * net/tipc/link.c: TIPC link code 3 * 4 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB 5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include "core.h" 38 #include "subscr.h" 39 #include "link.h" 40 #include "bcast.h" 41 #include "socket.h" 42 #include "name_distr.h" 43 #include "discover.h" 44 #include "netlink.h" 45 #include "monitor.h" 46 47 #include <linux/pkt_sched.h> 48 49 struct tipc_stats { 50 u32 sent_pkts; 51 u32 recv_pkts; 52 u32 sent_states; 53 u32 recv_states; 54 u32 sent_probes; 55 u32 recv_probes; 56 u32 sent_nacks; 57 u32 recv_nacks; 58 u32 sent_acks; 59 u32 sent_bundled; 60 u32 sent_bundles; 61 u32 recv_bundled; 62 u32 recv_bundles; 63 u32 retransmitted; 64 u32 sent_fragmented; 65 u32 sent_fragments; 66 u32 recv_fragmented; 67 u32 recv_fragments; 68 u32 link_congs; /* # port sends blocked by congestion */ 69 u32 deferred_recv; 70 u32 duplicates; 71 u32 max_queue_sz; /* send queue size high water mark */ 72 u32 accu_queue_sz; /* used for send queue size profiling */ 73 u32 queue_sz_counts; /* used for send queue size profiling */ 74 u32 msg_length_counts; /* used for message length profiling */ 75 u32 msg_lengths_total; /* used for message length profiling */ 76 u32 msg_length_profile[7]; /* used for msg. length profiling */ 77 }; 78 79 /** 80 * struct tipc_link - TIPC link data structure 81 * @addr: network address of link's peer node 82 * @name: link name character string 83 * @media_addr: media address to use when sending messages over link 84 * @timer: link timer 85 * @net: pointer to namespace struct 86 * @refcnt: reference counter for permanent references (owner node & timer) 87 * @peer_session: link session # being used by peer end of link 88 * @peer_bearer_id: bearer id used by link's peer endpoint 89 * @bearer_id: local bearer id used by link 90 * @tolerance: minimum link continuity loss needed to reset link [in ms] 91 * @abort_limit: # of unacknowledged continuity probes needed to reset link 92 * @state: current state of link FSM 93 * @peer_caps: bitmap describing capabilities of peer node 94 * @silent_intv_cnt: # of timer intervals without any reception from peer 95 * @proto_msg: template for control messages generated by link 96 * @pmsg: convenience pointer to "proto_msg" field 97 * @priority: current link priority 98 * @net_plane: current link network plane ('A' through 'H') 99 * @mon_state: cookie with information needed by link monitor 100 * @backlog_limit: backlog queue congestion thresholds (indexed by importance) 101 * @exp_msg_count: # of tunnelled messages expected during link changeover 102 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset 103 * @mtu: current maximum packet size for this link 104 * @advertised_mtu: advertised own mtu when link is being established 105 * @transmitq: queue for sent, non-acked messages 106 * @backlogq: queue for messages waiting to be sent 107 * @snt_nxt: next sequence number to use for outbound messages 108 * @last_retransmitted: sequence number of most recently retransmitted message 109 * @stale_count: # of identical retransmit requests made by peer 110 * @ackers: # of peers that needs to ack each packet before it can be released 111 * @acked: # last packet acked by a certain peer. Used for broadcast. 112 * @rcv_nxt: next sequence number to expect for inbound messages 113 * @deferred_queue: deferred queue saved OOS b'cast message received from node 114 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer 115 * @inputq: buffer queue for messages to be delivered upwards 116 * @namedq: buffer queue for name table messages to be delivered upwards 117 * @next_out: ptr to first unsent outbound message in queue 118 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate 119 * @long_msg_seq_no: next identifier to use for outbound fragmented messages 120 * @reasm_buf: head of partially reassembled inbound message fragments 121 * @bc_rcvr: marks that this is a broadcast receiver link 122 * @stats: collects statistics regarding link activity 123 */ 124 struct tipc_link { 125 u32 addr; 126 char name[TIPC_MAX_LINK_NAME]; 127 struct net *net; 128 129 /* Management and link supervision data */ 130 u32 peer_session; 131 u32 session; 132 u32 peer_bearer_id; 133 u32 bearer_id; 134 u32 tolerance; 135 u32 abort_limit; 136 u32 state; 137 u16 peer_caps; 138 bool active; 139 u32 silent_intv_cnt; 140 char if_name[TIPC_MAX_IF_NAME]; 141 u32 priority; 142 char net_plane; 143 struct tipc_mon_state mon_state; 144 u16 rst_cnt; 145 146 /* Failover/synch */ 147 u16 drop_point; 148 struct sk_buff *failover_reasm_skb; 149 150 /* Max packet negotiation */ 151 u16 mtu; 152 u16 advertised_mtu; 153 154 /* Sending */ 155 struct sk_buff_head transmq; 156 struct sk_buff_head backlogq; 157 struct { 158 u16 len; 159 u16 limit; 160 } backlog[5]; 161 u16 snd_nxt; 162 u16 last_retransm; 163 u16 window; 164 u32 stale_count; 165 166 /* Reception */ 167 u16 rcv_nxt; 168 u32 rcv_unacked; 169 struct sk_buff_head deferdq; 170 struct sk_buff_head *inputq; 171 struct sk_buff_head *namedq; 172 173 /* Congestion handling */ 174 struct sk_buff_head wakeupq; 175 176 /* Fragmentation/reassembly */ 177 struct sk_buff *reasm_buf; 178 179 /* Broadcast */ 180 u16 ackers; 181 u16 acked; 182 struct tipc_link *bc_rcvlink; 183 struct tipc_link *bc_sndlink; 184 unsigned long prev_retr; 185 u16 prev_from; 186 u16 prev_to; 187 u8 nack_state; 188 bool bc_peer_is_up; 189 190 /* Statistics */ 191 struct tipc_stats stats; 192 }; 193 194 /* 195 * Error message prefixes 196 */ 197 static const char *link_co_err = "Link tunneling error, "; 198 static const char *link_rst_msg = "Resetting link "; 199 200 /* Send states for broadcast NACKs 201 */ 202 enum { 203 BC_NACK_SND_CONDITIONAL, 204 BC_NACK_SND_UNCONDITIONAL, 205 BC_NACK_SND_SUPPRESS, 206 }; 207 208 #define TIPC_BC_RETR_LIMIT 10 /* [ms] */ 209 210 /* 211 * Interval between NACKs when packets arrive out of order 212 */ 213 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2) 214 215 /* Wildcard value for link session numbers. When it is known that 216 * peer endpoint is down, any session number must be accepted. 217 */ 218 #define ANY_SESSION 0x10000 219 220 /* Link FSM states: 221 */ 222 enum { 223 LINK_ESTABLISHED = 0xe, 224 LINK_ESTABLISHING = 0xe << 4, 225 LINK_RESET = 0x1 << 8, 226 LINK_RESETTING = 0x2 << 12, 227 LINK_PEER_RESET = 0xd << 16, 228 LINK_FAILINGOVER = 0xf << 20, 229 LINK_SYNCHING = 0xc << 24 230 }; 231 232 /* Link FSM state checking routines 233 */ 234 static int link_is_up(struct tipc_link *l) 235 { 236 return l->state & (LINK_ESTABLISHED | LINK_SYNCHING); 237 } 238 239 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, 240 struct sk_buff_head *xmitq); 241 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, 242 u16 rcvgap, int tolerance, int priority, 243 struct sk_buff_head *xmitq); 244 static void link_print(struct tipc_link *l, const char *str); 245 static int tipc_link_build_nack_msg(struct tipc_link *l, 246 struct sk_buff_head *xmitq); 247 static void tipc_link_build_bc_init_msg(struct tipc_link *l, 248 struct sk_buff_head *xmitq); 249 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to); 250 251 /* 252 * Simple non-static link routines (i.e. referenced outside this file) 253 */ 254 bool tipc_link_is_up(struct tipc_link *l) 255 { 256 return link_is_up(l); 257 } 258 259 bool tipc_link_peer_is_down(struct tipc_link *l) 260 { 261 return l->state == LINK_PEER_RESET; 262 } 263 264 bool tipc_link_is_reset(struct tipc_link *l) 265 { 266 return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING); 267 } 268 269 bool tipc_link_is_establishing(struct tipc_link *l) 270 { 271 return l->state == LINK_ESTABLISHING; 272 } 273 274 bool tipc_link_is_synching(struct tipc_link *l) 275 { 276 return l->state == LINK_SYNCHING; 277 } 278 279 bool tipc_link_is_failingover(struct tipc_link *l) 280 { 281 return l->state == LINK_FAILINGOVER; 282 } 283 284 bool tipc_link_is_blocked(struct tipc_link *l) 285 { 286 return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER); 287 } 288 289 static bool link_is_bc_sndlink(struct tipc_link *l) 290 { 291 return !l->bc_sndlink; 292 } 293 294 static bool link_is_bc_rcvlink(struct tipc_link *l) 295 { 296 return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l)); 297 } 298 299 int tipc_link_is_active(struct tipc_link *l) 300 { 301 return l->active; 302 } 303 304 void tipc_link_set_active(struct tipc_link *l, bool active) 305 { 306 l->active = active; 307 } 308 309 u32 tipc_link_id(struct tipc_link *l) 310 { 311 return l->peer_bearer_id << 16 | l->bearer_id; 312 } 313 314 int tipc_link_window(struct tipc_link *l) 315 { 316 return l->window; 317 } 318 319 int tipc_link_prio(struct tipc_link *l) 320 { 321 return l->priority; 322 } 323 324 unsigned long tipc_link_tolerance(struct tipc_link *l) 325 { 326 return l->tolerance; 327 } 328 329 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l) 330 { 331 return l->inputq; 332 } 333 334 char tipc_link_plane(struct tipc_link *l) 335 { 336 return l->net_plane; 337 } 338 339 void tipc_link_add_bc_peer(struct tipc_link *snd_l, 340 struct tipc_link *uc_l, 341 struct sk_buff_head *xmitq) 342 { 343 struct tipc_link *rcv_l = uc_l->bc_rcvlink; 344 345 snd_l->ackers++; 346 rcv_l->acked = snd_l->snd_nxt - 1; 347 snd_l->state = LINK_ESTABLISHED; 348 tipc_link_build_bc_init_msg(uc_l, xmitq); 349 } 350 351 void tipc_link_remove_bc_peer(struct tipc_link *snd_l, 352 struct tipc_link *rcv_l, 353 struct sk_buff_head *xmitq) 354 { 355 u16 ack = snd_l->snd_nxt - 1; 356 357 snd_l->ackers--; 358 rcv_l->bc_peer_is_up = true; 359 rcv_l->state = LINK_ESTABLISHED; 360 tipc_link_bc_ack_rcv(rcv_l, ack, xmitq); 361 tipc_link_reset(rcv_l); 362 rcv_l->state = LINK_RESET; 363 if (!snd_l->ackers) { 364 tipc_link_reset(snd_l); 365 snd_l->state = LINK_RESET; 366 __skb_queue_purge(xmitq); 367 } 368 } 369 370 int tipc_link_bc_peers(struct tipc_link *l) 371 { 372 return l->ackers; 373 } 374 375 u16 link_bc_rcv_gap(struct tipc_link *l) 376 { 377 struct sk_buff *skb = skb_peek(&l->deferdq); 378 u16 gap = 0; 379 380 if (more(l->snd_nxt, l->rcv_nxt)) 381 gap = l->snd_nxt - l->rcv_nxt; 382 if (skb) 383 gap = buf_seqno(skb) - l->rcv_nxt; 384 return gap; 385 } 386 387 void tipc_link_set_mtu(struct tipc_link *l, int mtu) 388 { 389 l->mtu = mtu; 390 } 391 392 int tipc_link_mtu(struct tipc_link *l) 393 { 394 return l->mtu; 395 } 396 397 u16 tipc_link_rcv_nxt(struct tipc_link *l) 398 { 399 return l->rcv_nxt; 400 } 401 402 u16 tipc_link_acked(struct tipc_link *l) 403 { 404 return l->acked; 405 } 406 407 char *tipc_link_name(struct tipc_link *l) 408 { 409 return l->name; 410 } 411 412 /** 413 * tipc_link_create - create a new link 414 * @n: pointer to associated node 415 * @if_name: associated interface name 416 * @bearer_id: id (index) of associated bearer 417 * @tolerance: link tolerance to be used by link 418 * @net_plane: network plane (A,B,c..) this link belongs to 419 * @mtu: mtu to be advertised by link 420 * @priority: priority to be used by link 421 * @window: send window to be used by link 422 * @session: session to be used by link 423 * @ownnode: identity of own node 424 * @peer: node id of peer node 425 * @peer_caps: bitmap describing peer node capabilities 426 * @bc_sndlink: the namespace global link used for broadcast sending 427 * @bc_rcvlink: the peer specific link used for broadcast reception 428 * @inputq: queue to put messages ready for delivery 429 * @namedq: queue to put binding table update messages ready for delivery 430 * @link: return value, pointer to put the created link 431 * 432 * Returns true if link was created, otherwise false 433 */ 434 bool tipc_link_create(struct net *net, char *if_name, int bearer_id, 435 int tolerance, char net_plane, u32 mtu, int priority, 436 int window, u32 session, u32 ownnode, u32 peer, 437 u16 peer_caps, 438 struct tipc_link *bc_sndlink, 439 struct tipc_link *bc_rcvlink, 440 struct sk_buff_head *inputq, 441 struct sk_buff_head *namedq, 442 struct tipc_link **link) 443 { 444 struct tipc_link *l; 445 446 l = kzalloc(sizeof(*l), GFP_ATOMIC); 447 if (!l) 448 return false; 449 *link = l; 450 l->session = session; 451 452 /* Note: peer i/f name is completed by reset/activate message */ 453 sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown", 454 tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode), 455 if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer)); 456 strcpy(l->if_name, if_name); 457 l->addr = peer; 458 l->peer_caps = peer_caps; 459 l->net = net; 460 l->peer_session = ANY_SESSION; 461 l->bearer_id = bearer_id; 462 l->tolerance = tolerance; 463 l->net_plane = net_plane; 464 l->advertised_mtu = mtu; 465 l->mtu = mtu; 466 l->priority = priority; 467 tipc_link_set_queue_limits(l, window); 468 l->ackers = 1; 469 l->bc_sndlink = bc_sndlink; 470 l->bc_rcvlink = bc_rcvlink; 471 l->inputq = inputq; 472 l->namedq = namedq; 473 l->state = LINK_RESETTING; 474 __skb_queue_head_init(&l->transmq); 475 __skb_queue_head_init(&l->backlogq); 476 __skb_queue_head_init(&l->deferdq); 477 skb_queue_head_init(&l->wakeupq); 478 skb_queue_head_init(l->inputq); 479 return true; 480 } 481 482 /** 483 * tipc_link_bc_create - create new link to be used for broadcast 484 * @n: pointer to associated node 485 * @mtu: mtu to be used 486 * @window: send window to be used 487 * @inputq: queue to put messages ready for delivery 488 * @namedq: queue to put binding table update messages ready for delivery 489 * @link: return value, pointer to put the created link 490 * 491 * Returns true if link was created, otherwise false 492 */ 493 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, 494 int mtu, int window, u16 peer_caps, 495 struct sk_buff_head *inputq, 496 struct sk_buff_head *namedq, 497 struct tipc_link *bc_sndlink, 498 struct tipc_link **link) 499 { 500 struct tipc_link *l; 501 502 if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window, 503 0, ownnode, peer, peer_caps, bc_sndlink, 504 NULL, inputq, namedq, link)) 505 return false; 506 507 l = *link; 508 strcpy(l->name, tipc_bclink_name); 509 tipc_link_reset(l); 510 l->state = LINK_RESET; 511 l->ackers = 0; 512 l->bc_rcvlink = l; 513 514 /* Broadcast send link is always up */ 515 if (link_is_bc_sndlink(l)) 516 l->state = LINK_ESTABLISHED; 517 518 return true; 519 } 520 521 /** 522 * tipc_link_fsm_evt - link finite state machine 523 * @l: pointer to link 524 * @evt: state machine event to be processed 525 */ 526 int tipc_link_fsm_evt(struct tipc_link *l, int evt) 527 { 528 int rc = 0; 529 530 switch (l->state) { 531 case LINK_RESETTING: 532 switch (evt) { 533 case LINK_PEER_RESET_EVT: 534 l->state = LINK_PEER_RESET; 535 break; 536 case LINK_RESET_EVT: 537 l->state = LINK_RESET; 538 break; 539 case LINK_FAILURE_EVT: 540 case LINK_FAILOVER_BEGIN_EVT: 541 case LINK_ESTABLISH_EVT: 542 case LINK_FAILOVER_END_EVT: 543 case LINK_SYNCH_BEGIN_EVT: 544 case LINK_SYNCH_END_EVT: 545 default: 546 goto illegal_evt; 547 } 548 break; 549 case LINK_RESET: 550 switch (evt) { 551 case LINK_PEER_RESET_EVT: 552 l->state = LINK_ESTABLISHING; 553 break; 554 case LINK_FAILOVER_BEGIN_EVT: 555 l->state = LINK_FAILINGOVER; 556 case LINK_FAILURE_EVT: 557 case LINK_RESET_EVT: 558 case LINK_ESTABLISH_EVT: 559 case LINK_FAILOVER_END_EVT: 560 break; 561 case LINK_SYNCH_BEGIN_EVT: 562 case LINK_SYNCH_END_EVT: 563 default: 564 goto illegal_evt; 565 } 566 break; 567 case LINK_PEER_RESET: 568 switch (evt) { 569 case LINK_RESET_EVT: 570 l->state = LINK_ESTABLISHING; 571 break; 572 case LINK_PEER_RESET_EVT: 573 case LINK_ESTABLISH_EVT: 574 case LINK_FAILURE_EVT: 575 break; 576 case LINK_SYNCH_BEGIN_EVT: 577 case LINK_SYNCH_END_EVT: 578 case LINK_FAILOVER_BEGIN_EVT: 579 case LINK_FAILOVER_END_EVT: 580 default: 581 goto illegal_evt; 582 } 583 break; 584 case LINK_FAILINGOVER: 585 switch (evt) { 586 case LINK_FAILOVER_END_EVT: 587 l->state = LINK_RESET; 588 break; 589 case LINK_PEER_RESET_EVT: 590 case LINK_RESET_EVT: 591 case LINK_ESTABLISH_EVT: 592 case LINK_FAILURE_EVT: 593 break; 594 case LINK_FAILOVER_BEGIN_EVT: 595 case LINK_SYNCH_BEGIN_EVT: 596 case LINK_SYNCH_END_EVT: 597 default: 598 goto illegal_evt; 599 } 600 break; 601 case LINK_ESTABLISHING: 602 switch (evt) { 603 case LINK_ESTABLISH_EVT: 604 l->state = LINK_ESTABLISHED; 605 break; 606 case LINK_FAILOVER_BEGIN_EVT: 607 l->state = LINK_FAILINGOVER; 608 break; 609 case LINK_RESET_EVT: 610 l->state = LINK_RESET; 611 break; 612 case LINK_FAILURE_EVT: 613 case LINK_PEER_RESET_EVT: 614 case LINK_SYNCH_BEGIN_EVT: 615 case LINK_FAILOVER_END_EVT: 616 break; 617 case LINK_SYNCH_END_EVT: 618 default: 619 goto illegal_evt; 620 } 621 break; 622 case LINK_ESTABLISHED: 623 switch (evt) { 624 case LINK_PEER_RESET_EVT: 625 l->state = LINK_PEER_RESET; 626 rc |= TIPC_LINK_DOWN_EVT; 627 break; 628 case LINK_FAILURE_EVT: 629 l->state = LINK_RESETTING; 630 rc |= TIPC_LINK_DOWN_EVT; 631 break; 632 case LINK_RESET_EVT: 633 l->state = LINK_RESET; 634 break; 635 case LINK_ESTABLISH_EVT: 636 case LINK_SYNCH_END_EVT: 637 break; 638 case LINK_SYNCH_BEGIN_EVT: 639 l->state = LINK_SYNCHING; 640 break; 641 case LINK_FAILOVER_BEGIN_EVT: 642 case LINK_FAILOVER_END_EVT: 643 default: 644 goto illegal_evt; 645 } 646 break; 647 case LINK_SYNCHING: 648 switch (evt) { 649 case LINK_PEER_RESET_EVT: 650 l->state = LINK_PEER_RESET; 651 rc |= TIPC_LINK_DOWN_EVT; 652 break; 653 case LINK_FAILURE_EVT: 654 l->state = LINK_RESETTING; 655 rc |= TIPC_LINK_DOWN_EVT; 656 break; 657 case LINK_RESET_EVT: 658 l->state = LINK_RESET; 659 break; 660 case LINK_ESTABLISH_EVT: 661 case LINK_SYNCH_BEGIN_EVT: 662 break; 663 case LINK_SYNCH_END_EVT: 664 l->state = LINK_ESTABLISHED; 665 break; 666 case LINK_FAILOVER_BEGIN_EVT: 667 case LINK_FAILOVER_END_EVT: 668 default: 669 goto illegal_evt; 670 } 671 break; 672 default: 673 pr_err("Unknown FSM state %x in %s\n", l->state, l->name); 674 } 675 return rc; 676 illegal_evt: 677 pr_err("Illegal FSM event %x in state %x on link %s\n", 678 evt, l->state, l->name); 679 return rc; 680 } 681 682 /* link_profile_stats - update statistical profiling of traffic 683 */ 684 static void link_profile_stats(struct tipc_link *l) 685 { 686 struct sk_buff *skb; 687 struct tipc_msg *msg; 688 int length; 689 690 /* Update counters used in statistical profiling of send traffic */ 691 l->stats.accu_queue_sz += skb_queue_len(&l->transmq); 692 l->stats.queue_sz_counts++; 693 694 skb = skb_peek(&l->transmq); 695 if (!skb) 696 return; 697 msg = buf_msg(skb); 698 length = msg_size(msg); 699 700 if (msg_user(msg) == MSG_FRAGMENTER) { 701 if (msg_type(msg) != FIRST_FRAGMENT) 702 return; 703 length = msg_size(msg_get_wrapped(msg)); 704 } 705 l->stats.msg_lengths_total += length; 706 l->stats.msg_length_counts++; 707 if (length <= 64) 708 l->stats.msg_length_profile[0]++; 709 else if (length <= 256) 710 l->stats.msg_length_profile[1]++; 711 else if (length <= 1024) 712 l->stats.msg_length_profile[2]++; 713 else if (length <= 4096) 714 l->stats.msg_length_profile[3]++; 715 else if (length <= 16384) 716 l->stats.msg_length_profile[4]++; 717 else if (length <= 32768) 718 l->stats.msg_length_profile[5]++; 719 else 720 l->stats.msg_length_profile[6]++; 721 } 722 723 /* tipc_link_timeout - perform periodic task as instructed from node timeout 724 */ 725 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) 726 { 727 int mtyp = 0; 728 int rc = 0; 729 bool state = false; 730 bool probe = false; 731 bool setup = false; 732 u16 bc_snt = l->bc_sndlink->snd_nxt - 1; 733 u16 bc_acked = l->bc_rcvlink->acked; 734 struct tipc_mon_state *mstate = &l->mon_state; 735 736 switch (l->state) { 737 case LINK_ESTABLISHED: 738 case LINK_SYNCHING: 739 mtyp = STATE_MSG; 740 link_profile_stats(l); 741 tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id); 742 if (mstate->reset || (l->silent_intv_cnt > l->abort_limit)) 743 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 744 state = bc_acked != bc_snt; 745 state |= l->bc_rcvlink->rcv_unacked; 746 state |= l->rcv_unacked; 747 state |= !skb_queue_empty(&l->transmq); 748 state |= !skb_queue_empty(&l->deferdq); 749 probe = mstate->probing; 750 probe |= l->silent_intv_cnt; 751 if (probe || mstate->monitoring) 752 l->silent_intv_cnt++; 753 break; 754 case LINK_RESET: 755 setup = l->rst_cnt++ <= 4; 756 setup |= !(l->rst_cnt % 16); 757 mtyp = RESET_MSG; 758 break; 759 case LINK_ESTABLISHING: 760 setup = true; 761 mtyp = ACTIVATE_MSG; 762 break; 763 case LINK_PEER_RESET: 764 case LINK_RESETTING: 765 case LINK_FAILINGOVER: 766 break; 767 default: 768 break; 769 } 770 771 if (state || probe || setup) 772 tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq); 773 774 return rc; 775 } 776 777 /** 778 * link_schedule_user - schedule a message sender for wakeup after congestion 779 * @link: congested link 780 * @list: message that was attempted sent 781 * Create pseudo msg to send back to user when congestion abates 782 * Does not consume buffer list 783 */ 784 static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list) 785 { 786 struct tipc_msg *msg = buf_msg(skb_peek(list)); 787 int imp = msg_importance(msg); 788 u32 oport = msg_origport(msg); 789 u32 addr = tipc_own_addr(link->net); 790 struct sk_buff *skb; 791 792 /* This really cannot happen... */ 793 if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) { 794 pr_warn("%s<%s>, send queue full", link_rst_msg, link->name); 795 return -ENOBUFS; 796 } 797 /* Non-blocking sender: */ 798 if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending) 799 return -ELINKCONG; 800 801 /* Create and schedule wakeup pseudo message */ 802 skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, 803 addr, addr, oport, 0, 0); 804 if (!skb) 805 return -ENOBUFS; 806 TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list); 807 TIPC_SKB_CB(skb)->chain_imp = imp; 808 skb_queue_tail(&link->wakeupq, skb); 809 link->stats.link_congs++; 810 return -ELINKCONG; 811 } 812 813 /** 814 * link_prepare_wakeup - prepare users for wakeup after congestion 815 * @link: congested link 816 * Move a number of waiting users, as permitted by available space in 817 * the send queue, from link wait queue to node wait queue for wakeup 818 */ 819 void link_prepare_wakeup(struct tipc_link *l) 820 { 821 int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,}; 822 int imp, lim; 823 struct sk_buff *skb, *tmp; 824 825 skb_queue_walk_safe(&l->wakeupq, skb, tmp) { 826 imp = TIPC_SKB_CB(skb)->chain_imp; 827 lim = l->backlog[imp].limit; 828 pnd[imp] += TIPC_SKB_CB(skb)->chain_sz; 829 if ((pnd[imp] + l->backlog[imp].len) >= lim) 830 break; 831 skb_unlink(skb, &l->wakeupq); 832 skb_queue_tail(l->inputq, skb); 833 } 834 } 835 836 void tipc_link_reset(struct tipc_link *l) 837 { 838 l->peer_session = ANY_SESSION; 839 l->session++; 840 l->mtu = l->advertised_mtu; 841 __skb_queue_purge(&l->transmq); 842 __skb_queue_purge(&l->deferdq); 843 skb_queue_splice_init(&l->wakeupq, l->inputq); 844 __skb_queue_purge(&l->backlogq); 845 l->backlog[TIPC_LOW_IMPORTANCE].len = 0; 846 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0; 847 l->backlog[TIPC_HIGH_IMPORTANCE].len = 0; 848 l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0; 849 l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0; 850 kfree_skb(l->reasm_buf); 851 kfree_skb(l->failover_reasm_skb); 852 l->reasm_buf = NULL; 853 l->failover_reasm_skb = NULL; 854 l->rcv_unacked = 0; 855 l->snd_nxt = 1; 856 l->rcv_nxt = 1; 857 l->acked = 0; 858 l->silent_intv_cnt = 0; 859 l->rst_cnt = 0; 860 l->stale_count = 0; 861 l->bc_peer_is_up = false; 862 memset(&l->mon_state, 0, sizeof(l->mon_state)); 863 tipc_link_reset_stats(l); 864 } 865 866 /** 867 * tipc_link_xmit(): enqueue buffer list according to queue situation 868 * @link: link to use 869 * @list: chain of buffers containing message 870 * @xmitq: returned list of packets to be sent by caller 871 * 872 * Consumes the buffer chain, except when returning -ELINKCONG, 873 * since the caller then may want to make more send attempts. 874 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS 875 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted 876 */ 877 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, 878 struct sk_buff_head *xmitq) 879 { 880 struct tipc_msg *hdr = buf_msg(skb_peek(list)); 881 unsigned int maxwin = l->window; 882 unsigned int i, imp = msg_importance(hdr); 883 unsigned int mtu = l->mtu; 884 u16 ack = l->rcv_nxt - 1; 885 u16 seqno = l->snd_nxt; 886 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; 887 struct sk_buff_head *transmq = &l->transmq; 888 struct sk_buff_head *backlogq = &l->backlogq; 889 struct sk_buff *skb, *_skb, *bskb; 890 int pkt_cnt = skb_queue_len(list); 891 892 /* Match msg importance against this and all higher backlog limits: */ 893 if (!skb_queue_empty(backlogq)) { 894 for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) { 895 if (unlikely(l->backlog[i].len >= l->backlog[i].limit)) 896 return link_schedule_user(l, list); 897 } 898 } 899 if (unlikely(msg_size(hdr) > mtu)) { 900 skb_queue_purge(list); 901 return -EMSGSIZE; 902 } 903 904 if (pkt_cnt > 1) { 905 l->stats.sent_fragmented++; 906 l->stats.sent_fragments += pkt_cnt; 907 } 908 909 /* Prepare each packet for sending, and add to relevant queue: */ 910 while (skb_queue_len(list)) { 911 skb = skb_peek(list); 912 hdr = buf_msg(skb); 913 msg_set_seqno(hdr, seqno); 914 msg_set_ack(hdr, ack); 915 msg_set_bcast_ack(hdr, bc_ack); 916 917 if (likely(skb_queue_len(transmq) < maxwin)) { 918 _skb = skb_clone(skb, GFP_ATOMIC); 919 if (!_skb) { 920 skb_queue_purge(list); 921 return -ENOBUFS; 922 } 923 __skb_dequeue(list); 924 __skb_queue_tail(transmq, skb); 925 __skb_queue_tail(xmitq, _skb); 926 TIPC_SKB_CB(skb)->ackers = l->ackers; 927 l->rcv_unacked = 0; 928 l->stats.sent_pkts++; 929 seqno++; 930 continue; 931 } 932 if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) { 933 kfree_skb(__skb_dequeue(list)); 934 l->stats.sent_bundled++; 935 continue; 936 } 937 if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) { 938 kfree_skb(__skb_dequeue(list)); 939 __skb_queue_tail(backlogq, bskb); 940 l->backlog[msg_importance(buf_msg(bskb))].len++; 941 l->stats.sent_bundled++; 942 l->stats.sent_bundles++; 943 continue; 944 } 945 l->backlog[imp].len += skb_queue_len(list); 946 skb_queue_splice_tail_init(list, backlogq); 947 } 948 l->snd_nxt = seqno; 949 return 0; 950 } 951 952 void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq) 953 { 954 struct sk_buff *skb, *_skb; 955 struct tipc_msg *hdr; 956 u16 seqno = l->snd_nxt; 957 u16 ack = l->rcv_nxt - 1; 958 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; 959 960 while (skb_queue_len(&l->transmq) < l->window) { 961 skb = skb_peek(&l->backlogq); 962 if (!skb) 963 break; 964 _skb = skb_clone(skb, GFP_ATOMIC); 965 if (!_skb) 966 break; 967 __skb_dequeue(&l->backlogq); 968 hdr = buf_msg(skb); 969 l->backlog[msg_importance(hdr)].len--; 970 __skb_queue_tail(&l->transmq, skb); 971 __skb_queue_tail(xmitq, _skb); 972 TIPC_SKB_CB(skb)->ackers = l->ackers; 973 msg_set_seqno(hdr, seqno); 974 msg_set_ack(hdr, ack); 975 msg_set_bcast_ack(hdr, bc_ack); 976 l->rcv_unacked = 0; 977 l->stats.sent_pkts++; 978 seqno++; 979 } 980 l->snd_nxt = seqno; 981 } 982 983 static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb) 984 { 985 struct tipc_msg *hdr = buf_msg(skb); 986 987 pr_warn("Retransmission failure on link <%s>\n", l->name); 988 link_print(l, "Resetting link "); 989 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n", 990 msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr)); 991 pr_info("sqno %u, prev: %x, src: %x\n", 992 msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr)); 993 } 994 995 int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to, 996 struct sk_buff_head *xmitq) 997 { 998 struct sk_buff *_skb, *skb = skb_peek(&l->transmq); 999 struct tipc_msg *hdr; 1000 u16 ack = l->rcv_nxt - 1; 1001 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; 1002 1003 if (!skb) 1004 return 0; 1005 1006 /* Detect repeated retransmit failures on same packet */ 1007 if (likely(l->last_retransm != buf_seqno(skb))) { 1008 l->last_retransm = buf_seqno(skb); 1009 l->stale_count = 1; 1010 } else if (++l->stale_count > 100) { 1011 link_retransmit_failure(l, skb); 1012 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1013 } 1014 1015 /* Move forward to where retransmission should start */ 1016 skb_queue_walk(&l->transmq, skb) { 1017 if (!less(buf_seqno(skb), from)) 1018 break; 1019 } 1020 1021 skb_queue_walk_from(&l->transmq, skb) { 1022 if (more(buf_seqno(skb), to)) 1023 break; 1024 hdr = buf_msg(skb); 1025 _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC); 1026 if (!_skb) 1027 return 0; 1028 hdr = buf_msg(_skb); 1029 msg_set_ack(hdr, ack); 1030 msg_set_bcast_ack(hdr, bc_ack); 1031 _skb->priority = TC_PRIO_CONTROL; 1032 __skb_queue_tail(xmitq, _skb); 1033 l->stats.retransmitted++; 1034 } 1035 return 0; 1036 } 1037 1038 /* tipc_data_input - deliver data and name distr msgs to upper layer 1039 * 1040 * Consumes buffer if message is of right type 1041 * Node lock must be held 1042 */ 1043 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, 1044 struct sk_buff_head *inputq) 1045 { 1046 switch (msg_user(buf_msg(skb))) { 1047 case TIPC_LOW_IMPORTANCE: 1048 case TIPC_MEDIUM_IMPORTANCE: 1049 case TIPC_HIGH_IMPORTANCE: 1050 case TIPC_CRITICAL_IMPORTANCE: 1051 case CONN_MANAGER: 1052 skb_queue_tail(inputq, skb); 1053 return true; 1054 case NAME_DISTRIBUTOR: 1055 l->bc_rcvlink->state = LINK_ESTABLISHED; 1056 skb_queue_tail(l->namedq, skb); 1057 return true; 1058 case MSG_BUNDLER: 1059 case TUNNEL_PROTOCOL: 1060 case MSG_FRAGMENTER: 1061 case BCAST_PROTOCOL: 1062 return false; 1063 default: 1064 pr_warn("Dropping received illegal msg type\n"); 1065 kfree_skb(skb); 1066 return false; 1067 }; 1068 } 1069 1070 /* tipc_link_input - process packet that has passed link protocol check 1071 * 1072 * Consumes buffer 1073 */ 1074 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb, 1075 struct sk_buff_head *inputq) 1076 { 1077 struct tipc_msg *hdr = buf_msg(skb); 1078 struct sk_buff **reasm_skb = &l->reasm_buf; 1079 struct sk_buff *iskb; 1080 struct sk_buff_head tmpq; 1081 int usr = msg_user(hdr); 1082 int rc = 0; 1083 int pos = 0; 1084 int ipos = 0; 1085 1086 if (unlikely(usr == TUNNEL_PROTOCOL)) { 1087 if (msg_type(hdr) == SYNCH_MSG) { 1088 __skb_queue_purge(&l->deferdq); 1089 goto drop; 1090 } 1091 if (!tipc_msg_extract(skb, &iskb, &ipos)) 1092 return rc; 1093 kfree_skb(skb); 1094 skb = iskb; 1095 hdr = buf_msg(skb); 1096 if (less(msg_seqno(hdr), l->drop_point)) 1097 goto drop; 1098 if (tipc_data_input(l, skb, inputq)) 1099 return rc; 1100 usr = msg_user(hdr); 1101 reasm_skb = &l->failover_reasm_skb; 1102 } 1103 1104 if (usr == MSG_BUNDLER) { 1105 skb_queue_head_init(&tmpq); 1106 l->stats.recv_bundles++; 1107 l->stats.recv_bundled += msg_msgcnt(hdr); 1108 while (tipc_msg_extract(skb, &iskb, &pos)) 1109 tipc_data_input(l, iskb, &tmpq); 1110 tipc_skb_queue_splice_tail(&tmpq, inputq); 1111 return 0; 1112 } else if (usr == MSG_FRAGMENTER) { 1113 l->stats.recv_fragments++; 1114 if (tipc_buf_append(reasm_skb, &skb)) { 1115 l->stats.recv_fragmented++; 1116 tipc_data_input(l, skb, inputq); 1117 } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) { 1118 pr_warn_ratelimited("Unable to build fragment list\n"); 1119 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1120 } 1121 return 0; 1122 } else if (usr == BCAST_PROTOCOL) { 1123 tipc_bcast_lock(l->net); 1124 tipc_link_bc_init_rcv(l->bc_rcvlink, hdr); 1125 tipc_bcast_unlock(l->net); 1126 } 1127 drop: 1128 kfree_skb(skb); 1129 return 0; 1130 } 1131 1132 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked) 1133 { 1134 bool released = false; 1135 struct sk_buff *skb, *tmp; 1136 1137 skb_queue_walk_safe(&l->transmq, skb, tmp) { 1138 if (more(buf_seqno(skb), acked)) 1139 break; 1140 __skb_unlink(skb, &l->transmq); 1141 kfree_skb(skb); 1142 released = true; 1143 } 1144 return released; 1145 } 1146 1147 /* tipc_link_build_state_msg: prepare link state message for transmission 1148 * 1149 * Note that sending of broadcast ack is coordinated among nodes, to reduce 1150 * risk of ack storms towards the sender 1151 */ 1152 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq) 1153 { 1154 if (!l) 1155 return 0; 1156 1157 /* Broadcast ACK must be sent via a unicast link => defer to caller */ 1158 if (link_is_bc_rcvlink(l)) { 1159 if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf) 1160 return 0; 1161 l->rcv_unacked = 0; 1162 1163 /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */ 1164 l->snd_nxt = l->rcv_nxt; 1165 return TIPC_LINK_SND_STATE; 1166 } 1167 1168 /* Unicast ACK */ 1169 l->rcv_unacked = 0; 1170 l->stats.sent_acks++; 1171 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq); 1172 return 0; 1173 } 1174 1175 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message 1176 */ 1177 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq) 1178 { 1179 int mtyp = RESET_MSG; 1180 struct sk_buff *skb; 1181 1182 if (l->state == LINK_ESTABLISHING) 1183 mtyp = ACTIVATE_MSG; 1184 1185 tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq); 1186 1187 /* Inform peer that this endpoint is going down if applicable */ 1188 skb = skb_peek_tail(xmitq); 1189 if (skb && (l->state == LINK_RESET)) 1190 msg_set_peer_stopping(buf_msg(skb), 1); 1191 } 1192 1193 /* tipc_link_build_nack_msg: prepare link nack message for transmission 1194 * Note that sending of broadcast NACK is coordinated among nodes, to 1195 * reduce the risk of NACK storms towards the sender 1196 */ 1197 static int tipc_link_build_nack_msg(struct tipc_link *l, 1198 struct sk_buff_head *xmitq) 1199 { 1200 u32 def_cnt = ++l->stats.deferred_recv; 1201 int match1, match2; 1202 1203 if (link_is_bc_rcvlink(l)) { 1204 match1 = def_cnt & 0xf; 1205 match2 = tipc_own_addr(l->net) & 0xf; 1206 if (match1 == match2) 1207 return TIPC_LINK_SND_STATE; 1208 return 0; 1209 } 1210 1211 if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV)) 1212 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq); 1213 return 0; 1214 } 1215 1216 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node 1217 * @l: the link that should handle the message 1218 * @skb: TIPC packet 1219 * @xmitq: queue to place packets to be sent after this call 1220 */ 1221 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, 1222 struct sk_buff_head *xmitq) 1223 { 1224 struct sk_buff_head *defq = &l->deferdq; 1225 struct tipc_msg *hdr; 1226 u16 seqno, rcv_nxt, win_lim; 1227 int rc = 0; 1228 1229 do { 1230 hdr = buf_msg(skb); 1231 seqno = msg_seqno(hdr); 1232 rcv_nxt = l->rcv_nxt; 1233 win_lim = rcv_nxt + TIPC_MAX_LINK_WIN; 1234 1235 /* Verify and update link state */ 1236 if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) 1237 return tipc_link_proto_rcv(l, skb, xmitq); 1238 1239 if (unlikely(!link_is_up(l))) { 1240 if (l->state == LINK_ESTABLISHING) 1241 rc = TIPC_LINK_UP_EVT; 1242 goto drop; 1243 } 1244 1245 /* Don't send probe at next timeout expiration */ 1246 l->silent_intv_cnt = 0; 1247 1248 /* Drop if outside receive window */ 1249 if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) { 1250 l->stats.duplicates++; 1251 goto drop; 1252 } 1253 1254 /* Forward queues and wake up waiting users */ 1255 if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) { 1256 tipc_link_advance_backlog(l, xmitq); 1257 if (unlikely(!skb_queue_empty(&l->wakeupq))) 1258 link_prepare_wakeup(l); 1259 } 1260 1261 /* Defer delivery if sequence gap */ 1262 if (unlikely(seqno != rcv_nxt)) { 1263 __tipc_skb_queue_sorted(defq, seqno, skb); 1264 rc |= tipc_link_build_nack_msg(l, xmitq); 1265 break; 1266 } 1267 1268 /* Deliver packet */ 1269 l->rcv_nxt++; 1270 l->stats.recv_pkts++; 1271 if (!tipc_data_input(l, skb, l->inputq)) 1272 rc |= tipc_link_input(l, skb, l->inputq); 1273 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) 1274 rc |= tipc_link_build_state_msg(l, xmitq); 1275 if (unlikely(rc & ~TIPC_LINK_SND_STATE)) 1276 break; 1277 } while ((skb = __skb_dequeue(defq))); 1278 1279 return rc; 1280 drop: 1281 kfree_skb(skb); 1282 return rc; 1283 } 1284 1285 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, 1286 u16 rcvgap, int tolerance, int priority, 1287 struct sk_buff_head *xmitq) 1288 { 1289 struct tipc_link *bcl = l->bc_rcvlink; 1290 struct sk_buff *skb; 1291 struct tipc_msg *hdr; 1292 struct sk_buff_head *dfq = &l->deferdq; 1293 bool node_up = link_is_up(bcl); 1294 struct tipc_mon_state *mstate = &l->mon_state; 1295 int dlen = 0; 1296 void *data; 1297 1298 /* Don't send protocol message during reset or link failover */ 1299 if (tipc_link_is_blocked(l)) 1300 return; 1301 1302 if (!tipc_link_is_up(l) && (mtyp == STATE_MSG)) 1303 return; 1304 1305 if (!skb_queue_empty(dfq)) 1306 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt; 1307 1308 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE, 1309 tipc_max_domain_size, l->addr, 1310 tipc_own_addr(l->net), 0, 0, 0); 1311 if (!skb) 1312 return; 1313 1314 hdr = buf_msg(skb); 1315 data = msg_data(hdr); 1316 msg_set_session(hdr, l->session); 1317 msg_set_bearer_id(hdr, l->bearer_id); 1318 msg_set_net_plane(hdr, l->net_plane); 1319 msg_set_next_sent(hdr, l->snd_nxt); 1320 msg_set_ack(hdr, l->rcv_nxt - 1); 1321 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1); 1322 msg_set_bc_ack_invalid(hdr, !node_up); 1323 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1); 1324 msg_set_link_tolerance(hdr, tolerance); 1325 msg_set_linkprio(hdr, priority); 1326 msg_set_redundant_link(hdr, node_up); 1327 msg_set_seq_gap(hdr, 0); 1328 msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2); 1329 1330 if (mtyp == STATE_MSG) { 1331 msg_set_seq_gap(hdr, rcvgap); 1332 msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl)); 1333 msg_set_probe(hdr, probe); 1334 tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id); 1335 msg_set_size(hdr, INT_H_SIZE + dlen); 1336 skb_trim(skb, INT_H_SIZE + dlen); 1337 l->stats.sent_states++; 1338 l->rcv_unacked = 0; 1339 } else { 1340 /* RESET_MSG or ACTIVATE_MSG */ 1341 msg_set_max_pkt(hdr, l->advertised_mtu); 1342 strcpy(data, l->if_name); 1343 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); 1344 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME); 1345 } 1346 if (probe) 1347 l->stats.sent_probes++; 1348 if (rcvgap) 1349 l->stats.sent_nacks++; 1350 skb->priority = TC_PRIO_CONTROL; 1351 __skb_queue_tail(xmitq, skb); 1352 } 1353 1354 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets 1355 * with contents of the link's transmit and backlog queues. 1356 */ 1357 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, 1358 int mtyp, struct sk_buff_head *xmitq) 1359 { 1360 struct sk_buff *skb, *tnlskb; 1361 struct tipc_msg *hdr, tnlhdr; 1362 struct sk_buff_head *queue = &l->transmq; 1363 struct sk_buff_head tmpxq, tnlq; 1364 u16 pktlen, pktcnt, seqno = l->snd_nxt; 1365 1366 if (!tnl) 1367 return; 1368 1369 skb_queue_head_init(&tnlq); 1370 skb_queue_head_init(&tmpxq); 1371 1372 /* At least one packet required for safe algorithm => add dummy */ 1373 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG, 1374 BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net), 1375 0, 0, TIPC_ERR_NO_PORT); 1376 if (!skb) { 1377 pr_warn("%sunable to create tunnel packet\n", link_co_err); 1378 return; 1379 } 1380 skb_queue_tail(&tnlq, skb); 1381 tipc_link_xmit(l, &tnlq, &tmpxq); 1382 __skb_queue_purge(&tmpxq); 1383 1384 /* Initialize reusable tunnel packet header */ 1385 tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL, 1386 mtyp, INT_H_SIZE, l->addr); 1387 pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq); 1388 msg_set_msgcnt(&tnlhdr, pktcnt); 1389 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id); 1390 tnl: 1391 /* Wrap each packet into a tunnel packet */ 1392 skb_queue_walk(queue, skb) { 1393 hdr = buf_msg(skb); 1394 if (queue == &l->backlogq) 1395 msg_set_seqno(hdr, seqno++); 1396 pktlen = msg_size(hdr); 1397 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE); 1398 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC); 1399 if (!tnlskb) { 1400 pr_warn("%sunable to send packet\n", link_co_err); 1401 return; 1402 } 1403 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE); 1404 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen); 1405 __skb_queue_tail(&tnlq, tnlskb); 1406 } 1407 if (queue != &l->backlogq) { 1408 queue = &l->backlogq; 1409 goto tnl; 1410 } 1411 1412 tipc_link_xmit(tnl, &tnlq, xmitq); 1413 1414 if (mtyp == FAILOVER_MSG) { 1415 tnl->drop_point = l->rcv_nxt; 1416 tnl->failover_reasm_skb = l->reasm_buf; 1417 l->reasm_buf = NULL; 1418 } 1419 } 1420 1421 /* tipc_link_proto_rcv(): receive link level protocol message : 1422 * Note that network plane id propagates through the network, and may 1423 * change at any time. The node with lowest numerical id determines 1424 * network plane 1425 */ 1426 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, 1427 struct sk_buff_head *xmitq) 1428 { 1429 struct tipc_msg *hdr = buf_msg(skb); 1430 u16 rcvgap = 0; 1431 u16 ack = msg_ack(hdr); 1432 u16 gap = msg_seq_gap(hdr); 1433 u16 peers_snd_nxt = msg_next_sent(hdr); 1434 u16 peers_tol = msg_link_tolerance(hdr); 1435 u16 peers_prio = msg_linkprio(hdr); 1436 u16 rcv_nxt = l->rcv_nxt; 1437 u16 dlen = msg_data_sz(hdr); 1438 int mtyp = msg_type(hdr); 1439 void *data; 1440 char *if_name; 1441 int rc = 0; 1442 1443 if (tipc_link_is_blocked(l) || !xmitq) 1444 goto exit; 1445 1446 if (tipc_own_addr(l->net) > msg_prevnode(hdr)) 1447 l->net_plane = msg_net_plane(hdr); 1448 1449 skb_linearize(skb); 1450 hdr = buf_msg(skb); 1451 data = msg_data(hdr); 1452 1453 switch (mtyp) { 1454 case RESET_MSG: 1455 1456 /* Ignore duplicate RESET with old session number */ 1457 if ((less_eq(msg_session(hdr), l->peer_session)) && 1458 (l->peer_session != ANY_SESSION)) 1459 break; 1460 /* fall thru' */ 1461 1462 case ACTIVATE_MSG: 1463 1464 /* Complete own link name with peer's interface name */ 1465 if_name = strrchr(l->name, ':') + 1; 1466 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME) 1467 break; 1468 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME) 1469 break; 1470 strncpy(if_name, data, TIPC_MAX_IF_NAME); 1471 1472 /* Update own tolerance if peer indicates a non-zero value */ 1473 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) 1474 l->tolerance = peers_tol; 1475 1476 /* Update own priority if peer's priority is higher */ 1477 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI)) 1478 l->priority = peers_prio; 1479 1480 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ 1481 if (msg_peer_stopping(hdr)) 1482 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1483 else if ((mtyp == RESET_MSG) || !link_is_up(l)) 1484 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); 1485 1486 /* ACTIVATE_MSG takes up link if it was already locally reset */ 1487 if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING)) 1488 rc = TIPC_LINK_UP_EVT; 1489 1490 l->peer_session = msg_session(hdr); 1491 l->peer_bearer_id = msg_bearer_id(hdr); 1492 if (l->mtu > msg_max_pkt(hdr)) 1493 l->mtu = msg_max_pkt(hdr); 1494 break; 1495 1496 case STATE_MSG: 1497 1498 /* Update own tolerance if peer indicates a non-zero value */ 1499 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) 1500 l->tolerance = peers_tol; 1501 1502 /* Update own prio if peer indicates a different value */ 1503 if ((peers_prio != l->priority) && 1504 in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) { 1505 l->priority = peers_prio; 1506 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1507 } 1508 1509 l->silent_intv_cnt = 0; 1510 l->stats.recv_states++; 1511 if (msg_probe(hdr)) 1512 l->stats.recv_probes++; 1513 1514 if (!link_is_up(l)) { 1515 if (l->state == LINK_ESTABLISHING) 1516 rc = TIPC_LINK_UP_EVT; 1517 break; 1518 } 1519 tipc_mon_rcv(l->net, data, dlen, l->addr, 1520 &l->mon_state, l->bearer_id); 1521 1522 /* Send NACK if peer has sent pkts we haven't received yet */ 1523 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l)) 1524 rcvgap = peers_snd_nxt - l->rcv_nxt; 1525 if (rcvgap || (msg_probe(hdr))) 1526 tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap, 1527 0, 0, xmitq); 1528 tipc_link_release_pkts(l, ack); 1529 1530 /* If NACK, retransmit will now start at right position */ 1531 if (gap) { 1532 rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq); 1533 l->stats.recv_nacks++; 1534 } 1535 1536 tipc_link_advance_backlog(l, xmitq); 1537 if (unlikely(!skb_queue_empty(&l->wakeupq))) 1538 link_prepare_wakeup(l); 1539 } 1540 exit: 1541 kfree_skb(skb); 1542 return rc; 1543 } 1544 1545 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message 1546 */ 1547 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast, 1548 u16 peers_snd_nxt, 1549 struct sk_buff_head *xmitq) 1550 { 1551 struct sk_buff *skb; 1552 struct tipc_msg *hdr; 1553 struct sk_buff *dfrd_skb = skb_peek(&l->deferdq); 1554 u16 ack = l->rcv_nxt - 1; 1555 u16 gap_to = peers_snd_nxt - 1; 1556 1557 skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, 1558 0, l->addr, tipc_own_addr(l->net), 0, 0, 0); 1559 if (!skb) 1560 return false; 1561 hdr = buf_msg(skb); 1562 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1); 1563 msg_set_bcast_ack(hdr, ack); 1564 msg_set_bcgap_after(hdr, ack); 1565 if (dfrd_skb) 1566 gap_to = buf_seqno(dfrd_skb) - 1; 1567 msg_set_bcgap_to(hdr, gap_to); 1568 msg_set_non_seq(hdr, bcast); 1569 __skb_queue_tail(xmitq, skb); 1570 return true; 1571 } 1572 1573 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints. 1574 * 1575 * Give a newly added peer node the sequence number where it should 1576 * start receiving and acking broadcast packets. 1577 */ 1578 static void tipc_link_build_bc_init_msg(struct tipc_link *l, 1579 struct sk_buff_head *xmitq) 1580 { 1581 struct sk_buff_head list; 1582 1583 __skb_queue_head_init(&list); 1584 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list)) 1585 return; 1586 msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true); 1587 tipc_link_xmit(l, &list, xmitq); 1588 } 1589 1590 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer 1591 */ 1592 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr) 1593 { 1594 int mtyp = msg_type(hdr); 1595 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr); 1596 1597 if (link_is_up(l)) 1598 return; 1599 1600 if (msg_user(hdr) == BCAST_PROTOCOL) { 1601 l->rcv_nxt = peers_snd_nxt; 1602 l->state = LINK_ESTABLISHED; 1603 return; 1604 } 1605 1606 if (l->peer_caps & TIPC_BCAST_SYNCH) 1607 return; 1608 1609 if (msg_peer_node_is_up(hdr)) 1610 return; 1611 1612 /* Compatibility: accept older, less safe initial synch data */ 1613 if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG)) 1614 l->rcv_nxt = peers_snd_nxt; 1615 } 1616 1617 /* link_bc_retr eval()- check if the indicated range can be retransmitted now 1618 * - Adjust permitted range if there is overlap with previous retransmission 1619 */ 1620 static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to) 1621 { 1622 unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr); 1623 1624 if (less(*to, *from)) 1625 return false; 1626 1627 /* New retransmission request */ 1628 if ((elapsed > TIPC_BC_RETR_LIMIT) || 1629 less(*to, l->prev_from) || more(*from, l->prev_to)) { 1630 l->prev_from = *from; 1631 l->prev_to = *to; 1632 l->prev_retr = jiffies; 1633 return true; 1634 } 1635 1636 /* Inside range of previous retransmit */ 1637 if (!less(*from, l->prev_from) && !more(*to, l->prev_to)) 1638 return false; 1639 1640 /* Fully or partially outside previous range => exclude overlap */ 1641 if (less(*from, l->prev_from)) { 1642 *to = l->prev_from - 1; 1643 l->prev_from = *from; 1644 } 1645 if (more(*to, l->prev_to)) { 1646 *from = l->prev_to + 1; 1647 l->prev_to = *to; 1648 } 1649 l->prev_retr = jiffies; 1650 return true; 1651 } 1652 1653 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state 1654 */ 1655 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr, 1656 struct sk_buff_head *xmitq) 1657 { 1658 struct tipc_link *snd_l = l->bc_sndlink; 1659 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr); 1660 u16 from = msg_bcast_ack(hdr) + 1; 1661 u16 to = from + msg_bc_gap(hdr) - 1; 1662 int rc = 0; 1663 1664 if (!link_is_up(l)) 1665 return rc; 1666 1667 if (!msg_peer_node_is_up(hdr)) 1668 return rc; 1669 1670 /* Open when peer ackowledges our bcast init msg (pkt #1) */ 1671 if (msg_ack(hdr)) 1672 l->bc_peer_is_up = true; 1673 1674 if (!l->bc_peer_is_up) 1675 return rc; 1676 1677 l->stats.recv_nacks++; 1678 1679 /* Ignore if peers_snd_nxt goes beyond receive window */ 1680 if (more(peers_snd_nxt, l->rcv_nxt + l->window)) 1681 return rc; 1682 1683 if (link_bc_retr_eval(snd_l, &from, &to)) 1684 rc = tipc_link_retrans(snd_l, from, to, xmitq); 1685 1686 l->snd_nxt = peers_snd_nxt; 1687 if (link_bc_rcv_gap(l)) 1688 rc |= TIPC_LINK_SND_STATE; 1689 1690 /* Return now if sender supports nack via STATE messages */ 1691 if (l->peer_caps & TIPC_BCAST_STATE_NACK) 1692 return rc; 1693 1694 /* Otherwise, be backwards compatible */ 1695 1696 if (!more(peers_snd_nxt, l->rcv_nxt)) { 1697 l->nack_state = BC_NACK_SND_CONDITIONAL; 1698 return 0; 1699 } 1700 1701 /* Don't NACK if one was recently sent or peeked */ 1702 if (l->nack_state == BC_NACK_SND_SUPPRESS) { 1703 l->nack_state = BC_NACK_SND_UNCONDITIONAL; 1704 return 0; 1705 } 1706 1707 /* Conditionally delay NACK sending until next synch rcv */ 1708 if (l->nack_state == BC_NACK_SND_CONDITIONAL) { 1709 l->nack_state = BC_NACK_SND_UNCONDITIONAL; 1710 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN) 1711 return 0; 1712 } 1713 1714 /* Send NACK now but suppress next one */ 1715 tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq); 1716 l->nack_state = BC_NACK_SND_SUPPRESS; 1717 return 0; 1718 } 1719 1720 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked, 1721 struct sk_buff_head *xmitq) 1722 { 1723 struct sk_buff *skb, *tmp; 1724 struct tipc_link *snd_l = l->bc_sndlink; 1725 1726 if (!link_is_up(l) || !l->bc_peer_is_up) 1727 return; 1728 1729 if (!more(acked, l->acked)) 1730 return; 1731 1732 /* Skip over packets peer has already acked */ 1733 skb_queue_walk(&snd_l->transmq, skb) { 1734 if (more(buf_seqno(skb), l->acked)) 1735 break; 1736 } 1737 1738 /* Update/release the packets peer is acking now */ 1739 skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) { 1740 if (more(buf_seqno(skb), acked)) 1741 break; 1742 if (!--TIPC_SKB_CB(skb)->ackers) { 1743 __skb_unlink(skb, &snd_l->transmq); 1744 kfree_skb(skb); 1745 } 1746 } 1747 l->acked = acked; 1748 tipc_link_advance_backlog(snd_l, xmitq); 1749 if (unlikely(!skb_queue_empty(&snd_l->wakeupq))) 1750 link_prepare_wakeup(snd_l); 1751 } 1752 1753 /* tipc_link_bc_nack_rcv(): receive broadcast nack message 1754 * This function is here for backwards compatibility, since 1755 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5. 1756 */ 1757 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb, 1758 struct sk_buff_head *xmitq) 1759 { 1760 struct tipc_msg *hdr = buf_msg(skb); 1761 u32 dnode = msg_destnode(hdr); 1762 int mtyp = msg_type(hdr); 1763 u16 acked = msg_bcast_ack(hdr); 1764 u16 from = acked + 1; 1765 u16 to = msg_bcgap_to(hdr); 1766 u16 peers_snd_nxt = to + 1; 1767 int rc = 0; 1768 1769 kfree_skb(skb); 1770 1771 if (!tipc_link_is_up(l) || !l->bc_peer_is_up) 1772 return 0; 1773 1774 if (mtyp != STATE_MSG) 1775 return 0; 1776 1777 if (dnode == tipc_own_addr(l->net)) { 1778 tipc_link_bc_ack_rcv(l, acked, xmitq); 1779 rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq); 1780 l->stats.recv_nacks++; 1781 return rc; 1782 } 1783 1784 /* Msg for other node => suppress own NACK at next sync if applicable */ 1785 if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from)) 1786 l->nack_state = BC_NACK_SND_SUPPRESS; 1787 1788 return 0; 1789 } 1790 1791 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) 1792 { 1793 int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE); 1794 1795 l->window = win; 1796 l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win); 1797 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2); 1798 l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3); 1799 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4); 1800 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; 1801 } 1802 1803 /** 1804 * link_reset_stats - reset link statistics 1805 * @l: pointer to link 1806 */ 1807 void tipc_link_reset_stats(struct tipc_link *l) 1808 { 1809 memset(&l->stats, 0, sizeof(l->stats)); 1810 } 1811 1812 static void link_print(struct tipc_link *l, const char *str) 1813 { 1814 struct sk_buff *hskb = skb_peek(&l->transmq); 1815 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1; 1816 u16 tail = l->snd_nxt - 1; 1817 1818 pr_info("%s Link <%s> state %x\n", str, l->name, l->state); 1819 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n", 1820 skb_queue_len(&l->transmq), head, tail, 1821 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt); 1822 } 1823 1824 /* Parse and validate nested (link) properties valid for media, bearer and link 1825 */ 1826 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]) 1827 { 1828 int err; 1829 1830 err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop, 1831 tipc_nl_prop_policy); 1832 if (err) 1833 return err; 1834 1835 if (props[TIPC_NLA_PROP_PRIO]) { 1836 u32 prio; 1837 1838 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); 1839 if (prio > TIPC_MAX_LINK_PRI) 1840 return -EINVAL; 1841 } 1842 1843 if (props[TIPC_NLA_PROP_TOL]) { 1844 u32 tol; 1845 1846 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); 1847 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL)) 1848 return -EINVAL; 1849 } 1850 1851 if (props[TIPC_NLA_PROP_WIN]) { 1852 u32 win; 1853 1854 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); 1855 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN)) 1856 return -EINVAL; 1857 } 1858 1859 return 0; 1860 } 1861 1862 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s) 1863 { 1864 int i; 1865 struct nlattr *stats; 1866 1867 struct nla_map { 1868 u32 key; 1869 u32 val; 1870 }; 1871 1872 struct nla_map map[] = { 1873 {TIPC_NLA_STATS_RX_INFO, 0}, 1874 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments}, 1875 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented}, 1876 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles}, 1877 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled}, 1878 {TIPC_NLA_STATS_TX_INFO, 0}, 1879 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments}, 1880 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented}, 1881 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles}, 1882 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled}, 1883 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ? 1884 s->msg_length_counts : 1}, 1885 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts}, 1886 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total}, 1887 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]}, 1888 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]}, 1889 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]}, 1890 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]}, 1891 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]}, 1892 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]}, 1893 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]}, 1894 {TIPC_NLA_STATS_RX_STATES, s->recv_states}, 1895 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes}, 1896 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks}, 1897 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv}, 1898 {TIPC_NLA_STATS_TX_STATES, s->sent_states}, 1899 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes}, 1900 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks}, 1901 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks}, 1902 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted}, 1903 {TIPC_NLA_STATS_DUPLICATES, s->duplicates}, 1904 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs}, 1905 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz}, 1906 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ? 1907 (s->accu_queue_sz / s->queue_sz_counts) : 0} 1908 }; 1909 1910 stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS); 1911 if (!stats) 1912 return -EMSGSIZE; 1913 1914 for (i = 0; i < ARRAY_SIZE(map); i++) 1915 if (nla_put_u32(skb, map[i].key, map[i].val)) 1916 goto msg_full; 1917 1918 nla_nest_end(skb, stats); 1919 1920 return 0; 1921 msg_full: 1922 nla_nest_cancel(skb, stats); 1923 1924 return -EMSGSIZE; 1925 } 1926 1927 /* Caller should hold appropriate locks to protect the link */ 1928 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, 1929 struct tipc_link *link, int nlflags) 1930 { 1931 int err; 1932 void *hdr; 1933 struct nlattr *attrs; 1934 struct nlattr *prop; 1935 struct tipc_net *tn = net_generic(net, tipc_net_id); 1936 1937 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 1938 nlflags, TIPC_NL_LINK_GET); 1939 if (!hdr) 1940 return -EMSGSIZE; 1941 1942 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); 1943 if (!attrs) 1944 goto msg_full; 1945 1946 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name)) 1947 goto attr_msg_full; 1948 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, 1949 tipc_cluster_mask(tn->own_addr))) 1950 goto attr_msg_full; 1951 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) 1952 goto attr_msg_full; 1953 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts)) 1954 goto attr_msg_full; 1955 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts)) 1956 goto attr_msg_full; 1957 1958 if (tipc_link_is_up(link)) 1959 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) 1960 goto attr_msg_full; 1961 if (link->active) 1962 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE)) 1963 goto attr_msg_full; 1964 1965 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); 1966 if (!prop) 1967 goto attr_msg_full; 1968 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) 1969 goto prop_msg_full; 1970 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) 1971 goto prop_msg_full; 1972 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, 1973 link->window)) 1974 goto prop_msg_full; 1975 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) 1976 goto prop_msg_full; 1977 nla_nest_end(msg->skb, prop); 1978 1979 err = __tipc_nl_add_stats(msg->skb, &link->stats); 1980 if (err) 1981 goto attr_msg_full; 1982 1983 nla_nest_end(msg->skb, attrs); 1984 genlmsg_end(msg->skb, hdr); 1985 1986 return 0; 1987 1988 prop_msg_full: 1989 nla_nest_cancel(msg->skb, prop); 1990 attr_msg_full: 1991 nla_nest_cancel(msg->skb, attrs); 1992 msg_full: 1993 genlmsg_cancel(msg->skb, hdr); 1994 1995 return -EMSGSIZE; 1996 } 1997 1998 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb, 1999 struct tipc_stats *stats) 2000 { 2001 int i; 2002 struct nlattr *nest; 2003 2004 struct nla_map { 2005 __u32 key; 2006 __u32 val; 2007 }; 2008 2009 struct nla_map map[] = { 2010 {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts}, 2011 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments}, 2012 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented}, 2013 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles}, 2014 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled}, 2015 {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts}, 2016 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments}, 2017 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented}, 2018 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles}, 2019 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled}, 2020 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks}, 2021 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv}, 2022 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks}, 2023 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks}, 2024 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted}, 2025 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates}, 2026 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs}, 2027 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz}, 2028 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ? 2029 (stats->accu_queue_sz / stats->queue_sz_counts) : 0} 2030 }; 2031 2032 nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS); 2033 if (!nest) 2034 return -EMSGSIZE; 2035 2036 for (i = 0; i < ARRAY_SIZE(map); i++) 2037 if (nla_put_u32(skb, map[i].key, map[i].val)) 2038 goto msg_full; 2039 2040 nla_nest_end(skb, nest); 2041 2042 return 0; 2043 msg_full: 2044 nla_nest_cancel(skb, nest); 2045 2046 return -EMSGSIZE; 2047 } 2048 2049 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg) 2050 { 2051 int err; 2052 void *hdr; 2053 struct nlattr *attrs; 2054 struct nlattr *prop; 2055 struct tipc_net *tn = net_generic(net, tipc_net_id); 2056 struct tipc_link *bcl = tn->bcl; 2057 2058 if (!bcl) 2059 return 0; 2060 2061 tipc_bcast_lock(net); 2062 2063 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 2064 NLM_F_MULTI, TIPC_NL_LINK_GET); 2065 if (!hdr) { 2066 tipc_bcast_unlock(net); 2067 return -EMSGSIZE; 2068 } 2069 2070 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); 2071 if (!attrs) 2072 goto msg_full; 2073 2074 /* The broadcast link is always up */ 2075 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) 2076 goto attr_msg_full; 2077 2078 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST)) 2079 goto attr_msg_full; 2080 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name)) 2081 goto attr_msg_full; 2082 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0)) 2083 goto attr_msg_full; 2084 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0)) 2085 goto attr_msg_full; 2086 2087 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); 2088 if (!prop) 2089 goto attr_msg_full; 2090 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window)) 2091 goto prop_msg_full; 2092 nla_nest_end(msg->skb, prop); 2093 2094 err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats); 2095 if (err) 2096 goto attr_msg_full; 2097 2098 tipc_bcast_unlock(net); 2099 nla_nest_end(msg->skb, attrs); 2100 genlmsg_end(msg->skb, hdr); 2101 2102 return 0; 2103 2104 prop_msg_full: 2105 nla_nest_cancel(msg->skb, prop); 2106 attr_msg_full: 2107 nla_nest_cancel(msg->skb, attrs); 2108 msg_full: 2109 tipc_bcast_unlock(net); 2110 genlmsg_cancel(msg->skb, hdr); 2111 2112 return -EMSGSIZE; 2113 } 2114 2115 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol, 2116 struct sk_buff_head *xmitq) 2117 { 2118 l->tolerance = tol; 2119 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq); 2120 } 2121 2122 void tipc_link_set_prio(struct tipc_link *l, u32 prio, 2123 struct sk_buff_head *xmitq) 2124 { 2125 l->priority = prio; 2126 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq); 2127 } 2128 2129 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit) 2130 { 2131 l->abort_limit = limit; 2132 } 2133