/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_info;		/* used in counting # sent packets */
	u32 recv_info;		/* used in counting # recv'd packets */
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snd_nxt: next sequence number to use for outbound messages
 * @last_retransm: sequence number of most recently retransmitted message
 * @stale_count: # of identical retransmit requests made by peer
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferdq: deferred queue of out-of-sequence broadcast messages received from node
 * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u32 peer_session;
	u32 session;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
	} backlog[5];
	u16 snd_nxt;
	u16 last_retransm;
	u16 window;
	u32 stale_count;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	int nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Wildcard value for link session numbers. When it is known that
 * peer endpoint is down, any session number must be accepted.
 */
#define ANY_SESSION 0x10000

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED     = 0xe,
	LINK_ESTABLISHING    = 0xe  << 4,
	LINK_RESET           = 0x1  << 8,
	LINK_RESETTING       = 0x2  << 12,
	LINK_PEER_RESET      = 0xd  << 16,
	LINK_FAILINGOVER     = 0xf  << 20,
	LINK_SYNCHING        = 0xc  << 24
};

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

int tipc_link_is_active(struct tipc_link *l)
{
	return l->active;
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
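	/* The departing peer's broadcast receive link is no longer needed */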
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,C...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 ownnode, u32 peer,
		      u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Note: peer i/f name is completed by reset/activate message */
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->peer_session = ANY_SESSION;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp, rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		state |= !skb_queue_empty(&l->deferdq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);

	return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted to be sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = tipc_own_addr(link->net);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
	}
}

void tipc_link_reset(struct tipc_link *l)
{
	l->peer_session = ANY_SESSION;
	l->session++;
	l->mtu = l->advertised_mtu;
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->stats.recv_info = 0;
	l->stale_count = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu)) {
		skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "Resetting link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
		      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	/* Move forward to where retransmission should start */
	skb_queue_walk(&l->transmq, skb) {
		if (!less(buf_seqno(skb), from))
			break;
	}

	skb_queue_walk_from(&l->transmq, skb) {
		if (more(buf_seqno(skb), to))
			break;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	switch (msg_user(buf_msg(skb))) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
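			/* Reassembly state was dropped: fatal for a unicast link */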
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}
drop:
	kfree_skb(skb);
	return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;
		return TIPC_LINK_SND_BC_ACK;
	}

	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	return 0;
}

/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 */
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;

	if (link_is_bc_rcvlink(l))
		return;

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff_head *dfq = &l->deferdq;
	bool node_up = link_is_up(l->bc_rcvlink);
	struct tipc_mon_state *mstate = &l->mon_state;
	int dlen = 0;
	void *data;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if (!skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      tipc_max_domain_size, l->addr,
			      tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	data = msg_data(hdr);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		msg_set_seq_gap(hdr, rcvgap);
		msg_set_probe(hdr, probe);
		tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
		msg_set_size(hdr, INT_H_SIZE + dlen);
		skb_trim(skb, INT_H_SIZE + dlen);
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(data, l->if_name);
		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}

/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time.
 * The node with lowest numerical id determines network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	u16 dlen = msg_data_sz(hdr);
	int mtyp = msg_type(hdr);
	void *data;
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	skb_linearize(skb);
	hdr = buf_msg(skb);
	data = msg_data(hdr);

	switch (mtyp) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != ANY_SESSION))
			break;
		/* fall thru' */

	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, data, TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (msg_peer_stopping(hdr))
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		else if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
					   TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}
		tipc_mon_rcv(l->net, data, dlen, l->addr,
			     &l->mon_state, l->bearer_id);

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}
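		/* Peer's ack may have freed transmit queue space: push the
		 * backlog and wake any senders blocked on congestion
		 */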

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}

/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}

/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			   struct sk_buff_head *xmitq)
{
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (!link_is_up(l))
		return;

	if (!msg_peer_node_is_up(hdr))
		return;

	l->bc_peer_is_up = true;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return;

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
}

void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)
		return;

	if (!more(acked, l->acked))
		return;

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))
			break;
	}

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);
			kfree_skb(skb);
		}
	}
	l->acked = acked;
	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
}

/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		tipc_link_bc_ack_rcv(l, acked, xmitq);
		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}

/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
	if (!link_is_bc_sndlink(l)) {
		l->stats.sent_info = l->snd_nxt;
		l->stats.recv_info = l->rcv_nxt;
	}
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}