1 /* 2 * net/tipc/link.c: TIPC link code 3 * 4 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB 5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include "core.h" 38 #include "subscr.h" 39 #include "link.h" 40 #include "bcast.h" 41 #include "socket.h" 42 #include "name_distr.h" 43 #include "discover.h" 44 #include "netlink.h" 45 #include "monitor.h" 46 #include "trace.h" 47 48 #include <linux/pkt_sched.h> 49 50 struct tipc_stats { 51 u32 sent_pkts; 52 u32 recv_pkts; 53 u32 sent_states; 54 u32 recv_states; 55 u32 sent_probes; 56 u32 recv_probes; 57 u32 sent_nacks; 58 u32 recv_nacks; 59 u32 sent_acks; 60 u32 sent_bundled; 61 u32 sent_bundles; 62 u32 recv_bundled; 63 u32 recv_bundles; 64 u32 retransmitted; 65 u32 sent_fragmented; 66 u32 sent_fragments; 67 u32 recv_fragmented; 68 u32 recv_fragments; 69 u32 link_congs; /* # port sends blocked by congestion */ 70 u32 deferred_recv; 71 u32 duplicates; 72 u32 max_queue_sz; /* send queue size high water mark */ 73 u32 accu_queue_sz; /* used for send queue size profiling */ 74 u32 queue_sz_counts; /* used for send queue size profiling */ 75 u32 msg_length_counts; /* used for message length profiling */ 76 u32 msg_lengths_total; /* used for message length profiling */ 77 u32 msg_length_profile[7]; /* used for msg. length profiling */ 78 }; 79 80 /** 81 * struct tipc_link - TIPC link data structure 82 * @addr: network address of link's peer node 83 * @name: link name character string 84 * @media_addr: media address to use when sending messages over link 85 * @timer: link timer 86 * @net: pointer to namespace struct 87 * @refcnt: reference counter for permanent references (owner node & timer) 88 * @peer_session: link session # being used by peer end of link 89 * @peer_bearer_id: bearer id used by link's peer endpoint 90 * @bearer_id: local bearer id used by link 91 * @tolerance: minimum link continuity loss needed to reset link [in ms] 92 * @abort_limit: # of unacknowledged continuity probes needed to reset link 93 * @state: current state of link FSM 94 * @peer_caps: bitmap describing capabilities of peer node 95 * @silent_intv_cnt: # of timer intervals without any reception from peer 96 * @proto_msg: template for control messages generated by link 97 * @pmsg: convenience pointer to "proto_msg" field 98 * @priority: current link priority 99 * @net_plane: current link network plane ('A' through 'H') 100 * @mon_state: cookie with information needed by link monitor 101 * @backlog_limit: backlog queue congestion thresholds (indexed by importance) 102 * @exp_msg_count: # of tunnelled messages expected during link changeover 103 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset 104 * @mtu: current maximum packet size for this link 105 * @advertised_mtu: advertised own mtu when link is being established 106 * @transmitq: queue for sent, non-acked messages 107 * @backlogq: queue for messages waiting to be sent 108 * @snt_nxt: next sequence number to use for outbound messages 109 * @ackers: # of peers that needs to ack each packet before it can be released 110 * @acked: # last packet acked by a certain peer. Used for broadcast. 111 * @rcv_nxt: next sequence number to expect for inbound messages 112 * @deferred_queue: deferred queue saved OOS b'cast message received from node 113 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer 114 * @inputq: buffer queue for messages to be delivered upwards 115 * @namedq: buffer queue for name table messages to be delivered upwards 116 * @next_out: ptr to first unsent outbound message in queue 117 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate 118 * @long_msg_seq_no: next identifier to use for outbound fragmented messages 119 * @reasm_buf: head of partially reassembled inbound message fragments 120 * @bc_rcvr: marks that this is a broadcast receiver link 121 * @stats: collects statistics regarding link activity 122 */ 123 struct tipc_link { 124 u32 addr; 125 char name[TIPC_MAX_LINK_NAME]; 126 struct net *net; 127 128 /* Management and link supervision data */ 129 u16 peer_session; 130 u16 session; 131 u16 snd_nxt_state; 132 u16 rcv_nxt_state; 133 u32 peer_bearer_id; 134 u32 bearer_id; 135 u32 tolerance; 136 u32 abort_limit; 137 u32 state; 138 u16 peer_caps; 139 bool in_session; 140 bool active; 141 u32 silent_intv_cnt; 142 char if_name[TIPC_MAX_IF_NAME]; 143 u32 priority; 144 char net_plane; 145 struct tipc_mon_state mon_state; 146 u16 rst_cnt; 147 148 /* Failover/synch */ 149 u16 drop_point; 150 struct sk_buff *failover_reasm_skb; 151 struct sk_buff_head failover_deferdq; 152 153 /* Max packet negotiation */ 154 u16 mtu; 155 u16 advertised_mtu; 156 157 /* Sending */ 158 struct sk_buff_head transmq; 159 struct sk_buff_head backlogq; 160 struct { 161 u16 len; 162 u16 limit; 163 } backlog[5]; 164 u16 snd_nxt; 165 u16 window; 166 167 /* Reception */ 168 u16 rcv_nxt; 169 u32 rcv_unacked; 170 struct sk_buff_head deferdq; 171 struct sk_buff_head *inputq; 172 struct sk_buff_head *namedq; 173 174 /* Congestion handling */ 175 struct sk_buff_head wakeupq; 176 177 /* Fragmentation/reassembly */ 178 struct sk_buff *reasm_buf; 179 180 /* Broadcast */ 181 u16 ackers; 182 u16 acked; 183 struct tipc_link *bc_rcvlink; 184 struct tipc_link *bc_sndlink; 185 u8 nack_state; 186 bool bc_peer_is_up; 187 188 /* Statistics */ 189 struct tipc_stats stats; 190 }; 191 192 /* 193 * Error message prefixes 194 */ 195 static const char *link_co_err = "Link tunneling error, "; 196 static const char *link_rst_msg = "Resetting link "; 197 198 /* Send states for broadcast NACKs 199 */ 200 enum { 201 BC_NACK_SND_CONDITIONAL, 202 BC_NACK_SND_UNCONDITIONAL, 203 BC_NACK_SND_SUPPRESS, 204 }; 205 206 #define TIPC_BC_RETR_LIM (jiffies + msecs_to_jiffies(10)) 207 #define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1)) 208 209 /* 210 * Interval between NACKs when packets arrive out of order 211 */ 212 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2) 213 214 /* Link FSM states: 215 */ 216 enum { 217 LINK_ESTABLISHED = 0xe, 218 LINK_ESTABLISHING = 0xe << 4, 219 LINK_RESET = 0x1 << 8, 220 LINK_RESETTING = 0x2 << 12, 221 LINK_PEER_RESET = 0xd << 16, 222 LINK_FAILINGOVER = 0xf << 20, 223 LINK_SYNCHING = 0xc << 24 224 }; 225 226 /* Link FSM state checking routines 227 */ 228 static int link_is_up(struct tipc_link *l) 229 { 230 return l->state & (LINK_ESTABLISHED | LINK_SYNCHING); 231 } 232 233 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, 234 struct sk_buff_head *xmitq); 235 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, 236 bool probe_reply, u16 rcvgap, 237 int tolerance, int priority, 238 struct sk_buff_head *xmitq); 239 static void link_print(struct tipc_link *l, const char *str); 240 static int tipc_link_build_nack_msg(struct tipc_link *l, 241 struct sk_buff_head *xmitq); 242 static void tipc_link_build_bc_init_msg(struct tipc_link *l, 243 struct sk_buff_head *xmitq); 244 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to); 245 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data); 246 static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap, 247 struct tipc_gap_ack_blks *ga, 248 struct sk_buff_head *xmitq); 249 250 /* 251 * Simple non-static link routines (i.e. referenced outside this file) 252 */ 253 bool tipc_link_is_up(struct tipc_link *l) 254 { 255 return link_is_up(l); 256 } 257 258 bool tipc_link_peer_is_down(struct tipc_link *l) 259 { 260 return l->state == LINK_PEER_RESET; 261 } 262 263 bool tipc_link_is_reset(struct tipc_link *l) 264 { 265 return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING); 266 } 267 268 bool tipc_link_is_establishing(struct tipc_link *l) 269 { 270 return l->state == LINK_ESTABLISHING; 271 } 272 273 bool tipc_link_is_synching(struct tipc_link *l) 274 { 275 return l->state == LINK_SYNCHING; 276 } 277 278 bool tipc_link_is_failingover(struct tipc_link *l) 279 { 280 return l->state == LINK_FAILINGOVER; 281 } 282 283 bool tipc_link_is_blocked(struct tipc_link *l) 284 { 285 return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER); 286 } 287 288 static bool link_is_bc_sndlink(struct tipc_link *l) 289 { 290 return !l->bc_sndlink; 291 } 292 293 static bool link_is_bc_rcvlink(struct tipc_link *l) 294 { 295 return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l)); 296 } 297 298 void tipc_link_set_active(struct tipc_link *l, bool active) 299 { 300 l->active = active; 301 } 302 303 u32 tipc_link_id(struct tipc_link *l) 304 { 305 return l->peer_bearer_id << 16 | l->bearer_id; 306 } 307 308 int tipc_link_window(struct tipc_link *l) 309 { 310 return l->window; 311 } 312 313 int tipc_link_prio(struct tipc_link *l) 314 { 315 return l->priority; 316 } 317 318 unsigned long tipc_link_tolerance(struct tipc_link *l) 319 { 320 return l->tolerance; 321 } 322 323 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l) 324 { 325 return l->inputq; 326 } 327 328 char tipc_link_plane(struct tipc_link *l) 329 { 330 return l->net_plane; 331 } 332 333 void tipc_link_update_caps(struct tipc_link *l, u16 capabilities) 334 { 335 l->peer_caps = capabilities; 336 } 337 338 void tipc_link_add_bc_peer(struct tipc_link *snd_l, 339 struct tipc_link *uc_l, 340 struct sk_buff_head *xmitq) 341 { 342 struct tipc_link *rcv_l = uc_l->bc_rcvlink; 343 344 snd_l->ackers++; 345 rcv_l->acked = snd_l->snd_nxt - 1; 346 snd_l->state = LINK_ESTABLISHED; 347 tipc_link_build_bc_init_msg(uc_l, xmitq); 348 } 349 350 void tipc_link_remove_bc_peer(struct tipc_link *snd_l, 351 struct tipc_link *rcv_l, 352 struct sk_buff_head *xmitq) 353 { 354 u16 ack = snd_l->snd_nxt - 1; 355 356 snd_l->ackers--; 357 rcv_l->bc_peer_is_up = true; 358 rcv_l->state = LINK_ESTABLISHED; 359 tipc_link_bc_ack_rcv(rcv_l, ack, xmitq); 360 trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!"); 361 tipc_link_reset(rcv_l); 362 rcv_l->state = LINK_RESET; 363 if (!snd_l->ackers) { 364 trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!"); 365 tipc_link_reset(snd_l); 366 snd_l->state = LINK_RESET; 367 __skb_queue_purge(xmitq); 368 } 369 } 370 371 int tipc_link_bc_peers(struct tipc_link *l) 372 { 373 return l->ackers; 374 } 375 376 static u16 link_bc_rcv_gap(struct tipc_link *l) 377 { 378 struct sk_buff *skb = skb_peek(&l->deferdq); 379 u16 gap = 0; 380 381 if (more(l->snd_nxt, l->rcv_nxt)) 382 gap = l->snd_nxt - l->rcv_nxt; 383 if (skb) 384 gap = buf_seqno(skb) - l->rcv_nxt; 385 return gap; 386 } 387 388 void tipc_link_set_mtu(struct tipc_link *l, int mtu) 389 { 390 l->mtu = mtu; 391 } 392 393 int tipc_link_mtu(struct tipc_link *l) 394 { 395 return l->mtu; 396 } 397 398 u16 tipc_link_rcv_nxt(struct tipc_link *l) 399 { 400 return l->rcv_nxt; 401 } 402 403 u16 tipc_link_acked(struct tipc_link *l) 404 { 405 return l->acked; 406 } 407 408 char *tipc_link_name(struct tipc_link *l) 409 { 410 return l->name; 411 } 412 413 u32 tipc_link_state(struct tipc_link *l) 414 { 415 return l->state; 416 } 417 418 /** 419 * tipc_link_create - create a new link 420 * @n: pointer to associated node 421 * @if_name: associated interface name 422 * @bearer_id: id (index) of associated bearer 423 * @tolerance: link tolerance to be used by link 424 * @net_plane: network plane (A,B,c..) this link belongs to 425 * @mtu: mtu to be advertised by link 426 * @priority: priority to be used by link 427 * @window: send window to be used by link 428 * @session: session to be used by link 429 * @ownnode: identity of own node 430 * @peer: node id of peer node 431 * @peer_caps: bitmap describing peer node capabilities 432 * @bc_sndlink: the namespace global link used for broadcast sending 433 * @bc_rcvlink: the peer specific link used for broadcast reception 434 * @inputq: queue to put messages ready for delivery 435 * @namedq: queue to put binding table update messages ready for delivery 436 * @link: return value, pointer to put the created link 437 * 438 * Returns true if link was created, otherwise false 439 */ 440 bool tipc_link_create(struct net *net, char *if_name, int bearer_id, 441 int tolerance, char net_plane, u32 mtu, int priority, 442 int window, u32 session, u32 self, 443 u32 peer, u8 *peer_id, u16 peer_caps, 444 struct tipc_link *bc_sndlink, 445 struct tipc_link *bc_rcvlink, 446 struct sk_buff_head *inputq, 447 struct sk_buff_head *namedq, 448 struct tipc_link **link) 449 { 450 char peer_str[NODE_ID_STR_LEN] = {0,}; 451 char self_str[NODE_ID_STR_LEN] = {0,}; 452 struct tipc_link *l; 453 454 l = kzalloc(sizeof(*l), GFP_ATOMIC); 455 if (!l) 456 return false; 457 *link = l; 458 l->session = session; 459 460 /* Set link name for unicast links only */ 461 if (peer_id) { 462 tipc_nodeid2string(self_str, tipc_own_id(net)); 463 if (strlen(self_str) > 16) 464 sprintf(self_str, "%x", self); 465 tipc_nodeid2string(peer_str, peer_id); 466 if (strlen(peer_str) > 16) 467 sprintf(peer_str, "%x", peer); 468 } 469 /* Peer i/f name will be completed by reset/activate message */ 470 snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown", 471 self_str, if_name, peer_str); 472 473 strcpy(l->if_name, if_name); 474 l->addr = peer; 475 l->peer_caps = peer_caps; 476 l->net = net; 477 l->in_session = false; 478 l->bearer_id = bearer_id; 479 l->tolerance = tolerance; 480 if (bc_rcvlink) 481 bc_rcvlink->tolerance = tolerance; 482 l->net_plane = net_plane; 483 l->advertised_mtu = mtu; 484 l->mtu = mtu; 485 l->priority = priority; 486 tipc_link_set_queue_limits(l, window); 487 l->ackers = 1; 488 l->bc_sndlink = bc_sndlink; 489 l->bc_rcvlink = bc_rcvlink; 490 l->inputq = inputq; 491 l->namedq = namedq; 492 l->state = LINK_RESETTING; 493 __skb_queue_head_init(&l->transmq); 494 __skb_queue_head_init(&l->backlogq); 495 __skb_queue_head_init(&l->deferdq); 496 __skb_queue_head_init(&l->failover_deferdq); 497 skb_queue_head_init(&l->wakeupq); 498 skb_queue_head_init(l->inputq); 499 return true; 500 } 501 502 /** 503 * tipc_link_bc_create - create new link to be used for broadcast 504 * @n: pointer to associated node 505 * @mtu: mtu to be used initially if no peers 506 * @window: send window to be used 507 * @inputq: queue to put messages ready for delivery 508 * @namedq: queue to put binding table update messages ready for delivery 509 * @link: return value, pointer to put the created link 510 * 511 * Returns true if link was created, otherwise false 512 */ 513 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, 514 int mtu, int window, u16 peer_caps, 515 struct sk_buff_head *inputq, 516 struct sk_buff_head *namedq, 517 struct tipc_link *bc_sndlink, 518 struct tipc_link **link) 519 { 520 struct tipc_link *l; 521 522 if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window, 523 0, ownnode, peer, NULL, peer_caps, bc_sndlink, 524 NULL, inputq, namedq, link)) 525 return false; 526 527 l = *link; 528 strcpy(l->name, tipc_bclink_name); 529 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!"); 530 tipc_link_reset(l); 531 l->state = LINK_RESET; 532 l->ackers = 0; 533 l->bc_rcvlink = l; 534 535 /* Broadcast send link is always up */ 536 if (link_is_bc_sndlink(l)) 537 l->state = LINK_ESTABLISHED; 538 539 /* Disable replicast if even a single peer doesn't support it */ 540 if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST)) 541 tipc_bcast_disable_rcast(net); 542 543 return true; 544 } 545 546 /** 547 * tipc_link_fsm_evt - link finite state machine 548 * @l: pointer to link 549 * @evt: state machine event to be processed 550 */ 551 int tipc_link_fsm_evt(struct tipc_link *l, int evt) 552 { 553 int rc = 0; 554 int old_state = l->state; 555 556 switch (l->state) { 557 case LINK_RESETTING: 558 switch (evt) { 559 case LINK_PEER_RESET_EVT: 560 l->state = LINK_PEER_RESET; 561 break; 562 case LINK_RESET_EVT: 563 l->state = LINK_RESET; 564 break; 565 case LINK_FAILURE_EVT: 566 case LINK_FAILOVER_BEGIN_EVT: 567 case LINK_ESTABLISH_EVT: 568 case LINK_FAILOVER_END_EVT: 569 case LINK_SYNCH_BEGIN_EVT: 570 case LINK_SYNCH_END_EVT: 571 default: 572 goto illegal_evt; 573 } 574 break; 575 case LINK_RESET: 576 switch (evt) { 577 case LINK_PEER_RESET_EVT: 578 l->state = LINK_ESTABLISHING; 579 break; 580 case LINK_FAILOVER_BEGIN_EVT: 581 l->state = LINK_FAILINGOVER; 582 case LINK_FAILURE_EVT: 583 case LINK_RESET_EVT: 584 case LINK_ESTABLISH_EVT: 585 case LINK_FAILOVER_END_EVT: 586 break; 587 case LINK_SYNCH_BEGIN_EVT: 588 case LINK_SYNCH_END_EVT: 589 default: 590 goto illegal_evt; 591 } 592 break; 593 case LINK_PEER_RESET: 594 switch (evt) { 595 case LINK_RESET_EVT: 596 l->state = LINK_ESTABLISHING; 597 break; 598 case LINK_PEER_RESET_EVT: 599 case LINK_ESTABLISH_EVT: 600 case LINK_FAILURE_EVT: 601 break; 602 case LINK_SYNCH_BEGIN_EVT: 603 case LINK_SYNCH_END_EVT: 604 case LINK_FAILOVER_BEGIN_EVT: 605 case LINK_FAILOVER_END_EVT: 606 default: 607 goto illegal_evt; 608 } 609 break; 610 case LINK_FAILINGOVER: 611 switch (evt) { 612 case LINK_FAILOVER_END_EVT: 613 l->state = LINK_RESET; 614 break; 615 case LINK_PEER_RESET_EVT: 616 case LINK_RESET_EVT: 617 case LINK_ESTABLISH_EVT: 618 case LINK_FAILURE_EVT: 619 break; 620 case LINK_FAILOVER_BEGIN_EVT: 621 case LINK_SYNCH_BEGIN_EVT: 622 case LINK_SYNCH_END_EVT: 623 default: 624 goto illegal_evt; 625 } 626 break; 627 case LINK_ESTABLISHING: 628 switch (evt) { 629 case LINK_ESTABLISH_EVT: 630 l->state = LINK_ESTABLISHED; 631 break; 632 case LINK_FAILOVER_BEGIN_EVT: 633 l->state = LINK_FAILINGOVER; 634 break; 635 case LINK_RESET_EVT: 636 l->state = LINK_RESET; 637 break; 638 case LINK_FAILURE_EVT: 639 case LINK_PEER_RESET_EVT: 640 case LINK_SYNCH_BEGIN_EVT: 641 case LINK_FAILOVER_END_EVT: 642 break; 643 case LINK_SYNCH_END_EVT: 644 default: 645 goto illegal_evt; 646 } 647 break; 648 case LINK_ESTABLISHED: 649 switch (evt) { 650 case LINK_PEER_RESET_EVT: 651 l->state = LINK_PEER_RESET; 652 rc |= TIPC_LINK_DOWN_EVT; 653 break; 654 case LINK_FAILURE_EVT: 655 l->state = LINK_RESETTING; 656 rc |= TIPC_LINK_DOWN_EVT; 657 break; 658 case LINK_RESET_EVT: 659 l->state = LINK_RESET; 660 break; 661 case LINK_ESTABLISH_EVT: 662 case LINK_SYNCH_END_EVT: 663 break; 664 case LINK_SYNCH_BEGIN_EVT: 665 l->state = LINK_SYNCHING; 666 break; 667 case LINK_FAILOVER_BEGIN_EVT: 668 case LINK_FAILOVER_END_EVT: 669 default: 670 goto illegal_evt; 671 } 672 break; 673 case LINK_SYNCHING: 674 switch (evt) { 675 case LINK_PEER_RESET_EVT: 676 l->state = LINK_PEER_RESET; 677 rc |= TIPC_LINK_DOWN_EVT; 678 break; 679 case LINK_FAILURE_EVT: 680 l->state = LINK_RESETTING; 681 rc |= TIPC_LINK_DOWN_EVT; 682 break; 683 case LINK_RESET_EVT: 684 l->state = LINK_RESET; 685 break; 686 case LINK_ESTABLISH_EVT: 687 case LINK_SYNCH_BEGIN_EVT: 688 break; 689 case LINK_SYNCH_END_EVT: 690 l->state = LINK_ESTABLISHED; 691 break; 692 case LINK_FAILOVER_BEGIN_EVT: 693 case LINK_FAILOVER_END_EVT: 694 default: 695 goto illegal_evt; 696 } 697 break; 698 default: 699 pr_err("Unknown FSM state %x in %s\n", l->state, l->name); 700 } 701 trace_tipc_link_fsm(l->name, old_state, l->state, evt); 702 return rc; 703 illegal_evt: 704 pr_err("Illegal FSM event %x in state %x on link %s\n", 705 evt, l->state, l->name); 706 trace_tipc_link_fsm(l->name, old_state, l->state, evt); 707 return rc; 708 } 709 710 /* link_profile_stats - update statistical profiling of traffic 711 */ 712 static void link_profile_stats(struct tipc_link *l) 713 { 714 struct sk_buff *skb; 715 struct tipc_msg *msg; 716 int length; 717 718 /* Update counters used in statistical profiling of send traffic */ 719 l->stats.accu_queue_sz += skb_queue_len(&l->transmq); 720 l->stats.queue_sz_counts++; 721 722 skb = skb_peek(&l->transmq); 723 if (!skb) 724 return; 725 msg = buf_msg(skb); 726 length = msg_size(msg); 727 728 if (msg_user(msg) == MSG_FRAGMENTER) { 729 if (msg_type(msg) != FIRST_FRAGMENT) 730 return; 731 length = msg_size(msg_inner_hdr(msg)); 732 } 733 l->stats.msg_lengths_total += length; 734 l->stats.msg_length_counts++; 735 if (length <= 64) 736 l->stats.msg_length_profile[0]++; 737 else if (length <= 256) 738 l->stats.msg_length_profile[1]++; 739 else if (length <= 1024) 740 l->stats.msg_length_profile[2]++; 741 else if (length <= 4096) 742 l->stats.msg_length_profile[3]++; 743 else if (length <= 16384) 744 l->stats.msg_length_profile[4]++; 745 else if (length <= 32768) 746 l->stats.msg_length_profile[5]++; 747 else 748 l->stats.msg_length_profile[6]++; 749 } 750 751 /** 752 * tipc_link_too_silent - check if link is "too silent" 753 * @l: tipc link to be checked 754 * 755 * Returns true if the link 'silent_intv_cnt' is about to reach the 756 * 'abort_limit' value, otherwise false 757 */ 758 bool tipc_link_too_silent(struct tipc_link *l) 759 { 760 return (l->silent_intv_cnt + 2 > l->abort_limit); 761 } 762 763 /* tipc_link_timeout - perform periodic task as instructed from node timeout 764 */ 765 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) 766 { 767 int mtyp = 0; 768 int rc = 0; 769 bool state = false; 770 bool probe = false; 771 bool setup = false; 772 u16 bc_snt = l->bc_sndlink->snd_nxt - 1; 773 u16 bc_acked = l->bc_rcvlink->acked; 774 struct tipc_mon_state *mstate = &l->mon_state; 775 776 trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " "); 777 trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " "); 778 switch (l->state) { 779 case LINK_ESTABLISHED: 780 case LINK_SYNCHING: 781 mtyp = STATE_MSG; 782 link_profile_stats(l); 783 tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id); 784 if (mstate->reset || (l->silent_intv_cnt > l->abort_limit)) 785 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 786 state = bc_acked != bc_snt; 787 state |= l->bc_rcvlink->rcv_unacked; 788 state |= l->rcv_unacked; 789 state |= !skb_queue_empty(&l->transmq); 790 state |= !skb_queue_empty(&l->deferdq); 791 probe = mstate->probing; 792 probe |= l->silent_intv_cnt; 793 if (probe || mstate->monitoring) 794 l->silent_intv_cnt++; 795 break; 796 case LINK_RESET: 797 setup = l->rst_cnt++ <= 4; 798 setup |= !(l->rst_cnt % 16); 799 mtyp = RESET_MSG; 800 break; 801 case LINK_ESTABLISHING: 802 setup = true; 803 mtyp = ACTIVATE_MSG; 804 break; 805 case LINK_PEER_RESET: 806 case LINK_RESETTING: 807 case LINK_FAILINGOVER: 808 break; 809 default: 810 break; 811 } 812 813 if (state || probe || setup) 814 tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq); 815 816 return rc; 817 } 818 819 /** 820 * link_schedule_user - schedule a message sender for wakeup after congestion 821 * @l: congested link 822 * @hdr: header of message that is being sent 823 * Create pseudo msg to send back to user when congestion abates 824 */ 825 static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr) 826 { 827 u32 dnode = tipc_own_addr(l->net); 828 u32 dport = msg_origport(hdr); 829 struct sk_buff *skb; 830 831 /* Create and schedule wakeup pseudo message */ 832 skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, 833 dnode, l->addr, dport, 0, 0); 834 if (!skb) 835 return -ENOBUFS; 836 msg_set_dest_droppable(buf_msg(skb), true); 837 TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr); 838 skb_queue_tail(&l->wakeupq, skb); 839 l->stats.link_congs++; 840 trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!"); 841 return -ELINKCONG; 842 } 843 844 /** 845 * link_prepare_wakeup - prepare users for wakeup after congestion 846 * @l: congested link 847 * Wake up a number of waiting users, as permitted by available space 848 * in the send queue 849 */ 850 static void link_prepare_wakeup(struct tipc_link *l) 851 { 852 struct sk_buff *skb, *tmp; 853 int imp, i = 0; 854 855 skb_queue_walk_safe(&l->wakeupq, skb, tmp) { 856 imp = TIPC_SKB_CB(skb)->chain_imp; 857 if (l->backlog[imp].len < l->backlog[imp].limit) { 858 skb_unlink(skb, &l->wakeupq); 859 skb_queue_tail(l->inputq, skb); 860 } else if (i++ > 10) { 861 break; 862 } 863 } 864 } 865 866 void tipc_link_reset(struct tipc_link *l) 867 { 868 struct sk_buff_head list; 869 870 __skb_queue_head_init(&list); 871 872 l->in_session = false; 873 /* Force re-synch of peer session number before establishing */ 874 l->peer_session--; 875 l->session++; 876 l->mtu = l->advertised_mtu; 877 878 spin_lock_bh(&l->wakeupq.lock); 879 skb_queue_splice_init(&l->wakeupq, &list); 880 spin_unlock_bh(&l->wakeupq.lock); 881 882 spin_lock_bh(&l->inputq->lock); 883 skb_queue_splice_init(&list, l->inputq); 884 spin_unlock_bh(&l->inputq->lock); 885 886 __skb_queue_purge(&l->transmq); 887 __skb_queue_purge(&l->deferdq); 888 __skb_queue_purge(&l->backlogq); 889 __skb_queue_purge(&l->failover_deferdq); 890 l->backlog[TIPC_LOW_IMPORTANCE].len = 0; 891 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0; 892 l->backlog[TIPC_HIGH_IMPORTANCE].len = 0; 893 l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0; 894 l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0; 895 kfree_skb(l->reasm_buf); 896 kfree_skb(l->failover_reasm_skb); 897 l->reasm_buf = NULL; 898 l->failover_reasm_skb = NULL; 899 l->rcv_unacked = 0; 900 l->snd_nxt = 1; 901 l->rcv_nxt = 1; 902 l->snd_nxt_state = 1; 903 l->rcv_nxt_state = 1; 904 l->acked = 0; 905 l->silent_intv_cnt = 0; 906 l->rst_cnt = 0; 907 l->bc_peer_is_up = false; 908 memset(&l->mon_state, 0, sizeof(l->mon_state)); 909 tipc_link_reset_stats(l); 910 } 911 912 /** 913 * tipc_link_xmit(): enqueue buffer list according to queue situation 914 * @link: link to use 915 * @list: chain of buffers containing message 916 * @xmitq: returned list of packets to be sent by caller 917 * 918 * Consumes the buffer chain. 919 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS 920 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted 921 */ 922 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, 923 struct sk_buff_head *xmitq) 924 { 925 struct tipc_msg *hdr = buf_msg(skb_peek(list)); 926 unsigned int maxwin = l->window; 927 int imp = msg_importance(hdr); 928 unsigned int mtu = l->mtu; 929 u16 ack = l->rcv_nxt - 1; 930 u16 seqno = l->snd_nxt; 931 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; 932 struct sk_buff_head *transmq = &l->transmq; 933 struct sk_buff_head *backlogq = &l->backlogq; 934 struct sk_buff *skb, *_skb, *bskb; 935 int pkt_cnt = skb_queue_len(list); 936 int rc = 0; 937 938 if (unlikely(msg_size(hdr) > mtu)) { 939 skb_queue_purge(list); 940 return -EMSGSIZE; 941 } 942 943 /* Allow oversubscription of one data msg per source at congestion */ 944 if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) { 945 if (imp == TIPC_SYSTEM_IMPORTANCE) { 946 pr_warn("%s<%s>, link overflow", link_rst_msg, l->name); 947 return -ENOBUFS; 948 } 949 rc = link_schedule_user(l, hdr); 950 } 951 952 if (pkt_cnt > 1) { 953 l->stats.sent_fragmented++; 954 l->stats.sent_fragments += pkt_cnt; 955 } 956 957 /* Prepare each packet for sending, and add to relevant queue: */ 958 while (skb_queue_len(list)) { 959 skb = skb_peek(list); 960 hdr = buf_msg(skb); 961 msg_set_seqno(hdr, seqno); 962 msg_set_ack(hdr, ack); 963 msg_set_bcast_ack(hdr, bc_ack); 964 965 if (likely(skb_queue_len(transmq) < maxwin)) { 966 _skb = skb_clone(skb, GFP_ATOMIC); 967 if (!_skb) { 968 skb_queue_purge(list); 969 return -ENOBUFS; 970 } 971 __skb_dequeue(list); 972 __skb_queue_tail(transmq, skb); 973 /* next retransmit attempt */ 974 if (link_is_bc_sndlink(l)) 975 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; 976 __skb_queue_tail(xmitq, _skb); 977 TIPC_SKB_CB(skb)->ackers = l->ackers; 978 l->rcv_unacked = 0; 979 l->stats.sent_pkts++; 980 seqno++; 981 continue; 982 } 983 if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) { 984 kfree_skb(__skb_dequeue(list)); 985 l->stats.sent_bundled++; 986 continue; 987 } 988 if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) { 989 kfree_skb(__skb_dequeue(list)); 990 __skb_queue_tail(backlogq, bskb); 991 l->backlog[msg_importance(buf_msg(bskb))].len++; 992 l->stats.sent_bundled++; 993 l->stats.sent_bundles++; 994 continue; 995 } 996 l->backlog[imp].len += skb_queue_len(list); 997 skb_queue_splice_tail_init(list, backlogq); 998 } 999 l->snd_nxt = seqno; 1000 return rc; 1001 } 1002 1003 static void tipc_link_advance_backlog(struct tipc_link *l, 1004 struct sk_buff_head *xmitq) 1005 { 1006 struct sk_buff *skb, *_skb; 1007 struct tipc_msg *hdr; 1008 u16 seqno = l->snd_nxt; 1009 u16 ack = l->rcv_nxt - 1; 1010 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; 1011 1012 while (skb_queue_len(&l->transmq) < l->window) { 1013 skb = skb_peek(&l->backlogq); 1014 if (!skb) 1015 break; 1016 _skb = skb_clone(skb, GFP_ATOMIC); 1017 if (!_skb) 1018 break; 1019 __skb_dequeue(&l->backlogq); 1020 hdr = buf_msg(skb); 1021 l->backlog[msg_importance(hdr)].len--; 1022 __skb_queue_tail(&l->transmq, skb); 1023 /* next retransmit attempt */ 1024 if (link_is_bc_sndlink(l)) 1025 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; 1026 1027 __skb_queue_tail(xmitq, _skb); 1028 TIPC_SKB_CB(skb)->ackers = l->ackers; 1029 msg_set_seqno(hdr, seqno); 1030 msg_set_ack(hdr, ack); 1031 msg_set_bcast_ack(hdr, bc_ack); 1032 l->rcv_unacked = 0; 1033 l->stats.sent_pkts++; 1034 seqno++; 1035 } 1036 l->snd_nxt = seqno; 1037 } 1038 1039 /** 1040 * link_retransmit_failure() - Detect repeated retransmit failures 1041 * @l: tipc link sender 1042 * @r: tipc link receiver (= l in case of unicast) 1043 * @rc: returned code 1044 * 1045 * Return: true if the repeated retransmit failures happens, otherwise 1046 * false 1047 */ 1048 static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r, 1049 int *rc) 1050 { 1051 struct sk_buff *skb = skb_peek(&l->transmq); 1052 struct tipc_msg *hdr; 1053 1054 if (!skb) 1055 return false; 1056 1057 if (!TIPC_SKB_CB(skb)->retr_cnt) 1058 return false; 1059 1060 if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp + 1061 msecs_to_jiffies(r->tolerance))) 1062 return false; 1063 1064 hdr = buf_msg(skb); 1065 if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr))) 1066 return false; 1067 1068 pr_warn("Retransmission failure on link <%s>\n", l->name); 1069 link_print(l, "State of link "); 1070 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n", 1071 msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr)); 1072 pr_info("sqno %u, prev: %x, dest: %x\n", 1073 msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr)); 1074 pr_info("retr_stamp %d, retr_cnt %d\n", 1075 jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp), 1076 TIPC_SKB_CB(skb)->retr_cnt); 1077 1078 trace_tipc_list_dump(&l->transmq, true, "retrans failure!"); 1079 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!"); 1080 trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!"); 1081 1082 if (link_is_bc_sndlink(l)) { 1083 r->state = LINK_RESET; 1084 *rc = TIPC_LINK_DOWN_EVT; 1085 } else { 1086 *rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1087 } 1088 1089 return true; 1090 } 1091 1092 /* tipc_link_bc_retrans() - retransmit zero or more packets 1093 * @l: the link to transmit on 1094 * @r: the receiving link ordering the retransmit. Same as l if unicast 1095 * @from: retransmit from (inclusive) this sequence number 1096 * @to: retransmit to (inclusive) this sequence number 1097 * xmitq: queue for accumulating the retransmitted packets 1098 */ 1099 static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r, 1100 u16 from, u16 to, struct sk_buff_head *xmitq) 1101 { 1102 struct sk_buff *_skb, *skb = skb_peek(&l->transmq); 1103 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; 1104 u16 ack = l->rcv_nxt - 1; 1105 struct tipc_msg *hdr; 1106 int rc = 0; 1107 1108 if (!skb) 1109 return 0; 1110 if (less(to, from)) 1111 return 0; 1112 1113 trace_tipc_link_retrans(r, from, to, &l->transmq); 1114 1115 if (link_retransmit_failure(l, r, &rc)) 1116 return rc; 1117 1118 skb_queue_walk(&l->transmq, skb) { 1119 hdr = buf_msg(skb); 1120 if (less(msg_seqno(hdr), from)) 1121 continue; 1122 if (more(msg_seqno(hdr), to)) 1123 break; 1124 1125 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) 1126 continue; 1127 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; 1128 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC); 1129 if (!_skb) 1130 return 0; 1131 hdr = buf_msg(_skb); 1132 msg_set_ack(hdr, ack); 1133 msg_set_bcast_ack(hdr, bc_ack); 1134 _skb->priority = TC_PRIO_CONTROL; 1135 __skb_queue_tail(xmitq, _skb); 1136 l->stats.retransmitted++; 1137 1138 /* Increase actual retrans counter & mark first time */ 1139 if (!TIPC_SKB_CB(skb)->retr_cnt++) 1140 TIPC_SKB_CB(skb)->retr_stamp = jiffies; 1141 } 1142 return 0; 1143 } 1144 1145 /* tipc_data_input - deliver data and name distr msgs to upper layer 1146 * 1147 * Consumes buffer if message is of right type 1148 * Node lock must be held 1149 */ 1150 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, 1151 struct sk_buff_head *inputq) 1152 { 1153 struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq; 1154 struct tipc_msg *hdr = buf_msg(skb); 1155 1156 switch (msg_user(hdr)) { 1157 case TIPC_LOW_IMPORTANCE: 1158 case TIPC_MEDIUM_IMPORTANCE: 1159 case TIPC_HIGH_IMPORTANCE: 1160 case TIPC_CRITICAL_IMPORTANCE: 1161 if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) { 1162 skb_queue_tail(mc_inputq, skb); 1163 return true; 1164 } 1165 /* fall through */ 1166 case CONN_MANAGER: 1167 skb_queue_tail(inputq, skb); 1168 return true; 1169 case GROUP_PROTOCOL: 1170 skb_queue_tail(mc_inputq, skb); 1171 return true; 1172 case NAME_DISTRIBUTOR: 1173 l->bc_rcvlink->state = LINK_ESTABLISHED; 1174 skb_queue_tail(l->namedq, skb); 1175 return true; 1176 case MSG_BUNDLER: 1177 case TUNNEL_PROTOCOL: 1178 case MSG_FRAGMENTER: 1179 case BCAST_PROTOCOL: 1180 return false; 1181 default: 1182 pr_warn("Dropping received illegal msg type\n"); 1183 kfree_skb(skb); 1184 return true; 1185 }; 1186 } 1187 1188 /* tipc_link_input - process packet that has passed link protocol check 1189 * 1190 * Consumes buffer 1191 */ 1192 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb, 1193 struct sk_buff_head *inputq, 1194 struct sk_buff **reasm_skb) 1195 { 1196 struct tipc_msg *hdr = buf_msg(skb); 1197 struct sk_buff *iskb; 1198 struct sk_buff_head tmpq; 1199 int usr = msg_user(hdr); 1200 int pos = 0; 1201 1202 if (usr == MSG_BUNDLER) { 1203 skb_queue_head_init(&tmpq); 1204 l->stats.recv_bundles++; 1205 l->stats.recv_bundled += msg_msgcnt(hdr); 1206 while (tipc_msg_extract(skb, &iskb, &pos)) 1207 tipc_data_input(l, iskb, &tmpq); 1208 tipc_skb_queue_splice_tail(&tmpq, inputq); 1209 return 0; 1210 } else if (usr == MSG_FRAGMENTER) { 1211 l->stats.recv_fragments++; 1212 if (tipc_buf_append(reasm_skb, &skb)) { 1213 l->stats.recv_fragmented++; 1214 tipc_data_input(l, skb, inputq); 1215 } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) { 1216 pr_warn_ratelimited("Unable to build fragment list\n"); 1217 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1218 } 1219 return 0; 1220 } else if (usr == BCAST_PROTOCOL) { 1221 tipc_bcast_lock(l->net); 1222 tipc_link_bc_init_rcv(l->bc_rcvlink, hdr); 1223 tipc_bcast_unlock(l->net); 1224 } 1225 1226 kfree_skb(skb); 1227 return 0; 1228 } 1229 1230 /* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the 1231 * inner message along with the ones in the old link's 1232 * deferdq 1233 * @l: tunnel link 1234 * @skb: TUNNEL_PROTOCOL message 1235 * @inputq: queue to put messages ready for delivery 1236 */ 1237 static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb, 1238 struct sk_buff_head *inputq) 1239 { 1240 struct sk_buff **reasm_skb = &l->failover_reasm_skb; 1241 struct sk_buff_head *fdefq = &l->failover_deferdq; 1242 struct tipc_msg *hdr = buf_msg(skb); 1243 struct sk_buff *iskb; 1244 int ipos = 0; 1245 int rc = 0; 1246 u16 seqno; 1247 1248 /* SYNCH_MSG */ 1249 if (msg_type(hdr) == SYNCH_MSG) 1250 goto drop; 1251 1252 /* FAILOVER_MSG */ 1253 if (!tipc_msg_extract(skb, &iskb, &ipos)) { 1254 pr_warn_ratelimited("Cannot extract FAILOVER_MSG, defq: %d\n", 1255 skb_queue_len(fdefq)); 1256 return rc; 1257 } 1258 1259 do { 1260 seqno = buf_seqno(iskb); 1261 1262 if (unlikely(less(seqno, l->drop_point))) { 1263 kfree_skb(iskb); 1264 continue; 1265 } 1266 1267 if (unlikely(seqno != l->drop_point)) { 1268 __tipc_skb_queue_sorted(fdefq, seqno, iskb); 1269 continue; 1270 } 1271 1272 l->drop_point++; 1273 1274 if (!tipc_data_input(l, iskb, inputq)) 1275 rc |= tipc_link_input(l, iskb, inputq, reasm_skb); 1276 if (unlikely(rc)) 1277 break; 1278 } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point))); 1279 1280 drop: 1281 kfree_skb(skb); 1282 return rc; 1283 } 1284 1285 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked) 1286 { 1287 bool released = false; 1288 struct sk_buff *skb, *tmp; 1289 1290 skb_queue_walk_safe(&l->transmq, skb, tmp) { 1291 if (more(buf_seqno(skb), acked)) 1292 break; 1293 __skb_unlink(skb, &l->transmq); 1294 kfree_skb(skb); 1295 released = true; 1296 } 1297 return released; 1298 } 1299 1300 /* tipc_build_gap_ack_blks - build Gap ACK blocks 1301 * @l: tipc link that data have come with gaps in sequence if any 1302 * @data: data buffer to store the Gap ACK blocks after built 1303 * 1304 * returns the actual allocated memory size 1305 */ 1306 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data) 1307 { 1308 struct sk_buff *skb = skb_peek(&l->deferdq); 1309 struct tipc_gap_ack_blks *ga = data; 1310 u16 len, expect, seqno = 0; 1311 u8 n = 0; 1312 1313 if (!skb) 1314 goto exit; 1315 1316 expect = buf_seqno(skb); 1317 skb_queue_walk(&l->deferdq, skb) { 1318 seqno = buf_seqno(skb); 1319 if (unlikely(more(seqno, expect))) { 1320 ga->gacks[n].ack = htons(expect - 1); 1321 ga->gacks[n].gap = htons(seqno - expect); 1322 if (++n >= MAX_GAP_ACK_BLKS) { 1323 pr_info_ratelimited("Too few Gap ACK blocks!\n"); 1324 goto exit; 1325 } 1326 } else if (unlikely(less(seqno, expect))) { 1327 pr_warn("Unexpected skb in deferdq!\n"); 1328 continue; 1329 } 1330 expect = seqno + 1; 1331 } 1332 1333 /* last block */ 1334 ga->gacks[n].ack = htons(seqno); 1335 ga->gacks[n].gap = 0; 1336 n++; 1337 1338 exit: 1339 len = tipc_gap_ack_blks_sz(n); 1340 ga->len = htons(len); 1341 ga->gack_cnt = n; 1342 return len; 1343 } 1344 1345 /* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing 1346 * acked packets, also doing retransmissions if 1347 * gaps found 1348 * @l: tipc link with transmq queue to be advanced 1349 * @acked: seqno of last packet acked by peer without any gaps before 1350 * @gap: # of gap packets 1351 * @ga: buffer pointer to Gap ACK blocks from peer 1352 * @xmitq: queue for accumulating the retransmitted packets if any 1353 * 1354 * In case of a repeated retransmit failures, the call will return shortly 1355 * with a returned code (e.g. TIPC_LINK_DOWN_EVT) 1356 */ 1357 static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap, 1358 struct tipc_gap_ack_blks *ga, 1359 struct sk_buff_head *xmitq) 1360 { 1361 struct sk_buff *skb, *_skb, *tmp; 1362 struct tipc_msg *hdr; 1363 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; 1364 u16 ack = l->rcv_nxt - 1; 1365 bool passed = false; 1366 u16 seqno, n = 0; 1367 int rc = 0; 1368 1369 skb_queue_walk_safe(&l->transmq, skb, tmp) { 1370 seqno = buf_seqno(skb); 1371 1372 next_gap_ack: 1373 if (less_eq(seqno, acked)) { 1374 /* release skb */ 1375 __skb_unlink(skb, &l->transmq); 1376 kfree_skb(skb); 1377 } else if (less_eq(seqno, acked + gap)) { 1378 /* First, check if repeated retrans failures occurs? */ 1379 if (!passed && link_retransmit_failure(l, l, &rc)) 1380 return rc; 1381 passed = true; 1382 1383 /* retransmit skb if unrestricted*/ 1384 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) 1385 continue; 1386 TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME; 1387 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, 1388 GFP_ATOMIC); 1389 if (!_skb) 1390 continue; 1391 hdr = buf_msg(_skb); 1392 msg_set_ack(hdr, ack); 1393 msg_set_bcast_ack(hdr, bc_ack); 1394 _skb->priority = TC_PRIO_CONTROL; 1395 __skb_queue_tail(xmitq, _skb); 1396 l->stats.retransmitted++; 1397 1398 /* Increase actual retrans counter & mark first time */ 1399 if (!TIPC_SKB_CB(skb)->retr_cnt++) 1400 TIPC_SKB_CB(skb)->retr_stamp = jiffies; 1401 } else { 1402 /* retry with Gap ACK blocks if any */ 1403 if (!ga || n >= ga->gack_cnt) 1404 break; 1405 acked = ntohs(ga->gacks[n].ack); 1406 gap = ntohs(ga->gacks[n].gap); 1407 n++; 1408 goto next_gap_ack; 1409 } 1410 } 1411 1412 return 0; 1413 } 1414 1415 /* tipc_link_build_state_msg: prepare link state message for transmission 1416 * 1417 * Note that sending of broadcast ack is coordinated among nodes, to reduce 1418 * risk of ack storms towards the sender 1419 */ 1420 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq) 1421 { 1422 if (!l) 1423 return 0; 1424 1425 /* Broadcast ACK must be sent via a unicast link => defer to caller */ 1426 if (link_is_bc_rcvlink(l)) { 1427 if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf) 1428 return 0; 1429 l->rcv_unacked = 0; 1430 1431 /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */ 1432 l->snd_nxt = l->rcv_nxt; 1433 return TIPC_LINK_SND_STATE; 1434 } 1435 1436 /* Unicast ACK */ 1437 l->rcv_unacked = 0; 1438 l->stats.sent_acks++; 1439 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq); 1440 return 0; 1441 } 1442 1443 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message 1444 */ 1445 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq) 1446 { 1447 int mtyp = RESET_MSG; 1448 struct sk_buff *skb; 1449 1450 if (l->state == LINK_ESTABLISHING) 1451 mtyp = ACTIVATE_MSG; 1452 1453 tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq); 1454 1455 /* Inform peer that this endpoint is going down if applicable */ 1456 skb = skb_peek_tail(xmitq); 1457 if (skb && (l->state == LINK_RESET)) 1458 msg_set_peer_stopping(buf_msg(skb), 1); 1459 } 1460 1461 /* tipc_link_build_nack_msg: prepare link nack message for transmission 1462 * Note that sending of broadcast NACK is coordinated among nodes, to 1463 * reduce the risk of NACK storms towards the sender 1464 */ 1465 static int tipc_link_build_nack_msg(struct tipc_link *l, 1466 struct sk_buff_head *xmitq) 1467 { 1468 u32 def_cnt = ++l->stats.deferred_recv; 1469 u32 defq_len = skb_queue_len(&l->deferdq); 1470 int match1, match2; 1471 1472 if (link_is_bc_rcvlink(l)) { 1473 match1 = def_cnt & 0xf; 1474 match2 = tipc_own_addr(l->net) & 0xf; 1475 if (match1 == match2) 1476 return TIPC_LINK_SND_STATE; 1477 return 0; 1478 } 1479 1480 if (defq_len >= 3 && !((defq_len - 3) % 16)) 1481 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq); 1482 return 0; 1483 } 1484 1485 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node 1486 * @l: the link that should handle the message 1487 * @skb: TIPC packet 1488 * @xmitq: queue to place packets to be sent after this call 1489 */ 1490 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, 1491 struct sk_buff_head *xmitq) 1492 { 1493 struct sk_buff_head *defq = &l->deferdq; 1494 struct tipc_msg *hdr = buf_msg(skb); 1495 u16 seqno, rcv_nxt, win_lim; 1496 int rc = 0; 1497 1498 /* Verify and update link state */ 1499 if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) 1500 return tipc_link_proto_rcv(l, skb, xmitq); 1501 1502 /* Don't send probe at next timeout expiration */ 1503 l->silent_intv_cnt = 0; 1504 1505 do { 1506 hdr = buf_msg(skb); 1507 seqno = msg_seqno(hdr); 1508 rcv_nxt = l->rcv_nxt; 1509 win_lim = rcv_nxt + TIPC_MAX_LINK_WIN; 1510 1511 if (unlikely(!link_is_up(l))) { 1512 if (l->state == LINK_ESTABLISHING) 1513 rc = TIPC_LINK_UP_EVT; 1514 goto drop; 1515 } 1516 1517 /* Drop if outside receive window */ 1518 if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) { 1519 l->stats.duplicates++; 1520 goto drop; 1521 } 1522 1523 /* Forward queues and wake up waiting users */ 1524 if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) { 1525 tipc_link_advance_backlog(l, xmitq); 1526 if (unlikely(!skb_queue_empty(&l->wakeupq))) 1527 link_prepare_wakeup(l); 1528 } 1529 1530 /* Defer delivery if sequence gap */ 1531 if (unlikely(seqno != rcv_nxt)) { 1532 __tipc_skb_queue_sorted(defq, seqno, skb); 1533 rc |= tipc_link_build_nack_msg(l, xmitq); 1534 break; 1535 } 1536 1537 /* Deliver packet */ 1538 l->rcv_nxt++; 1539 l->stats.recv_pkts++; 1540 1541 if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL)) 1542 rc |= tipc_link_tnl_rcv(l, skb, l->inputq); 1543 else if (!tipc_data_input(l, skb, l->inputq)) 1544 rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf); 1545 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) 1546 rc |= tipc_link_build_state_msg(l, xmitq); 1547 if (unlikely(rc & ~TIPC_LINK_SND_STATE)) 1548 break; 1549 } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt))); 1550 1551 return rc; 1552 drop: 1553 kfree_skb(skb); 1554 return rc; 1555 } 1556 1557 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, 1558 bool probe_reply, u16 rcvgap, 1559 int tolerance, int priority, 1560 struct sk_buff_head *xmitq) 1561 { 1562 struct tipc_link *bcl = l->bc_rcvlink; 1563 struct sk_buff *skb; 1564 struct tipc_msg *hdr; 1565 struct sk_buff_head *dfq = &l->deferdq; 1566 bool node_up = link_is_up(bcl); 1567 struct tipc_mon_state *mstate = &l->mon_state; 1568 int dlen = 0; 1569 void *data; 1570 u16 glen = 0; 1571 1572 /* Don't send protocol message during reset or link failover */ 1573 if (tipc_link_is_blocked(l)) 1574 return; 1575 1576 if (!tipc_link_is_up(l) && (mtyp == STATE_MSG)) 1577 return; 1578 1579 if (!skb_queue_empty(dfq)) 1580 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt; 1581 1582 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE, 1583 tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ, 1584 l->addr, tipc_own_addr(l->net), 0, 0, 0); 1585 if (!skb) 1586 return; 1587 1588 hdr = buf_msg(skb); 1589 data = msg_data(hdr); 1590 msg_set_session(hdr, l->session); 1591 msg_set_bearer_id(hdr, l->bearer_id); 1592 msg_set_net_plane(hdr, l->net_plane); 1593 msg_set_next_sent(hdr, l->snd_nxt); 1594 msg_set_ack(hdr, l->rcv_nxt - 1); 1595 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1); 1596 msg_set_bc_ack_invalid(hdr, !node_up); 1597 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1); 1598 msg_set_link_tolerance(hdr, tolerance); 1599 msg_set_linkprio(hdr, priority); 1600 msg_set_redundant_link(hdr, node_up); 1601 msg_set_seq_gap(hdr, 0); 1602 msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2); 1603 1604 if (mtyp == STATE_MSG) { 1605 if (l->peer_caps & TIPC_LINK_PROTO_SEQNO) 1606 msg_set_seqno(hdr, l->snd_nxt_state++); 1607 msg_set_seq_gap(hdr, rcvgap); 1608 msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl)); 1609 msg_set_probe(hdr, probe); 1610 msg_set_is_keepalive(hdr, probe || probe_reply); 1611 if (l->peer_caps & TIPC_GAP_ACK_BLOCK) 1612 glen = tipc_build_gap_ack_blks(l, data); 1613 tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id); 1614 msg_set_size(hdr, INT_H_SIZE + glen + dlen); 1615 skb_trim(skb, INT_H_SIZE + glen + dlen); 1616 l->stats.sent_states++; 1617 l->rcv_unacked = 0; 1618 } else { 1619 /* RESET_MSG or ACTIVATE_MSG */ 1620 if (mtyp == ACTIVATE_MSG) { 1621 msg_set_dest_session_valid(hdr, 1); 1622 msg_set_dest_session(hdr, l->peer_session); 1623 } 1624 msg_set_max_pkt(hdr, l->advertised_mtu); 1625 strcpy(data, l->if_name); 1626 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); 1627 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME); 1628 } 1629 if (probe) 1630 l->stats.sent_probes++; 1631 if (rcvgap) 1632 l->stats.sent_nacks++; 1633 skb->priority = TC_PRIO_CONTROL; 1634 __skb_queue_tail(xmitq, skb); 1635 trace_tipc_proto_build(skb, false, l->name); 1636 } 1637 1638 void tipc_link_create_dummy_tnl_msg(struct tipc_link *l, 1639 struct sk_buff_head *xmitq) 1640 { 1641 u32 onode = tipc_own_addr(l->net); 1642 struct tipc_msg *hdr, *ihdr; 1643 struct sk_buff_head tnlq; 1644 struct sk_buff *skb; 1645 u32 dnode = l->addr; 1646 1647 skb_queue_head_init(&tnlq); 1648 skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG, 1649 INT_H_SIZE, BASIC_H_SIZE, 1650 dnode, onode, 0, 0, 0); 1651 if (!skb) { 1652 pr_warn("%sunable to create tunnel packet\n", link_co_err); 1653 return; 1654 } 1655 1656 hdr = buf_msg(skb); 1657 msg_set_msgcnt(hdr, 1); 1658 msg_set_bearer_id(hdr, l->peer_bearer_id); 1659 1660 ihdr = (struct tipc_msg *)msg_data(hdr); 1661 tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG, 1662 BASIC_H_SIZE, dnode); 1663 msg_set_errcode(ihdr, TIPC_ERR_NO_PORT); 1664 __skb_queue_tail(&tnlq, skb); 1665 tipc_link_xmit(l, &tnlq, xmitq); 1666 } 1667 1668 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets 1669 * with contents of the link's transmit and backlog queues. 1670 */ 1671 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, 1672 int mtyp, struct sk_buff_head *xmitq) 1673 { 1674 struct sk_buff_head *fdefq = &tnl->failover_deferdq; 1675 struct sk_buff *skb, *tnlskb; 1676 struct tipc_msg *hdr, tnlhdr; 1677 struct sk_buff_head *queue = &l->transmq; 1678 struct sk_buff_head tmpxq, tnlq; 1679 u16 pktlen, pktcnt, seqno = l->snd_nxt; 1680 1681 if (!tnl) 1682 return; 1683 1684 skb_queue_head_init(&tnlq); 1685 skb_queue_head_init(&tmpxq); 1686 1687 /* At least one packet required for safe algorithm => add dummy */ 1688 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG, 1689 BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net), 1690 0, 0, TIPC_ERR_NO_PORT); 1691 if (!skb) { 1692 pr_warn("%sunable to create tunnel packet\n", link_co_err); 1693 return; 1694 } 1695 skb_queue_tail(&tnlq, skb); 1696 tipc_link_xmit(l, &tnlq, &tmpxq); 1697 __skb_queue_purge(&tmpxq); 1698 1699 /* Initialize reusable tunnel packet header */ 1700 tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL, 1701 mtyp, INT_H_SIZE, l->addr); 1702 if (mtyp == SYNCH_MSG) 1703 pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq)); 1704 else 1705 pktcnt = skb_queue_len(&l->transmq); 1706 pktcnt += skb_queue_len(&l->backlogq); 1707 msg_set_msgcnt(&tnlhdr, pktcnt); 1708 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id); 1709 tnl: 1710 /* Wrap each packet into a tunnel packet */ 1711 skb_queue_walk(queue, skb) { 1712 hdr = buf_msg(skb); 1713 if (queue == &l->backlogq) 1714 msg_set_seqno(hdr, seqno++); 1715 pktlen = msg_size(hdr); 1716 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE); 1717 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC); 1718 if (!tnlskb) { 1719 pr_warn("%sunable to send packet\n", link_co_err); 1720 return; 1721 } 1722 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE); 1723 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen); 1724 __skb_queue_tail(&tnlq, tnlskb); 1725 } 1726 if (queue != &l->backlogq) { 1727 queue = &l->backlogq; 1728 goto tnl; 1729 } 1730 1731 tipc_link_xmit(tnl, &tnlq, xmitq); 1732 1733 if (mtyp == FAILOVER_MSG) { 1734 tnl->drop_point = l->rcv_nxt; 1735 tnl->failover_reasm_skb = l->reasm_buf; 1736 l->reasm_buf = NULL; 1737 1738 /* Failover the link's deferdq */ 1739 if (unlikely(!skb_queue_empty(fdefq))) { 1740 pr_warn("Link failover deferdq not empty: %d!\n", 1741 skb_queue_len(fdefq)); 1742 __skb_queue_purge(fdefq); 1743 } 1744 skb_queue_splice_init(&l->deferdq, fdefq); 1745 } 1746 } 1747 1748 /** 1749 * tipc_link_failover_prepare() - prepare tnl for link failover 1750 * 1751 * This is a special version of the precursor - tipc_link_tnl_prepare(), 1752 * see the tipc_node_link_failover() for details 1753 * 1754 * @l: failover link 1755 * @tnl: tunnel link 1756 * @xmitq: queue for messages to be xmited 1757 */ 1758 void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl, 1759 struct sk_buff_head *xmitq) 1760 { 1761 struct sk_buff_head *fdefq = &tnl->failover_deferdq; 1762 1763 tipc_link_create_dummy_tnl_msg(tnl, xmitq); 1764 1765 /* This failover link enpoint was never established before, 1766 * so it has not received anything from peer. 1767 * Otherwise, it must be a normal failover situation or the 1768 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes 1769 * would have to start over from scratch instead. 1770 */ 1771 tnl->drop_point = 1; 1772 tnl->failover_reasm_skb = NULL; 1773 1774 /* Initiate the link's failover deferdq */ 1775 if (unlikely(!skb_queue_empty(fdefq))) { 1776 pr_warn("Link failover deferdq not empty: %d!\n", 1777 skb_queue_len(fdefq)); 1778 __skb_queue_purge(fdefq); 1779 } 1780 } 1781 1782 /* tipc_link_validate_msg(): validate message against current link state 1783 * Returns true if message should be accepted, otherwise false 1784 */ 1785 bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr) 1786 { 1787 u16 curr_session = l->peer_session; 1788 u16 session = msg_session(hdr); 1789 int mtyp = msg_type(hdr); 1790 1791 if (msg_user(hdr) != LINK_PROTOCOL) 1792 return true; 1793 1794 switch (mtyp) { 1795 case RESET_MSG: 1796 if (!l->in_session) 1797 return true; 1798 /* Accept only RESET with new session number */ 1799 return more(session, curr_session); 1800 case ACTIVATE_MSG: 1801 if (!l->in_session) 1802 return true; 1803 /* Accept only ACTIVATE with new or current session number */ 1804 return !less(session, curr_session); 1805 case STATE_MSG: 1806 /* Accept only STATE with current session number */ 1807 if (!l->in_session) 1808 return false; 1809 if (session != curr_session) 1810 return false; 1811 /* Extra sanity check */ 1812 if (!link_is_up(l) && msg_ack(hdr)) 1813 return false; 1814 if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO)) 1815 return true; 1816 /* Accept only STATE with new sequence number */ 1817 return !less(msg_seqno(hdr), l->rcv_nxt_state); 1818 default: 1819 return false; 1820 } 1821 } 1822 1823 /* tipc_link_proto_rcv(): receive link level protocol message : 1824 * Note that network plane id propagates through the network, and may 1825 * change at any time. The node with lowest numerical id determines 1826 * network plane 1827 */ 1828 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, 1829 struct sk_buff_head *xmitq) 1830 { 1831 struct tipc_msg *hdr = buf_msg(skb); 1832 struct tipc_gap_ack_blks *ga = NULL; 1833 u16 rcvgap = 0; 1834 u16 ack = msg_ack(hdr); 1835 u16 gap = msg_seq_gap(hdr); 1836 u16 peers_snd_nxt = msg_next_sent(hdr); 1837 u16 peers_tol = msg_link_tolerance(hdr); 1838 u16 peers_prio = msg_linkprio(hdr); 1839 u16 rcv_nxt = l->rcv_nxt; 1840 u16 dlen = msg_data_sz(hdr); 1841 int mtyp = msg_type(hdr); 1842 bool reply = msg_probe(hdr); 1843 u16 glen = 0; 1844 void *data; 1845 char *if_name; 1846 int rc = 0; 1847 1848 trace_tipc_proto_rcv(skb, false, l->name); 1849 if (tipc_link_is_blocked(l) || !xmitq) 1850 goto exit; 1851 1852 if (tipc_own_addr(l->net) > msg_prevnode(hdr)) 1853 l->net_plane = msg_net_plane(hdr); 1854 1855 skb_linearize(skb); 1856 hdr = buf_msg(skb); 1857 data = msg_data(hdr); 1858 1859 if (!tipc_link_validate_msg(l, hdr)) { 1860 trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!"); 1861 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!"); 1862 goto exit; 1863 } 1864 1865 switch (mtyp) { 1866 case RESET_MSG: 1867 case ACTIVATE_MSG: 1868 /* Complete own link name with peer's interface name */ 1869 if_name = strrchr(l->name, ':') + 1; 1870 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME) 1871 break; 1872 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME) 1873 break; 1874 strncpy(if_name, data, TIPC_MAX_IF_NAME); 1875 1876 /* Update own tolerance if peer indicates a non-zero value */ 1877 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) { 1878 l->tolerance = peers_tol; 1879 l->bc_rcvlink->tolerance = peers_tol; 1880 } 1881 /* Update own priority if peer's priority is higher */ 1882 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI)) 1883 l->priority = peers_prio; 1884 1885 /* If peer is going down we want full re-establish cycle */ 1886 if (msg_peer_stopping(hdr)) { 1887 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1888 break; 1889 } 1890 1891 /* If this endpoint was re-created while peer was ESTABLISHING 1892 * it doesn't know current session number. Force re-synch. 1893 */ 1894 if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) && 1895 l->session != msg_dest_session(hdr)) { 1896 if (less(l->session, msg_dest_session(hdr))) 1897 l->session = msg_dest_session(hdr) + 1; 1898 break; 1899 } 1900 1901 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ 1902 if (mtyp == RESET_MSG || !link_is_up(l)) 1903 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); 1904 1905 /* ACTIVATE_MSG takes up link if it was already locally reset */ 1906 if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING) 1907 rc = TIPC_LINK_UP_EVT; 1908 1909 l->peer_session = msg_session(hdr); 1910 l->in_session = true; 1911 l->peer_bearer_id = msg_bearer_id(hdr); 1912 if (l->mtu > msg_max_pkt(hdr)) 1913 l->mtu = msg_max_pkt(hdr); 1914 break; 1915 1916 case STATE_MSG: 1917 l->rcv_nxt_state = msg_seqno(hdr) + 1; 1918 1919 /* Update own tolerance if peer indicates a non-zero value */ 1920 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) { 1921 l->tolerance = peers_tol; 1922 l->bc_rcvlink->tolerance = peers_tol; 1923 } 1924 /* Update own prio if peer indicates a different value */ 1925 if ((peers_prio != l->priority) && 1926 in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) { 1927 l->priority = peers_prio; 1928 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1929 } 1930 1931 l->silent_intv_cnt = 0; 1932 l->stats.recv_states++; 1933 if (msg_probe(hdr)) 1934 l->stats.recv_probes++; 1935 1936 if (!link_is_up(l)) { 1937 if (l->state == LINK_ESTABLISHING) 1938 rc = TIPC_LINK_UP_EVT; 1939 break; 1940 } 1941 1942 /* Receive Gap ACK blocks from peer if any */ 1943 if (l->peer_caps & TIPC_GAP_ACK_BLOCK) { 1944 ga = (struct tipc_gap_ack_blks *)data; 1945 glen = ntohs(ga->len); 1946 /* sanity check: if failed, ignore Gap ACK blocks */ 1947 if (glen != tipc_gap_ack_blks_sz(ga->gack_cnt)) 1948 ga = NULL; 1949 } 1950 1951 tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr, 1952 &l->mon_state, l->bearer_id); 1953 1954 /* Send NACK if peer has sent pkts we haven't received yet */ 1955 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l)) 1956 rcvgap = peers_snd_nxt - l->rcv_nxt; 1957 if (rcvgap || reply) 1958 tipc_link_build_proto_msg(l, STATE_MSG, 0, reply, 1959 rcvgap, 0, 0, xmitq); 1960 1961 rc |= tipc_link_advance_transmq(l, ack, gap, ga, xmitq); 1962 1963 /* If NACK, retransmit will now start at right position */ 1964 if (gap) 1965 l->stats.recv_nacks++; 1966 1967 tipc_link_advance_backlog(l, xmitq); 1968 if (unlikely(!skb_queue_empty(&l->wakeupq))) 1969 link_prepare_wakeup(l); 1970 } 1971 exit: 1972 kfree_skb(skb); 1973 return rc; 1974 } 1975 1976 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message 1977 */ 1978 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast, 1979 u16 peers_snd_nxt, 1980 struct sk_buff_head *xmitq) 1981 { 1982 struct sk_buff *skb; 1983 struct tipc_msg *hdr; 1984 struct sk_buff *dfrd_skb = skb_peek(&l->deferdq); 1985 u16 ack = l->rcv_nxt - 1; 1986 u16 gap_to = peers_snd_nxt - 1; 1987 1988 skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, 1989 0, l->addr, tipc_own_addr(l->net), 0, 0, 0); 1990 if (!skb) 1991 return false; 1992 hdr = buf_msg(skb); 1993 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1); 1994 msg_set_bcast_ack(hdr, ack); 1995 msg_set_bcgap_after(hdr, ack); 1996 if (dfrd_skb) 1997 gap_to = buf_seqno(dfrd_skb) - 1; 1998 msg_set_bcgap_to(hdr, gap_to); 1999 msg_set_non_seq(hdr, bcast); 2000 __skb_queue_tail(xmitq, skb); 2001 return true; 2002 } 2003 2004 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints. 2005 * 2006 * Give a newly added peer node the sequence number where it should 2007 * start receiving and acking broadcast packets. 2008 */ 2009 static void tipc_link_build_bc_init_msg(struct tipc_link *l, 2010 struct sk_buff_head *xmitq) 2011 { 2012 struct sk_buff_head list; 2013 2014 __skb_queue_head_init(&list); 2015 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list)) 2016 return; 2017 msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true); 2018 tipc_link_xmit(l, &list, xmitq); 2019 } 2020 2021 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer 2022 */ 2023 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr) 2024 { 2025 int mtyp = msg_type(hdr); 2026 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr); 2027 2028 if (link_is_up(l)) 2029 return; 2030 2031 if (msg_user(hdr) == BCAST_PROTOCOL) { 2032 l->rcv_nxt = peers_snd_nxt; 2033 l->state = LINK_ESTABLISHED; 2034 return; 2035 } 2036 2037 if (l->peer_caps & TIPC_BCAST_SYNCH) 2038 return; 2039 2040 if (msg_peer_node_is_up(hdr)) 2041 return; 2042 2043 /* Compatibility: accept older, less safe initial synch data */ 2044 if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG)) 2045 l->rcv_nxt = peers_snd_nxt; 2046 } 2047 2048 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state 2049 */ 2050 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr, 2051 struct sk_buff_head *xmitq) 2052 { 2053 struct tipc_link *snd_l = l->bc_sndlink; 2054 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr); 2055 u16 from = msg_bcast_ack(hdr) + 1; 2056 u16 to = from + msg_bc_gap(hdr) - 1; 2057 int rc = 0; 2058 2059 if (!link_is_up(l)) 2060 return rc; 2061 2062 if (!msg_peer_node_is_up(hdr)) 2063 return rc; 2064 2065 /* Open when peer ackowledges our bcast init msg (pkt #1) */ 2066 if (msg_ack(hdr)) 2067 l->bc_peer_is_up = true; 2068 2069 if (!l->bc_peer_is_up) 2070 return rc; 2071 2072 l->stats.recv_nacks++; 2073 2074 /* Ignore if peers_snd_nxt goes beyond receive window */ 2075 if (more(peers_snd_nxt, l->rcv_nxt + l->window)) 2076 return rc; 2077 2078 rc = tipc_link_bc_retrans(snd_l, l, from, to, xmitq); 2079 2080 l->snd_nxt = peers_snd_nxt; 2081 if (link_bc_rcv_gap(l)) 2082 rc |= TIPC_LINK_SND_STATE; 2083 2084 /* Return now if sender supports nack via STATE messages */ 2085 if (l->peer_caps & TIPC_BCAST_STATE_NACK) 2086 return rc; 2087 2088 /* Otherwise, be backwards compatible */ 2089 2090 if (!more(peers_snd_nxt, l->rcv_nxt)) { 2091 l->nack_state = BC_NACK_SND_CONDITIONAL; 2092 return 0; 2093 } 2094 2095 /* Don't NACK if one was recently sent or peeked */ 2096 if (l->nack_state == BC_NACK_SND_SUPPRESS) { 2097 l->nack_state = BC_NACK_SND_UNCONDITIONAL; 2098 return 0; 2099 } 2100 2101 /* Conditionally delay NACK sending until next synch rcv */ 2102 if (l->nack_state == BC_NACK_SND_CONDITIONAL) { 2103 l->nack_state = BC_NACK_SND_UNCONDITIONAL; 2104 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN) 2105 return 0; 2106 } 2107 2108 /* Send NACK now but suppress next one */ 2109 tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq); 2110 l->nack_state = BC_NACK_SND_SUPPRESS; 2111 return 0; 2112 } 2113 2114 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked, 2115 struct sk_buff_head *xmitq) 2116 { 2117 struct sk_buff *skb, *tmp; 2118 struct tipc_link *snd_l = l->bc_sndlink; 2119 2120 if (!link_is_up(l) || !l->bc_peer_is_up) 2121 return; 2122 2123 if (!more(acked, l->acked)) 2124 return; 2125 2126 trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq); 2127 /* Skip over packets peer has already acked */ 2128 skb_queue_walk(&snd_l->transmq, skb) { 2129 if (more(buf_seqno(skb), l->acked)) 2130 break; 2131 } 2132 2133 /* Update/release the packets peer is acking now */ 2134 skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) { 2135 if (more(buf_seqno(skb), acked)) 2136 break; 2137 if (!--TIPC_SKB_CB(skb)->ackers) { 2138 __skb_unlink(skb, &snd_l->transmq); 2139 kfree_skb(skb); 2140 } 2141 } 2142 l->acked = acked; 2143 tipc_link_advance_backlog(snd_l, xmitq); 2144 if (unlikely(!skb_queue_empty(&snd_l->wakeupq))) 2145 link_prepare_wakeup(snd_l); 2146 } 2147 2148 /* tipc_link_bc_nack_rcv(): receive broadcast nack message 2149 * This function is here for backwards compatibility, since 2150 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5. 2151 */ 2152 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb, 2153 struct sk_buff_head *xmitq) 2154 { 2155 struct tipc_msg *hdr = buf_msg(skb); 2156 u32 dnode = msg_destnode(hdr); 2157 int mtyp = msg_type(hdr); 2158 u16 acked = msg_bcast_ack(hdr); 2159 u16 from = acked + 1; 2160 u16 to = msg_bcgap_to(hdr); 2161 u16 peers_snd_nxt = to + 1; 2162 int rc = 0; 2163 2164 kfree_skb(skb); 2165 2166 if (!tipc_link_is_up(l) || !l->bc_peer_is_up) 2167 return 0; 2168 2169 if (mtyp != STATE_MSG) 2170 return 0; 2171 2172 if (dnode == tipc_own_addr(l->net)) { 2173 tipc_link_bc_ack_rcv(l, acked, xmitq); 2174 rc = tipc_link_bc_retrans(l->bc_sndlink, l, from, to, xmitq); 2175 l->stats.recv_nacks++; 2176 return rc; 2177 } 2178 2179 /* Msg for other node => suppress own NACK at next sync if applicable */ 2180 if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from)) 2181 l->nack_state = BC_NACK_SND_SUPPRESS; 2182 2183 return 0; 2184 } 2185 2186 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) 2187 { 2188 int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE); 2189 2190 l->window = win; 2191 l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win); 2192 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2); 2193 l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3); 2194 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4); 2195 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; 2196 } 2197 2198 /** 2199 * link_reset_stats - reset link statistics 2200 * @l: pointer to link 2201 */ 2202 void tipc_link_reset_stats(struct tipc_link *l) 2203 { 2204 memset(&l->stats, 0, sizeof(l->stats)); 2205 } 2206 2207 static void link_print(struct tipc_link *l, const char *str) 2208 { 2209 struct sk_buff *hskb = skb_peek(&l->transmq); 2210 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1; 2211 u16 tail = l->snd_nxt - 1; 2212 2213 pr_info("%s Link <%s> state %x\n", str, l->name, l->state); 2214 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n", 2215 skb_queue_len(&l->transmq), head, tail, 2216 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt); 2217 } 2218 2219 /* Parse and validate nested (link) properties valid for media, bearer and link 2220 */ 2221 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]) 2222 { 2223 int err; 2224 2225 err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop, 2226 tipc_nl_prop_policy, NULL); 2227 if (err) 2228 return err; 2229 2230 if (props[TIPC_NLA_PROP_PRIO]) { 2231 u32 prio; 2232 2233 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); 2234 if (prio > TIPC_MAX_LINK_PRI) 2235 return -EINVAL; 2236 } 2237 2238 if (props[TIPC_NLA_PROP_TOL]) { 2239 u32 tol; 2240 2241 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); 2242 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL)) 2243 return -EINVAL; 2244 } 2245 2246 if (props[TIPC_NLA_PROP_WIN]) { 2247 u32 win; 2248 2249 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); 2250 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN)) 2251 return -EINVAL; 2252 } 2253 2254 return 0; 2255 } 2256 2257 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s) 2258 { 2259 int i; 2260 struct nlattr *stats; 2261 2262 struct nla_map { 2263 u32 key; 2264 u32 val; 2265 }; 2266 2267 struct nla_map map[] = { 2268 {TIPC_NLA_STATS_RX_INFO, 0}, 2269 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments}, 2270 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented}, 2271 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles}, 2272 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled}, 2273 {TIPC_NLA_STATS_TX_INFO, 0}, 2274 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments}, 2275 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented}, 2276 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles}, 2277 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled}, 2278 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ? 2279 s->msg_length_counts : 1}, 2280 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts}, 2281 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total}, 2282 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]}, 2283 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]}, 2284 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]}, 2285 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]}, 2286 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]}, 2287 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]}, 2288 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]}, 2289 {TIPC_NLA_STATS_RX_STATES, s->recv_states}, 2290 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes}, 2291 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks}, 2292 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv}, 2293 {TIPC_NLA_STATS_TX_STATES, s->sent_states}, 2294 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes}, 2295 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks}, 2296 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks}, 2297 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted}, 2298 {TIPC_NLA_STATS_DUPLICATES, s->duplicates}, 2299 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs}, 2300 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz}, 2301 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ? 2302 (s->accu_queue_sz / s->queue_sz_counts) : 0} 2303 }; 2304 2305 stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS); 2306 if (!stats) 2307 return -EMSGSIZE; 2308 2309 for (i = 0; i < ARRAY_SIZE(map); i++) 2310 if (nla_put_u32(skb, map[i].key, map[i].val)) 2311 goto msg_full; 2312 2313 nla_nest_end(skb, stats); 2314 2315 return 0; 2316 msg_full: 2317 nla_nest_cancel(skb, stats); 2318 2319 return -EMSGSIZE; 2320 } 2321 2322 /* Caller should hold appropriate locks to protect the link */ 2323 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, 2324 struct tipc_link *link, int nlflags) 2325 { 2326 u32 self = tipc_own_addr(net); 2327 struct nlattr *attrs; 2328 struct nlattr *prop; 2329 void *hdr; 2330 int err; 2331 2332 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 2333 nlflags, TIPC_NL_LINK_GET); 2334 if (!hdr) 2335 return -EMSGSIZE; 2336 2337 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK); 2338 if (!attrs) 2339 goto msg_full; 2340 2341 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name)) 2342 goto attr_msg_full; 2343 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self))) 2344 goto attr_msg_full; 2345 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) 2346 goto attr_msg_full; 2347 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts)) 2348 goto attr_msg_full; 2349 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts)) 2350 goto attr_msg_full; 2351 2352 if (tipc_link_is_up(link)) 2353 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) 2354 goto attr_msg_full; 2355 if (link->active) 2356 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE)) 2357 goto attr_msg_full; 2358 2359 prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP); 2360 if (!prop) 2361 goto attr_msg_full; 2362 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) 2363 goto prop_msg_full; 2364 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) 2365 goto prop_msg_full; 2366 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, 2367 link->window)) 2368 goto prop_msg_full; 2369 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) 2370 goto prop_msg_full; 2371 nla_nest_end(msg->skb, prop); 2372 2373 err = __tipc_nl_add_stats(msg->skb, &link->stats); 2374 if (err) 2375 goto attr_msg_full; 2376 2377 nla_nest_end(msg->skb, attrs); 2378 genlmsg_end(msg->skb, hdr); 2379 2380 return 0; 2381 2382 prop_msg_full: 2383 nla_nest_cancel(msg->skb, prop); 2384 attr_msg_full: 2385 nla_nest_cancel(msg->skb, attrs); 2386 msg_full: 2387 genlmsg_cancel(msg->skb, hdr); 2388 2389 return -EMSGSIZE; 2390 } 2391 2392 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb, 2393 struct tipc_stats *stats) 2394 { 2395 int i; 2396 struct nlattr *nest; 2397 2398 struct nla_map { 2399 __u32 key; 2400 __u32 val; 2401 }; 2402 2403 struct nla_map map[] = { 2404 {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts}, 2405 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments}, 2406 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented}, 2407 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles}, 2408 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled}, 2409 {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts}, 2410 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments}, 2411 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented}, 2412 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles}, 2413 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled}, 2414 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks}, 2415 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv}, 2416 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks}, 2417 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks}, 2418 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted}, 2419 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates}, 2420 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs}, 2421 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz}, 2422 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ? 2423 (stats->accu_queue_sz / stats->queue_sz_counts) : 0} 2424 }; 2425 2426 nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS); 2427 if (!nest) 2428 return -EMSGSIZE; 2429 2430 for (i = 0; i < ARRAY_SIZE(map); i++) 2431 if (nla_put_u32(skb, map[i].key, map[i].val)) 2432 goto msg_full; 2433 2434 nla_nest_end(skb, nest); 2435 2436 return 0; 2437 msg_full: 2438 nla_nest_cancel(skb, nest); 2439 2440 return -EMSGSIZE; 2441 } 2442 2443 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg) 2444 { 2445 int err; 2446 void *hdr; 2447 struct nlattr *attrs; 2448 struct nlattr *prop; 2449 struct tipc_net *tn = net_generic(net, tipc_net_id); 2450 u32 bc_mode = tipc_bcast_get_broadcast_mode(net); 2451 u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net); 2452 struct tipc_link *bcl = tn->bcl; 2453 2454 if (!bcl) 2455 return 0; 2456 2457 tipc_bcast_lock(net); 2458 2459 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 2460 NLM_F_MULTI, TIPC_NL_LINK_GET); 2461 if (!hdr) { 2462 tipc_bcast_unlock(net); 2463 return -EMSGSIZE; 2464 } 2465 2466 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK); 2467 if (!attrs) 2468 goto msg_full; 2469 2470 /* The broadcast link is always up */ 2471 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) 2472 goto attr_msg_full; 2473 2474 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST)) 2475 goto attr_msg_full; 2476 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name)) 2477 goto attr_msg_full; 2478 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0)) 2479 goto attr_msg_full; 2480 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0)) 2481 goto attr_msg_full; 2482 2483 prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP); 2484 if (!prop) 2485 goto attr_msg_full; 2486 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window)) 2487 goto prop_msg_full; 2488 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode)) 2489 goto prop_msg_full; 2490 if (bc_mode & BCLINK_MODE_SEL) 2491 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO, 2492 bc_ratio)) 2493 goto prop_msg_full; 2494 nla_nest_end(msg->skb, prop); 2495 2496 err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats); 2497 if (err) 2498 goto attr_msg_full; 2499 2500 tipc_bcast_unlock(net); 2501 nla_nest_end(msg->skb, attrs); 2502 genlmsg_end(msg->skb, hdr); 2503 2504 return 0; 2505 2506 prop_msg_full: 2507 nla_nest_cancel(msg->skb, prop); 2508 attr_msg_full: 2509 nla_nest_cancel(msg->skb, attrs); 2510 msg_full: 2511 tipc_bcast_unlock(net); 2512 genlmsg_cancel(msg->skb, hdr); 2513 2514 return -EMSGSIZE; 2515 } 2516 2517 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol, 2518 struct sk_buff_head *xmitq) 2519 { 2520 l->tolerance = tol; 2521 if (l->bc_rcvlink) 2522 l->bc_rcvlink->tolerance = tol; 2523 if (link_is_up(l)) 2524 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq); 2525 } 2526 2527 void tipc_link_set_prio(struct tipc_link *l, u32 prio, 2528 struct sk_buff_head *xmitq) 2529 { 2530 l->priority = prio; 2531 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq); 2532 } 2533 2534 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit) 2535 { 2536 l->abort_limit = limit; 2537 } 2538 2539 char *tipc_link_name_ext(struct tipc_link *l, char *buf) 2540 { 2541 if (!l) 2542 scnprintf(buf, TIPC_MAX_LINK_NAME, "null"); 2543 else if (link_is_bc_sndlink(l)) 2544 scnprintf(buf, TIPC_MAX_LINK_NAME, "broadcast-sender"); 2545 else if (link_is_bc_rcvlink(l)) 2546 scnprintf(buf, TIPC_MAX_LINK_NAME, 2547 "broadcast-receiver, peer %x", l->addr); 2548 else 2549 memcpy(buf, l->name, TIPC_MAX_LINK_NAME); 2550 2551 return buf; 2552 } 2553 2554 /** 2555 * tipc_link_dump - dump TIPC link data 2556 * @l: tipc link to be dumped 2557 * @dqueues: bitmask to decide if any link queue to be dumped? 2558 * - TIPC_DUMP_NONE: don't dump link queues 2559 * - TIPC_DUMP_TRANSMQ: dump link transmq queue 2560 * - TIPC_DUMP_BACKLOGQ: dump link backlog queue 2561 * - TIPC_DUMP_DEFERDQ: dump link deferd queue 2562 * - TIPC_DUMP_INPUTQ: dump link input queue 2563 * - TIPC_DUMP_WAKEUP: dump link wakeup queue 2564 * - TIPC_DUMP_ALL: dump all the link queues above 2565 * @buf: returned buffer of dump data in format 2566 */ 2567 int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf) 2568 { 2569 int i = 0; 2570 size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN; 2571 struct sk_buff_head *list; 2572 struct sk_buff *hskb, *tskb; 2573 u32 len; 2574 2575 if (!l) { 2576 i += scnprintf(buf, sz, "link data: (null)\n"); 2577 return i; 2578 } 2579 2580 i += scnprintf(buf, sz, "link data: %x", l->addr); 2581 i += scnprintf(buf + i, sz - i, " %x", l->state); 2582 i += scnprintf(buf + i, sz - i, " %u", l->in_session); 2583 i += scnprintf(buf + i, sz - i, " %u", l->session); 2584 i += scnprintf(buf + i, sz - i, " %u", l->peer_session); 2585 i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt); 2586 i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt); 2587 i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state); 2588 i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state); 2589 i += scnprintf(buf + i, sz - i, " %x", l->peer_caps); 2590 i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt); 2591 i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt); 2592 i += scnprintf(buf + i, sz - i, " %u", 0); 2593 i += scnprintf(buf + i, sz - i, " %u", 0); 2594 i += scnprintf(buf + i, sz - i, " %u", l->acked); 2595 2596 list = &l->transmq; 2597 len = skb_queue_len(list); 2598 hskb = skb_peek(list); 2599 tskb = skb_peek_tail(list); 2600 i += scnprintf(buf + i, sz - i, " | %u %u %u", len, 2601 (hskb) ? msg_seqno(buf_msg(hskb)) : 0, 2602 (tskb) ? msg_seqno(buf_msg(tskb)) : 0); 2603 2604 list = &l->deferdq; 2605 len = skb_queue_len(list); 2606 hskb = skb_peek(list); 2607 tskb = skb_peek_tail(list); 2608 i += scnprintf(buf + i, sz - i, " | %u %u %u", len, 2609 (hskb) ? msg_seqno(buf_msg(hskb)) : 0, 2610 (tskb) ? msg_seqno(buf_msg(tskb)) : 0); 2611 2612 list = &l->backlogq; 2613 len = skb_queue_len(list); 2614 hskb = skb_peek(list); 2615 tskb = skb_peek_tail(list); 2616 i += scnprintf(buf + i, sz - i, " | %u %u %u", len, 2617 (hskb) ? msg_seqno(buf_msg(hskb)) : 0, 2618 (tskb) ? msg_seqno(buf_msg(tskb)) : 0); 2619 2620 list = l->inputq; 2621 len = skb_queue_len(list); 2622 hskb = skb_peek(list); 2623 tskb = skb_peek_tail(list); 2624 i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len, 2625 (hskb) ? msg_seqno(buf_msg(hskb)) : 0, 2626 (tskb) ? msg_seqno(buf_msg(tskb)) : 0); 2627 2628 if (dqueues & TIPC_DUMP_TRANSMQ) { 2629 i += scnprintf(buf + i, sz - i, "transmq: "); 2630 i += tipc_list_dump(&l->transmq, false, buf + i); 2631 } 2632 if (dqueues & TIPC_DUMP_BACKLOGQ) { 2633 i += scnprintf(buf + i, sz - i, 2634 "backlogq: <%u %u %u %u %u>, ", 2635 l->backlog[TIPC_LOW_IMPORTANCE].len, 2636 l->backlog[TIPC_MEDIUM_IMPORTANCE].len, 2637 l->backlog[TIPC_HIGH_IMPORTANCE].len, 2638 l->backlog[TIPC_CRITICAL_IMPORTANCE].len, 2639 l->backlog[TIPC_SYSTEM_IMPORTANCE].len); 2640 i += tipc_list_dump(&l->backlogq, false, buf + i); 2641 } 2642 if (dqueues & TIPC_DUMP_DEFERDQ) { 2643 i += scnprintf(buf + i, sz - i, "deferdq: "); 2644 i += tipc_list_dump(&l->deferdq, false, buf + i); 2645 } 2646 if (dqueues & TIPC_DUMP_INPUTQ) { 2647 i += scnprintf(buf + i, sz - i, "inputq: "); 2648 i += tipc_list_dump(l->inputq, false, buf + i); 2649 } 2650 if (dqueues & TIPC_DUMP_WAKEUP) { 2651 i += scnprintf(buf + i, sz - i, "wakeup: "); 2652 i += tipc_list_dump(&l->wakeupq, false, buf + i); 2653 } 2654 2655 return i; 2656 } 2657