/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */
#define STARTING_EVT    856384768	/* link processing trigger */
#define TRAFFIC_MSG_EVT 560815u		/* rx'd traffic message */
#define TIMEOUT_EVT     560817u		/* link timer expired */

/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol.
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u

static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf);
static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
static int  tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
				 struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
static int  tipc_link_iovec_long_xmit(struct tipc_port *sender,
				      struct iovec const *msg_sect,
				      unsigned int len, u32 destnode);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);

/*
 * Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	u32 max_pkt;

	max_pkt = (l_ptr->b_ptr->mtu & ~3);
	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}

static u32 link_next_sent(struct tipc_link *l_ptr)
{
	if (l_ptr->next_out)
		return buf_seqno(l_ptr->next_out);
	return mod(l_ptr->next_out_no);
}

static u32 link_last_sent(struct tipc_link *l_ptr)
{
	return mod(link_next_sent(l_ptr) - 1);
}
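/*
 * Worked example (hypothetical values): align() rounds a length up to
 * the next 4-byte boundary, so align(5) == 8 and align(8) == 8.
 * Assuming mod() masks its argument down to the 16-bit link sequence
 * number space (x & 0xffff), link_last_sent() wraps correctly at zero:
 * if the next packet to send has seqno 0, the last one sent is 0xffff.
 */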
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return (l_ptr->owner->active_links[0] == l_ptr) ||
	       (l_ptr->owner->active_links[1] == l_ptr);
}

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 */
static void link_timeout(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
	l_ptr->stats.queue_sz_counts++;

	if (l_ptr->first_out) {
		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */
	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_queue(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}

static void link_set_timer(struct tipc_link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}
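/*
 * Note on link naming (illustrative): tipc_link_create() below builds
 * names of the form "Z.C.N:if-Z.C.N:if", e.g. "1.1.1:eth0-1.1.2:unknown"
 * for a link from own node 1.1.1 over interface eth0 to peer 1.1.2;
 * the peer interface part stays "unknown" until a reset/activate
 * message carries the real name (see tipc_link_proto_rcv()).
 */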
/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= 2) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish third link to %s\n", addr_string);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->b_ptr = b_ptr;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	INIT_LIST_HEAD(&l_ptr->waiting_ports);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	k_init_timer(&l_ptr->timer, (Handler)link_timeout,
		     (unsigned long)l_ptr);

	link_state_event(l_ptr, STARTING_EVT);

	return l_ptr;
}

void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		spin_lock_bh(&n_ptr->lock);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr) {
			tipc_link_reset(l_ptr);
			if (shutting_down || !tipc_node_is_up(n_ptr)) {
				tipc_node_detach_link(l_ptr->owner, l_ptr);
				tipc_link_reset_fragments(l_ptr);
				spin_unlock_bh(&n_ptr->lock);

				/* Nobody else can access this link now: */
				del_timer_sync(&l_ptr->timer);
				kfree(l_ptr);
			} else {
				/* Detach/delete when failover is finished: */
				l_ptr->flags |= LINK_STOPPED;
				spin_unlock_bh(&n_ptr->lock);
				del_timer_sync(&l_ptr->timer);
			}
			continue;
		}
		spin_unlock_bh(&n_ptr->lock);
	}
	rcu_read_unlock();
}
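/*
 * Worked example for link_schedule_port() below (hypothetical values):
 * a port trying to send sz = 3000 bytes over a link with max_pkt = 1460
 * is recorded as waiting for 1 + (2999 / 1460) = 3 packets' worth of
 * send window before it is woken up again.
 */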
/**
 * link_schedule_port - schedule port for deferred sending
 * @l_ptr: pointer to link
 * @origport: reference to sending port
 * @sz: amount of data to be sent
 *
 * Schedules port for renewed sending of messages after link congestion
 * has abated.
 */
static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
{
	struct tipc_port *p_ptr;

	spin_lock_bh(&tipc_port_list_lock);
	p_ptr = tipc_port_lock(origport);
	if (p_ptr) {
		if (!list_empty(&p_ptr->wait_list))
			goto exit;
		p_ptr->congested = 1;
		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
		l_ptr->stats.link_congs++;
exit:
		tipc_port_unlock(p_ptr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	return -ELINKCONG;
}

void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
{
	struct tipc_port *p_ptr;
	struct tipc_port *temp_p_ptr;
	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;

	if (all)
		win = 100000;
	if (win <= 0)
		return;
	if (!spin_trylock_bh(&tipc_port_list_lock))
		return;
	if (link_congested(l_ptr))
		goto exit;
	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
				 wait_list) {
		if (win <= 0)
			break;
		list_del_init(&p_ptr->wait_list);
		spin_lock_bh(p_ptr->lock);
		p_ptr->congested = 0;
		tipc_port_wakeup(p_ptr);
		win -= p_ptr->waiting_pkts;
		spin_unlock_bh(p_ptr->lock);
	}

exit:
	spin_unlock_bh(&tipc_port_list_lock);
}

/**
 * link_release_outqueue - purge link's outbound message queue
 * @l_ptr: pointer to link
 */
static void link_release_outqueue(struct tipc_link *l_ptr)
{
	kfree_skb_list(l_ptr->first_out);
	l_ptr->first_out = NULL;
	l_ptr->out_queue_size = 0;
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_head);
	l_ptr->reasm_head = NULL;
	l_ptr->reasm_tail = NULL;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	kfree_skb_list(l_ptr->oldest_deferred_in);
	kfree_skb_list(l_ptr->first_out);
	tipc_link_reset_fragments(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
}

void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */
	link_release_outqueue(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	kfree_skb_list(l_ptr->oldest_deferred_in);
	if (!list_empty(&l_ptr->waiting_ports))
		tipc_link_wakeup_ports(l_ptr, 1);

	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}
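/*
 * Note on sessions (distilled from the code above): session numbers
 * carried on the wire are 16 bits wide, so INVALID_SESSION (0x10000)
 * can never match a received value; parking peer_session on it after a
 * reset makes this endpoint accept whatever session the peer proposes
 * next, while the bumped local session distinguishes this incarnation
 * of the link from the previous one.
 */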
void tipc_link_reset_list(unsigned int bearer_id)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		spin_lock_bh(&n_ptr->lock);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr)
			tipc_link_reset(l_ptr);
		spin_unlock_bh(&n_ptr->lock);
	}
	rcu_read_unlock();
}

static void link_activate(struct tipc_link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
}
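/*
 * Overview of the link FSM (a reading aid distilled from
 * link_state_event() below):
 *
 *   RESET_UNKNOWN   -- down; periodically sends RESET_MSG
 *   RESET_RESET     -- down; a peer reset has been seen, periodically
 *                      sends ACTIVATE_MSG
 *   WORKING_WORKING -- up; traffic seen since the last checkpoint
 *   WORKING_UNKNOWN -- up; probing the peer at cont_intv/4 until
 *                      either traffic resumes or abort_limit probes
 *                      go unanswered and the link is reset
 */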
/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (l_ptr->flags & LINK_STOPPED)
		return;

	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return;	/* Not yet. */

	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer\n", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer while probing\n",
				link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
						     1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_warn("%s<%s>, peer not responding\n",
					link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_proto_xmit(l_ptr, RESET_MSG,
						     0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->flags |= LINK_STARTED;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}
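/*
 * Bundling example (hypothetical values): with max_pkt = 1500, a bundle
 * currently 600 bytes long can absorb a 148-byte message: the payload
 * is copied to offset align(600) = 600, the bundle grows to 748 bytes,
 * and its msgcnt is incremented. A new bundle is only created for
 * messages of at most 2/3 of max_pkt (1000 bytes here; see
 * __tipc_link_xmit() below).
 */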
/*
 * link_bundle_buf(): Append contents of a buffer to
 * the tail of an existing one.
 */
static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
			   struct sk_buff *buf)
{
	struct tipc_msg *bundler_msg = buf_msg(bundler);
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 bundle_size = msg_size(bundler_msg);
	u32 to_pos = align(bundle_size);
	u32 pad = to_pos - bundle_size;

	if (msg_user(bundler_msg) != MSG_BUNDLER)
		return 0;
	if (msg_type(bundler_msg) != OPEN_MSG)
		return 0;
	if (skb_tailroom(bundler) < (pad + size))
		return 0;
	if (l_ptr->max_pkt < (to_pos + size))
		return 0;

	skb_put(bundler, pad + size);
	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
	msg_set_size(bundler_msg, to_pos + size);
	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
	kfree_skb(buf);
	l_ptr->stats.sent_bundled++;
	return 1;
}

static void link_add_to_outqueue(struct tipc_link *l_ptr,
				 struct sk_buff *buf,
				 struct tipc_msg *msg)
{
	u32 ack = mod(l_ptr->next_in_no - 1);
	u32 seqno = mod(l_ptr->next_out_no++);

	msg_set_word(msg, 2, ((ack << 16) | seqno));
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	buf->next = NULL;
	if (l_ptr->first_out) {
		l_ptr->last_out->next = buf;
		l_ptr->last_out = buf;
	} else
		l_ptr->first_out = l_ptr->last_out = buf;

	l_ptr->out_queue_size++;
	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
}

static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
				       struct sk_buff *buf_chain,
				       u32 long_msgno)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!l_ptr->next_out)
		l_ptr->next_out = buf_chain;
	while (buf_chain) {
		buf = buf_chain;
		buf_chain = buf_chain->next;

		msg = buf_msg(buf);
		msg_set_long_msgno(msg, long_msgno);
		link_add_to_outqueue(l_ptr, buf, msg);
	}
}

/*
 * __tipc_link_xmit() is the 'full path' for messages, called from
 * inside TIPC when the 'fast path' in tipc_send_xmit
 * has failed, and from link_send()
 */
int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 dsz = msg_data_sz(msg);
	u32 queue_size = l_ptr->out_queue_size;
	u32 imp = tipc_msg_tot_importance(msg);
	u32 queue_limit = l_ptr->queue_limit[imp];
	u32 max_packet = l_ptr->max_pkt;

	/* Match msg importance against queue limits: */
	if (unlikely(queue_size >= queue_limit)) {
		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
			link_schedule_port(l_ptr, msg_origport(msg), size);
			kfree_skb(buf);
			return -ELINKCONG;
		}
		kfree_skb(buf);
		if (imp > CONN_MANAGER) {
			pr_warn("%s<%s>, send queue full", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
		}
		return dsz;
	}

	/* Fragmentation needed ? */
	if (size > max_packet)
		return tipc_link_frag_xmit(l_ptr, buf);

	/* Packet can be queued or sent. */
	if (likely(!link_congested(l_ptr))) {
		link_add_to_outqueue(l_ptr, buf, msg);

		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		return dsz;
	}
	/* Congestion: can message be bundled ? */
	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
	    (msg_user(msg) != MSG_FRAGMENTER)) {

		/* Try adding message to an existing bundle */
		if (l_ptr->next_out &&
		    link_bundle_buf(l_ptr, l_ptr->last_out, buf))
			return dsz;

		/* Try creating a new bundle */
		if (size <= max_packet * 2 / 3) {
			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
			struct tipc_msg bundler_hdr;

			if (bundler) {
				tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
					      INT_H_SIZE, l_ptr->addr);
				skb_copy_to_linear_data(bundler, &bundler_hdr,
							INT_H_SIZE);
				skb_trim(bundler, INT_H_SIZE);
				link_bundle_buf(l_ptr, bundler, buf);
				buf = bundler;
				msg = buf_msg(buf);
				l_ptr->stats.sent_bundles++;
			}
		}
	}
	if (!l_ptr->next_out)
		l_ptr->next_out = buf;
	link_add_to_outqueue(l_ptr, buf, msg);
	return dsz;
}
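/*
 * Note on the congestion checks in __tipc_link_xmit() above:
 * queue_limit[] is indexed by total message importance (the limits
 * themselves are set up via tipc_link_set_queue_limits()), so
 * higher-importance traffic tolerates a longer backlog before the
 * sending port is put on the wait list and given -ELINKCONG.
 */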
/*
 * tipc_link_xmit(): same as __tipc_link_xmit(), but the link to use
 * has not been selected yet, and the owner node is not locked
 * Called by TIPC internal users, e.g. the name distributor
 */
int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	int res = -ELINKCONG;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = __tipc_link_xmit(l_ptr, buf);
		else
			kfree_skb(buf);
		tipc_node_unlock(n_ptr);
	} else {
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}

/*
 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_xmit(struct tipc_link *l)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (!buf)
		return;

	msg = buf_msg(buf);
	tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
	msg_set_last_bcast(msg, l->owner->bclink.acked);
	link_add_chain_to_outqueue(l, buf, 0);
	tipc_link_push_queue(l);
}

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}
/*
 * tipc_link_names_xmit - send name table entries to new neighbor
 *
 * Send routine for bulk delivery of name table messages when contact
 * with a new neighbor occurs. No link congestion checking is performed
 * because name table messages *must* be delivered. The messages must be
 * small enough not to require fragmentation.
 * Called without any locks held.
 */
void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct sk_buff *temp_buf;

	if (list_empty(message_list))
		return;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[0];
		if (l_ptr) {
			/* convert circular list to linear list */
			((struct sk_buff *)message_list->prev)->next = NULL;
			link_add_chain_to_outqueue(l_ptr,
				(struct sk_buff *)message_list->next, 0);
			tipc_link_push_queue(l_ptr);
			INIT_LIST_HEAD(message_list);
		}
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);

	/* discard the messages if they couldn't be sent */
	list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
		list_del((struct list_head *)buf);
		kfree_skb(buf);
	}
}

/*
 * tipc_link_xmit_fast: Entry for data messages where the
 * destination link is known and the header is complete,
 * including total message length. Very time critical.
 * Link is locked. Returns user data length.
 */
static int tipc_link_xmit_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
			       u32 *used_max_pkt)
{
	struct tipc_msg *msg = buf_msg(buf);
	int res = msg_data_sz(msg);

	if (likely(!link_congested(l_ptr))) {
		if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
			link_add_to_outqueue(l_ptr, buf, msg);
			tipc_bearer_send(l_ptr->b_ptr, buf,
					 &l_ptr->media_addr);
			l_ptr->unacked_window = 0;
			return res;
		} else
			*used_max_pkt = l_ptr->max_pkt;
	}
	return __tipc_link_xmit(l_ptr, buf);	/* All other cases */
}
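/*
 * Note on the retry pattern in tipc_link_iovec_xmit_fast() below: the
 * message is first built against the port's cached max_pkt hint,
 * without any locks held. If the link's real MTU turns out to be
 * smaller, the hint is refreshed and the buffer rebuilt (the "again"
 * label); if the message then still cannot fit in one packet, it is
 * handed to the fragmenting sender tipc_link_iovec_long_xmit() instead.
 */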
/*
 * tipc_link_iovec_xmit_fast: Entry for messages where the
 * destination processor is known and the header is complete,
 * except for total message length.
 * Returns user data length or errno.
 */
int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
			      struct iovec const *msg_sect,
			      unsigned int len, u32 destaddr)
{
	struct tipc_msg *hdr = &sender->phdr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct tipc_node *node;
	int res;
	u32 selector = msg_origport(hdr) & 1;

again:
	/*
	 * Try building message using port's max_pkt hint.
	 * (Must not hold any locks while building message.)
	 */
	res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
	/* Exit if build request was invalid */
	if (unlikely(res < 0))
		return res;

	read_lock_bh(&tipc_net_lock);
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[selector];
		if (likely(l_ptr)) {
			if (likely(buf)) {
				res = tipc_link_xmit_fast(l_ptr, buf,
							  &sender->max_pkt);
exit:
				tipc_node_unlock(node);
				read_unlock_bh(&tipc_net_lock);
				return res;
			}

			/* Exit if link (or bearer) is congested */
			if (link_congested(l_ptr)) {
				res = link_schedule_port(l_ptr,
							 sender->ref, res);
				goto exit;
			}

			/*
			 * Message size exceeds max_pkt hint; update hint,
			 * then re-try fast path or fragment the message
			 */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			read_unlock_bh(&tipc_net_lock);

			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
				goto again;

			return tipc_link_iovec_long_xmit(sender, msg_sect,
							 len, destaddr);
		}
		tipc_node_unlock(node);
	}
	read_unlock_bh(&tipc_net_lock);

	/* Couldn't find a link to the destination node */
	kfree_skb(buf);
	tipc_port_iovec_reject(sender, hdr, msg_sect, len, TIPC_ERR_NO_NODE);
	return -ENETUNREACH;
}
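/*
 * Fragment sizing example for tipc_link_iovec_long_xmit() below
 * (hypothetical values, assuming INT_H_SIZE is 40 bytes): with a
 * max_pkt hint of 1500, max_pkt becomes 1460 (room reserved for a
 * changeover tunnel header) and fragm_sz becomes 1420 (room for the
 * per-fragment header), so a 4000-byte payload yields three fragments.
 */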
/*
 * tipc_link_iovec_long_xmit(): Entry for long messages where the
 * destination node is known and the header is complete,
 * including total message length.
 * Link and bearer congestion status have been checked to be ok,
 * and are ignored if they change.
 *
 * Note that fragments do not use the full link MTU so that they won't have
 * to undergo refragmentation if link changeover causes them to be sent
 * over another link with an additional tunnel header added as prefix.
 * (Refragmentation will still occur if the other link has a smaller MTU.)
 *
 * Returns user data length or errno.
 */
static int tipc_link_iovec_long_xmit(struct tipc_port *sender,
				     struct iovec const *msg_sect,
				     unsigned int len, u32 destaddr)
{
	struct tipc_link *l_ptr;
	struct tipc_node *node;
	struct tipc_msg *hdr = &sender->phdr;
	u32 dsz = len;
	u32 max_pkt, fragm_sz, rest;
	struct tipc_msg fragm_hdr;
	struct sk_buff *buf, *buf_chain, *prev;
	u32 fragm_crs, fragm_rest, hsz, sect_rest;
	const unchar __user *sect_crs;
	int curr_sect;
	u32 fragm_no;
	int res = 0;

again:
	fragm_no = 1;
	max_pkt = sender->max_pkt - INT_H_SIZE;
	/* leave room for tunnel header in case of link changeover */
	fragm_sz = max_pkt - INT_H_SIZE;
	/* leave room for fragmentation header in each fragment */
	rest = dsz;
	fragm_crs = 0;
	fragm_rest = 0;
	sect_rest = 0;
	sect_crs = NULL;
	curr_sect = -1;

	/* Prepare reusable fragment header */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(hdr));
	msg_set_size(&fragm_hdr, max_pkt);
	msg_set_fragm_no(&fragm_hdr, 1);

	/* Prepare header of first fragment */
	buf_chain = buf = tipc_buf_acquire(max_pkt);
	if (!buf)
		return -ENOMEM;
	buf->next = NULL;
	skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
	hsz = msg_hdr_sz(hdr);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);

	/* Chop up message */
	fragm_crs = INT_H_SIZE + hsz;
	fragm_rest = fragm_sz - hsz;

	do {		/* For all sections */
		u32 sz;

		if (!sect_rest) {
			sect_rest = msg_sect[++curr_sect].iov_len;
			sect_crs = msg_sect[curr_sect].iov_base;
		}

		if (sect_rest < fragm_rest)
			sz = sect_rest;
		else
			sz = fragm_rest;

		if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
			res = -EFAULT;
error:
			kfree_skb_list(buf_chain);
			return res;
		}
		sect_crs += sz;
		sect_rest -= sz;
		fragm_crs += sz;
		fragm_rest -= sz;
		rest -= sz;

		if (!fragm_rest && rest) {

			/* Initiate new fragment: */
			if (rest <= fragm_sz) {
				fragm_sz = rest;
				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
			} else {
				msg_set_type(&fragm_hdr, FRAGMENT);
			}
			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
			prev = buf;
			buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
			if (!buf) {
				res = -ENOMEM;
				goto error;
			}

			buf->next = NULL;
			prev->next = buf;
			skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
			fragm_crs = INT_H_SIZE;
			fragm_rest = fragm_sz;
		}
	} while (rest > 0);
	/*
	 * Now we have a buffer chain. Select a link and check
	 * that packet size is still OK
	 */
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[sender->ref & 1];
		if (!l_ptr) {
			tipc_node_unlock(node);
			goto reject;
		}
		if (l_ptr->max_pkt < max_pkt) {
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			kfree_skb_list(buf_chain);
			goto again;
		}
	} else {
reject:
		kfree_skb_list(buf_chain);
		tipc_port_iovec_reject(sender, hdr, msg_sect, len,
				       TIPC_ERR_NO_NODE);
		return -ENETUNREACH;
	}

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);
	tipc_node_unlock(node);
	return dsz;
}

/*
 * tipc_link_push_packet: Push one unsent packet to the media
 */
static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any, */
	/* consider that buffers may have been released in meantime */
	if (r_q_size && buf) {
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = buf_seqno(buf);

		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */
	if (r_q_size && buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->retransm_queue_head = mod(++r_q_head);
		l_ptr->retransm_queue_size = --r_q_size;
		l_ptr->stats.retransmitted++;
		return 0;
	}

	/* Send deferred protocol message, if any: */
	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		kfree_skb(buf);
		l_ptr->proto_msg_queue = NULL;
		return 0;
	}

	/* Send one deferred data message, if send window not full: */
	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = buf_seqno(l_ptr->first_out);

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
			if (msg_user(msg) == MSG_BUNDLER)
				msg_set_type(msg, CLOSED_MSG);
			l_ptr->next_out = buf->next;
			return 0;
		}
	}
	return 1;
}
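/*
 * Note on the retransmission bookkeeping above: the retransmission
 * "queue" is not a separate list, only a (head seqno, size) window
 * over the singly linked send queue starting at first_out. Since
 * acknowledged buffers may have been freed in the meantime,
 * tipc_link_push_packet() first re-walks the send queue to realign
 * the window before sending anything.
 */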
/*
 * push_queue(): push out the unsent messages of a link where
 * congestion has abated. Node is locked
 */
void tipc_link_push_queue(struct tipc_link *l_ptr)
{
	u32 res;

	do {
		res = tipc_link_push_packet(l_ptr);
	} while (!res);
}

static void link_reset_all(unsigned long addr)
{
	struct tipc_node *n_ptr;
	char addr_string[16];
	u32 i;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find((u32)addr);
	if (!n_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return;	/* node no longer exists */
	}

	tipc_node_lock(n_ptr);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, n_ptr->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (n_ptr->links[i]) {
			link_print(n_ptr->links[i], "Resetting link\n");
			tipc_link_reset(n_ptr->links[i]);
		}
	}

	tipc_node_unlock(n_ptr);
	read_unlock_bh(&tipc_net_lock);
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);

		tipc_node_unlock(n_ptr);

		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!buf)
		return;

	msg = buf_msg(buf);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, buf);
			return;
		}
	} else {
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	while (retransmits && (buf != l_ptr->next_out) && buf) {
		msg = buf_msg(buf);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		buf = buf->next;
		retransmits--;
		l_ptr->stats.retransmitted++;
	}

	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}
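/*
 * Failure-detection sketch for tipc_link_retransmit() above: if the
 * same sequence number heads more than 100 consecutive retransmit
 * requests, the peer is assumed unreachable and
 * link_retransmit_failure() resets the link (or, for the broadcast
 * link, schedules a reset of all links to the node that is holding
 * back acknowledgments).
 */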
/**
 * link_insert_deferred_queue - insert deferred messages back into receive chain
 */
static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
						  struct sk_buff *buf)
{
	u32 seq_no;

	if (l_ptr->oldest_deferred_in == NULL)
		return buf;

	seq_no = buf_seqno(l_ptr->oldest_deferred_in);
	if (seq_no == mod(l_ptr->next_in_no)) {
		l_ptr->newest_deferred_in->next = buf;
		buf = l_ptr->oldest_deferred_in;
		l_ptr->oldest_deferred_in = NULL;
		l_ptr->deferred_inqueue_sz = 0;
	}
	return buf;
}

/**
 * link_recv_buf_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
static int link_recv_buf_validate(struct sk_buff *buf)
{
	static u32 min_data_hdr_size[8] = {
		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
	};

	struct tipc_msg *msg;
	u32 tipc_hdr[2];
	u32 size;
	u32 hdr_size;
	u32 min_hdr_size;

	/* If this packet comes from the defer queue, the skb has already
	 * been validated
	 */
	if (unlikely(TIPC_SKB_CB(buf)->deferred))
		return 1;

	if (unlikely(buf->len < MIN_H_SIZE))
		return 0;

	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
	if (msg == NULL)
		return 0;

	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return 0;

	size = msg_size(msg);
	hdr_size = msg_hdr_sz(msg);
	min_hdr_size = msg_isdata(msg) ?
		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;

	if (unlikely((hdr_size < min_hdr_size) ||
		     (size < hdr_size) ||
		     (buf->len < size) ||
		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
		return 0;

	return pskb_may_pull(buf, hdr_size);
}
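/*
 * Validation summary (derived from the checks above): a buffer is
 * accepted only if it is at least MIN_H_SIZE bytes long, carries the
 * right TIPC version, declares a header no smaller than the minimum
 * for its message type, fits entirely within the buffer, and keeps its
 * user data within TIPC_MAX_USER_MSG_SIZE. Everything else is dropped
 * silently by the caller, tipc_rcv() below.
 */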
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @head: pointer to message buffer chain
 * @b_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
	read_lock_bh(&tipc_net_lock);
	while (head) {
		struct tipc_node *n_ptr;
		struct tipc_link *l_ptr;
		struct sk_buff *crs;
		struct sk_buff *buf = head;
		struct tipc_msg *msg;
		u32 seq_no;
		u32 ackd;
		u32 released = 0;

		head = head->next;
		buf->next = NULL;

		/* Ensure message is well-formed */
		if (unlikely(!link_recv_buf_validate(buf)))
			goto discard;

		/* Ensure message data is a single contiguous unit */
		if (unlikely(skb_linearize(buf)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(buf);

		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_rcv(buf, b_ptr);
			else
				tipc_bclink_rcv(buf);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
		tipc_node_lock(n_ptr);

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock_discard;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		     msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->block_setup &= ~WAIT_PEER_DOWN;

		if (n_ptr->block_setup)
			goto unlock_discard;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (n_ptr->bclink.recv_permitted)
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		crs = l_ptr->first_out;
		while ((crs != l_ptr->next_out) &&
		       less_eq(buf_seqno(crs), ackd)) {
			struct sk_buff *next = crs->next;
			kfree_skb(crs);
			crs = next;
			released++;
		}
		if (released) {
			l_ptr->first_out = crs;
			l_ptr->out_queue_size -= released;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_queue(l_ptr);

		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			tipc_link_wakeup_ports(l_ptr, 0);

		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

		/* Process the incoming packet */
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				tipc_link_proto_rcv(l_ptr, buf);
				head = link_insert_deferred_queue(l_ptr, head);
				tipc_node_unlock(n_ptr);
				continue;
			}
			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				buf->next = head;
				head = buf;
				tipc_node_unlock(n_ptr);
				continue;
			}
			goto unlock_discard;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}
		l_ptr->next_in_no++;
		if (unlikely(l_ptr->oldest_deferred_in))
			head = link_insert_deferred_queue(l_ptr, head);

		/* Deliver packet/message to correct user: */
		if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) {
			if (!tipc_link_tunnel_rcv(n_ptr, &buf)) {
				tipc_node_unlock(n_ptr);
				continue;
			}
			msg = buf_msg(buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			int rc;

			l_ptr->stats.recv_fragments++;
			rc = tipc_link_frag_rcv(&l_ptr->reasm_head,
						&l_ptr->reasm_tail,
						&buf);
			if (rc == LINK_REASM_COMPLETE) {
				l_ptr->stats.recv_fragmented++;
				msg = buf_msg(buf);
			} else {
				if (rc == LINK_REASM_ERROR)
					tipc_link_reset(l_ptr);
				tipc_node_unlock(n_ptr);
				continue;
			}
		}

		switch (msg_user(msg)) {
		case TIPC_LOW_IMPORTANCE:
		case TIPC_MEDIUM_IMPORTANCE:
		case TIPC_HIGH_IMPORTANCE:
		case TIPC_CRITICAL_IMPORTANCE:
			tipc_node_unlock(n_ptr);
			tipc_port_rcv(buf);
			continue;
		case MSG_BUNDLER:
			l_ptr->stats.recv_bundles++;
			l_ptr->stats.recv_bundled += msg_msgcnt(msg);
			tipc_node_unlock(n_ptr);
			tipc_link_bundle_rcv(buf);
			continue;
		case NAME_DISTRIBUTOR:
			n_ptr->bclink.recv_permitted = true;
			tipc_node_unlock(n_ptr);
			tipc_named_rcv(buf);
			continue;
		case CONN_MANAGER:
			tipc_node_unlock(n_ptr);
			tipc_port_proto_rcv(buf);
			continue;
		case BCAST_PROTOCOL:
			tipc_link_sync_rcv(n_ptr, buf);
			break;
		default:
			kfree_skb(buf);
			break;
		}
		tipc_node_unlock(n_ptr);
		continue;
unlock_discard:
		tipc_node_unlock(n_ptr);
discard:
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
}
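/*
 * Deferred-queue example for tipc_link_defer_pkt() below (hypothetical
 * sequence numbers): if the queue holds 5-6-9 and packet 8 arrives, it
 * is inserted to give 5-6-8-9 and the function returns 1; if packet 6
 * arrives again it is recognized as a duplicate, freed, and 0 is
 * returned so the caller can count it as such.
 */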
/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
			struct sk_buff *buf)
{
	struct sk_buff *queue_buf;
	struct sk_buff **prev;
	u32 seq_no = buf_seqno(buf);

	buf->next = NULL;

	/* Empty queue ? */
	if (*head == NULL) {
		*head = *tail = buf;
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(*tail), seq_no)) {
		(*tail)->next = buf;
		*tail = buf;
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	prev = head;
	queue_buf = *head;
	for (;;) {
		u32 curr_seqno = buf_seqno(queue_buf);

		if (seq_no == curr_seqno) {
			kfree_skb(buf);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;

		prev = &queue_buf->next;
		queue_buf = queue_buf->next;
	}

	buf->next = queue_buf;
	*prev = buf;
	return 1;
}

/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		tipc_link_proto_rcv(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
				&l_ptr->newest_deferred_in, buf)) {
		l_ptr->deferred_inqueue_sz++;
		l_ptr->stats.deferred_recv++;
		TIPC_SKB_CB(buf)->deferred = true;
		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else
		l_ptr->stats.duplicates++;
}
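/*
 * NACK cadence note: the "% 16 == 1" test above rate-limits gap
 * notifications, so a STATE_MSG is sent for the 1st, 17th, 33rd, ...
 * packet added to the deferred queue rather than for every arrival
 * while a gap is outstanding.
 */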
/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Discard any previous message that was deferred due to congestion */
	if (l_ptr->proto_msg_queue) {
		kfree_skb(l_ptr->proto_msg_queue);
		l_ptr->proto_msg_queue = NULL;
	}

	/* Don't send protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = buf_seqno(l_ptr->next_out);
		msg_set_next_sent(msg, next_sent);
		if (l_ptr->oldest_deferred_in) {
			u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;

	tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
	l_ptr->unacked_window = 0;
	kfree_skb(buf);
}
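/*
 * MTU probe arithmetic example (hypothetical values): with the current
 * max_pkt mtu = 1460 and max_pkt_target = 1500, a probe is padded to
 * (1460 + (1500 - 1460)/2 + 2) & ~3 = 1480 bytes, bisecting toward the
 * target. If ten probes at a size fail to confirm a larger MTU, the
 * target itself is lowered to msg_size - 4 and the search restarts.
 */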
/*
 * Receive protocol message:
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
 */
static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Discard protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		goto exit;

	/* record unnumbered packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
		if (tipc_own_addr > msg_prevnode(msg))
			l_ptr->b_ptr->net_plane = msg_net_plane(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->block_setup = WAIT_NODE_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according to other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_warn("%s<%s>, priority change %u->%u\n",
				link_rst_msg, l_ptr->name, l_ptr->priority,
				msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
					     0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, l_ptr->first_out,
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}


/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *buf;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	buf = tipc_buf_acquire(length + INT_H_SIZE);
	if (!buf) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
	__tipc_link_xmit(tunnel, buf);
}
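/*
 * Changeover overview (a summary of the two tunnelling paths below):
 * ORIGINAL_MSG tunnels are used for failover, when the failing link's
 * send queue is evacuated exactly once via the remaining link, while
 * DUPLICATE_MSG tunnels carry redundant copies when a second link
 * comes up; the receiving end drops whichever copy arrives last.
 */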
1999 */ 2000 void tipc_link_failover_send_queue(struct tipc_link *l_ptr) 2001 { 2002 u32 msgcount = l_ptr->out_queue_size; 2003 struct sk_buff *crs = l_ptr->first_out; 2004 struct tipc_link *tunnel = l_ptr->owner->active_links[0]; 2005 struct tipc_msg tunnel_hdr; 2006 int split_bundles; 2007 2008 if (!tunnel) 2009 return; 2010 2011 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2012 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); 2013 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2014 msg_set_msgcnt(&tunnel_hdr, msgcount); 2015 2016 if (!l_ptr->first_out) { 2017 struct sk_buff *buf; 2018 2019 buf = tipc_buf_acquire(INT_H_SIZE); 2020 if (buf) { 2021 skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE); 2022 msg_set_size(&tunnel_hdr, INT_H_SIZE); 2023 __tipc_link_xmit(tunnel, buf); 2024 } else { 2025 pr_warn("%sunable to send changeover msg\n", 2026 link_co_err); 2027 } 2028 return; 2029 } 2030 2031 split_bundles = (l_ptr->owner->active_links[0] != 2032 l_ptr->owner->active_links[1]); 2033 2034 while (crs) { 2035 struct tipc_msg *msg = buf_msg(crs); 2036 2037 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { 2038 struct tipc_msg *m = msg_get_wrapped(msg); 2039 unchar *pos = (unchar *)m; 2040 2041 msgcount = msg_msgcnt(msg); 2042 while (msgcount--) { 2043 msg_set_seqno(m, msg_seqno(msg)); 2044 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m, 2045 msg_link_selector(m)); 2046 pos += align(msg_size(m)); 2047 m = (struct tipc_msg *)pos; 2048 } 2049 } else { 2050 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg, 2051 msg_link_selector(msg)); 2052 } 2053 crs = crs->next; 2054 } 2055 } 2056 2057 /* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a 2058 * duplicate of the first link's send queue via the new link. This way, we 2059 * are guaranteed that currently queued packets from a socket are delivered 2060 * before future traffic from the same socket, even if this is using the 2061 * new link. The last arriving copy of each duplicate packet is dropped at 2062 * the receiving end by the regular protocol check, so packet cardinality 2063 * and sequence order is preserved per sender/receiver socket pair. 2064 * Owner node is locked. 
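 *
 * Per-packet outline of the loop below (a sketch of this function's
 * own logic, for orientation only):
 *
 *	for (iter = first_out; iter; iter = iter->next) {
 *		refresh ack / bcast_ack in the queued header;
 *		prepend a CHANGEOVER_PROTOCOL / DUPLICATE_MSG header;
 *		__tipc_link_xmit(tunnel, copy);
 *	}
 *
 * On the receiving side the inner packet is handed to
 * link_handle_out_of_seq_msg(), whose sequence-number check is what
 * discards the later-arriving copy of each duplicate.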
2065 */ 2066 void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, 2067 struct tipc_link *tunnel) 2068 { 2069 struct sk_buff *iter; 2070 struct tipc_msg tunnel_hdr; 2071 2072 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2073 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); 2074 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); 2075 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2076 iter = l_ptr->first_out; 2077 while (iter) { 2078 struct sk_buff *outbuf; 2079 struct tipc_msg *msg = buf_msg(iter); 2080 u32 length = msg_size(msg); 2081 2082 if (msg_user(msg) == MSG_BUNDLER) 2083 msg_set_type(msg, CLOSED_MSG); 2084 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ 2085 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 2086 msg_set_size(&tunnel_hdr, length + INT_H_SIZE); 2087 outbuf = tipc_buf_acquire(length + INT_H_SIZE); 2088 if (outbuf == NULL) { 2089 pr_warn("%sunable to send duplicate msg\n", 2090 link_co_err); 2091 return; 2092 } 2093 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE); 2094 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data, 2095 length); 2096 __tipc_link_xmit(tunnel, outbuf); 2097 if (!tipc_link_is_up(l_ptr)) 2098 return; 2099 iter = iter->next; 2100 } 2101 } 2102 2103 /** 2104 * buf_extract - extracts embedded TIPC message from another message 2105 * @skb: encapsulating message buffer 2106 * @from_pos: offset to extract from 2107 * 2108 * Returns a new message buffer containing an embedded message. The 2109 * encapsulating message itself is left unchanged. 2110 */ 2111 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) 2112 { 2113 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); 2114 u32 size = msg_size(msg); 2115 struct sk_buff *eb; 2116 2117 eb = tipc_buf_acquire(size); 2118 if (eb) 2119 skb_copy_to_linear_data(eb, msg, size); 2120 return eb; 2121 } 2122 2123 2124 2125 /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet. 2126 * Owner node is locked. 2127 */ 2128 static void tipc_link_dup_rcv(struct tipc_link *l_ptr, 2129 struct sk_buff *t_buf) 2130 { 2131 struct sk_buff *buf; 2132 2133 if (!tipc_link_is_up(l_ptr)) 2134 return; 2135 2136 buf = buf_extract(t_buf, INT_H_SIZE); 2137 if (buf == NULL) { 2138 pr_warn("%sfailed to extract inner dup pkt\n", link_co_err); 2139 return; 2140 } 2141 2142 /* Add buffer to deferred queue, if applicable: */ 2143 link_handle_out_of_seq_msg(l_ptr, buf); 2144 } 2145 2146 /* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet 2147 * Owner node is locked. 2148 */ 2149 static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr, 2150 struct sk_buff *t_buf) 2151 { 2152 struct tipc_msg *t_msg = buf_msg(t_buf); 2153 struct sk_buff *buf = NULL; 2154 struct tipc_msg *msg; 2155 2156 if (tipc_link_is_up(l_ptr)) 2157 tipc_link_reset(l_ptr); 2158 2159 /* First failover packet? */ 2160 if (l_ptr->exp_msg_count == START_CHANGEOVER) 2161 l_ptr->exp_msg_count = msg_msgcnt(t_msg); 2162 2163 /* Should there be an inner packet? 
*/ 2164 if (l_ptr->exp_msg_count) { 2165 l_ptr->exp_msg_count--; 2166 buf = buf_extract(t_buf, INT_H_SIZE); 2167 if (buf == NULL) { 2168 pr_warn("%sno inner failover pkt\n", link_co_err); 2169 goto exit; 2170 } 2171 msg = buf_msg(buf); 2172 2173 if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) { 2174 kfree_skb(buf); 2175 buf = NULL; 2176 goto exit; 2177 } 2178 if (msg_user(msg) == MSG_FRAGMENTER) { 2179 l_ptr->stats.recv_fragments++; 2180 tipc_link_frag_rcv(&l_ptr->reasm_head, 2181 &l_ptr->reasm_tail, 2182 &buf); 2183 } 2184 } 2185 exit: 2186 if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) { 2187 tipc_node_detach_link(l_ptr->owner, l_ptr); 2188 kfree(l_ptr); 2189 } 2190 return buf; 2191 } 2192 2193 /* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent 2194 * via other link as result of a failover (ORIGINAL_MSG) or 2195 * a new active link (DUPLICATE_MSG). Failover packets are 2196 * returned to the active link for delivery upwards. 2197 * Owner node is locked. 2198 */ 2199 static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr, 2200 struct sk_buff **buf) 2201 { 2202 struct sk_buff *t_buf = *buf; 2203 struct tipc_link *l_ptr; 2204 struct tipc_msg *t_msg = buf_msg(t_buf); 2205 u32 bearer_id = msg_bearer_id(t_msg); 2206 2207 *buf = NULL; 2208 2209 if (bearer_id >= MAX_BEARERS) 2210 goto exit; 2211 2212 l_ptr = n_ptr->links[bearer_id]; 2213 if (!l_ptr) 2214 goto exit; 2215 2216 if (msg_type(t_msg) == DUPLICATE_MSG) 2217 tipc_link_dup_rcv(l_ptr, t_buf); 2218 else if (msg_type(t_msg) == ORIGINAL_MSG) 2219 *buf = tipc_link_failover_rcv(l_ptr, t_buf); 2220 else 2221 pr_warn("%sunknown tunnel pkt received\n", link_co_err); 2222 exit: 2223 kfree_skb(t_buf); 2224 return *buf != NULL; 2225 } 2226 2227 /* 2228 * Bundler functionality: 2229 */ 2230 void tipc_link_bundle_rcv(struct sk_buff *buf) 2231 { 2232 u32 msgcount = msg_msgcnt(buf_msg(buf)); 2233 u32 pos = INT_H_SIZE; 2234 struct sk_buff *obuf; 2235 2236 while (msgcount--) { 2237 obuf = buf_extract(buf, pos); 2238 if (obuf == NULL) { 2239 pr_warn("Link unable to unbundle message(s)\n"); 2240 break; 2241 } 2242 pos += align(msg_size(buf_msg(obuf))); 2243 tipc_net_route_msg(obuf); 2244 } 2245 kfree_skb(buf); 2246 } 2247 2248 /* 2249 * Fragmentation/defragmentation: 2250 */ 2251 2252 /* 2253 * tipc_link_frag_xmit: Entry for buffers needing fragmentation. 2254 * The buffer is complete, inclusive total message length. 2255 * Returns user data length. 
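 *
 * Worked example of the arithmetic below (MTU value assumed purely
 * for illustration): with max_pkt == 1500, each fragment carries
 * fragm_sz == 1500 - INT_H_SIZE octets of the original buffer, so a
 * message of insize == 4000 octets is cut into FIRST_FRAGMENT,
 * FRAGMENT, ..., LAST_FRAGMENT pieces, ceil(4000 / fragm_sz) in all,
 * every one sharing the same long_msg_seq_no.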
2256 */ 2257 static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf) 2258 { 2259 struct sk_buff *buf_chain = NULL; 2260 struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain; 2261 struct tipc_msg *inmsg = buf_msg(buf); 2262 struct tipc_msg fragm_hdr; 2263 u32 insize = msg_size(inmsg); 2264 u32 dsz = msg_data_sz(inmsg); 2265 unchar *crs = buf->data; 2266 u32 rest = insize; 2267 u32 pack_sz = l_ptr->max_pkt; 2268 u32 fragm_sz = pack_sz - INT_H_SIZE; 2269 u32 fragm_no = 0; 2270 u32 destaddr; 2271 2272 if (msg_short(inmsg)) 2273 destaddr = l_ptr->addr; 2274 else 2275 destaddr = msg_destnode(inmsg); 2276 2277 /* Prepare reusable fragment header: */ 2278 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 2279 INT_H_SIZE, destaddr); 2280 2281 /* Chop up message: */ 2282 while (rest > 0) { 2283 struct sk_buff *fragm; 2284 2285 if (rest <= fragm_sz) { 2286 fragm_sz = rest; 2287 msg_set_type(&fragm_hdr, LAST_FRAGMENT); 2288 } 2289 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE); 2290 if (fragm == NULL) { 2291 kfree_skb(buf); 2292 kfree_skb_list(buf_chain); 2293 return -ENOMEM; 2294 } 2295 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE); 2296 fragm_no++; 2297 msg_set_fragm_no(&fragm_hdr, fragm_no); 2298 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE); 2299 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs, 2300 fragm_sz); 2301 buf_chain_tail->next = fragm; 2302 buf_chain_tail = fragm; 2303 2304 rest -= fragm_sz; 2305 crs += fragm_sz; 2306 msg_set_type(&fragm_hdr, FRAGMENT); 2307 } 2308 kfree_skb(buf); 2309 2310 /* Append chain of fragments to send queue & send them */ 2311 l_ptr->long_msg_seq_no++; 2312 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); 2313 l_ptr->stats.sent_fragments += fragm_no; 2314 l_ptr->stats.sent_fragmented++; 2315 tipc_link_push_queue(l_ptr); 2316 2317 return dsz; 2318 } 2319 2320 /* tipc_link_frag_rcv(): Called with node lock on. Returns 2321 * the reassembled buffer if message is complete. 2322 */ 2323 int tipc_link_frag_rcv(struct sk_buff **head, struct sk_buff **tail, 2324 struct sk_buff **fbuf) 2325 { 2326 struct sk_buff *frag = *fbuf; 2327 struct tipc_msg *msg = buf_msg(frag); 2328 u32 fragid = msg_type(msg); 2329 bool headstolen; 2330 int delta; 2331 2332 skb_pull(frag, msg_hdr_sz(msg)); 2333 if (fragid == FIRST_FRAGMENT) { 2334 if (*head || skb_unclone(frag, GFP_ATOMIC)) 2335 goto out_free; 2336 *head = frag; 2337 skb_frag_list_init(*head); 2338 *fbuf = NULL; 2339 return 0; 2340 } else if (*head && 2341 skb_try_coalesce(*head, frag, &headstolen, &delta)) { 2342 kfree_skb_partial(frag, headstolen); 2343 } else { 2344 if (!*head) 2345 goto out_free; 2346 if (!skb_has_frag_list(*head)) 2347 skb_shinfo(*head)->frag_list = frag; 2348 else 2349 (*tail)->next = frag; 2350 *tail = frag; 2351 (*head)->truesize += frag->truesize; 2352 } 2353 if (fragid == LAST_FRAGMENT) { 2354 *fbuf = *head; 2355 *tail = *head = NULL; 2356 return LINK_REASM_COMPLETE; 2357 } 2358 *fbuf = NULL; 2359 return 0; 2360 out_free: 2361 pr_warn_ratelimited("Link unable to reassemble fragmented message\n"); 2362 kfree_skb(*fbuf); 2363 *fbuf = NULL; 2364 return LINK_REASM_ERROR; 2365 } 2366 2367 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) 2368 { 2369 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) 2370 return; 2371 2372 l_ptr->tolerance = tolerance; 2373 l_ptr->continuity_interval = 2374 ((tolerance / 4) > 500) ? 
500 : tolerance / 4;
2375 	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2376 }
2377 
2378 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2379 {
2380 	/* Data messages from this node, including FIRST_FRAGM */
2381 	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2382 	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2383 	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2384 	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
2385 	/* Transiting data messages, including FIRST_FRAGM */
2386 	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2387 	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2388 	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2389 	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
2390 	l_ptr->queue_limit[CONN_MANAGER] = 1200;
2391 	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2392 	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2393 	/* FRAGMENT and LAST_FRAGMENT packets */
2394 	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2395 }
2396 
2397 /* tipc_link_find_owner - locate owner node of link by link's name
2398  * @name: pointer to link name string
2399  * @bearer_id: pointer to index in 'node->links' array where the link was found.
2400  * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2401  * this also prevents link deletion.
2402  *
2403  * Returns pointer to node owning the link, or NULL if no matching link is found.
2404  */
2405 static struct tipc_node *tipc_link_find_owner(const char *link_name,
2406 					      unsigned int *bearer_id)
2407 {
2408 	struct tipc_link *l_ptr;
2409 	struct tipc_node *n_ptr;
2410 	struct tipc_node *found_node = NULL;
2411 	int i;
2412 
2413 	*bearer_id = 0;
2414 	rcu_read_lock();
2415 	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
2416 		tipc_node_lock(n_ptr);
2417 		for (i = 0; i < MAX_BEARERS; i++) {
2418 			l_ptr = n_ptr->links[i];
2419 			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
2420 				*bearer_id = i;
2421 				found_node = n_ptr;
2422 				break;
2423 			}
2424 		}
2425 		tipc_node_unlock(n_ptr);
2426 		if (found_node)
2427 			break;
2428 	}
2429 	rcu_read_unlock();
2430 
2431 	return found_node;
2432 }
2433 
2434 /**
2435  * link_value_is_valid - validate proposed link tolerance/priority/window
2436  *
2437  * @cmd: value type (TIPC_CMD_SET_LINK_*)
2438  * @new_value: the new value
2439  *
2440  * Returns 1 if value is within range, 0 if not.
2441  */
2442 static int link_value_is_valid(u16 cmd, u32 new_value)
2443 {
2444 	switch (cmd) {
2445 	case TIPC_CMD_SET_LINK_TOL:
2446 		return (new_value >= TIPC_MIN_LINK_TOL) &&
2447 			(new_value <= TIPC_MAX_LINK_TOL);
2448 	case TIPC_CMD_SET_LINK_PRI:
2449 		return (new_value <= TIPC_MAX_LINK_PRI);
2450 	case TIPC_CMD_SET_LINK_WINDOW:
2451 		return (new_value >= TIPC_MIN_LINK_WIN) &&
2452 			(new_value <= TIPC_MAX_LINK_WIN);
2453 	}
2454 	return 0;
2455 }
2456 
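/*
 * Worked example (values assumed for illustration only): a window of
 * 50 passes link_value_is_valid(TIPC_CMD_SET_LINK_WINDOW, 50) as long
 * as it lies within [TIPC_MIN_LINK_WIN, TIPC_MAX_LINK_WIN], and
 * tipc_link_set_queue_limits() then derives, using integer division
 * (50 / 3 == 16):
 *
 *	queue_limit[TIPC_LOW_IMPORTANCE]      = 50
 *	queue_limit[TIPC_MEDIUM_IMPORTANCE]   = 16 * 4 = 64
 *	queue_limit[TIPC_HIGH_IMPORTANCE]     = 16 * 5 = 80
 *	queue_limit[TIPC_CRITICAL_IMPORTANCE] = 16 * 6 = 96
 */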
2466 */ 2467 static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) 2468 { 2469 struct tipc_node *node; 2470 struct tipc_link *l_ptr; 2471 struct tipc_bearer *b_ptr; 2472 struct tipc_media *m_ptr; 2473 int bearer_id; 2474 int res = 0; 2475 2476 node = tipc_link_find_owner(name, &bearer_id); 2477 if (node) { 2478 tipc_node_lock(node); 2479 l_ptr = node->links[bearer_id]; 2480 2481 if (l_ptr) { 2482 switch (cmd) { 2483 case TIPC_CMD_SET_LINK_TOL: 2484 link_set_supervision_props(l_ptr, new_value); 2485 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 2486 new_value, 0, 0); 2487 break; 2488 case TIPC_CMD_SET_LINK_PRI: 2489 l_ptr->priority = new_value; 2490 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 2491 0, new_value, 0); 2492 break; 2493 case TIPC_CMD_SET_LINK_WINDOW: 2494 tipc_link_set_queue_limits(l_ptr, new_value); 2495 break; 2496 default: 2497 res = -EINVAL; 2498 break; 2499 } 2500 } 2501 tipc_node_unlock(node); 2502 return res; 2503 } 2504 2505 b_ptr = tipc_bearer_find(name); 2506 if (b_ptr) { 2507 switch (cmd) { 2508 case TIPC_CMD_SET_LINK_TOL: 2509 b_ptr->tolerance = new_value; 2510 break; 2511 case TIPC_CMD_SET_LINK_PRI: 2512 b_ptr->priority = new_value; 2513 break; 2514 case TIPC_CMD_SET_LINK_WINDOW: 2515 b_ptr->window = new_value; 2516 break; 2517 default: 2518 res = -EINVAL; 2519 break; 2520 } 2521 return res; 2522 } 2523 2524 m_ptr = tipc_media_find(name); 2525 if (!m_ptr) 2526 return -ENODEV; 2527 switch (cmd) { 2528 case TIPC_CMD_SET_LINK_TOL: 2529 m_ptr->tolerance = new_value; 2530 break; 2531 case TIPC_CMD_SET_LINK_PRI: 2532 m_ptr->priority = new_value; 2533 break; 2534 case TIPC_CMD_SET_LINK_WINDOW: 2535 m_ptr->window = new_value; 2536 break; 2537 default: 2538 res = -EINVAL; 2539 break; 2540 } 2541 return res; 2542 } 2543 2544 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, 2545 u16 cmd) 2546 { 2547 struct tipc_link_config *args; 2548 u32 new_value; 2549 int res; 2550 2551 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) 2552 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2553 2554 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area); 2555 new_value = ntohl(args->value); 2556 2557 if (!link_value_is_valid(cmd, new_value)) 2558 return tipc_cfg_reply_error_string( 2559 "cannot change, value invalid"); 2560 2561 if (!strcmp(args->name, tipc_bclink_name)) { 2562 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && 2563 (tipc_bclink_set_queue_limits(new_value) == 0)) 2564 return tipc_cfg_reply_none(); 2565 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 2566 " (cannot change setting on broadcast link)"); 2567 } 2568 2569 read_lock_bh(&tipc_net_lock); 2570 res = link_cmd_set_value(args->name, new_value, cmd); 2571 read_unlock_bh(&tipc_net_lock); 2572 if (res) 2573 return tipc_cfg_reply_error_string("cannot change link setting"); 2574 2575 return tipc_cfg_reply_none(); 2576 } 2577 2578 /** 2579 * link_reset_statistics - reset link statistics 2580 * @l_ptr: pointer to link 2581 */ 2582 static void link_reset_statistics(struct tipc_link *l_ptr) 2583 { 2584 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); 2585 l_ptr->stats.sent_info = l_ptr->next_out_no; 2586 l_ptr->stats.recv_info = l_ptr->next_in_no; 2587 } 2588 2589 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) 2590 { 2591 char *link_name; 2592 struct tipc_link *l_ptr; 2593 struct tipc_node *node; 2594 unsigned int bearer_id; 2595 2596 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2597 return 
tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2598 2599 link_name = (char *)TLV_DATA(req_tlv_area); 2600 if (!strcmp(link_name, tipc_bclink_name)) { 2601 if (tipc_bclink_reset_stats()) 2602 return tipc_cfg_reply_error_string("link not found"); 2603 return tipc_cfg_reply_none(); 2604 } 2605 read_lock_bh(&tipc_net_lock); 2606 node = tipc_link_find_owner(link_name, &bearer_id); 2607 if (!node) { 2608 read_unlock_bh(&tipc_net_lock); 2609 return tipc_cfg_reply_error_string("link not found"); 2610 } 2611 tipc_node_lock(node); 2612 l_ptr = node->links[bearer_id]; 2613 if (!l_ptr) { 2614 tipc_node_unlock(node); 2615 read_unlock_bh(&tipc_net_lock); 2616 return tipc_cfg_reply_error_string("link not found"); 2617 } 2618 link_reset_statistics(l_ptr); 2619 tipc_node_unlock(node); 2620 read_unlock_bh(&tipc_net_lock); 2621 return tipc_cfg_reply_none(); 2622 } 2623 2624 /** 2625 * percent - convert count to a percentage of total (rounding up or down) 2626 */ 2627 static u32 percent(u32 count, u32 total) 2628 { 2629 return (count * 100 + (total / 2)) / total; 2630 } 2631 2632 /** 2633 * tipc_link_stats - print link statistics 2634 * @name: link name 2635 * @buf: print buffer area 2636 * @buf_size: size of print buffer area 2637 * 2638 * Returns length of print buffer data string (or 0 if error) 2639 */ 2640 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) 2641 { 2642 struct tipc_link *l; 2643 struct tipc_stats *s; 2644 struct tipc_node *node; 2645 char *status; 2646 u32 profile_total = 0; 2647 unsigned int bearer_id; 2648 int ret; 2649 2650 if (!strcmp(name, tipc_bclink_name)) 2651 return tipc_bclink_stats(buf, buf_size); 2652 2653 read_lock_bh(&tipc_net_lock); 2654 node = tipc_link_find_owner(name, &bearer_id); 2655 if (!node) { 2656 read_unlock_bh(&tipc_net_lock); 2657 return 0; 2658 } 2659 tipc_node_lock(node); 2660 2661 l = node->links[bearer_id]; 2662 if (!l) { 2663 tipc_node_unlock(node); 2664 read_unlock_bh(&tipc_net_lock); 2665 return 0; 2666 } 2667 2668 s = &l->stats; 2669 2670 if (tipc_link_is_active(l)) 2671 status = "ACTIVE"; 2672 else if (tipc_link_is_up(l)) 2673 status = "STANDBY"; 2674 else 2675 status = "DEFUNCT"; 2676 2677 ret = tipc_snprintf(buf, buf_size, "Link <%s>\n" 2678 " %s MTU:%u Priority:%u Tolerance:%u ms" 2679 " Window:%u packets\n", 2680 l->name, status, l->max_pkt, l->priority, 2681 l->tolerance, l->queue_limit[0]); 2682 2683 ret += tipc_snprintf(buf + ret, buf_size - ret, 2684 " RX packets:%u fragments:%u/%u bundles:%u/%u\n", 2685 l->next_in_no - s->recv_info, s->recv_fragments, 2686 s->recv_fragmented, s->recv_bundles, 2687 s->recv_bundled); 2688 2689 ret += tipc_snprintf(buf + ret, buf_size - ret, 2690 " TX packets:%u fragments:%u/%u bundles:%u/%u\n", 2691 l->next_out_no - s->sent_info, s->sent_fragments, 2692 s->sent_fragmented, s->sent_bundles, 2693 s->sent_bundled); 2694 2695 profile_total = s->msg_length_counts; 2696 if (!profile_total) 2697 profile_total = 1; 2698 2699 ret += tipc_snprintf(buf + ret, buf_size - ret, 2700 " TX profile sample:%u packets average:%u octets\n" 2701 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% " 2702 "-16384:%u%% -32768:%u%% -66000:%u%%\n", 2703 s->msg_length_counts, 2704 s->msg_lengths_total / profile_total, 2705 percent(s->msg_length_profile[0], profile_total), 2706 percent(s->msg_length_profile[1], profile_total), 2707 percent(s->msg_length_profile[2], profile_total), 2708 percent(s->msg_length_profile[3], profile_total), 2709 percent(s->msg_length_profile[4], profile_total), 2710 
percent(s->msg_length_profile[5], profile_total), 2711 percent(s->msg_length_profile[6], profile_total)); 2712 2713 ret += tipc_snprintf(buf + ret, buf_size - ret, 2714 " RX states:%u probes:%u naks:%u defs:%u" 2715 " dups:%u\n", s->recv_states, s->recv_probes, 2716 s->recv_nacks, s->deferred_recv, s->duplicates); 2717 2718 ret += tipc_snprintf(buf + ret, buf_size - ret, 2719 " TX states:%u probes:%u naks:%u acks:%u" 2720 " dups:%u\n", s->sent_states, s->sent_probes, 2721 s->sent_nacks, s->sent_acks, s->retransmitted); 2722 2723 ret += tipc_snprintf(buf + ret, buf_size - ret, 2724 " Congestion link:%u Send queue" 2725 " max:%u avg:%u\n", s->link_congs, 2726 s->max_queue_sz, s->queue_sz_counts ? 2727 (s->accu_queue_sz / s->queue_sz_counts) : 0); 2728 2729 tipc_node_unlock(node); 2730 read_unlock_bh(&tipc_net_lock); 2731 return ret; 2732 } 2733 2734 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space) 2735 { 2736 struct sk_buff *buf; 2737 struct tlv_desc *rep_tlv; 2738 int str_len; 2739 int pb_len; 2740 char *pb; 2741 2742 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2743 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2744 2745 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); 2746 if (!buf) 2747 return NULL; 2748 2749 rep_tlv = (struct tlv_desc *)buf->data; 2750 pb = TLV_DATA(rep_tlv); 2751 pb_len = ULTRA_STRING_MAX_LEN; 2752 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area), 2753 pb, pb_len); 2754 if (!str_len) { 2755 kfree_skb(buf); 2756 return tipc_cfg_reply_error_string("link not found"); 2757 } 2758 str_len += 1; /* for "\0" */ 2759 skb_put(buf, TLV_SPACE(str_len)); 2760 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 2761 2762 return buf; 2763 } 2764 2765 /** 2766 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination 2767 * @dest: network address of destination node 2768 * @selector: used to select from set of active links 2769 * 2770 * If no active link can be found, uses default maximum packet size. 2771 */ 2772 u32 tipc_link_get_max_pkt(u32 dest, u32 selector) 2773 { 2774 struct tipc_node *n_ptr; 2775 struct tipc_link *l_ptr; 2776 u32 res = MAX_PKT_DEFAULT; 2777 2778 if (dest == tipc_own_addr) 2779 return MAX_MSG_SIZE; 2780 2781 read_lock_bh(&tipc_net_lock); 2782 n_ptr = tipc_node_find(dest); 2783 if (n_ptr) { 2784 tipc_node_lock(n_ptr); 2785 l_ptr = n_ptr->active_links[selector & 1]; 2786 if (l_ptr) 2787 res = l_ptr->max_pkt; 2788 tipc_node_unlock(n_ptr); 2789 } 2790 read_unlock_bh(&tipc_net_lock); 2791 return res; 2792 } 2793 2794 static void link_print(struct tipc_link *l_ptr, const char *str) 2795 { 2796 pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name); 2797 2798 if (link_working_unknown(l_ptr)) 2799 pr_cont(":WU\n"); 2800 else if (link_reset_reset(l_ptr)) 2801 pr_cont(":RR\n"); 2802 else if (link_reset_unknown(l_ptr)) 2803 pr_cont(":RU\n"); 2804 else if (link_working_working(l_ptr)) 2805 pr_cont(":WW\n"); 2806 else 2807 pr_cont("\n"); 2808 } 2809
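
#if 0
/* Usage sketch (deliberately compiled out): how a hypothetical caller
 * might size user data for a destination via tipc_link_get_max_pkt().
 * The helper name below is an illustrative assumption, not part of
 * TIPC; only tipc_link_get_max_pkt(), INT_H_SIZE and the
 * tipc_link_frag_xmit() path it refers to are real symbols from this
 * file.
 */
static u32 example_payload_room(u32 destnode, u32 selector)
{
	u32 max_pkt = tipc_link_get_max_pkt(destnode, selector);

	/* Messages larger than this must take the tipc_link_frag_xmit()
	 * path, which spends INT_H_SIZE octets per fragment on the
	 * fragment header.
	 */
	return max_pkt - INT_H_SIZE;
}
#endif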