1 /* 2 * net/tipc/link.c: TIPC link code 3 * 4 * Copyright (c) 1996-2007, 2012, Ericsson AB 5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include "core.h" 38 #include "link.h" 39 #include "port.h" 40 #include "name_distr.h" 41 #include "discover.h" 42 #include "config.h" 43 44 #include <linux/pkt_sched.h> 45 46 /* 47 * Error message prefixes 48 */ 49 static const char *link_co_err = "Link changeover error, "; 50 static const char *link_rst_msg = "Resetting link "; 51 static const char *link_unk_evt = "Unknown link event "; 52 53 /* 54 * Out-of-range value for link session numbers 55 */ 56 #define INVALID_SESSION 0x10000 57 58 /* 59 * Link state events: 60 */ 61 #define STARTING_EVT 856384768 /* link processing trigger */ 62 #define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */ 63 #define TIMEOUT_EVT 560817u /* link timer expired */ 64 65 /* 66 * The following two 'message types' is really just implementation 67 * data conveniently stored in the message header. 68 * They must not be considered part of the protocol 69 */ 70 #define OPEN_MSG 0 71 #define CLOSED_MSG 1 72 73 /* 74 * State value stored in 'exp_msg_count' 75 */ 76 #define START_CHANGEOVER 100000u 77 78 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, 79 struct sk_buff *buf); 80 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf); 81 static int link_recv_changeover_msg(struct tipc_link **l_ptr, 82 struct sk_buff **buf); 83 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance); 84 static int link_send_sections_long(struct tipc_port *sender, 85 struct iovec const *msg_sect, 86 unsigned int len, u32 destnode); 87 static void link_state_event(struct tipc_link *l_ptr, u32 event); 88 static void link_reset_statistics(struct tipc_link *l_ptr); 89 static void link_print(struct tipc_link *l_ptr, const char *str); 90 static void link_start(struct tipc_link *l_ptr); 91 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf); 92 static void tipc_link_send_sync(struct tipc_link *l); 93 static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf); 94 95 /* 96 * Simple link routines 97 */ 98 static unsigned int align(unsigned int i) 99 { 100 return (i + 3) & ~3u; 101 } 102 103 static void link_init_max_pkt(struct tipc_link *l_ptr) 104 { 105 u32 max_pkt; 106 107 max_pkt = (l_ptr->b_ptr->mtu & ~3); 108 if (max_pkt > MAX_MSG_SIZE) 109 max_pkt = MAX_MSG_SIZE; 110 111 l_ptr->max_pkt_target = max_pkt; 112 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT) 113 l_ptr->max_pkt = l_ptr->max_pkt_target; 114 else 115 l_ptr->max_pkt = MAX_PKT_DEFAULT; 116 117 l_ptr->max_pkt_probes = 0; 118 } 119 120 static u32 link_next_sent(struct tipc_link *l_ptr) 121 { 122 if (l_ptr->next_out) 123 return buf_seqno(l_ptr->next_out); 124 return mod(l_ptr->next_out_no); 125 } 126 127 static u32 link_last_sent(struct tipc_link *l_ptr) 128 { 129 return mod(link_next_sent(l_ptr) - 1); 130 } 131 132 /* 133 * Simple non-static link routines (i.e. referenced outside this file) 134 */ 135 int tipc_link_is_up(struct tipc_link *l_ptr) 136 { 137 if (!l_ptr) 138 return 0; 139 return link_working_working(l_ptr) || link_working_unknown(l_ptr); 140 } 141 142 int tipc_link_is_active(struct tipc_link *l_ptr) 143 { 144 return (l_ptr->owner->active_links[0] == l_ptr) || 145 (l_ptr->owner->active_links[1] == l_ptr); 146 } 147 148 /** 149 * link_timeout - handle expiration of link timer 150 * @l_ptr: pointer to link 151 * 152 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict 153 * with tipc_link_delete(). (There is no risk that the node will be deleted by 154 * another thread because tipc_link_delete() always cancels the link timer before 155 * tipc_node_delete() is called.) 156 */ 157 static void link_timeout(struct tipc_link *l_ptr) 158 { 159 tipc_node_lock(l_ptr->owner); 160 161 /* update counters used in statistical profiling of send traffic */ 162 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size; 163 l_ptr->stats.queue_sz_counts++; 164 165 if (l_ptr->first_out) { 166 struct tipc_msg *msg = buf_msg(l_ptr->first_out); 167 u32 length = msg_size(msg); 168 169 if ((msg_user(msg) == MSG_FRAGMENTER) && 170 (msg_type(msg) == FIRST_FRAGMENT)) { 171 length = msg_size(msg_get_wrapped(msg)); 172 } 173 if (length) { 174 l_ptr->stats.msg_lengths_total += length; 175 l_ptr->stats.msg_length_counts++; 176 if (length <= 64) 177 l_ptr->stats.msg_length_profile[0]++; 178 else if (length <= 256) 179 l_ptr->stats.msg_length_profile[1]++; 180 else if (length <= 1024) 181 l_ptr->stats.msg_length_profile[2]++; 182 else if (length <= 4096) 183 l_ptr->stats.msg_length_profile[3]++; 184 else if (length <= 16384) 185 l_ptr->stats.msg_length_profile[4]++; 186 else if (length <= 32768) 187 l_ptr->stats.msg_length_profile[5]++; 188 else 189 l_ptr->stats.msg_length_profile[6]++; 190 } 191 } 192 193 /* do all other link processing performed on a periodic basis */ 194 195 link_state_event(l_ptr, TIMEOUT_EVT); 196 197 if (l_ptr->next_out) 198 tipc_link_push_queue(l_ptr); 199 200 tipc_node_unlock(l_ptr->owner); 201 } 202 203 static void link_set_timer(struct tipc_link *l_ptr, u32 time) 204 { 205 k_start_timer(&l_ptr->timer, time); 206 } 207 208 /** 209 * tipc_link_create - create a new link 210 * @n_ptr: pointer to associated node 211 * @b_ptr: pointer to associated bearer 212 * @media_addr: media address to use when sending messages over link 213 * 214 * Returns pointer to link. 215 */ 216 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, 217 struct tipc_bearer *b_ptr, 218 const struct tipc_media_addr *media_addr) 219 { 220 struct tipc_link *l_ptr; 221 struct tipc_msg *msg; 222 char *if_name; 223 char addr_string[16]; 224 u32 peer = n_ptr->addr; 225 226 if (n_ptr->link_cnt >= 2) { 227 tipc_addr_string_fill(addr_string, n_ptr->addr); 228 pr_err("Attempt to establish third link to %s\n", addr_string); 229 return NULL; 230 } 231 232 if (n_ptr->links[b_ptr->identity]) { 233 tipc_addr_string_fill(addr_string, n_ptr->addr); 234 pr_err("Attempt to establish second link on <%s> to %s\n", 235 b_ptr->name, addr_string); 236 return NULL; 237 } 238 239 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC); 240 if (!l_ptr) { 241 pr_warn("Link creation failed, no memory\n"); 242 return NULL; 243 } 244 245 l_ptr->addr = peer; 246 if_name = strchr(b_ptr->name, ':') + 1; 247 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown", 248 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr), 249 tipc_node(tipc_own_addr), 250 if_name, 251 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer)); 252 /* note: peer i/f name is updated by reset/activate message */ 253 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr)); 254 l_ptr->owner = n_ptr; 255 l_ptr->checkpoint = 1; 256 l_ptr->peer_session = INVALID_SESSION; 257 l_ptr->b_ptr = b_ptr; 258 link_set_supervision_props(l_ptr, b_ptr->tolerance); 259 l_ptr->state = RESET_UNKNOWN; 260 261 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; 262 msg = l_ptr->pmsg; 263 tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr); 264 msg_set_size(msg, sizeof(l_ptr->proto_msg)); 265 msg_set_session(msg, (tipc_random & 0xffff)); 266 msg_set_bearer_id(msg, b_ptr->identity); 267 strcpy((char *)msg_data(msg), if_name); 268 269 l_ptr->priority = b_ptr->priority; 270 tipc_link_set_queue_limits(l_ptr, b_ptr->window); 271 272 link_init_max_pkt(l_ptr); 273 274 l_ptr->next_out_no = 1; 275 INIT_LIST_HEAD(&l_ptr->waiting_ports); 276 277 link_reset_statistics(l_ptr); 278 279 tipc_node_attach_link(n_ptr, l_ptr); 280 281 k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr); 282 list_add_tail(&l_ptr->link_list, &b_ptr->links); 283 tipc_k_signal((Handler)link_start, (unsigned long)l_ptr); 284 285 return l_ptr; 286 } 287 288 /** 289 * tipc_link_delete - delete a link 290 * @l_ptr: pointer to link 291 * 292 * Note: 'tipc_net_lock' is write_locked, bearer is locked. 293 * This routine must not grab the node lock until after link timer cancellation 294 * to avoid a potential deadlock situation. 295 */ 296 void tipc_link_delete(struct tipc_link *l_ptr) 297 { 298 if (!l_ptr) { 299 pr_err("Attempt to delete non-existent link\n"); 300 return; 301 } 302 303 k_cancel_timer(&l_ptr->timer); 304 305 tipc_node_lock(l_ptr->owner); 306 tipc_link_reset(l_ptr); 307 tipc_node_detach_link(l_ptr->owner, l_ptr); 308 tipc_link_stop(l_ptr); 309 list_del_init(&l_ptr->link_list); 310 tipc_node_unlock(l_ptr->owner); 311 k_term_timer(&l_ptr->timer); 312 kfree(l_ptr); 313 } 314 315 static void link_start(struct tipc_link *l_ptr) 316 { 317 tipc_node_lock(l_ptr->owner); 318 link_state_event(l_ptr, STARTING_EVT); 319 tipc_node_unlock(l_ptr->owner); 320 } 321 322 /** 323 * link_schedule_port - schedule port for deferred sending 324 * @l_ptr: pointer to link 325 * @origport: reference to sending port 326 * @sz: amount of data to be sent 327 * 328 * Schedules port for renewed sending of messages after link congestion 329 * has abated. 330 */ 331 static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz) 332 { 333 struct tipc_port *p_ptr; 334 335 spin_lock_bh(&tipc_port_list_lock); 336 p_ptr = tipc_port_lock(origport); 337 if (p_ptr) { 338 if (!p_ptr->wakeup) 339 goto exit; 340 if (!list_empty(&p_ptr->wait_list)) 341 goto exit; 342 p_ptr->congested = 1; 343 p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt); 344 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports); 345 l_ptr->stats.link_congs++; 346 exit: 347 tipc_port_unlock(p_ptr); 348 } 349 spin_unlock_bh(&tipc_port_list_lock); 350 return -ELINKCONG; 351 } 352 353 void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all) 354 { 355 struct tipc_port *p_ptr; 356 struct tipc_port *temp_p_ptr; 357 int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size; 358 359 if (all) 360 win = 100000; 361 if (win <= 0) 362 return; 363 if (!spin_trylock_bh(&tipc_port_list_lock)) 364 return; 365 if (link_congested(l_ptr)) 366 goto exit; 367 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports, 368 wait_list) { 369 if (win <= 0) 370 break; 371 list_del_init(&p_ptr->wait_list); 372 spin_lock_bh(p_ptr->lock); 373 p_ptr->congested = 0; 374 p_ptr->wakeup(p_ptr); 375 win -= p_ptr->waiting_pkts; 376 spin_unlock_bh(p_ptr->lock); 377 } 378 379 exit: 380 spin_unlock_bh(&tipc_port_list_lock); 381 } 382 383 /** 384 * link_release_outqueue - purge link's outbound message queue 385 * @l_ptr: pointer to link 386 */ 387 static void link_release_outqueue(struct tipc_link *l_ptr) 388 { 389 struct sk_buff *buf = l_ptr->first_out; 390 struct sk_buff *next; 391 392 while (buf) { 393 next = buf->next; 394 kfree_skb(buf); 395 buf = next; 396 } 397 l_ptr->first_out = NULL; 398 l_ptr->out_queue_size = 0; 399 } 400 401 /** 402 * tipc_link_reset_fragments - purge link's inbound message fragments queue 403 * @l_ptr: pointer to link 404 */ 405 void tipc_link_reset_fragments(struct tipc_link *l_ptr) 406 { 407 kfree_skb(l_ptr->reasm_head); 408 l_ptr->reasm_head = NULL; 409 l_ptr->reasm_tail = NULL; 410 } 411 412 /** 413 * tipc_link_stop - purge all inbound and outbound messages associated with link 414 * @l_ptr: pointer to link 415 */ 416 void tipc_link_stop(struct tipc_link *l_ptr) 417 { 418 struct sk_buff *buf; 419 struct sk_buff *next; 420 421 buf = l_ptr->oldest_deferred_in; 422 while (buf) { 423 next = buf->next; 424 kfree_skb(buf); 425 buf = next; 426 } 427 428 buf = l_ptr->first_out; 429 while (buf) { 430 next = buf->next; 431 kfree_skb(buf); 432 buf = next; 433 } 434 435 tipc_link_reset_fragments(l_ptr); 436 437 kfree_skb(l_ptr->proto_msg_queue); 438 l_ptr->proto_msg_queue = NULL; 439 } 440 441 void tipc_link_reset(struct tipc_link *l_ptr) 442 { 443 struct sk_buff *buf; 444 u32 prev_state = l_ptr->state; 445 u32 checkpoint = l_ptr->next_in_no; 446 int was_active_link = tipc_link_is_active(l_ptr); 447 448 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff)); 449 450 /* Link is down, accept any session */ 451 l_ptr->peer_session = INVALID_SESSION; 452 453 /* Prepare for max packet size negotiation */ 454 link_init_max_pkt(l_ptr); 455 456 l_ptr->state = RESET_UNKNOWN; 457 458 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET)) 459 return; 460 461 tipc_node_link_down(l_ptr->owner, l_ptr); 462 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr); 463 464 if (was_active_link && tipc_node_active_links(l_ptr->owner) && 465 l_ptr->owner->permit_changeover) { 466 l_ptr->reset_checkpoint = checkpoint; 467 l_ptr->exp_msg_count = START_CHANGEOVER; 468 } 469 470 /* Clean up all queues: */ 471 link_release_outqueue(l_ptr); 472 kfree_skb(l_ptr->proto_msg_queue); 473 l_ptr->proto_msg_queue = NULL; 474 buf = l_ptr->oldest_deferred_in; 475 while (buf) { 476 struct sk_buff *next = buf->next; 477 kfree_skb(buf); 478 buf = next; 479 } 480 if (!list_empty(&l_ptr->waiting_ports)) 481 tipc_link_wakeup_ports(l_ptr, 1); 482 483 l_ptr->retransm_queue_head = 0; 484 l_ptr->retransm_queue_size = 0; 485 l_ptr->last_out = NULL; 486 l_ptr->first_out = NULL; 487 l_ptr->next_out = NULL; 488 l_ptr->unacked_window = 0; 489 l_ptr->checkpoint = 1; 490 l_ptr->next_out_no = 1; 491 l_ptr->deferred_inqueue_sz = 0; 492 l_ptr->oldest_deferred_in = NULL; 493 l_ptr->newest_deferred_in = NULL; 494 l_ptr->fsm_msg_cnt = 0; 495 l_ptr->stale_count = 0; 496 link_reset_statistics(l_ptr); 497 } 498 499 500 static void link_activate(struct tipc_link *l_ptr) 501 { 502 l_ptr->next_in_no = l_ptr->stats.recv_info = 1; 503 tipc_node_link_up(l_ptr->owner, l_ptr); 504 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr); 505 } 506 507 /** 508 * link_state_event - link finite state machine 509 * @l_ptr: pointer to link 510 * @event: state machine event to process 511 */ 512 static void link_state_event(struct tipc_link *l_ptr, unsigned int event) 513 { 514 struct tipc_link *other; 515 u32 cont_intv = l_ptr->continuity_interval; 516 517 if (!l_ptr->started && (event != STARTING_EVT)) 518 return; /* Not yet. */ 519 520 if (link_blocked(l_ptr)) { 521 if (event == TIMEOUT_EVT) 522 link_set_timer(l_ptr, cont_intv); 523 return; /* Changeover going on */ 524 } 525 526 switch (l_ptr->state) { 527 case WORKING_WORKING: 528 switch (event) { 529 case TRAFFIC_MSG_EVT: 530 case ACTIVATE_MSG: 531 break; 532 case TIMEOUT_EVT: 533 if (l_ptr->next_in_no != l_ptr->checkpoint) { 534 l_ptr->checkpoint = l_ptr->next_in_no; 535 if (tipc_bclink_acks_missing(l_ptr->owner)) { 536 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 537 0, 0, 0, 0, 0); 538 l_ptr->fsm_msg_cnt++; 539 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) { 540 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 541 1, 0, 0, 0, 0); 542 l_ptr->fsm_msg_cnt++; 543 } 544 link_set_timer(l_ptr, cont_intv); 545 break; 546 } 547 l_ptr->state = WORKING_UNKNOWN; 548 l_ptr->fsm_msg_cnt = 0; 549 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 550 l_ptr->fsm_msg_cnt++; 551 link_set_timer(l_ptr, cont_intv / 4); 552 break; 553 case RESET_MSG: 554 pr_info("%s<%s>, requested by peer\n", link_rst_msg, 555 l_ptr->name); 556 tipc_link_reset(l_ptr); 557 l_ptr->state = RESET_RESET; 558 l_ptr->fsm_msg_cnt = 0; 559 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0); 560 l_ptr->fsm_msg_cnt++; 561 link_set_timer(l_ptr, cont_intv); 562 break; 563 default: 564 pr_err("%s%u in WW state\n", link_unk_evt, event); 565 } 566 break; 567 case WORKING_UNKNOWN: 568 switch (event) { 569 case TRAFFIC_MSG_EVT: 570 case ACTIVATE_MSG: 571 l_ptr->state = WORKING_WORKING; 572 l_ptr->fsm_msg_cnt = 0; 573 link_set_timer(l_ptr, cont_intv); 574 break; 575 case RESET_MSG: 576 pr_info("%s<%s>, requested by peer while probing\n", 577 link_rst_msg, l_ptr->name); 578 tipc_link_reset(l_ptr); 579 l_ptr->state = RESET_RESET; 580 l_ptr->fsm_msg_cnt = 0; 581 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0); 582 l_ptr->fsm_msg_cnt++; 583 link_set_timer(l_ptr, cont_intv); 584 break; 585 case TIMEOUT_EVT: 586 if (l_ptr->next_in_no != l_ptr->checkpoint) { 587 l_ptr->state = WORKING_WORKING; 588 l_ptr->fsm_msg_cnt = 0; 589 l_ptr->checkpoint = l_ptr->next_in_no; 590 if (tipc_bclink_acks_missing(l_ptr->owner)) { 591 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 592 0, 0, 0, 0, 0); 593 l_ptr->fsm_msg_cnt++; 594 } 595 link_set_timer(l_ptr, cont_intv); 596 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) { 597 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 598 1, 0, 0, 0, 0); 599 l_ptr->fsm_msg_cnt++; 600 link_set_timer(l_ptr, cont_intv / 4); 601 } else { /* Link has failed */ 602 pr_warn("%s<%s>, peer not responding\n", 603 link_rst_msg, l_ptr->name); 604 tipc_link_reset(l_ptr); 605 l_ptr->state = RESET_UNKNOWN; 606 l_ptr->fsm_msg_cnt = 0; 607 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 608 0, 0, 0, 0, 0); 609 l_ptr->fsm_msg_cnt++; 610 link_set_timer(l_ptr, cont_intv); 611 } 612 break; 613 default: 614 pr_err("%s%u in WU state\n", link_unk_evt, event); 615 } 616 break; 617 case RESET_UNKNOWN: 618 switch (event) { 619 case TRAFFIC_MSG_EVT: 620 break; 621 case ACTIVATE_MSG: 622 other = l_ptr->owner->active_links[0]; 623 if (other && link_working_unknown(other)) 624 break; 625 l_ptr->state = WORKING_WORKING; 626 l_ptr->fsm_msg_cnt = 0; 627 link_activate(l_ptr); 628 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 629 l_ptr->fsm_msg_cnt++; 630 if (l_ptr->owner->working_links == 1) 631 tipc_link_send_sync(l_ptr); 632 link_set_timer(l_ptr, cont_intv); 633 break; 634 case RESET_MSG: 635 l_ptr->state = RESET_RESET; 636 l_ptr->fsm_msg_cnt = 0; 637 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0); 638 l_ptr->fsm_msg_cnt++; 639 link_set_timer(l_ptr, cont_intv); 640 break; 641 case STARTING_EVT: 642 l_ptr->started = 1; 643 /* fall through */ 644 case TIMEOUT_EVT: 645 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); 646 l_ptr->fsm_msg_cnt++; 647 link_set_timer(l_ptr, cont_intv); 648 break; 649 default: 650 pr_err("%s%u in RU state\n", link_unk_evt, event); 651 } 652 break; 653 case RESET_RESET: 654 switch (event) { 655 case TRAFFIC_MSG_EVT: 656 case ACTIVATE_MSG: 657 other = l_ptr->owner->active_links[0]; 658 if (other && link_working_unknown(other)) 659 break; 660 l_ptr->state = WORKING_WORKING; 661 l_ptr->fsm_msg_cnt = 0; 662 link_activate(l_ptr); 663 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 664 l_ptr->fsm_msg_cnt++; 665 if (l_ptr->owner->working_links == 1) 666 tipc_link_send_sync(l_ptr); 667 link_set_timer(l_ptr, cont_intv); 668 break; 669 case RESET_MSG: 670 break; 671 case TIMEOUT_EVT: 672 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0); 673 l_ptr->fsm_msg_cnt++; 674 link_set_timer(l_ptr, cont_intv); 675 break; 676 default: 677 pr_err("%s%u in RR state\n", link_unk_evt, event); 678 } 679 break; 680 default: 681 pr_err("Unknown link state %u/%u\n", l_ptr->state, event); 682 } 683 } 684 685 /* 686 * link_bundle_buf(): Append contents of a buffer to 687 * the tail of an existing one. 688 */ 689 static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler, 690 struct sk_buff *buf) 691 { 692 struct tipc_msg *bundler_msg = buf_msg(bundler); 693 struct tipc_msg *msg = buf_msg(buf); 694 u32 size = msg_size(msg); 695 u32 bundle_size = msg_size(bundler_msg); 696 u32 to_pos = align(bundle_size); 697 u32 pad = to_pos - bundle_size; 698 699 if (msg_user(bundler_msg) != MSG_BUNDLER) 700 return 0; 701 if (msg_type(bundler_msg) != OPEN_MSG) 702 return 0; 703 if (skb_tailroom(bundler) < (pad + size)) 704 return 0; 705 if (l_ptr->max_pkt < (to_pos + size)) 706 return 0; 707 708 skb_put(bundler, pad + size); 709 skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size); 710 msg_set_size(bundler_msg, to_pos + size); 711 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1); 712 kfree_skb(buf); 713 l_ptr->stats.sent_bundled++; 714 return 1; 715 } 716 717 static void link_add_to_outqueue(struct tipc_link *l_ptr, 718 struct sk_buff *buf, 719 struct tipc_msg *msg) 720 { 721 u32 ack = mod(l_ptr->next_in_no - 1); 722 u32 seqno = mod(l_ptr->next_out_no++); 723 724 msg_set_word(msg, 2, ((ack << 16) | seqno)); 725 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 726 buf->next = NULL; 727 if (l_ptr->first_out) { 728 l_ptr->last_out->next = buf; 729 l_ptr->last_out = buf; 730 } else 731 l_ptr->first_out = l_ptr->last_out = buf; 732 733 l_ptr->out_queue_size++; 734 if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz) 735 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size; 736 } 737 738 static void link_add_chain_to_outqueue(struct tipc_link *l_ptr, 739 struct sk_buff *buf_chain, 740 u32 long_msgno) 741 { 742 struct sk_buff *buf; 743 struct tipc_msg *msg; 744 745 if (!l_ptr->next_out) 746 l_ptr->next_out = buf_chain; 747 while (buf_chain) { 748 buf = buf_chain; 749 buf_chain = buf_chain->next; 750 751 msg = buf_msg(buf); 752 msg_set_long_msgno(msg, long_msgno); 753 link_add_to_outqueue(l_ptr, buf, msg); 754 } 755 } 756 757 /* 758 * tipc_link_send_buf() is the 'full path' for messages, called from 759 * inside TIPC when the 'fast path' in tipc_send_buf 760 * has failed, and from link_send() 761 */ 762 int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) 763 { 764 struct tipc_msg *msg = buf_msg(buf); 765 u32 size = msg_size(msg); 766 u32 dsz = msg_data_sz(msg); 767 u32 queue_size = l_ptr->out_queue_size; 768 u32 imp = tipc_msg_tot_importance(msg); 769 u32 queue_limit = l_ptr->queue_limit[imp]; 770 u32 max_packet = l_ptr->max_pkt; 771 772 /* Match msg importance against queue limits: */ 773 if (unlikely(queue_size >= queue_limit)) { 774 if (imp <= TIPC_CRITICAL_IMPORTANCE) { 775 link_schedule_port(l_ptr, msg_origport(msg), size); 776 kfree_skb(buf); 777 return -ELINKCONG; 778 } 779 kfree_skb(buf); 780 if (imp > CONN_MANAGER) { 781 pr_warn("%s<%s>, send queue full", link_rst_msg, 782 l_ptr->name); 783 tipc_link_reset(l_ptr); 784 } 785 return dsz; 786 } 787 788 /* Fragmentation needed ? */ 789 if (size > max_packet) 790 return link_send_long_buf(l_ptr, buf); 791 792 /* Packet can be queued or sent. */ 793 if (likely(!tipc_bearer_blocked(l_ptr->b_ptr) && 794 !link_congested(l_ptr))) { 795 link_add_to_outqueue(l_ptr, buf, msg); 796 797 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 798 l_ptr->unacked_window = 0; 799 return dsz; 800 } 801 /* Congestion: can message be bundled ? */ 802 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) && 803 (msg_user(msg) != MSG_FRAGMENTER)) { 804 805 /* Try adding message to an existing bundle */ 806 if (l_ptr->next_out && 807 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) 808 return dsz; 809 810 /* Try creating a new bundle */ 811 if (size <= max_packet * 2 / 3) { 812 struct sk_buff *bundler = tipc_buf_acquire(max_packet); 813 struct tipc_msg bundler_hdr; 814 815 if (bundler) { 816 tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, 817 INT_H_SIZE, l_ptr->addr); 818 skb_copy_to_linear_data(bundler, &bundler_hdr, 819 INT_H_SIZE); 820 skb_trim(bundler, INT_H_SIZE); 821 link_bundle_buf(l_ptr, bundler, buf); 822 buf = bundler; 823 msg = buf_msg(buf); 824 l_ptr->stats.sent_bundles++; 825 } 826 } 827 } 828 if (!l_ptr->next_out) 829 l_ptr->next_out = buf; 830 link_add_to_outqueue(l_ptr, buf, msg); 831 return dsz; 832 } 833 834 /* 835 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has 836 * not been selected yet, and the the owner node is not locked 837 * Called by TIPC internal users, e.g. the name distributor 838 */ 839 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) 840 { 841 struct tipc_link *l_ptr; 842 struct tipc_node *n_ptr; 843 int res = -ELINKCONG; 844 845 read_lock_bh(&tipc_net_lock); 846 n_ptr = tipc_node_find(dest); 847 if (n_ptr) { 848 tipc_node_lock(n_ptr); 849 l_ptr = n_ptr->active_links[selector & 1]; 850 if (l_ptr) 851 res = tipc_link_send_buf(l_ptr, buf); 852 else 853 kfree_skb(buf); 854 tipc_node_unlock(n_ptr); 855 } else { 856 kfree_skb(buf); 857 } 858 read_unlock_bh(&tipc_net_lock); 859 return res; 860 } 861 862 /* 863 * tipc_link_send_sync - synchronize broadcast link endpoints. 864 * 865 * Give a newly added peer node the sequence number where it should 866 * start receiving and acking broadcast packets. 867 * 868 * Called with node locked 869 */ 870 static void tipc_link_send_sync(struct tipc_link *l) 871 { 872 struct sk_buff *buf; 873 struct tipc_msg *msg; 874 875 buf = tipc_buf_acquire(INT_H_SIZE); 876 if (!buf) 877 return; 878 879 msg = buf_msg(buf); 880 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr); 881 msg_set_last_bcast(msg, l->owner->bclink.acked); 882 link_add_chain_to_outqueue(l, buf, 0); 883 tipc_link_push_queue(l); 884 } 885 886 /* 887 * tipc_link_recv_sync - synchronize broadcast link endpoints. 888 * Receive the sequence number where we should start receiving and 889 * acking broadcast packets from a newly added peer node, and open 890 * up for reception of such packets. 891 * 892 * Called with node locked 893 */ 894 static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf) 895 { 896 struct tipc_msg *msg = buf_msg(buf); 897 898 n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg); 899 n->bclink.recv_permitted = true; 900 kfree_skb(buf); 901 } 902 903 /* 904 * tipc_link_send_names - send name table entries to new neighbor 905 * 906 * Send routine for bulk delivery of name table messages when contact 907 * with a new neighbor occurs. No link congestion checking is performed 908 * because name table messages *must* be delivered. The messages must be 909 * small enough not to require fragmentation. 910 * Called without any locks held. 911 */ 912 void tipc_link_send_names(struct list_head *message_list, u32 dest) 913 { 914 struct tipc_node *n_ptr; 915 struct tipc_link *l_ptr; 916 struct sk_buff *buf; 917 struct sk_buff *temp_buf; 918 919 if (list_empty(message_list)) 920 return; 921 922 read_lock_bh(&tipc_net_lock); 923 n_ptr = tipc_node_find(dest); 924 if (n_ptr) { 925 tipc_node_lock(n_ptr); 926 l_ptr = n_ptr->active_links[0]; 927 if (l_ptr) { 928 /* convert circular list to linear list */ 929 ((struct sk_buff *)message_list->prev)->next = NULL; 930 link_add_chain_to_outqueue(l_ptr, 931 (struct sk_buff *)message_list->next, 0); 932 tipc_link_push_queue(l_ptr); 933 INIT_LIST_HEAD(message_list); 934 } 935 tipc_node_unlock(n_ptr); 936 } 937 read_unlock_bh(&tipc_net_lock); 938 939 /* discard the messages if they couldn't be sent */ 940 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) { 941 list_del((struct list_head *)buf); 942 kfree_skb(buf); 943 } 944 } 945 946 /* 947 * link_send_buf_fast: Entry for data messages where the 948 * destination link is known and the header is complete, 949 * inclusive total message length. Very time critical. 950 * Link is locked. Returns user data length. 951 */ 952 static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf, 953 u32 *used_max_pkt) 954 { 955 struct tipc_msg *msg = buf_msg(buf); 956 int res = msg_data_sz(msg); 957 958 if (likely(!link_congested(l_ptr))) { 959 if (likely(msg_size(msg) <= l_ptr->max_pkt)) { 960 if (likely(!tipc_bearer_blocked(l_ptr->b_ptr))) { 961 link_add_to_outqueue(l_ptr, buf, msg); 962 tipc_bearer_send(l_ptr->b_ptr, buf, 963 &l_ptr->media_addr); 964 l_ptr->unacked_window = 0; 965 return res; 966 } 967 } else 968 *used_max_pkt = l_ptr->max_pkt; 969 } 970 return tipc_link_send_buf(l_ptr, buf); /* All other cases */ 971 } 972 973 /* 974 * tipc_link_send_sections_fast: Entry for messages where the 975 * destination processor is known and the header is complete, 976 * except for total message length. 977 * Returns user data length or errno. 978 */ 979 int tipc_link_send_sections_fast(struct tipc_port *sender, 980 struct iovec const *msg_sect, 981 unsigned int len, u32 destaddr) 982 { 983 struct tipc_msg *hdr = &sender->phdr; 984 struct tipc_link *l_ptr; 985 struct sk_buff *buf; 986 struct tipc_node *node; 987 int res; 988 u32 selector = msg_origport(hdr) & 1; 989 990 again: 991 /* 992 * Try building message using port's max_pkt hint. 993 * (Must not hold any locks while building message.) 994 */ 995 res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf); 996 /* Exit if build request was invalid */ 997 if (unlikely(res < 0)) 998 return res; 999 1000 read_lock_bh(&tipc_net_lock); 1001 node = tipc_node_find(destaddr); 1002 if (likely(node)) { 1003 tipc_node_lock(node); 1004 l_ptr = node->active_links[selector]; 1005 if (likely(l_ptr)) { 1006 if (likely(buf)) { 1007 res = link_send_buf_fast(l_ptr, buf, 1008 &sender->max_pkt); 1009 exit: 1010 tipc_node_unlock(node); 1011 read_unlock_bh(&tipc_net_lock); 1012 return res; 1013 } 1014 1015 /* Exit if link (or bearer) is congested */ 1016 if (link_congested(l_ptr) || 1017 tipc_bearer_blocked(l_ptr->b_ptr)) { 1018 res = link_schedule_port(l_ptr, 1019 sender->ref, res); 1020 goto exit; 1021 } 1022 1023 /* 1024 * Message size exceeds max_pkt hint; update hint, 1025 * then re-try fast path or fragment the message 1026 */ 1027 sender->max_pkt = l_ptr->max_pkt; 1028 tipc_node_unlock(node); 1029 read_unlock_bh(&tipc_net_lock); 1030 1031 1032 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt) 1033 goto again; 1034 1035 return link_send_sections_long(sender, msg_sect, len, 1036 destaddr); 1037 } 1038 tipc_node_unlock(node); 1039 } 1040 read_unlock_bh(&tipc_net_lock); 1041 1042 /* Couldn't find a link to the destination node */ 1043 if (buf) 1044 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE); 1045 if (res >= 0) 1046 return tipc_port_reject_sections(sender, hdr, msg_sect, 1047 len, TIPC_ERR_NO_NODE); 1048 return res; 1049 } 1050 1051 /* 1052 * link_send_sections_long(): Entry for long messages where the 1053 * destination node is known and the header is complete, 1054 * inclusive total message length. 1055 * Link and bearer congestion status have been checked to be ok, 1056 * and are ignored if they change. 1057 * 1058 * Note that fragments do not use the full link MTU so that they won't have 1059 * to undergo refragmentation if link changeover causes them to be sent 1060 * over another link with an additional tunnel header added as prefix. 1061 * (Refragmentation will still occur if the other link has a smaller MTU.) 1062 * 1063 * Returns user data length or errno. 1064 */ 1065 static int link_send_sections_long(struct tipc_port *sender, 1066 struct iovec const *msg_sect, 1067 unsigned int len, u32 destaddr) 1068 { 1069 struct tipc_link *l_ptr; 1070 struct tipc_node *node; 1071 struct tipc_msg *hdr = &sender->phdr; 1072 u32 dsz = len; 1073 u32 max_pkt, fragm_sz, rest; 1074 struct tipc_msg fragm_hdr; 1075 struct sk_buff *buf, *buf_chain, *prev; 1076 u32 fragm_crs, fragm_rest, hsz, sect_rest; 1077 const unchar __user *sect_crs; 1078 int curr_sect; 1079 u32 fragm_no; 1080 int res = 0; 1081 1082 again: 1083 fragm_no = 1; 1084 max_pkt = sender->max_pkt - INT_H_SIZE; 1085 /* leave room for tunnel header in case of link changeover */ 1086 fragm_sz = max_pkt - INT_H_SIZE; 1087 /* leave room for fragmentation header in each fragment */ 1088 rest = dsz; 1089 fragm_crs = 0; 1090 fragm_rest = 0; 1091 sect_rest = 0; 1092 sect_crs = NULL; 1093 curr_sect = -1; 1094 1095 /* Prepare reusable fragment header */ 1096 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 1097 INT_H_SIZE, msg_destnode(hdr)); 1098 msg_set_size(&fragm_hdr, max_pkt); 1099 msg_set_fragm_no(&fragm_hdr, 1); 1100 1101 /* Prepare header of first fragment */ 1102 buf_chain = buf = tipc_buf_acquire(max_pkt); 1103 if (!buf) 1104 return -ENOMEM; 1105 buf->next = NULL; 1106 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE); 1107 hsz = msg_hdr_sz(hdr); 1108 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz); 1109 1110 /* Chop up message */ 1111 fragm_crs = INT_H_SIZE + hsz; 1112 fragm_rest = fragm_sz - hsz; 1113 1114 do { /* For all sections */ 1115 u32 sz; 1116 1117 if (!sect_rest) { 1118 sect_rest = msg_sect[++curr_sect].iov_len; 1119 sect_crs = msg_sect[curr_sect].iov_base; 1120 } 1121 1122 if (sect_rest < fragm_rest) 1123 sz = sect_rest; 1124 else 1125 sz = fragm_rest; 1126 1127 if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) { 1128 res = -EFAULT; 1129 error: 1130 for (; buf_chain; buf_chain = buf) { 1131 buf = buf_chain->next; 1132 kfree_skb(buf_chain); 1133 } 1134 return res; 1135 } 1136 sect_crs += sz; 1137 sect_rest -= sz; 1138 fragm_crs += sz; 1139 fragm_rest -= sz; 1140 rest -= sz; 1141 1142 if (!fragm_rest && rest) { 1143 1144 /* Initiate new fragment: */ 1145 if (rest <= fragm_sz) { 1146 fragm_sz = rest; 1147 msg_set_type(&fragm_hdr, LAST_FRAGMENT); 1148 } else { 1149 msg_set_type(&fragm_hdr, FRAGMENT); 1150 } 1151 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE); 1152 msg_set_fragm_no(&fragm_hdr, ++fragm_no); 1153 prev = buf; 1154 buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE); 1155 if (!buf) { 1156 res = -ENOMEM; 1157 goto error; 1158 } 1159 1160 buf->next = NULL; 1161 prev->next = buf; 1162 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE); 1163 fragm_crs = INT_H_SIZE; 1164 fragm_rest = fragm_sz; 1165 } 1166 } while (rest > 0); 1167 1168 /* 1169 * Now we have a buffer chain. Select a link and check 1170 * that packet size is still OK 1171 */ 1172 node = tipc_node_find(destaddr); 1173 if (likely(node)) { 1174 tipc_node_lock(node); 1175 l_ptr = node->active_links[sender->ref & 1]; 1176 if (!l_ptr) { 1177 tipc_node_unlock(node); 1178 goto reject; 1179 } 1180 if (l_ptr->max_pkt < max_pkt) { 1181 sender->max_pkt = l_ptr->max_pkt; 1182 tipc_node_unlock(node); 1183 for (; buf_chain; buf_chain = buf) { 1184 buf = buf_chain->next; 1185 kfree_skb(buf_chain); 1186 } 1187 goto again; 1188 } 1189 } else { 1190 reject: 1191 for (; buf_chain; buf_chain = buf) { 1192 buf = buf_chain->next; 1193 kfree_skb(buf_chain); 1194 } 1195 return tipc_port_reject_sections(sender, hdr, msg_sect, 1196 len, TIPC_ERR_NO_NODE); 1197 } 1198 1199 /* Append chain of fragments to send queue & send them */ 1200 l_ptr->long_msg_seq_no++; 1201 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); 1202 l_ptr->stats.sent_fragments += fragm_no; 1203 l_ptr->stats.sent_fragmented++; 1204 tipc_link_push_queue(l_ptr); 1205 tipc_node_unlock(node); 1206 return dsz; 1207 } 1208 1209 /* 1210 * tipc_link_push_packet: Push one unsent packet to the media 1211 */ 1212 u32 tipc_link_push_packet(struct tipc_link *l_ptr) 1213 { 1214 struct sk_buff *buf = l_ptr->first_out; 1215 u32 r_q_size = l_ptr->retransm_queue_size; 1216 u32 r_q_head = l_ptr->retransm_queue_head; 1217 1218 /* Step to position where retransmission failed, if any, */ 1219 /* consider that buffers may have been released in meantime */ 1220 if (r_q_size && buf) { 1221 u32 last = lesser(mod(r_q_head + r_q_size), 1222 link_last_sent(l_ptr)); 1223 u32 first = buf_seqno(buf); 1224 1225 while (buf && less(first, r_q_head)) { 1226 first = mod(first + 1); 1227 buf = buf->next; 1228 } 1229 l_ptr->retransm_queue_head = r_q_head = first; 1230 l_ptr->retransm_queue_size = r_q_size = mod(last - first); 1231 } 1232 1233 /* Continue retransmission now, if there is anything: */ 1234 if (r_q_size && buf) { 1235 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1236 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1237 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1238 l_ptr->retransm_queue_head = mod(++r_q_head); 1239 l_ptr->retransm_queue_size = --r_q_size; 1240 l_ptr->stats.retransmitted++; 1241 return 0; 1242 } 1243 1244 /* Send deferred protocol message, if any: */ 1245 buf = l_ptr->proto_msg_queue; 1246 if (buf) { 1247 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1248 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1249 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1250 l_ptr->unacked_window = 0; 1251 kfree_skb(buf); 1252 l_ptr->proto_msg_queue = NULL; 1253 return 0; 1254 } 1255 1256 /* Send one deferred data message, if send window not full: */ 1257 buf = l_ptr->next_out; 1258 if (buf) { 1259 struct tipc_msg *msg = buf_msg(buf); 1260 u32 next = msg_seqno(msg); 1261 u32 first = buf_seqno(l_ptr->first_out); 1262 1263 if (mod(next - first) < l_ptr->queue_limit[0]) { 1264 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1265 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1266 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1267 if (msg_user(msg) == MSG_BUNDLER) 1268 msg_set_type(msg, CLOSED_MSG); 1269 l_ptr->next_out = buf->next; 1270 return 0; 1271 } 1272 } 1273 return 1; 1274 } 1275 1276 /* 1277 * push_queue(): push out the unsent messages of a link where 1278 * congestion has abated. Node is locked 1279 */ 1280 void tipc_link_push_queue(struct tipc_link *l_ptr) 1281 { 1282 u32 res; 1283 1284 if (tipc_bearer_blocked(l_ptr->b_ptr)) 1285 return; 1286 1287 do { 1288 res = tipc_link_push_packet(l_ptr); 1289 } while (!res); 1290 } 1291 1292 static void link_reset_all(unsigned long addr) 1293 { 1294 struct tipc_node *n_ptr; 1295 char addr_string[16]; 1296 u32 i; 1297 1298 read_lock_bh(&tipc_net_lock); 1299 n_ptr = tipc_node_find((u32)addr); 1300 if (!n_ptr) { 1301 read_unlock_bh(&tipc_net_lock); 1302 return; /* node no longer exists */ 1303 } 1304 1305 tipc_node_lock(n_ptr); 1306 1307 pr_warn("Resetting all links to %s\n", 1308 tipc_addr_string_fill(addr_string, n_ptr->addr)); 1309 1310 for (i = 0; i < MAX_BEARERS; i++) { 1311 if (n_ptr->links[i]) { 1312 link_print(n_ptr->links[i], "Resetting link\n"); 1313 tipc_link_reset(n_ptr->links[i]); 1314 } 1315 } 1316 1317 tipc_node_unlock(n_ptr); 1318 read_unlock_bh(&tipc_net_lock); 1319 } 1320 1321 static void link_retransmit_failure(struct tipc_link *l_ptr, 1322 struct sk_buff *buf) 1323 { 1324 struct tipc_msg *msg = buf_msg(buf); 1325 1326 pr_warn("Retransmission failure on link <%s>\n", l_ptr->name); 1327 1328 if (l_ptr->addr) { 1329 /* Handle failure on standard link */ 1330 link_print(l_ptr, "Resetting link\n"); 1331 tipc_link_reset(l_ptr); 1332 1333 } else { 1334 /* Handle failure on broadcast link */ 1335 struct tipc_node *n_ptr; 1336 char addr_string[16]; 1337 1338 pr_info("Msg seq number: %u, ", msg_seqno(msg)); 1339 pr_cont("Outstanding acks: %lu\n", 1340 (unsigned long) TIPC_SKB_CB(buf)->handle); 1341 1342 n_ptr = tipc_bclink_retransmit_to(); 1343 tipc_node_lock(n_ptr); 1344 1345 tipc_addr_string_fill(addr_string, n_ptr->addr); 1346 pr_info("Broadcast link info for %s\n", addr_string); 1347 pr_info("Reception permitted: %d, Acked: %u\n", 1348 n_ptr->bclink.recv_permitted, 1349 n_ptr->bclink.acked); 1350 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n", 1351 n_ptr->bclink.last_in, 1352 n_ptr->bclink.oos_state, 1353 n_ptr->bclink.last_sent); 1354 1355 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr); 1356 1357 tipc_node_unlock(n_ptr); 1358 1359 l_ptr->stale_count = 0; 1360 } 1361 } 1362 1363 void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, 1364 u32 retransmits) 1365 { 1366 struct tipc_msg *msg; 1367 1368 if (!buf) 1369 return; 1370 1371 msg = buf_msg(buf); 1372 1373 if (tipc_bearer_blocked(l_ptr->b_ptr)) { 1374 if (l_ptr->retransm_queue_size == 0) { 1375 l_ptr->retransm_queue_head = msg_seqno(msg); 1376 l_ptr->retransm_queue_size = retransmits; 1377 } else { 1378 pr_err("Unexpected retransmit on link %s (qsize=%d)\n", 1379 l_ptr->name, l_ptr->retransm_queue_size); 1380 } 1381 return; 1382 } else { 1383 /* Detect repeated retransmit failures on unblocked bearer */ 1384 if (l_ptr->last_retransmitted == msg_seqno(msg)) { 1385 if (++l_ptr->stale_count > 100) { 1386 link_retransmit_failure(l_ptr, buf); 1387 return; 1388 } 1389 } else { 1390 l_ptr->last_retransmitted = msg_seqno(msg); 1391 l_ptr->stale_count = 1; 1392 } 1393 } 1394 1395 while (retransmits && (buf != l_ptr->next_out) && buf) { 1396 msg = buf_msg(buf); 1397 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1398 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1399 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1400 buf = buf->next; 1401 retransmits--; 1402 l_ptr->stats.retransmitted++; 1403 } 1404 1405 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; 1406 } 1407 1408 /** 1409 * link_insert_deferred_queue - insert deferred messages back into receive chain 1410 */ 1411 static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr, 1412 struct sk_buff *buf) 1413 { 1414 u32 seq_no; 1415 1416 if (l_ptr->oldest_deferred_in == NULL) 1417 return buf; 1418 1419 seq_no = buf_seqno(l_ptr->oldest_deferred_in); 1420 if (seq_no == mod(l_ptr->next_in_no)) { 1421 l_ptr->newest_deferred_in->next = buf; 1422 buf = l_ptr->oldest_deferred_in; 1423 l_ptr->oldest_deferred_in = NULL; 1424 l_ptr->deferred_inqueue_sz = 0; 1425 } 1426 return buf; 1427 } 1428 1429 /** 1430 * link_recv_buf_validate - validate basic format of received message 1431 * 1432 * This routine ensures a TIPC message has an acceptable header, and at least 1433 * as much data as the header indicates it should. The routine also ensures 1434 * that the entire message header is stored in the main fragment of the message 1435 * buffer, to simplify future access to message header fields. 1436 * 1437 * Note: Having extra info present in the message header or data areas is OK. 1438 * TIPC will ignore the excess, under the assumption that it is optional info 1439 * introduced by a later release of the protocol. 1440 */ 1441 static int link_recv_buf_validate(struct sk_buff *buf) 1442 { 1443 static u32 min_data_hdr_size[8] = { 1444 SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE, 1445 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE 1446 }; 1447 1448 struct tipc_msg *msg; 1449 u32 tipc_hdr[2]; 1450 u32 size; 1451 u32 hdr_size; 1452 u32 min_hdr_size; 1453 1454 if (unlikely(buf->len < MIN_H_SIZE)) 1455 return 0; 1456 1457 msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr); 1458 if (msg == NULL) 1459 return 0; 1460 1461 if (unlikely(msg_version(msg) != TIPC_VERSION)) 1462 return 0; 1463 1464 size = msg_size(msg); 1465 hdr_size = msg_hdr_sz(msg); 1466 min_hdr_size = msg_isdata(msg) ? 1467 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE; 1468 1469 if (unlikely((hdr_size < min_hdr_size) || 1470 (size < hdr_size) || 1471 (buf->len < size) || 1472 (size - hdr_size > TIPC_MAX_USER_MSG_SIZE))) 1473 return 0; 1474 1475 return pskb_may_pull(buf, hdr_size); 1476 } 1477 1478 /** 1479 * tipc_recv_msg - process TIPC messages arriving from off-node 1480 * @head: pointer to message buffer chain 1481 * @tb_ptr: pointer to bearer message arrived on 1482 * 1483 * Invoked with no locks held. Bearer pointer must point to a valid bearer 1484 * structure (i.e. cannot be NULL), but bearer can be inactive. 1485 */ 1486 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) 1487 { 1488 read_lock_bh(&tipc_net_lock); 1489 while (head) { 1490 struct tipc_node *n_ptr; 1491 struct tipc_link *l_ptr; 1492 struct sk_buff *crs; 1493 struct sk_buff *buf = head; 1494 struct tipc_msg *msg; 1495 u32 seq_no; 1496 u32 ackd; 1497 u32 released = 0; 1498 int type; 1499 1500 head = head->next; 1501 1502 /* Ensure bearer is still enabled */ 1503 if (unlikely(!b_ptr->active)) 1504 goto discard; 1505 1506 /* Ensure message is well-formed */ 1507 if (unlikely(!link_recv_buf_validate(buf))) 1508 goto discard; 1509 1510 /* Ensure message data is a single contiguous unit */ 1511 if (unlikely(skb_linearize(buf))) 1512 goto discard; 1513 1514 /* Handle arrival of a non-unicast link message */ 1515 msg = buf_msg(buf); 1516 1517 if (unlikely(msg_non_seq(msg))) { 1518 if (msg_user(msg) == LINK_CONFIG) 1519 tipc_disc_recv_msg(buf, b_ptr); 1520 else 1521 tipc_bclink_recv_pkt(buf); 1522 continue; 1523 } 1524 1525 /* Discard unicast link messages destined for another node */ 1526 if (unlikely(!msg_short(msg) && 1527 (msg_destnode(msg) != tipc_own_addr))) 1528 goto discard; 1529 1530 /* Locate neighboring node that sent message */ 1531 n_ptr = tipc_node_find(msg_prevnode(msg)); 1532 if (unlikely(!n_ptr)) 1533 goto discard; 1534 tipc_node_lock(n_ptr); 1535 1536 /* Locate unicast link endpoint that should handle message */ 1537 l_ptr = n_ptr->links[b_ptr->identity]; 1538 if (unlikely(!l_ptr)) 1539 goto unlock_discard; 1540 1541 /* Verify that communication with node is currently allowed */ 1542 if ((n_ptr->block_setup & WAIT_PEER_DOWN) && 1543 msg_user(msg) == LINK_PROTOCOL && 1544 (msg_type(msg) == RESET_MSG || 1545 msg_type(msg) == ACTIVATE_MSG) && 1546 !msg_redundant_link(msg)) 1547 n_ptr->block_setup &= ~WAIT_PEER_DOWN; 1548 1549 if (n_ptr->block_setup) 1550 goto unlock_discard; 1551 1552 /* Validate message sequence number info */ 1553 seq_no = msg_seqno(msg); 1554 ackd = msg_ack(msg); 1555 1556 /* Release acked messages */ 1557 if (n_ptr->bclink.recv_permitted) 1558 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); 1559 1560 crs = l_ptr->first_out; 1561 while ((crs != l_ptr->next_out) && 1562 less_eq(buf_seqno(crs), ackd)) { 1563 struct sk_buff *next = crs->next; 1564 1565 kfree_skb(crs); 1566 crs = next; 1567 released++; 1568 } 1569 if (released) { 1570 l_ptr->first_out = crs; 1571 l_ptr->out_queue_size -= released; 1572 } 1573 1574 /* Try sending any messages link endpoint has pending */ 1575 if (unlikely(l_ptr->next_out)) 1576 tipc_link_push_queue(l_ptr); 1577 if (unlikely(!list_empty(&l_ptr->waiting_ports))) 1578 tipc_link_wakeup_ports(l_ptr, 0); 1579 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) { 1580 l_ptr->stats.sent_acks++; 1581 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1582 } 1583 1584 /* Now (finally!) process the incoming message */ 1585 protocol_check: 1586 if (unlikely(!link_working_working(l_ptr))) { 1587 if (msg_user(msg) == LINK_PROTOCOL) { 1588 link_recv_proto_msg(l_ptr, buf); 1589 head = link_insert_deferred_queue(l_ptr, head); 1590 tipc_node_unlock(n_ptr); 1591 continue; 1592 } 1593 1594 /* Traffic message. Conditionally activate link */ 1595 link_state_event(l_ptr, TRAFFIC_MSG_EVT); 1596 1597 if (link_working_working(l_ptr)) { 1598 /* Re-insert buffer in front of queue */ 1599 buf->next = head; 1600 head = buf; 1601 tipc_node_unlock(n_ptr); 1602 continue; 1603 } 1604 goto unlock_discard; 1605 } 1606 1607 /* Link is now in state WORKING_WORKING */ 1608 if (unlikely(seq_no != mod(l_ptr->next_in_no))) { 1609 link_handle_out_of_seq_msg(l_ptr, buf); 1610 head = link_insert_deferred_queue(l_ptr, head); 1611 tipc_node_unlock(n_ptr); 1612 continue; 1613 } 1614 l_ptr->next_in_no++; 1615 if (unlikely(l_ptr->oldest_deferred_in)) 1616 head = link_insert_deferred_queue(l_ptr, head); 1617 deliver: 1618 if (likely(msg_isdata(msg))) { 1619 tipc_node_unlock(n_ptr); 1620 tipc_port_recv_msg(buf); 1621 continue; 1622 } 1623 switch (msg_user(msg)) { 1624 int ret; 1625 case MSG_BUNDLER: 1626 l_ptr->stats.recv_bundles++; 1627 l_ptr->stats.recv_bundled += msg_msgcnt(msg); 1628 tipc_node_unlock(n_ptr); 1629 tipc_link_recv_bundle(buf); 1630 continue; 1631 case NAME_DISTRIBUTOR: 1632 n_ptr->bclink.recv_permitted = true; 1633 tipc_node_unlock(n_ptr); 1634 tipc_named_recv(buf); 1635 continue; 1636 case BCAST_PROTOCOL: 1637 tipc_link_recv_sync(n_ptr, buf); 1638 tipc_node_unlock(n_ptr); 1639 continue; 1640 case CONN_MANAGER: 1641 tipc_node_unlock(n_ptr); 1642 tipc_port_recv_proto_msg(buf); 1643 continue; 1644 case MSG_FRAGMENTER: 1645 l_ptr->stats.recv_fragments++; 1646 ret = tipc_link_recv_fragment(&l_ptr->reasm_head, 1647 &l_ptr->reasm_tail, 1648 &buf); 1649 if (ret == LINK_REASM_COMPLETE) { 1650 l_ptr->stats.recv_fragmented++; 1651 msg = buf_msg(buf); 1652 goto deliver; 1653 } 1654 if (ret == LINK_REASM_ERROR) 1655 tipc_link_reset(l_ptr); 1656 tipc_node_unlock(n_ptr); 1657 continue; 1658 case CHANGEOVER_PROTOCOL: 1659 type = msg_type(msg); 1660 if (link_recv_changeover_msg(&l_ptr, &buf)) { 1661 msg = buf_msg(buf); 1662 seq_no = msg_seqno(msg); 1663 if (type == ORIGINAL_MSG) 1664 goto deliver; 1665 goto protocol_check; 1666 } 1667 break; 1668 default: 1669 kfree_skb(buf); 1670 buf = NULL; 1671 break; 1672 } 1673 tipc_node_unlock(n_ptr); 1674 tipc_net_route_msg(buf); 1675 continue; 1676 unlock_discard: 1677 1678 tipc_node_unlock(n_ptr); 1679 discard: 1680 kfree_skb(buf); 1681 } 1682 read_unlock_bh(&tipc_net_lock); 1683 } 1684 1685 /** 1686 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue 1687 * 1688 * Returns increase in queue length (i.e. 0 or 1) 1689 */ 1690 u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, 1691 struct sk_buff *buf) 1692 { 1693 struct sk_buff *queue_buf; 1694 struct sk_buff **prev; 1695 u32 seq_no = buf_seqno(buf); 1696 1697 buf->next = NULL; 1698 1699 /* Empty queue ? */ 1700 if (*head == NULL) { 1701 *head = *tail = buf; 1702 return 1; 1703 } 1704 1705 /* Last ? */ 1706 if (less(buf_seqno(*tail), seq_no)) { 1707 (*tail)->next = buf; 1708 *tail = buf; 1709 return 1; 1710 } 1711 1712 /* Locate insertion point in queue, then insert; discard if duplicate */ 1713 prev = head; 1714 queue_buf = *head; 1715 for (;;) { 1716 u32 curr_seqno = buf_seqno(queue_buf); 1717 1718 if (seq_no == curr_seqno) { 1719 kfree_skb(buf); 1720 return 0; 1721 } 1722 1723 if (less(seq_no, curr_seqno)) 1724 break; 1725 1726 prev = &queue_buf->next; 1727 queue_buf = queue_buf->next; 1728 } 1729 1730 buf->next = queue_buf; 1731 *prev = buf; 1732 return 1; 1733 } 1734 1735 /* 1736 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet 1737 */ 1738 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, 1739 struct sk_buff *buf) 1740 { 1741 u32 seq_no = buf_seqno(buf); 1742 1743 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) { 1744 link_recv_proto_msg(l_ptr, buf); 1745 return; 1746 } 1747 1748 /* Record OOS packet arrival (force mismatch on next timeout) */ 1749 l_ptr->checkpoint--; 1750 1751 /* 1752 * Discard packet if a duplicate; otherwise add it to deferred queue 1753 * and notify peer of gap as per protocol specification 1754 */ 1755 if (less(seq_no, mod(l_ptr->next_in_no))) { 1756 l_ptr->stats.duplicates++; 1757 kfree_skb(buf); 1758 return; 1759 } 1760 1761 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in, 1762 &l_ptr->newest_deferred_in, buf)) { 1763 l_ptr->deferred_inqueue_sz++; 1764 l_ptr->stats.deferred_recv++; 1765 if ((l_ptr->deferred_inqueue_sz % 16) == 1) 1766 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1767 } else 1768 l_ptr->stats.duplicates++; 1769 } 1770 1771 /* 1772 * Send protocol message to the other endpoint. 1773 */ 1774 void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, 1775 int probe_msg, u32 gap, u32 tolerance, 1776 u32 priority, u32 ack_mtu) 1777 { 1778 struct sk_buff *buf = NULL; 1779 struct tipc_msg *msg = l_ptr->pmsg; 1780 u32 msg_size = sizeof(l_ptr->proto_msg); 1781 int r_flag; 1782 1783 /* Discard any previous message that was deferred due to congestion */ 1784 if (l_ptr->proto_msg_queue) { 1785 kfree_skb(l_ptr->proto_msg_queue); 1786 l_ptr->proto_msg_queue = NULL; 1787 } 1788 1789 if (link_blocked(l_ptr)) 1790 return; 1791 1792 /* Abort non-RESET send if communication with node is prohibited */ 1793 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG)) 1794 return; 1795 1796 /* Create protocol message with "out-of-sequence" sequence number */ 1797 msg_set_type(msg, msg_typ); 1798 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 1799 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1800 msg_set_last_bcast(msg, tipc_bclink_get_last_sent()); 1801 1802 if (msg_typ == STATE_MSG) { 1803 u32 next_sent = mod(l_ptr->next_out_no); 1804 1805 if (!tipc_link_is_up(l_ptr)) 1806 return; 1807 if (l_ptr->next_out) 1808 next_sent = buf_seqno(l_ptr->next_out); 1809 msg_set_next_sent(msg, next_sent); 1810 if (l_ptr->oldest_deferred_in) { 1811 u32 rec = buf_seqno(l_ptr->oldest_deferred_in); 1812 gap = mod(rec - mod(l_ptr->next_in_no)); 1813 } 1814 msg_set_seq_gap(msg, gap); 1815 if (gap) 1816 l_ptr->stats.sent_nacks++; 1817 msg_set_link_tolerance(msg, tolerance); 1818 msg_set_linkprio(msg, priority); 1819 msg_set_max_pkt(msg, ack_mtu); 1820 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1821 msg_set_probe(msg, probe_msg != 0); 1822 if (probe_msg) { 1823 u32 mtu = l_ptr->max_pkt; 1824 1825 if ((mtu < l_ptr->max_pkt_target) && 1826 link_working_working(l_ptr) && 1827 l_ptr->fsm_msg_cnt) { 1828 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; 1829 if (l_ptr->max_pkt_probes == 10) { 1830 l_ptr->max_pkt_target = (msg_size - 4); 1831 l_ptr->max_pkt_probes = 0; 1832 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; 1833 } 1834 l_ptr->max_pkt_probes++; 1835 } 1836 1837 l_ptr->stats.sent_probes++; 1838 } 1839 l_ptr->stats.sent_states++; 1840 } else { /* RESET_MSG or ACTIVATE_MSG */ 1841 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1)); 1842 msg_set_seq_gap(msg, 0); 1843 msg_set_next_sent(msg, 1); 1844 msg_set_probe(msg, 0); 1845 msg_set_link_tolerance(msg, l_ptr->tolerance); 1846 msg_set_linkprio(msg, l_ptr->priority); 1847 msg_set_max_pkt(msg, l_ptr->max_pkt_target); 1848 } 1849 1850 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); 1851 msg_set_redundant_link(msg, r_flag); 1852 msg_set_linkprio(msg, l_ptr->priority); 1853 msg_set_size(msg, msg_size); 1854 1855 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2))); 1856 1857 buf = tipc_buf_acquire(msg_size); 1858 if (!buf) 1859 return; 1860 1861 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); 1862 buf->priority = TC_PRIO_CONTROL; 1863 1864 /* Defer message if bearer is already blocked */ 1865 if (tipc_bearer_blocked(l_ptr->b_ptr)) { 1866 l_ptr->proto_msg_queue = buf; 1867 return; 1868 } 1869 1870 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1871 l_ptr->unacked_window = 0; 1872 kfree_skb(buf); 1873 } 1874 1875 /* 1876 * Receive protocol message : 1877 * Note that network plane id propagates through the network, and may 1878 * change at any time. The node with lowest address rules 1879 */ 1880 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) 1881 { 1882 u32 rec_gap = 0; 1883 u32 max_pkt_info; 1884 u32 max_pkt_ack; 1885 u32 msg_tol; 1886 struct tipc_msg *msg = buf_msg(buf); 1887 1888 if (link_blocked(l_ptr)) 1889 goto exit; 1890 1891 /* record unnumbered packet arrival (force mismatch on next timeout) */ 1892 l_ptr->checkpoint--; 1893 1894 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg)) 1895 if (tipc_own_addr > msg_prevnode(msg)) 1896 l_ptr->b_ptr->net_plane = msg_net_plane(msg); 1897 1898 l_ptr->owner->permit_changeover = msg_redundant_link(msg); 1899 1900 switch (msg_type(msg)) { 1901 1902 case RESET_MSG: 1903 if (!link_working_unknown(l_ptr) && 1904 (l_ptr->peer_session != INVALID_SESSION)) { 1905 if (less_eq(msg_session(msg), l_ptr->peer_session)) 1906 break; /* duplicate or old reset: ignore */ 1907 } 1908 1909 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) || 1910 link_working_unknown(l_ptr))) { 1911 /* 1912 * peer has lost contact -- don't allow peer's links 1913 * to reactivate before we recognize loss & clean up 1914 */ 1915 l_ptr->owner->block_setup = WAIT_NODE_DOWN; 1916 } 1917 1918 link_state_event(l_ptr, RESET_MSG); 1919 1920 /* fall thru' */ 1921 case ACTIVATE_MSG: 1922 /* Update link settings according other endpoint's values */ 1923 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg)); 1924 1925 msg_tol = msg_link_tolerance(msg); 1926 if (msg_tol > l_ptr->tolerance) 1927 link_set_supervision_props(l_ptr, msg_tol); 1928 1929 if (msg_linkprio(msg) > l_ptr->priority) 1930 l_ptr->priority = msg_linkprio(msg); 1931 1932 max_pkt_info = msg_max_pkt(msg); 1933 if (max_pkt_info) { 1934 if (max_pkt_info < l_ptr->max_pkt_target) 1935 l_ptr->max_pkt_target = max_pkt_info; 1936 if (l_ptr->max_pkt > l_ptr->max_pkt_target) 1937 l_ptr->max_pkt = l_ptr->max_pkt_target; 1938 } else { 1939 l_ptr->max_pkt = l_ptr->max_pkt_target; 1940 } 1941 1942 /* Synchronize broadcast link info, if not done previously */ 1943 if (!tipc_node_is_up(l_ptr->owner)) { 1944 l_ptr->owner->bclink.last_sent = 1945 l_ptr->owner->bclink.last_in = 1946 msg_last_bcast(msg); 1947 l_ptr->owner->bclink.oos_state = 0; 1948 } 1949 1950 l_ptr->peer_session = msg_session(msg); 1951 l_ptr->peer_bearer_id = msg_bearer_id(msg); 1952 1953 if (msg_type(msg) == ACTIVATE_MSG) 1954 link_state_event(l_ptr, ACTIVATE_MSG); 1955 break; 1956 case STATE_MSG: 1957 1958 msg_tol = msg_link_tolerance(msg); 1959 if (msg_tol) 1960 link_set_supervision_props(l_ptr, msg_tol); 1961 1962 if (msg_linkprio(msg) && 1963 (msg_linkprio(msg) != l_ptr->priority)) { 1964 pr_warn("%s<%s>, priority change %u->%u\n", 1965 link_rst_msg, l_ptr->name, l_ptr->priority, 1966 msg_linkprio(msg)); 1967 l_ptr->priority = msg_linkprio(msg); 1968 tipc_link_reset(l_ptr); /* Enforce change to take effect */ 1969 break; 1970 } 1971 link_state_event(l_ptr, TRAFFIC_MSG_EVT); 1972 l_ptr->stats.recv_states++; 1973 if (link_reset_unknown(l_ptr)) 1974 break; 1975 1976 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) { 1977 rec_gap = mod(msg_next_sent(msg) - 1978 mod(l_ptr->next_in_no)); 1979 } 1980 1981 max_pkt_ack = msg_max_pkt(msg); 1982 if (max_pkt_ack > l_ptr->max_pkt) { 1983 l_ptr->max_pkt = max_pkt_ack; 1984 l_ptr->max_pkt_probes = 0; 1985 } 1986 1987 max_pkt_ack = 0; 1988 if (msg_probe(msg)) { 1989 l_ptr->stats.recv_probes++; 1990 if (msg_size(msg) > sizeof(l_ptr->proto_msg)) 1991 max_pkt_ack = msg_size(msg); 1992 } 1993 1994 /* Protocol message before retransmits, reduce loss risk */ 1995 if (l_ptr->owner->bclink.recv_permitted) 1996 tipc_bclink_update_link_state(l_ptr->owner, 1997 msg_last_bcast(msg)); 1998 1999 if (rec_gap || (msg_probe(msg))) { 2000 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 2001 0, rec_gap, 0, 0, max_pkt_ack); 2002 } 2003 if (msg_seq_gap(msg)) { 2004 l_ptr->stats.recv_nacks++; 2005 tipc_link_retransmit(l_ptr, l_ptr->first_out, 2006 msg_seq_gap(msg)); 2007 } 2008 break; 2009 } 2010 exit: 2011 kfree_skb(buf); 2012 } 2013 2014 2015 /* 2016 * tipc_link_tunnel(): Send one message via a link belonging to 2017 * another bearer. Owner node is locked. 2018 */ 2019 static void tipc_link_tunnel(struct tipc_link *l_ptr, 2020 struct tipc_msg *tunnel_hdr, struct tipc_msg *msg, 2021 u32 selector) 2022 { 2023 struct tipc_link *tunnel; 2024 struct sk_buff *buf; 2025 u32 length = msg_size(msg); 2026 2027 tunnel = l_ptr->owner->active_links[selector & 1]; 2028 if (!tipc_link_is_up(tunnel)) { 2029 pr_warn("%stunnel link no longer available\n", link_co_err); 2030 return; 2031 } 2032 msg_set_size(tunnel_hdr, length + INT_H_SIZE); 2033 buf = tipc_buf_acquire(length + INT_H_SIZE); 2034 if (!buf) { 2035 pr_warn("%sunable to send tunnel msg\n", link_co_err); 2036 return; 2037 } 2038 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE); 2039 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length); 2040 tipc_link_send_buf(tunnel, buf); 2041 } 2042 2043 2044 2045 /* 2046 * changeover(): Send whole message queue via the remaining link 2047 * Owner node is locked. 2048 */ 2049 void tipc_link_changeover(struct tipc_link *l_ptr) 2050 { 2051 u32 msgcount = l_ptr->out_queue_size; 2052 struct sk_buff *crs = l_ptr->first_out; 2053 struct tipc_link *tunnel = l_ptr->owner->active_links[0]; 2054 struct tipc_msg tunnel_hdr; 2055 int split_bundles; 2056 2057 if (!tunnel) 2058 return; 2059 2060 if (!l_ptr->owner->permit_changeover) { 2061 pr_warn("%speer did not permit changeover\n", link_co_err); 2062 return; 2063 } 2064 2065 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2066 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); 2067 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2068 msg_set_msgcnt(&tunnel_hdr, msgcount); 2069 2070 if (!l_ptr->first_out) { 2071 struct sk_buff *buf; 2072 2073 buf = tipc_buf_acquire(INT_H_SIZE); 2074 if (buf) { 2075 skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE); 2076 msg_set_size(&tunnel_hdr, INT_H_SIZE); 2077 tipc_link_send_buf(tunnel, buf); 2078 } else { 2079 pr_warn("%sunable to send changeover msg\n", 2080 link_co_err); 2081 } 2082 return; 2083 } 2084 2085 split_bundles = (l_ptr->owner->active_links[0] != 2086 l_ptr->owner->active_links[1]); 2087 2088 while (crs) { 2089 struct tipc_msg *msg = buf_msg(crs); 2090 2091 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { 2092 struct tipc_msg *m = msg_get_wrapped(msg); 2093 unchar *pos = (unchar *)m; 2094 2095 msgcount = msg_msgcnt(msg); 2096 while (msgcount--) { 2097 msg_set_seqno(m, msg_seqno(msg)); 2098 tipc_link_tunnel(l_ptr, &tunnel_hdr, m, 2099 msg_link_selector(m)); 2100 pos += align(msg_size(m)); 2101 m = (struct tipc_msg *)pos; 2102 } 2103 } else { 2104 tipc_link_tunnel(l_ptr, &tunnel_hdr, msg, 2105 msg_link_selector(msg)); 2106 } 2107 crs = crs->next; 2108 } 2109 } 2110 2111 void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel) 2112 { 2113 struct sk_buff *iter; 2114 struct tipc_msg tunnel_hdr; 2115 2116 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2117 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); 2118 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); 2119 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2120 iter = l_ptr->first_out; 2121 while (iter) { 2122 struct sk_buff *outbuf; 2123 struct tipc_msg *msg = buf_msg(iter); 2124 u32 length = msg_size(msg); 2125 2126 if (msg_user(msg) == MSG_BUNDLER) 2127 msg_set_type(msg, CLOSED_MSG); 2128 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ 2129 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 2130 msg_set_size(&tunnel_hdr, length + INT_H_SIZE); 2131 outbuf = tipc_buf_acquire(length + INT_H_SIZE); 2132 if (outbuf == NULL) { 2133 pr_warn("%sunable to send duplicate msg\n", 2134 link_co_err); 2135 return; 2136 } 2137 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE); 2138 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data, 2139 length); 2140 tipc_link_send_buf(tunnel, outbuf); 2141 if (!tipc_link_is_up(l_ptr)) 2142 return; 2143 iter = iter->next; 2144 } 2145 } 2146 2147 /** 2148 * buf_extract - extracts embedded TIPC message from another message 2149 * @skb: encapsulating message buffer 2150 * @from_pos: offset to extract from 2151 * 2152 * Returns a new message buffer containing an embedded message. The 2153 * encapsulating message itself is left unchanged. 2154 */ 2155 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) 2156 { 2157 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); 2158 u32 size = msg_size(msg); 2159 struct sk_buff *eb; 2160 2161 eb = tipc_buf_acquire(size); 2162 if (eb) 2163 skb_copy_to_linear_data(eb, msg, size); 2164 return eb; 2165 } 2166 2167 /* 2168 * link_recv_changeover_msg(): Receive tunneled packet sent 2169 * via other link. Node is locked. Return extracted buffer. 2170 */ 2171 static int link_recv_changeover_msg(struct tipc_link **l_ptr, 2172 struct sk_buff **buf) 2173 { 2174 struct sk_buff *tunnel_buf = *buf; 2175 struct tipc_link *dest_link; 2176 struct tipc_msg *msg; 2177 struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf); 2178 u32 msg_typ = msg_type(tunnel_msg); 2179 u32 msg_count = msg_msgcnt(tunnel_msg); 2180 u32 bearer_id = msg_bearer_id(tunnel_msg); 2181 2182 if (bearer_id >= MAX_BEARERS) 2183 goto exit; 2184 dest_link = (*l_ptr)->owner->links[bearer_id]; 2185 if (!dest_link) 2186 goto exit; 2187 if (dest_link == *l_ptr) { 2188 pr_err("Unexpected changeover message on link <%s>\n", 2189 (*l_ptr)->name); 2190 goto exit; 2191 } 2192 *l_ptr = dest_link; 2193 msg = msg_get_wrapped(tunnel_msg); 2194 2195 if (msg_typ == DUPLICATE_MSG) { 2196 if (less(msg_seqno(msg), mod(dest_link->next_in_no))) 2197 goto exit; 2198 *buf = buf_extract(tunnel_buf, INT_H_SIZE); 2199 if (*buf == NULL) { 2200 pr_warn("%sduplicate msg dropped\n", link_co_err); 2201 goto exit; 2202 } 2203 kfree_skb(tunnel_buf); 2204 return 1; 2205 } 2206 2207 /* First original message ?: */ 2208 if (tipc_link_is_up(dest_link)) { 2209 pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg, 2210 dest_link->name); 2211 tipc_link_reset(dest_link); 2212 dest_link->exp_msg_count = msg_count; 2213 if (!msg_count) 2214 goto exit; 2215 } else if (dest_link->exp_msg_count == START_CHANGEOVER) { 2216 dest_link->exp_msg_count = msg_count; 2217 if (!msg_count) 2218 goto exit; 2219 } 2220 2221 /* Receive original message */ 2222 if (dest_link->exp_msg_count == 0) { 2223 pr_warn("%sgot too many tunnelled messages\n", link_co_err); 2224 goto exit; 2225 } 2226 dest_link->exp_msg_count--; 2227 if (less(msg_seqno(msg), dest_link->reset_checkpoint)) { 2228 goto exit; 2229 } else { 2230 *buf = buf_extract(tunnel_buf, INT_H_SIZE); 2231 if (*buf != NULL) { 2232 kfree_skb(tunnel_buf); 2233 return 1; 2234 } else { 2235 pr_warn("%soriginal msg dropped\n", link_co_err); 2236 } 2237 } 2238 exit: 2239 *buf = NULL; 2240 kfree_skb(tunnel_buf); 2241 return 0; 2242 } 2243 2244 /* 2245 * Bundler functionality: 2246 */ 2247 void tipc_link_recv_bundle(struct sk_buff *buf) 2248 { 2249 u32 msgcount = msg_msgcnt(buf_msg(buf)); 2250 u32 pos = INT_H_SIZE; 2251 struct sk_buff *obuf; 2252 2253 while (msgcount--) { 2254 obuf = buf_extract(buf, pos); 2255 if (obuf == NULL) { 2256 pr_warn("Link unable to unbundle message(s)\n"); 2257 break; 2258 } 2259 pos += align(msg_size(buf_msg(obuf))); 2260 tipc_net_route_msg(obuf); 2261 } 2262 kfree_skb(buf); 2263 } 2264 2265 /* 2266 * Fragmentation/defragmentation: 2267 */ 2268 2269 /* 2270 * link_send_long_buf: Entry for buffers needing fragmentation. 2271 * The buffer is complete, inclusive total message length. 2272 * Returns user data length. 2273 */ 2274 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf) 2275 { 2276 struct sk_buff *buf_chain = NULL; 2277 struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain; 2278 struct tipc_msg *inmsg = buf_msg(buf); 2279 struct tipc_msg fragm_hdr; 2280 u32 insize = msg_size(inmsg); 2281 u32 dsz = msg_data_sz(inmsg); 2282 unchar *crs = buf->data; 2283 u32 rest = insize; 2284 u32 pack_sz = l_ptr->max_pkt; 2285 u32 fragm_sz = pack_sz - INT_H_SIZE; 2286 u32 fragm_no = 0; 2287 u32 destaddr; 2288 2289 if (msg_short(inmsg)) 2290 destaddr = l_ptr->addr; 2291 else 2292 destaddr = msg_destnode(inmsg); 2293 2294 /* Prepare reusable fragment header: */ 2295 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 2296 INT_H_SIZE, destaddr); 2297 2298 /* Chop up message: */ 2299 while (rest > 0) { 2300 struct sk_buff *fragm; 2301 2302 if (rest <= fragm_sz) { 2303 fragm_sz = rest; 2304 msg_set_type(&fragm_hdr, LAST_FRAGMENT); 2305 } 2306 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE); 2307 if (fragm == NULL) { 2308 kfree_skb(buf); 2309 while (buf_chain) { 2310 buf = buf_chain; 2311 buf_chain = buf_chain->next; 2312 kfree_skb(buf); 2313 } 2314 return -ENOMEM; 2315 } 2316 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE); 2317 fragm_no++; 2318 msg_set_fragm_no(&fragm_hdr, fragm_no); 2319 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE); 2320 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs, 2321 fragm_sz); 2322 buf_chain_tail->next = fragm; 2323 buf_chain_tail = fragm; 2324 2325 rest -= fragm_sz; 2326 crs += fragm_sz; 2327 msg_set_type(&fragm_hdr, FRAGMENT); 2328 } 2329 kfree_skb(buf); 2330 2331 /* Append chain of fragments to send queue & send them */ 2332 l_ptr->long_msg_seq_no++; 2333 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); 2334 l_ptr->stats.sent_fragments += fragm_no; 2335 l_ptr->stats.sent_fragmented++; 2336 tipc_link_push_queue(l_ptr); 2337 2338 return dsz; 2339 } 2340 2341 /* 2342 * tipc_link_recv_fragment(): Called with node lock on. Returns 2343 * the reassembled buffer if message is complete. 2344 */ 2345 int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail, 2346 struct sk_buff **fbuf) 2347 { 2348 struct sk_buff *frag = *fbuf; 2349 struct tipc_msg *msg = buf_msg(frag); 2350 u32 fragid = msg_type(msg); 2351 bool headstolen; 2352 int delta; 2353 2354 skb_pull(frag, msg_hdr_sz(msg)); 2355 if (fragid == FIRST_FRAGMENT) { 2356 if (*head || skb_unclone(frag, GFP_ATOMIC)) 2357 goto out_free; 2358 *head = frag; 2359 skb_frag_list_init(*head); 2360 return 0; 2361 } else if (*head && 2362 skb_try_coalesce(*head, frag, &headstolen, &delta)) { 2363 kfree_skb_partial(frag, headstolen); 2364 } else { 2365 if (!*head) 2366 goto out_free; 2367 if (!skb_has_frag_list(*head)) 2368 skb_shinfo(*head)->frag_list = frag; 2369 else 2370 (*tail)->next = frag; 2371 *tail = frag; 2372 (*head)->truesize += frag->truesize; 2373 } 2374 if (fragid == LAST_FRAGMENT) { 2375 *fbuf = *head; 2376 *tail = *head = NULL; 2377 return LINK_REASM_COMPLETE; 2378 } 2379 return 0; 2380 out_free: 2381 pr_warn_ratelimited("Link unable to reassemble fragmented message\n"); 2382 kfree_skb(*fbuf); 2383 return LINK_REASM_ERROR; 2384 } 2385 2386 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) 2387 { 2388 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) 2389 return; 2390 2391 l_ptr->tolerance = tolerance; 2392 l_ptr->continuity_interval = 2393 ((tolerance / 4) > 500) ? 500 : tolerance / 4; 2394 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4); 2395 } 2396 2397 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) 2398 { 2399 /* Data messages from this node, inclusive FIRST_FRAGM */ 2400 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window; 2401 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4; 2402 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5; 2403 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6; 2404 /* Transiting data messages,inclusive FIRST_FRAGM */ 2405 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300; 2406 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600; 2407 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900; 2408 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200; 2409 l_ptr->queue_limit[CONN_MANAGER] = 1200; 2410 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; 2411 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000; 2412 /* FRAGMENT and LAST_FRAGMENT packets */ 2413 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000; 2414 } 2415 2416 /** 2417 * link_find_link - locate link by name 2418 * @name: ptr to link name string 2419 * @node: ptr to area to be filled with ptr to associated node 2420 * 2421 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted; 2422 * this also prevents link deletion. 2423 * 2424 * Returns pointer to link (or 0 if invalid link name). 2425 */ 2426 static struct tipc_link *link_find_link(const char *name, 2427 struct tipc_node **node) 2428 { 2429 struct tipc_link *l_ptr; 2430 struct tipc_node *n_ptr; 2431 int i; 2432 2433 list_for_each_entry(n_ptr, &tipc_node_list, list) { 2434 for (i = 0; i < MAX_BEARERS; i++) { 2435 l_ptr = n_ptr->links[i]; 2436 if (l_ptr && !strcmp(l_ptr->name, name)) 2437 goto found; 2438 } 2439 } 2440 l_ptr = NULL; 2441 n_ptr = NULL; 2442 found: 2443 *node = n_ptr; 2444 return l_ptr; 2445 } 2446 2447 /** 2448 * link_value_is_valid -- validate proposed link tolerance/priority/window 2449 * 2450 * @cmd: value type (TIPC_CMD_SET_LINK_*) 2451 * @new_value: the new value 2452 * 2453 * Returns 1 if value is within range, 0 if not. 2454 */ 2455 static int link_value_is_valid(u16 cmd, u32 new_value) 2456 { 2457 switch (cmd) { 2458 case TIPC_CMD_SET_LINK_TOL: 2459 return (new_value >= TIPC_MIN_LINK_TOL) && 2460 (new_value <= TIPC_MAX_LINK_TOL); 2461 case TIPC_CMD_SET_LINK_PRI: 2462 return (new_value <= TIPC_MAX_LINK_PRI); 2463 case TIPC_CMD_SET_LINK_WINDOW: 2464 return (new_value >= TIPC_MIN_LINK_WIN) && 2465 (new_value <= TIPC_MAX_LINK_WIN); 2466 } 2467 return 0; 2468 } 2469 2470 /** 2471 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media 2472 * @name: ptr to link, bearer, or media name 2473 * @new_value: new value of link, bearer, or media setting 2474 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*) 2475 * 2476 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted. 2477 * 2478 * Returns 0 if value updated and negative value on error. 2479 */ 2480 static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) 2481 { 2482 struct tipc_node *node; 2483 struct tipc_link *l_ptr; 2484 struct tipc_bearer *b_ptr; 2485 struct tipc_media *m_ptr; 2486 int res = 0; 2487 2488 l_ptr = link_find_link(name, &node); 2489 if (l_ptr) { 2490 /* 2491 * acquire node lock for tipc_link_send_proto_msg(). 2492 * see "TIPC locking policy" in net.c. 2493 */ 2494 tipc_node_lock(node); 2495 switch (cmd) { 2496 case TIPC_CMD_SET_LINK_TOL: 2497 link_set_supervision_props(l_ptr, new_value); 2498 tipc_link_send_proto_msg(l_ptr, 2499 STATE_MSG, 0, 0, new_value, 0, 0); 2500 break; 2501 case TIPC_CMD_SET_LINK_PRI: 2502 l_ptr->priority = new_value; 2503 tipc_link_send_proto_msg(l_ptr, 2504 STATE_MSG, 0, 0, 0, new_value, 0); 2505 break; 2506 case TIPC_CMD_SET_LINK_WINDOW: 2507 tipc_link_set_queue_limits(l_ptr, new_value); 2508 break; 2509 default: 2510 res = -EINVAL; 2511 break; 2512 } 2513 tipc_node_unlock(node); 2514 return res; 2515 } 2516 2517 b_ptr = tipc_bearer_find(name); 2518 if (b_ptr) { 2519 switch (cmd) { 2520 case TIPC_CMD_SET_LINK_TOL: 2521 b_ptr->tolerance = new_value; 2522 break; 2523 case TIPC_CMD_SET_LINK_PRI: 2524 b_ptr->priority = new_value; 2525 break; 2526 case TIPC_CMD_SET_LINK_WINDOW: 2527 b_ptr->window = new_value; 2528 break; 2529 default: 2530 res = -EINVAL; 2531 break; 2532 } 2533 return res; 2534 } 2535 2536 m_ptr = tipc_media_find(name); 2537 if (!m_ptr) 2538 return -ENODEV; 2539 switch (cmd) { 2540 case TIPC_CMD_SET_LINK_TOL: 2541 m_ptr->tolerance = new_value; 2542 break; 2543 case TIPC_CMD_SET_LINK_PRI: 2544 m_ptr->priority = new_value; 2545 break; 2546 case TIPC_CMD_SET_LINK_WINDOW: 2547 m_ptr->window = new_value; 2548 break; 2549 default: 2550 res = -EINVAL; 2551 break; 2552 } 2553 return res; 2554 } 2555 2556 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, 2557 u16 cmd) 2558 { 2559 struct tipc_link_config *args; 2560 u32 new_value; 2561 int res; 2562 2563 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) 2564 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2565 2566 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area); 2567 new_value = ntohl(args->value); 2568 2569 if (!link_value_is_valid(cmd, new_value)) 2570 return tipc_cfg_reply_error_string( 2571 "cannot change, value invalid"); 2572 2573 if (!strcmp(args->name, tipc_bclink_name)) { 2574 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && 2575 (tipc_bclink_set_queue_limits(new_value) == 0)) 2576 return tipc_cfg_reply_none(); 2577 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 2578 " (cannot change setting on broadcast link)"); 2579 } 2580 2581 read_lock_bh(&tipc_net_lock); 2582 res = link_cmd_set_value(args->name, new_value, cmd); 2583 read_unlock_bh(&tipc_net_lock); 2584 if (res) 2585 return tipc_cfg_reply_error_string("cannot change link setting"); 2586 2587 return tipc_cfg_reply_none(); 2588 } 2589 2590 /** 2591 * link_reset_statistics - reset link statistics 2592 * @l_ptr: pointer to link 2593 */ 2594 static void link_reset_statistics(struct tipc_link *l_ptr) 2595 { 2596 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); 2597 l_ptr->stats.sent_info = l_ptr->next_out_no; 2598 l_ptr->stats.recv_info = l_ptr->next_in_no; 2599 } 2600 2601 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) 2602 { 2603 char *link_name; 2604 struct tipc_link *l_ptr; 2605 struct tipc_node *node; 2606 2607 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2608 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2609 2610 link_name = (char *)TLV_DATA(req_tlv_area); 2611 if (!strcmp(link_name, tipc_bclink_name)) { 2612 if (tipc_bclink_reset_stats()) 2613 return tipc_cfg_reply_error_string("link not found"); 2614 return tipc_cfg_reply_none(); 2615 } 2616 2617 read_lock_bh(&tipc_net_lock); 2618 l_ptr = link_find_link(link_name, &node); 2619 if (!l_ptr) { 2620 read_unlock_bh(&tipc_net_lock); 2621 return tipc_cfg_reply_error_string("link not found"); 2622 } 2623 2624 tipc_node_lock(node); 2625 link_reset_statistics(l_ptr); 2626 tipc_node_unlock(node); 2627 read_unlock_bh(&tipc_net_lock); 2628 return tipc_cfg_reply_none(); 2629 } 2630 2631 /** 2632 * percent - convert count to a percentage of total (rounding up or down) 2633 */ 2634 static u32 percent(u32 count, u32 total) 2635 { 2636 return (count * 100 + (total / 2)) / total; 2637 } 2638 2639 /** 2640 * tipc_link_stats - print link statistics 2641 * @name: link name 2642 * @buf: print buffer area 2643 * @buf_size: size of print buffer area 2644 * 2645 * Returns length of print buffer data string (or 0 if error) 2646 */ 2647 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) 2648 { 2649 struct tipc_link *l; 2650 struct tipc_stats *s; 2651 struct tipc_node *node; 2652 char *status; 2653 u32 profile_total = 0; 2654 int ret; 2655 2656 if (!strcmp(name, tipc_bclink_name)) 2657 return tipc_bclink_stats(buf, buf_size); 2658 2659 read_lock_bh(&tipc_net_lock); 2660 l = link_find_link(name, &node); 2661 if (!l) { 2662 read_unlock_bh(&tipc_net_lock); 2663 return 0; 2664 } 2665 tipc_node_lock(node); 2666 s = &l->stats; 2667 2668 if (tipc_link_is_active(l)) 2669 status = "ACTIVE"; 2670 else if (tipc_link_is_up(l)) 2671 status = "STANDBY"; 2672 else 2673 status = "DEFUNCT"; 2674 2675 ret = tipc_snprintf(buf, buf_size, "Link <%s>\n" 2676 " %s MTU:%u Priority:%u Tolerance:%u ms" 2677 " Window:%u packets\n", 2678 l->name, status, l->max_pkt, l->priority, 2679 l->tolerance, l->queue_limit[0]); 2680 2681 ret += tipc_snprintf(buf + ret, buf_size - ret, 2682 " RX packets:%u fragments:%u/%u bundles:%u/%u\n", 2683 l->next_in_no - s->recv_info, s->recv_fragments, 2684 s->recv_fragmented, s->recv_bundles, 2685 s->recv_bundled); 2686 2687 ret += tipc_snprintf(buf + ret, buf_size - ret, 2688 " TX packets:%u fragments:%u/%u bundles:%u/%u\n", 2689 l->next_out_no - s->sent_info, s->sent_fragments, 2690 s->sent_fragmented, s->sent_bundles, 2691 s->sent_bundled); 2692 2693 profile_total = s->msg_length_counts; 2694 if (!profile_total) 2695 profile_total = 1; 2696 2697 ret += tipc_snprintf(buf + ret, buf_size - ret, 2698 " TX profile sample:%u packets average:%u octets\n" 2699 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% " 2700 "-16384:%u%% -32768:%u%% -66000:%u%%\n", 2701 s->msg_length_counts, 2702 s->msg_lengths_total / profile_total, 2703 percent(s->msg_length_profile[0], profile_total), 2704 percent(s->msg_length_profile[1], profile_total), 2705 percent(s->msg_length_profile[2], profile_total), 2706 percent(s->msg_length_profile[3], profile_total), 2707 percent(s->msg_length_profile[4], profile_total), 2708 percent(s->msg_length_profile[5], profile_total), 2709 percent(s->msg_length_profile[6], profile_total)); 2710 2711 ret += tipc_snprintf(buf + ret, buf_size - ret, 2712 " RX states:%u probes:%u naks:%u defs:%u" 2713 " dups:%u\n", s->recv_states, s->recv_probes, 2714 s->recv_nacks, s->deferred_recv, s->duplicates); 2715 2716 ret += tipc_snprintf(buf + ret, buf_size - ret, 2717 " TX states:%u probes:%u naks:%u acks:%u" 2718 " dups:%u\n", s->sent_states, s->sent_probes, 2719 s->sent_nacks, s->sent_acks, s->retransmitted); 2720 2721 ret += tipc_snprintf(buf + ret, buf_size - ret, 2722 " Congestion link:%u Send queue" 2723 " max:%u avg:%u\n", s->link_congs, 2724 s->max_queue_sz, s->queue_sz_counts ? 2725 (s->accu_queue_sz / s->queue_sz_counts) : 0); 2726 2727 tipc_node_unlock(node); 2728 read_unlock_bh(&tipc_net_lock); 2729 return ret; 2730 } 2731 2732 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space) 2733 { 2734 struct sk_buff *buf; 2735 struct tlv_desc *rep_tlv; 2736 int str_len; 2737 int pb_len; 2738 char *pb; 2739 2740 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2741 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2742 2743 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); 2744 if (!buf) 2745 return NULL; 2746 2747 rep_tlv = (struct tlv_desc *)buf->data; 2748 pb = TLV_DATA(rep_tlv); 2749 pb_len = ULTRA_STRING_MAX_LEN; 2750 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area), 2751 pb, pb_len); 2752 if (!str_len) { 2753 kfree_skb(buf); 2754 return tipc_cfg_reply_error_string("link not found"); 2755 } 2756 str_len += 1; /* for "\0" */ 2757 skb_put(buf, TLV_SPACE(str_len)); 2758 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 2759 2760 return buf; 2761 } 2762 2763 /** 2764 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination 2765 * @dest: network address of destination node 2766 * @selector: used to select from set of active links 2767 * 2768 * If no active link can be found, uses default maximum packet size. 2769 */ 2770 u32 tipc_link_get_max_pkt(u32 dest, u32 selector) 2771 { 2772 struct tipc_node *n_ptr; 2773 struct tipc_link *l_ptr; 2774 u32 res = MAX_PKT_DEFAULT; 2775 2776 if (dest == tipc_own_addr) 2777 return MAX_MSG_SIZE; 2778 2779 read_lock_bh(&tipc_net_lock); 2780 n_ptr = tipc_node_find(dest); 2781 if (n_ptr) { 2782 tipc_node_lock(n_ptr); 2783 l_ptr = n_ptr->active_links[selector & 1]; 2784 if (l_ptr) 2785 res = l_ptr->max_pkt; 2786 tipc_node_unlock(n_ptr); 2787 } 2788 read_unlock_bh(&tipc_net_lock); 2789 return res; 2790 } 2791 2792 static void link_print(struct tipc_link *l_ptr, const char *str) 2793 { 2794 pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name); 2795 2796 if (link_working_unknown(l_ptr)) 2797 pr_cont(":WU\n"); 2798 else if (link_reset_reset(l_ptr)) 2799 pr_cont(":RR\n"); 2800 else if (link_reset_unknown(l_ptr)) 2801 pr_cont(":RU\n"); 2802 else if (link_working_working(l_ptr)) 2803 pr_cont(":WW\n"); 2804 else 2805 pr_cont("\n"); 2806 } 2807