/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */
#define STARTING_EVT    856384768	/* link processing trigger */
#define TRAFFIC_MSG_EVT 560815u		/* rx'd ??? */
#define TIMEOUT_EVT     560817u		/* link timer expired */
/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u

static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf);
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
static int  link_recv_changeover_msg(struct tipc_link **l_ptr,
				     struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
static int  link_send_sections_long(struct tipc_port *sender,
				    struct iovec const *msg_sect,
				    unsigned int len, u32 destnode);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void link_start(struct tipc_link *l_ptr);
static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
static void tipc_link_send_sync(struct tipc_link *l);
static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);

/*
 *  Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	u32 max_pkt;

	max_pkt = (l_ptr->b_ptr->mtu & ~3);
	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}

static u32 link_next_sent(struct tipc_link *l_ptr)
{
	if (l_ptr->next_out)
		return buf_seqno(l_ptr->next_out);
	return mod(l_ptr->next_out_no);
}

static u32 link_last_sent(struct tipc_link *l_ptr)
{
	return mod(link_next_sent(l_ptr) - 1);
}
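/*
 * Illustrative note (not part of the original code): align() rounds a
 * length up to the next 4-byte boundary, e.g. align(53) == 56 and
 * align(56) == 56. Assuming mod() reduces a sequence number modulo
 * 2^16, as elsewhere in TIPC's link layer, link_last_sent() also
 * wraps correctly at the window boundary: when link_next_sent()
 * returns 0, the last sent sequence number is mod(0 - 1) == 0xffff.
 */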
/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 *
 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
 * with tipc_link_delete().  (There is no risk that the node will be deleted by
 * another thread because tipc_link_delete() always cancels the link timer before
 * tipc_node_delete() is called.)
 */
static void link_timeout(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
	l_ptr->stats.queue_sz_counts++;

	if (l_ptr->first_out) {
		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */

	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_queue(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}

static void link_set_timer(struct tipc_link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}
/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= 2) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish third link to %s\n", addr_string);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->b_ptr = b_ptr;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	INIT_LIST_HEAD(&l_ptr->waiting_ports);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	k_init_timer(&l_ptr->timer, (Handler)link_timeout,
		     (unsigned long)l_ptr);
	list_add_tail(&l_ptr->link_list, &b_ptr->links);
	tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);

	return l_ptr;
}

/**
 * tipc_link_delete - delete a link
 * @l_ptr: pointer to link
 *
 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
 * This routine must not grab the node lock until after link timer cancellation
 * to avoid a potential deadlock situation.
 */
void tipc_link_delete(struct tipc_link *l_ptr)
{
	if (!l_ptr) {
		pr_err("Attempt to delete non-existent link\n");
		return;
	}

	k_cancel_timer(&l_ptr->timer);

	tipc_node_lock(l_ptr->owner);
	tipc_link_reset(l_ptr);
	tipc_node_detach_link(l_ptr->owner, l_ptr);
	tipc_link_stop(l_ptr);
	list_del_init(&l_ptr->link_list);
	tipc_node_unlock(l_ptr->owner);
	k_term_timer(&l_ptr->timer);
	kfree(l_ptr);
}

static void link_start(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);
	link_state_event(l_ptr, STARTING_EVT);
	tipc_node_unlock(l_ptr->owner);
}

/**
 * link_schedule_port - schedule port for deferred sending
 * @l_ptr: pointer to link
 * @origport: reference to sending port
 * @sz: amount of data to be sent
 *
 * Schedules port for renewed sending of messages after link congestion
 * has abated.
 */
static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
{
	struct tipc_port *p_ptr;

	spin_lock_bh(&tipc_port_list_lock);
	p_ptr = tipc_port_lock(origport);
	if (p_ptr) {
		if (!p_ptr->wakeup)
			goto exit;
		if (!list_empty(&p_ptr->wait_list))
			goto exit;
		p_ptr->congested = 1;
		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
		l_ptr->stats.link_congs++;
exit:
		tipc_port_unlock(p_ptr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	return -ELINKCONG;
}

void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
{
	struct tipc_port *p_ptr;
	struct tipc_port *temp_p_ptr;
	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;

	if (all)
		win = 100000;
	if (win <= 0)
		return;
	if (!spin_trylock_bh(&tipc_port_list_lock))
		return;
	if (link_congested(l_ptr))
		goto exit;
	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
				 wait_list) {
		if (win <= 0)
			break;
		list_del_init(&p_ptr->wait_list);
		spin_lock_bh(p_ptr->lock);
		p_ptr->congested = 0;
		p_ptr->wakeup(p_ptr);
		win -= p_ptr->waiting_pkts;
		spin_unlock_bh(p_ptr->lock);
	}

exit:
	spin_unlock_bh(&tipc_port_list_lock);
}

/**
 * link_release_outqueue - purge link's outbound message queue
 * @l_ptr: pointer to link
 */
static void link_release_outqueue(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	struct sk_buff *next;

	while (buf) {
		next = buf->next;
		kfree_skb(buf);
		buf = next;
	}
	l_ptr->first_out = NULL;
	l_ptr->out_queue_size = 0;
}
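/*
 * Illustrative note (not part of the original code): the expression
 * 1 + ((sz - 1) / l_ptr->max_pkt) in link_schedule_port() is a
 * ceiling division giving the number of packets the blocked send
 * would need. For example, with max_pkt == 1500, sz == 1500 yields 1
 * waiting packet and sz == 1501 yields 2; tipc_link_wakeup_ports()
 * later subtracts these counts from the free send window as it wakes
 * ports up.
 */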
/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_head);
	l_ptr->reasm_head = NULL;
	l_ptr->reasm_tail = NULL;
}

/**
 * tipc_link_stop - purge all inbound and outbound messages associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_stop(struct tipc_link *l_ptr)
{
	struct sk_buff *buf;
	struct sk_buff *next;

	buf = l_ptr->oldest_deferred_in;
	while (buf) {
		next = buf->next;
		kfree_skb(buf);
		buf = next;
	}

	buf = l_ptr->first_out;
	while (buf) {
		next = buf->next;
		kfree_skb(buf);
		buf = next;
	}

	tipc_link_reset_fragments(l_ptr);

	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
}

void tipc_link_reset(struct tipc_link *l_ptr)
{
	struct sk_buff *buf;
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
	    l_ptr->owner->permit_changeover) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */
	link_release_outqueue(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	buf = l_ptr->oldest_deferred_in;
	while (buf) {
		struct sk_buff *next = buf->next;
		kfree_skb(buf);
		buf = next;
	}
	if (!list_empty(&l_ptr->waiting_ports))
		tipc_link_wakeup_ports(l_ptr, 1);

	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}


static void link_activate(struct tipc_link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
}
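/*
 * Illustrative summary (not part of the original code) of the state
 * machine implemented by link_state_event() below. The states are
 * WORKING_WORKING (traffic seen recently), WORKING_UNKNOWN (probing),
 * RESET_UNKNOWN (down, peer state unknown) and RESET_RESET (down,
 * peer known to be reset). A typical bring-up, assuming no message
 * loss, looks like:
 *
 *   RESET_UNKNOWN --rx RESET_MSG--> RESET_RESET (ACTIVATE_MSG sent)
 *   RESET_RESET --rx ACTIVATE_MSG--> WORKING_WORKING (link_activate())
 *
 * while repeated TIMEOUT_EVTs without received traffic take a link
 * from WORKING_WORKING via WORKING_UNKNOWN back to RESET_UNKNOWN
 * once abort_limit probes go unanswered.
 */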
/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (!l_ptr->started && (event != STARTING_EVT))
		return;		/* Not yet. */

	if (link_blocked(l_ptr)) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;		/* Changeover going on */
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer\n", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer while probing\n",
				link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_send_proto_msg(l_ptr, STATE_MSG,
							 1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_warn("%s<%s>, peer not responding\n",
					link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_send_proto_msg(l_ptr, RESET_MSG,
							 0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_send_sync(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->started = 1;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_send_sync(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}

/*
 * link_bundle_buf(): Append contents of a buffer to
 * the tail of an existing one.
 */
static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
			   struct sk_buff *buf)
{
	struct tipc_msg *bundler_msg = buf_msg(bundler);
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 bundle_size = msg_size(bundler_msg);
	u32 to_pos = align(bundle_size);
	u32 pad = to_pos - bundle_size;

	if (msg_user(bundler_msg) != MSG_BUNDLER)
		return 0;
	if (msg_type(bundler_msg) != OPEN_MSG)
		return 0;
	if (skb_tailroom(bundler) < (pad + size))
		return 0;
	if (l_ptr->max_pkt < (to_pos + size))
		return 0;

	skb_put(bundler, pad + size);
	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
	msg_set_size(bundler_msg, to_pos + size);
	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
	kfree_skb(buf);
	l_ptr->stats.sent_bundled++;
	return 1;
}

static void link_add_to_outqueue(struct tipc_link *l_ptr,
				 struct sk_buff *buf,
				 struct tipc_msg *msg)
{
	u32 ack = mod(l_ptr->next_in_no - 1);
	u32 seqno = mod(l_ptr->next_out_no++);

	msg_set_word(msg, 2, ((ack << 16) | seqno));
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	buf->next = NULL;
	if (l_ptr->first_out) {
		l_ptr->last_out->next = buf;
		l_ptr->last_out = buf;
	} else
		l_ptr->first_out = l_ptr->last_out = buf;

	l_ptr->out_queue_size++;
	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
}

static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
				       struct sk_buff *buf_chain,
				       u32 long_msgno)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!l_ptr->next_out)
		l_ptr->next_out = buf_chain;
	while (buf_chain) {
		buf = buf_chain;
		buf_chain = buf_chain->next;

		msg = buf_msg(buf);
		msg_set_long_msgno(msg, long_msgno);
		link_add_to_outqueue(l_ptr, buf, msg);
	}
}
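/*
 * Illustrative note (not part of the original code): in
 * link_bundle_buf() each bundled message starts on a 4-byte boundary.
 * If the bundle currently holds 53 bytes, to_pos = align(53) = 56 and
 * pad = 3, so a 100-byte message is copied to offset 56 and the
 * bundle grows to 156 bytes; the bundle header keeps the running
 * message count via msg_set_msgcnt(), and all size checks are made
 * before any buffer is modified.
 */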
/*
 * tipc_link_send_buf() is the 'full path' for messages, called from
 * inside TIPC when the 'fast path' in tipc_send_buf
 * has failed, and from link_send()
 */
int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 dsz = msg_data_sz(msg);
	u32 queue_size = l_ptr->out_queue_size;
	u32 imp = tipc_msg_tot_importance(msg);
	u32 queue_limit = l_ptr->queue_limit[imp];
	u32 max_packet = l_ptr->max_pkt;

	/* Match msg importance against queue limits: */
	if (unlikely(queue_size >= queue_limit)) {
		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
			link_schedule_port(l_ptr, msg_origport(msg), size);
			kfree_skb(buf);
			return -ELINKCONG;
		}
		kfree_skb(buf);
		if (imp > CONN_MANAGER) {
			pr_warn("%s<%s>, send queue full\n", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
		}
		return dsz;
	}

	/* Fragmentation needed ? */
	if (size > max_packet)
		return link_send_long_buf(l_ptr, buf);

	/* Packet can be queued or sent. */
	if (likely(!tipc_bearer_blocked(l_ptr->b_ptr) &&
		   !link_congested(l_ptr))) {
		link_add_to_outqueue(l_ptr, buf, msg);

		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		return dsz;
	}
	/* Congestion: can message be bundled ? */
	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
	    (msg_user(msg) != MSG_FRAGMENTER)) {

		/* Try adding message to an existing bundle */
		if (l_ptr->next_out &&
		    link_bundle_buf(l_ptr, l_ptr->last_out, buf))
			return dsz;

		/* Try creating a new bundle */
		if (size <= max_packet * 2 / 3) {
			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
			struct tipc_msg bundler_hdr;

			if (bundler) {
				tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
					      INT_H_SIZE, l_ptr->addr);
				skb_copy_to_linear_data(bundler, &bundler_hdr,
							INT_H_SIZE);
				skb_trim(bundler, INT_H_SIZE);
				link_bundle_buf(l_ptr, bundler, buf);
				buf = bundler;
				msg = buf_msg(buf);
				l_ptr->stats.sent_bundles++;
			}
		}
	}
	if (!l_ptr->next_out)
		l_ptr->next_out = buf;
	link_add_to_outqueue(l_ptr, buf, msg);
	return dsz;
}

/*
 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
 * not been selected yet, and the owner node is not locked
 * Called by TIPC internal users, e.g. the name distributor
 */
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	int res = -ELINKCONG;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = tipc_link_send_buf(l_ptr, buf);
		else
			kfree_skb(buf);
		tipc_node_unlock(n_ptr);
	} else {
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}
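/*
 * Illustrative summary (not part of the original code) of the send
 * path above: a message first hits the importance-based queue-limit
 * check (blocked senders get -ELINKCONG and are woken later via
 * tipc_link_wakeup_ports()), is fragmented if it exceeds max_pkt,
 * goes straight to the bearer when neither link nor bearer is
 * congested, and is otherwise bundled into or behind next_out for
 * tipc_link_push_queue() to drain once congestion abates.
 */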
/*
 * tipc_link_send_sync - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_send_sync(struct tipc_link *l)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (!buf)
		return;

	msg = buf_msg(buf);
	tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
	msg_set_last_bcast(msg, l->owner->bclink.acked);
	link_add_chain_to_outqueue(l, buf, 0);
	tipc_link_push_queue(l);
}

/*
 * tipc_link_recv_sync - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

/*
 * tipc_link_send_names - send name table entries to new neighbor
 *
 * Send routine for bulk delivery of name table messages when contact
 * with a new neighbor occurs. No link congestion checking is performed
 * because name table messages *must* be delivered. The messages must be
 * small enough not to require fragmentation.
 * Called without any locks held.
 */
void tipc_link_send_names(struct list_head *message_list, u32 dest)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct sk_buff *temp_buf;

	if (list_empty(message_list))
		return;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[0];
		if (l_ptr) {
			/* convert circular list to linear list */
			((struct sk_buff *)message_list->prev)->next = NULL;
			link_add_chain_to_outqueue(l_ptr,
				(struct sk_buff *)message_list->next, 0);
			tipc_link_push_queue(l_ptr);
			INIT_LIST_HEAD(message_list);
		}
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);

	/* discard the messages if they couldn't be sent */
	list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
		list_del((struct list_head *)buf);
		kfree_skb(buf);
	}
}

/*
 * link_send_buf_fast: Entry for data messages where the
 * destination link is known and the header is complete,
 * inclusive total message length. Very time critical.
 * Link is locked. Returns user data length.
 */
static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
			      u32 *used_max_pkt)
{
	struct tipc_msg *msg = buf_msg(buf);
	int res = msg_data_sz(msg);

	if (likely(!link_congested(l_ptr))) {
		if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
			if (likely(!tipc_bearer_blocked(l_ptr->b_ptr))) {
				link_add_to_outqueue(l_ptr, buf, msg);
				tipc_bearer_send(l_ptr->b_ptr, buf,
						 &l_ptr->media_addr);
				l_ptr->unacked_window = 0;
				return res;
			}
		} else
			*used_max_pkt = l_ptr->max_pkt;
	}
	return tipc_link_send_buf(l_ptr, buf);	/* All other cases */
}
/*
 * tipc_link_send_sections_fast: Entry for messages where the
 * destination processor is known and the header is complete,
 * except for total message length.
 * Returns user data length or errno.
 */
int tipc_link_send_sections_fast(struct tipc_port *sender,
				 struct iovec const *msg_sect,
				 unsigned int len, u32 destaddr)
{
	struct tipc_msg *hdr = &sender->phdr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct tipc_node *node;
	int res;
	u32 selector = msg_origport(hdr) & 1;

again:
	/*
	 * Try building message using port's max_pkt hint.
	 * (Must not hold any locks while building message.)
	 */
	res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
	/* Exit if build request was invalid */
	if (unlikely(res < 0))
		return res;

	read_lock_bh(&tipc_net_lock);
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[selector];
		if (likely(l_ptr)) {
			if (likely(buf)) {
				res = link_send_buf_fast(l_ptr, buf,
							 &sender->max_pkt);
exit:
				tipc_node_unlock(node);
				read_unlock_bh(&tipc_net_lock);
				return res;
			}

			/* Exit if link (or bearer) is congested */
			if (link_congested(l_ptr) ||
			    tipc_bearer_blocked(l_ptr->b_ptr)) {
				res = link_schedule_port(l_ptr,
							 sender->ref, res);
				goto exit;
			}

			/*
			 * Message size exceeds max_pkt hint; update hint,
			 * then re-try fast path or fragment the message
			 */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			read_unlock_bh(&tipc_net_lock);


			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
				goto again;

			return link_send_sections_long(sender, msg_sect, len,
						       destaddr);
		}
		tipc_node_unlock(node);
	}
	read_unlock_bh(&tipc_net_lock);

	/* Couldn't find a link to the destination node */
	if (buf)
		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	if (res >= 0)
		return tipc_port_reject_sections(sender, hdr, msg_sect,
						 len, TIPC_ERR_NO_NODE);
	return res;
}
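/*
 * Illustrative note (not part of the original code) on the sizing in
 * link_send_sections_long() below: with a max_pkt hint of, say, 1500
 * bytes, each fragment buffer is capped at 1500 - INT_H_SIZE bytes to
 * leave room for a changeover tunnel header, and carries at most
 * max_pkt - INT_H_SIZE bytes of payload after its own fragmentation
 * header; the first fragment additionally embeds the original message
 * header right after the fragment header.
 */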
/*
 * link_send_sections_long(): Entry for long messages where the
 * destination node is known and the header is complete,
 * inclusive total message length.
 * Link and bearer congestion status have been checked to be ok,
 * and are ignored if they change.
 *
 * Note that fragments do not use the full link MTU so that they won't have
 * to undergo refragmentation if link changeover causes them to be sent
 * over another link with an additional tunnel header added as prefix.
 * (Refragmentation will still occur if the other link has a smaller MTU.)
 *
 * Returns user data length or errno.
 */
static int link_send_sections_long(struct tipc_port *sender,
				   struct iovec const *msg_sect,
				   unsigned int len, u32 destaddr)
{
	struct tipc_link *l_ptr;
	struct tipc_node *node;
	struct tipc_msg *hdr = &sender->phdr;
	u32 dsz = len;
	u32 max_pkt, fragm_sz, rest;
	struct tipc_msg fragm_hdr;
	struct sk_buff *buf, *buf_chain, *prev;
	u32 fragm_crs, fragm_rest, hsz, sect_rest;
	const unchar __user *sect_crs;
	int curr_sect;
	u32 fragm_no;
	int res = 0;

again:
	fragm_no = 1;
	max_pkt = sender->max_pkt - INT_H_SIZE;
		/* leave room for tunnel header in case of link changeover */
	fragm_sz = max_pkt - INT_H_SIZE;
		/* leave room for fragmentation header in each fragment */
	rest = dsz;
	fragm_crs = 0;
	fragm_rest = 0;
	sect_rest = 0;
	sect_crs = NULL;
	curr_sect = -1;

	/* Prepare reusable fragment header */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(hdr));
	msg_set_size(&fragm_hdr, max_pkt);
	msg_set_fragm_no(&fragm_hdr, 1);

	/* Prepare header of first fragment */
	buf_chain = buf = tipc_buf_acquire(max_pkt);
	if (!buf)
		return -ENOMEM;
	buf->next = NULL;
	skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
	hsz = msg_hdr_sz(hdr);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);

	/* Chop up message */
	fragm_crs = INT_H_SIZE + hsz;
	fragm_rest = fragm_sz - hsz;

	do {		/* For all sections */
		u32 sz;

		if (!sect_rest) {
			sect_rest = msg_sect[++curr_sect].iov_len;
			sect_crs = msg_sect[curr_sect].iov_base;
		}

		if (sect_rest < fragm_rest)
			sz = sect_rest;
		else
			sz = fragm_rest;

		if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
			res = -EFAULT;
error:
			for (; buf_chain; buf_chain = buf) {
				buf = buf_chain->next;
				kfree_skb(buf_chain);
			}
			return res;
		}
		sect_crs += sz;
		sect_rest -= sz;
		fragm_crs += sz;
		fragm_rest -= sz;
		rest -= sz;

		if (!fragm_rest && rest) {

			/* Initiate new fragment: */
			if (rest <= fragm_sz) {
				fragm_sz = rest;
				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
			} else {
				msg_set_type(&fragm_hdr, FRAGMENT);
			}
			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
			prev = buf;
			buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
			if (!buf) {
				res = -ENOMEM;
				goto error;
			}

			buf->next = NULL;
			prev->next = buf;
			skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
			fragm_crs = INT_H_SIZE;
			fragm_rest = fragm_sz;
		}
	} while (rest > 0);
	/*
	 * Now we have a buffer chain. Select a link and check
	 * that packet size is still OK
	 */
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[sender->ref & 1];
		if (!l_ptr) {
			tipc_node_unlock(node);
			goto reject;
		}
		if (l_ptr->max_pkt < max_pkt) {
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			for (; buf_chain; buf_chain = buf) {
				buf = buf_chain->next;
				kfree_skb(buf_chain);
			}
			goto again;
		}
	} else {
reject:
		for (; buf_chain; buf_chain = buf) {
			buf = buf_chain->next;
			kfree_skb(buf_chain);
		}
		return tipc_port_reject_sections(sender, hdr, msg_sect,
						 len, TIPC_ERR_NO_NODE);
	}

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);
	tipc_node_unlock(node);
	return dsz;
}

/*
 * tipc_link_push_packet: Push one unsent packet to the media
 */
u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any,    */
	/* consider that buffers may have been released in meantime */
	if (r_q_size && buf) {
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = buf_seqno(buf);

		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */
	if (r_q_size && buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->retransm_queue_head = mod(++r_q_head);
		l_ptr->retransm_queue_size = --r_q_size;
		l_ptr->stats.retransmitted++;
		return 0;
	}

	/* Send deferred protocol message, if any: */
	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		kfree_skb(buf);
		l_ptr->proto_msg_queue = NULL;
		return 0;
	}

	/* Send one deferred data message, if send window not full: */
	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = buf_seqno(l_ptr->first_out);

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
			if (msg_user(msg) == MSG_BUNDLER)
				msg_set_type(msg, CLOSED_MSG);
			l_ptr->next_out = buf->next;
			return 0;
		}
	}
	return 1;
}
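/*
 * Illustrative note (not part of the original code):
 * tipc_link_push_packet() drains work in strict priority order: first
 * any pending retransmission, then a deferred protocol message, then
 * one backlogged data message, and the last only while the send
 * window allows it. E.g. with first_out at seqno 10, next_out at
 * seqno 60 and queue_limit[0] == 50, mod(60 - 10) == 50 is not below
 * the window, so the data message stays queued and the function
 * returns 1 to stop the push loop.
 */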
/*
 * push_queue(): push out the unsent messages of a link where
 *               congestion has abated. Node is locked
 */
void tipc_link_push_queue(struct tipc_link *l_ptr)
{
	u32 res;

	if (tipc_bearer_blocked(l_ptr->b_ptr))
		return;

	do {
		res = tipc_link_push_packet(l_ptr);
	} while (!res);
}

static void link_reset_all(unsigned long addr)
{
	struct tipc_node *n_ptr;
	char addr_string[16];
	u32 i;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find((u32)addr);
	if (!n_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return;	/* node no longer exists */
	}

	tipc_node_lock(n_ptr);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, n_ptr->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (n_ptr->links[i]) {
			link_print(n_ptr->links[i], "Resetting link\n");
			tipc_link_reset(n_ptr->links[i]);
		}
	}

	tipc_node_unlock(n_ptr);
	read_unlock_bh(&tipc_net_lock);
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);

	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);

		tipc_node_unlock(n_ptr);

		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!buf)
		return;

	msg = buf_msg(buf);

	if (tipc_bearer_blocked(l_ptr->b_ptr)) {
		if (l_ptr->retransm_queue_size == 0) {
			l_ptr->retransm_queue_head = msg_seqno(msg);
			l_ptr->retransm_queue_size = retransmits;
		} else {
			pr_err("Unexpected retransmit on link %s (qsize=%d)\n",
			       l_ptr->name, l_ptr->retransm_queue_size);
		}
		return;
	} else {
		/* Detect repeated retransmit failures on unblocked bearer */
		if (l_ptr->last_retransmitted == msg_seqno(msg)) {
			if (++l_ptr->stale_count > 100) {
				link_retransmit_failure(l_ptr, buf);
				return;
			}
		} else {
			l_ptr->last_retransmitted = msg_seqno(msg);
			l_ptr->stale_count = 1;
		}
	}

	while (retransmits && (buf != l_ptr->next_out) && buf) {
		msg = buf_msg(buf);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		buf = buf->next;
		retransmits--;
		l_ptr->stats.retransmitted++;
	}

	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}
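/*
 * Illustrative note (not part of the original code): the stale-count
 * logic above treats more than 100 consecutive retransmit requests
 * that all start at the same sequence number as a hard failure. A
 * unicast link (addr != 0) is then simply reset; for the broadcast
 * link every link to the slowest-acking peer is reset instead, via
 * the deferred link_reset_all() signal, since the broadcast link
 * itself cannot be torn down.
 */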
/**
 * link_insert_deferred_queue - insert deferred messages back into receive chain
 */
static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
						  struct sk_buff *buf)
{
	u32 seq_no;

	if (l_ptr->oldest_deferred_in == NULL)
		return buf;

	seq_no = buf_seqno(l_ptr->oldest_deferred_in);
	if (seq_no == mod(l_ptr->next_in_no)) {
		l_ptr->newest_deferred_in->next = buf;
		buf = l_ptr->oldest_deferred_in;
		l_ptr->oldest_deferred_in = NULL;
		l_ptr->deferred_inqueue_sz = 0;
	}
	return buf;
}

/**
 * link_recv_buf_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.  The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
static int link_recv_buf_validate(struct sk_buff *buf)
{
	static u32 min_data_hdr_size[8] = {
		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
		};

	struct tipc_msg *msg;
	u32 tipc_hdr[2];
	u32 size;
	u32 hdr_size;
	u32 min_hdr_size;

	if (unlikely(buf->len < MIN_H_SIZE))
		return 0;

	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
	if (msg == NULL)
		return 0;

	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return 0;

	size = msg_size(msg);
	hdr_size = msg_hdr_sz(msg);
	min_hdr_size = msg_isdata(msg) ?
		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;

	if (unlikely((hdr_size < min_hdr_size) ||
		     (size < hdr_size) ||
		     (buf->len < size) ||
		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
		return 0;

	return pskb_may_pull(buf, hdr_size);
}
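/*
 * Illustrative note (not part of the original code): the checks above
 * enforce hdr_size <= size <= buf->len, so e.g. a data message whose
 * header claims 2000 bytes but arrives in a 1400-byte buffer is
 * dropped before any field beyond the first two header words is
 * trusted, and the final pskb_may_pull() guarantees the whole header
 * is in the linear area before tipc_recv_msg() dereferences it.
 */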
/**
 * tipc_recv_msg - process TIPC messages arriving from off-node
 * @head: pointer to message buffer chain
 * @tb_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held.  Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
	read_lock_bh(&tipc_net_lock);
	while (head) {
		struct tipc_node *n_ptr;
		struct tipc_link *l_ptr;
		struct sk_buff *crs;
		struct sk_buff *buf = head;
		struct tipc_msg *msg;
		u32 seq_no;
		u32 ackd;
		u32 released = 0;
		int type;

		head = head->next;
		buf->next = NULL;

		/* Ensure bearer is still enabled */
		if (unlikely(!b_ptr->active))
			goto discard;

		/* Ensure message is well-formed */
		if (unlikely(!link_recv_buf_validate(buf)))
			goto discard;

		/* Ensure message data is a single contiguous unit */
		if (unlikely(skb_linearize(buf)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(buf);

		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_recv_msg(buf, b_ptr);
			else
				tipc_bclink_recv_pkt(buf);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
		tipc_node_lock(n_ptr);

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock_discard;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		     msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->block_setup &= ~WAIT_PEER_DOWN;

		if (n_ptr->block_setup)
			goto unlock_discard;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (n_ptr->bclink.recv_permitted)
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		crs = l_ptr->first_out;
		while ((crs != l_ptr->next_out) &&
		       less_eq(buf_seqno(crs), ackd)) {
			struct sk_buff *next = crs->next;

			kfree_skb(crs);
			crs = next;
			released++;
		}
		if (released) {
			l_ptr->first_out = crs;
			l_ptr->out_queue_size -= released;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_queue(l_ptr);
		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			tipc_link_wakeup_ports(l_ptr, 0);
		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

		/* Now (finally!) process the incoming message */
protocol_check:
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				link_recv_proto_msg(l_ptr, buf);
				head = link_insert_deferred_queue(l_ptr, head);
				tipc_node_unlock(n_ptr);
				continue;
			}
			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				buf->next = head;
				head = buf;
				tipc_node_unlock(n_ptr);
				continue;
			}
			goto unlock_discard;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}
		l_ptr->next_in_no++;
		if (unlikely(l_ptr->oldest_deferred_in))
			head = link_insert_deferred_queue(l_ptr, head);
deliver:
		if (likely(msg_isdata(msg))) {
			tipc_node_unlock(n_ptr);
			tipc_port_recv_msg(buf);
			continue;
		}
		switch (msg_user(msg)) {
			int ret;
		case MSG_BUNDLER:
			l_ptr->stats.recv_bundles++;
			l_ptr->stats.recv_bundled += msg_msgcnt(msg);
			tipc_node_unlock(n_ptr);
			tipc_link_recv_bundle(buf);
			continue;
		case NAME_DISTRIBUTOR:
			n_ptr->bclink.recv_permitted = true;
			tipc_node_unlock(n_ptr);
			tipc_named_recv(buf);
			continue;
		case BCAST_PROTOCOL:
			tipc_link_recv_sync(n_ptr, buf);
			tipc_node_unlock(n_ptr);
			continue;
		case CONN_MANAGER:
			tipc_node_unlock(n_ptr);
			tipc_port_recv_proto_msg(buf);
			continue;
		case MSG_FRAGMENTER:
			l_ptr->stats.recv_fragments++;
			ret = tipc_link_recv_fragment(&l_ptr->reasm_head,
						      &l_ptr->reasm_tail,
						      &buf);
			if (ret == LINK_REASM_COMPLETE) {
				l_ptr->stats.recv_fragmented++;
				msg = buf_msg(buf);
				goto deliver;
			}
			if (ret == LINK_REASM_ERROR)
				tipc_link_reset(l_ptr);
			tipc_node_unlock(n_ptr);
			continue;
		case CHANGEOVER_PROTOCOL:
			type = msg_type(msg);
			if (link_recv_changeover_msg(&l_ptr, &buf)) {
				msg = buf_msg(buf);
				seq_no = msg_seqno(msg);
				if (type == ORIGINAL_MSG)
					goto deliver;
				goto protocol_check;
			}
			break;
		default:
			kfree_skb(buf);
			buf = NULL;
			break;
		}
		tipc_node_unlock(n_ptr);
		tipc_net_route_msg(buf);
		continue;
unlock_discard:

		tipc_node_unlock(n_ptr);
discard:
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
}
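/*
 * Illustrative walk-through (not part of the original code) of the
 * deferred-reception machinery used above and defined below: if a
 * link expects sequence number 5 but packets 6 and 7 arrive first,
 * they are parked via tipc_link_defer_pkt() and checkpoint is
 * decremented to force a probe on the next timeout. Once packet 5
 * arrives and next_in_no advances to 6, link_insert_deferred_queue()
 * splices the parked chain back in front of the remaining receive
 * chain, so 5, 6 and 7 are delivered in order.
 */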
/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
			struct sk_buff *buf)
{
	struct sk_buff *queue_buf;
	struct sk_buff **prev;
	u32 seq_no = buf_seqno(buf);

	buf->next = NULL;

	/* Empty queue ? */
	if (*head == NULL) {
		*head = *tail = buf;
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(*tail), seq_no)) {
		(*tail)->next = buf;
		*tail = buf;
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	prev = head;
	queue_buf = *head;
	for (;;) {
		u32 curr_seqno = buf_seqno(queue_buf);

		if (seq_no == curr_seqno) {
			kfree_skb(buf);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;

		prev = &queue_buf->next;
		queue_buf = queue_buf->next;
	}

	buf->next = queue_buf;
	*prev = buf;
	return 1;
}

/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		link_recv_proto_msg(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
				&l_ptr->newest_deferred_in, buf)) {
		l_ptr->deferred_inqueue_sz++;
		l_ptr->stats.deferred_recv++;
		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else
		l_ptr->stats.duplicates++;
}
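/*
 * Illustrative note (not part of the original code): the test
 * (deferred_inqueue_sz % 16) == 1 above throttles gap notifications,
 * so a burst of out-of-sequence arrivals triggers a STATE_MSG when
 * the deferred queue reaches length 1, 17, 33, ... rather than on
 * every deferred packet.
 */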
/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
			      int probe_msg, u32 gap, u32 tolerance,
			      u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Discard any previous message that was deferred due to congestion */
	if (l_ptr->proto_msg_queue) {
		kfree_skb(l_ptr->proto_msg_queue);
		l_ptr->proto_msg_queue = NULL;
	}

	if (link_blocked(l_ptr))
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = buf_seqno(l_ptr->next_out);
		msg_set_next_sent(msg, next_sent);
		if (l_ptr->oldest_deferred_in) {
			u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;

	/* Defer message if bearer is already blocked */
	if (tipc_bearer_blocked(l_ptr->b_ptr)) {
		l_ptr->proto_msg_queue = buf;
		return;
	}

	tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
	l_ptr->unacked_window = 0;
	kfree_skb(buf);
}
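/*
 * Illustrative note (not part of the original code) on the MTU probe
 * sizing above: the probe is padded to a size roughly halfway between
 * the confirmed max_pkt and max_pkt_target, rounded to a 4-byte
 * boundary. With mtu == 1500 and max_pkt_target == 9000, msg_size
 * becomes (1500 + 3750 + 2) & ~3 == 5252; after ten probes without
 * the peer confirming a larger size, max_pkt_target collapses to the
 * last attempted size minus 4 and the search continues over the
 * smaller range, converging like a binary search.
 */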
l_ptr->max_pkt_target; 1941 } 1942 1943 /* Synchronize broadcast link info, if not done previously */ 1944 if (!tipc_node_is_up(l_ptr->owner)) { 1945 l_ptr->owner->bclink.last_sent = 1946 l_ptr->owner->bclink.last_in = 1947 msg_last_bcast(msg); 1948 l_ptr->owner->bclink.oos_state = 0; 1949 } 1950 1951 l_ptr->peer_session = msg_session(msg); 1952 l_ptr->peer_bearer_id = msg_bearer_id(msg); 1953 1954 if (msg_type(msg) == ACTIVATE_MSG) 1955 link_state_event(l_ptr, ACTIVATE_MSG); 1956 break; 1957 case STATE_MSG: 1958 1959 msg_tol = msg_link_tolerance(msg); 1960 if (msg_tol) 1961 link_set_supervision_props(l_ptr, msg_tol); 1962 1963 if (msg_linkprio(msg) && 1964 (msg_linkprio(msg) != l_ptr->priority)) { 1965 pr_warn("%s<%s>, priority change %u->%u\n", 1966 link_rst_msg, l_ptr->name, l_ptr->priority, 1967 msg_linkprio(msg)); 1968 l_ptr->priority = msg_linkprio(msg); 1969 tipc_link_reset(l_ptr); /* Force the change to take effect */ 1970 break; 1971 } 1972 link_state_event(l_ptr, TRAFFIC_MSG_EVT); 1973 l_ptr->stats.recv_states++; 1974 if (link_reset_unknown(l_ptr)) 1975 break; 1976 1977 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) { 1978 rec_gap = mod(msg_next_sent(msg) - 1979 mod(l_ptr->next_in_no)); 1980 } 1981 1982 max_pkt_ack = msg_max_pkt(msg); 1983 if (max_pkt_ack > l_ptr->max_pkt) { 1984 l_ptr->max_pkt = max_pkt_ack; 1985 l_ptr->max_pkt_probes = 0; 1986 } 1987 1988 max_pkt_ack = 0; 1989 if (msg_probe(msg)) { 1990 l_ptr->stats.recv_probes++; 1991 if (msg_size(msg) > sizeof(l_ptr->proto_msg)) 1992 max_pkt_ack = msg_size(msg); 1993 } 1994 1995 /* Send protocol message before any retransmits, to reduce loss risk */ 1996 if (l_ptr->owner->bclink.recv_permitted) 1997 tipc_bclink_update_link_state(l_ptr->owner, 1998 msg_last_bcast(msg)); 1999 2000 if (rec_gap || (msg_probe(msg))) { 2001 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 2002 0, rec_gap, 0, 0, max_pkt_ack); 2003 } 2004 if (msg_seq_gap(msg)) { 2005 l_ptr->stats.recv_nacks++; 2006 tipc_link_retransmit(l_ptr, l_ptr->first_out, 2007 msg_seq_gap(msg)); 2008 } 2009 break; 2010 } 2011 exit: 2012 kfree_skb(buf); 2013 } 2014 2015 2016 /* 2017 * tipc_link_tunnel(): Send one message via a link belonging to 2018 * another bearer. Owner node is locked. 2019 */ 2020 static void tipc_link_tunnel(struct tipc_link *l_ptr, 2021 struct tipc_msg *tunnel_hdr, struct tipc_msg *msg, 2022 u32 selector) 2023 { 2024 struct tipc_link *tunnel; 2025 struct sk_buff *buf; 2026 u32 length = msg_size(msg); 2027 2028 tunnel = l_ptr->owner->active_links[selector & 1]; 2029 if (!tipc_link_is_up(tunnel)) { 2030 pr_warn("%stunnel link no longer available\n", link_co_err); 2031 return; 2032 } 2033 msg_set_size(tunnel_hdr, length + INT_H_SIZE); 2034 buf = tipc_buf_acquire(length + INT_H_SIZE); 2035 if (!buf) { 2036 pr_warn("%sunable to send tunnel msg\n", link_co_err); 2037 return; 2038 } 2039 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE); 2040 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length); 2041 tipc_link_send_buf(tunnel, buf); 2042 } 2043 2044 2045 2046 /* 2047 * changeover(): Send the whole message queue via the remaining link. 2048 * Owner node is locked.
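 *
 * Each queued packet is wrapped in a CHANGEOVER_PROTOCOL/ORIGINAL_MSG
 * header and resent on the remaining active link; when the two active
 * links differ, bundles are unpacked first so that each bundled message
 * is tunneled on the link matching its own selector.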
2049 */ 2050 void tipc_link_changeover(struct tipc_link *l_ptr) 2051 { 2052 u32 msgcount = l_ptr->out_queue_size; 2053 struct sk_buff *crs = l_ptr->first_out; 2054 struct tipc_link *tunnel = l_ptr->owner->active_links[0]; 2055 struct tipc_msg tunnel_hdr; 2056 int split_bundles; 2057 2058 if (!tunnel) 2059 return; 2060 2061 if (!l_ptr->owner->permit_changeover) { 2062 pr_warn("%speer did not permit changeover\n", link_co_err); 2063 return; 2064 } 2065 2066 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2067 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); 2068 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2069 msg_set_msgcnt(&tunnel_hdr, msgcount); 2070 2071 if (!l_ptr->first_out) { 2072 struct sk_buff *buf; 2073 2074 buf = tipc_buf_acquire(INT_H_SIZE); 2075 if (buf) { 2076 skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE); 2077 msg_set_size(&tunnel_hdr, INT_H_SIZE); 2078 tipc_link_send_buf(tunnel, buf); 2079 } else { 2080 pr_warn("%sunable to send changeover msg\n", 2081 link_co_err); 2082 } 2083 return; 2084 } 2085 2086 split_bundles = (l_ptr->owner->active_links[0] != 2087 l_ptr->owner->active_links[1]); 2088 2089 while (crs) { 2090 struct tipc_msg *msg = buf_msg(crs); 2091 2092 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { 2093 struct tipc_msg *m = msg_get_wrapped(msg); 2094 unchar *pos = (unchar *)m; 2095 2096 msgcount = msg_msgcnt(msg); 2097 while (msgcount--) { 2098 msg_set_seqno(m, msg_seqno(msg)); 2099 tipc_link_tunnel(l_ptr, &tunnel_hdr, m, 2100 msg_link_selector(m)); 2101 pos += align(msg_size(m)); 2102 m = (struct tipc_msg *)pos; 2103 } 2104 } else { 2105 tipc_link_tunnel(l_ptr, &tunnel_hdr, msg, 2106 msg_link_selector(msg)); 2107 } 2108 crs = crs->next; 2109 } 2110 } 2111 2112 void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel) 2113 { 2114 struct sk_buff *iter; 2115 struct tipc_msg tunnel_hdr; 2116 2117 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2118 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); 2119 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); 2120 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2121 iter = l_ptr->first_out; 2122 while (iter) { 2123 struct sk_buff *outbuf; 2124 struct tipc_msg *msg = buf_msg(iter); 2125 u32 length = msg_size(msg); 2126 2127 if (msg_user(msg) == MSG_BUNDLER) 2128 msg_set_type(msg, CLOSED_MSG); 2129 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ 2130 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 2131 msg_set_size(&tunnel_hdr, length + INT_H_SIZE); 2132 outbuf = tipc_buf_acquire(length + INT_H_SIZE); 2133 if (outbuf == NULL) { 2134 pr_warn("%sunable to send duplicate msg\n", 2135 link_co_err); 2136 return; 2137 } 2138 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE); 2139 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data, 2140 length); 2141 tipc_link_send_buf(tunnel, outbuf); 2142 if (!tipc_link_is_up(l_ptr)) 2143 return; 2144 iter = iter->next; 2145 } 2146 } 2147 2148 /** 2149 * buf_extract - extracts embedded TIPC message from another message 2150 * @skb: encapsulating message buffer 2151 * @from_pos: offset to extract from 2152 * 2153 * Returns a new message buffer containing an embedded message. The 2154 * encapsulating message itself is left unchanged. 
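 *
 * Returns NULL if a buffer of the required size cannot be allocated,
 * in which case callers treat the embedded message as lost.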
2155 */ 2156 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) 2157 { 2158 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); 2159 u32 size = msg_size(msg); 2160 struct sk_buff *eb; 2161 2162 eb = tipc_buf_acquire(size); 2163 if (eb) 2164 skb_copy_to_linear_data(eb, msg, size); 2165 return eb; 2166 } 2167 2168 /* 2169 * link_recv_changeover_msg(): Receive tunneled packet sent 2170 * via the other link. Node is locked. Return extracted buffer. 2171 */ 2172 static int link_recv_changeover_msg(struct tipc_link **l_ptr, 2173 struct sk_buff **buf) 2174 { 2175 struct sk_buff *tunnel_buf = *buf; 2176 struct tipc_link *dest_link; 2177 struct tipc_msg *msg; 2178 struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf); 2179 u32 msg_typ = msg_type(tunnel_msg); 2180 u32 msg_count = msg_msgcnt(tunnel_msg); 2181 u32 bearer_id = msg_bearer_id(tunnel_msg); 2182 2183 if (bearer_id >= MAX_BEARERS) 2184 goto exit; 2185 dest_link = (*l_ptr)->owner->links[bearer_id]; 2186 if (!dest_link) 2187 goto exit; 2188 if (dest_link == *l_ptr) { 2189 pr_err("Unexpected changeover message on link <%s>\n", 2190 (*l_ptr)->name); 2191 goto exit; 2192 } 2193 *l_ptr = dest_link; 2194 msg = msg_get_wrapped(tunnel_msg); 2195 2196 if (msg_typ == DUPLICATE_MSG) { 2197 if (less(msg_seqno(msg), mod(dest_link->next_in_no))) 2198 goto exit; 2199 *buf = buf_extract(tunnel_buf, INT_H_SIZE); 2200 if (*buf == NULL) { 2201 pr_warn("%sduplicate msg dropped\n", link_co_err); 2202 goto exit; 2203 } 2204 kfree_skb(tunnel_buf); 2205 return 1; 2206 } 2207 2208 /* First original message? */ 2209 if (tipc_link_is_up(dest_link)) { 2210 pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg, 2211 dest_link->name); 2212 tipc_link_reset(dest_link); 2213 dest_link->exp_msg_count = msg_count; 2214 if (!msg_count) 2215 goto exit; 2216 } else if (dest_link->exp_msg_count == START_CHANGEOVER) { 2217 dest_link->exp_msg_count = msg_count; 2218 if (!msg_count) 2219 goto exit; 2220 } 2221 2222 /* Receive original message */ 2223 if (dest_link->exp_msg_count == 0) { 2224 pr_warn("%sgot too many tunnelled messages\n", link_co_err); 2225 goto exit; 2226 } 2227 dest_link->exp_msg_count--; 2228 if (less(msg_seqno(msg), dest_link->reset_checkpoint)) { 2229 goto exit; 2230 } else { 2231 *buf = buf_extract(tunnel_buf, INT_H_SIZE); 2232 if (*buf != NULL) { 2233 kfree_skb(tunnel_buf); 2234 return 1; 2235 } else { 2236 pr_warn("%soriginal msg dropped\n", link_co_err); 2237 } 2238 } 2239 exit: 2240 *buf = NULL; 2241 kfree_skb(tunnel_buf); 2242 return 0; 2243 } 2244 2245 /* 2246 * Bundler functionality: 2247 */ 2248 void tipc_link_recv_bundle(struct sk_buff *buf) 2249 { 2250 u32 msgcount = msg_msgcnt(buf_msg(buf)); 2251 u32 pos = INT_H_SIZE; 2252 struct sk_buff *obuf; 2253 2254 while (msgcount--) { 2255 obuf = buf_extract(buf, pos); 2256 if (obuf == NULL) { 2257 pr_warn("Link unable to unbundle message(s)\n"); 2258 break; 2259 } 2260 pos += align(msg_size(buf_msg(obuf))); 2261 tipc_net_route_msg(obuf); 2262 } 2263 kfree_skb(buf); 2264 } 2265 2266 /* 2267 * Fragmentation/defragmentation: 2268 */ 2269 2270 /* 2271 * link_send_long_buf: Entry for buffers needing fragmentation. 2272 * The buffer is complete, including the total message length. 2273 * Returns user data length.
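 *
 * Worked example (a sketch only, assuming a max_pkt of 1500 octets and
 * the usual 40-octet INT_H_SIZE): a 4000-octet buffer yields fragments
 * carrying 1460 octets of payload each, i.e. a FIRST_FRAGMENT, one
 * FRAGMENT, and a 1080-octet LAST_FRAGMENT, each behind its own
 * fragment header.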
2274 */ 2275 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf) 2276 { 2277 struct sk_buff *buf_chain = NULL; 2278 struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain; 2279 struct tipc_msg *inmsg = buf_msg(buf); 2280 struct tipc_msg fragm_hdr; 2281 u32 insize = msg_size(inmsg); 2282 u32 dsz = msg_data_sz(inmsg); 2283 unchar *crs = buf->data; 2284 u32 rest = insize; 2285 u32 pack_sz = l_ptr->max_pkt; 2286 u32 fragm_sz = pack_sz - INT_H_SIZE; 2287 u32 fragm_no = 0; 2288 u32 destaddr; 2289 2290 if (msg_short(inmsg)) 2291 destaddr = l_ptr->addr; 2292 else 2293 destaddr = msg_destnode(inmsg); 2294 2295 /* Prepare reusable fragment header: */ 2296 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 2297 INT_H_SIZE, destaddr); 2298 2299 /* Chop up message: */ 2300 while (rest > 0) { 2301 struct sk_buff *fragm; 2302 2303 if (rest <= fragm_sz) { 2304 fragm_sz = rest; 2305 msg_set_type(&fragm_hdr, LAST_FRAGMENT); 2306 } 2307 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE); 2308 if (fragm == NULL) { 2309 kfree_skb(buf); 2310 while (buf_chain) { 2311 buf = buf_chain; 2312 buf_chain = buf_chain->next; 2313 kfree_skb(buf); 2314 } 2315 return -ENOMEM; 2316 } 2317 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE); 2318 fragm_no++; 2319 msg_set_fragm_no(&fragm_hdr, fragm_no); 2320 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE); 2321 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs, 2322 fragm_sz); 2323 buf_chain_tail->next = fragm; 2324 buf_chain_tail = fragm; 2325 2326 rest -= fragm_sz; 2327 crs += fragm_sz; 2328 msg_set_type(&fragm_hdr, FRAGMENT); 2329 } 2330 kfree_skb(buf); 2331 2332 /* Append chain of fragments to send queue & send them */ 2333 l_ptr->long_msg_seq_no++; 2334 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); 2335 l_ptr->stats.sent_fragments += fragm_no; 2336 l_ptr->stats.sent_fragmented++; 2337 tipc_link_push_queue(l_ptr); 2338 2339 return dsz; 2340 } 2341 2342 /* 2343 * tipc_link_recv_fragment(): Called with node lock on. Returns 2344 * the reassembled buffer if message is complete. 2345 */ 2346 int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail, 2347 struct sk_buff **fbuf) 2348 { 2349 struct sk_buff *frag = *fbuf; 2350 struct tipc_msg *msg = buf_msg(frag); 2351 u32 fragid = msg_type(msg); 2352 bool headstolen; 2353 int delta; 2354 2355 skb_pull(frag, msg_hdr_sz(msg)); 2356 if (fragid == FIRST_FRAGMENT) { 2357 if (*head || skb_unclone(frag, GFP_ATOMIC)) 2358 goto out_free; 2359 *head = frag; 2360 skb_frag_list_init(*head); 2361 return 0; 2362 } else if (*head && 2363 skb_try_coalesce(*head, frag, &headstolen, &delta)) { 2364 kfree_skb_partial(frag, headstolen); 2365 } else { 2366 if (!*head) 2367 goto out_free; 2368 if (!skb_has_frag_list(*head)) 2369 skb_shinfo(*head)->frag_list = frag; 2370 else 2371 (*tail)->next = frag; 2372 *tail = frag; 2373 (*head)->truesize += frag->truesize; 2374 } 2375 if (fragid == LAST_FRAGMENT) { 2376 *fbuf = *head; 2377 *tail = *head = NULL; 2378 return LINK_REASM_COMPLETE; 2379 } 2380 return 0; 2381 out_free: 2382 pr_warn_ratelimited("Link unable to reassemble fragmented message\n"); 2383 kfree_skb(*fbuf); 2384 return LINK_REASM_ERROR; 2385 } 2386 2387 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) 2388 { 2389 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) 2390 return; 2391 2392 l_ptr->tolerance = tolerance; 2393 l_ptr->continuity_interval = 2394 ((tolerance / 4) > 500) ? 
500 : tolerance / 4; 2395 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4); 2396 } 2397 2398 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) 2399 { 2400 /* Data messages from this node, including FIRST_FRAGMENT */ 2401 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window; 2402 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4; 2403 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5; 2404 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6; 2405 /* Transiting data messages, including FIRST_FRAGMENT */ 2406 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300; 2407 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600; 2408 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900; 2409 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200; 2410 l_ptr->queue_limit[CONN_MANAGER] = 1200; 2411 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; 2412 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000; 2413 /* FRAGMENT and LAST_FRAGMENT packets */ 2414 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000; 2415 } 2416 2417 /** 2418 * link_find_link - locate link by name 2419 * @name: ptr to link name string 2420 * @node: ptr to area to be filled with ptr to associated node 2421 * 2422 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted; 2423 * this also prevents link deletion. 2424 * 2425 * Returns pointer to link (or NULL if invalid link name). 2426 */ 2427 static struct tipc_link *link_find_link(const char *name, 2428 struct tipc_node **node) 2429 { 2430 struct tipc_link *l_ptr; 2431 struct tipc_node *n_ptr; 2432 int i; 2433 2434 list_for_each_entry(n_ptr, &tipc_node_list, list) { 2435 for (i = 0; i < MAX_BEARERS; i++) { 2436 l_ptr = n_ptr->links[i]; 2437 if (l_ptr && !strcmp(l_ptr->name, name)) 2438 goto found; 2439 } 2440 } 2441 l_ptr = NULL; 2442 n_ptr = NULL; 2443 found: 2444 *node = n_ptr; 2445 return l_ptr; 2446 } 2447 2448 /** 2449 * link_value_is_valid - validate proposed link tolerance/priority/window 2450 * 2451 * @cmd: value type (TIPC_CMD_SET_LINK_*) 2452 * @new_value: the new value 2453 * 2454 * Returns 1 if value is within range, 0 if not. 2455 */ 2456 static int link_value_is_valid(u16 cmd, u32 new_value) 2457 { 2458 switch (cmd) { 2459 case TIPC_CMD_SET_LINK_TOL: 2460 return (new_value >= TIPC_MIN_LINK_TOL) && 2461 (new_value <= TIPC_MAX_LINK_TOL); 2462 case TIPC_CMD_SET_LINK_PRI: 2463 return (new_value <= TIPC_MAX_LINK_PRI); 2464 case TIPC_CMD_SET_LINK_WINDOW: 2465 return (new_value >= TIPC_MIN_LINK_WIN) && 2466 (new_value <= TIPC_MAX_LINK_WIN); 2467 } 2468 return 0; 2469 } 2470 2471 /** 2472 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media 2473 * @name: ptr to link, bearer, or media name 2474 * @new_value: new value of link, bearer, or media setting 2475 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*) 2476 * 2477 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted. 2478 * 2479 * Returns 0 if the value was updated, or a negative value on error. 2480 */ 2481 static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) 2482 { 2483 struct tipc_node *node; 2484 struct tipc_link *l_ptr; 2485 struct tipc_bearer *b_ptr; 2486 struct tipc_media *m_ptr; 2487 int res = 0; 2488 2489 l_ptr = link_find_link(name, &node); 2490 if (l_ptr) { 2491 /* 2492 * Acquire node lock for tipc_link_send_proto_msg(). 2493 * See "TIPC locking policy" in net.c.
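 * Holding the node lock also serializes this update with the link's
 * receive and timer paths, which take the same lock.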
2494 */ 2495 tipc_node_lock(node); 2496 switch (cmd) { 2497 case TIPC_CMD_SET_LINK_TOL: 2498 link_set_supervision_props(l_ptr, new_value); 2499 tipc_link_send_proto_msg(l_ptr, 2500 STATE_MSG, 0, 0, new_value, 0, 0); 2501 break; 2502 case TIPC_CMD_SET_LINK_PRI: 2503 l_ptr->priority = new_value; 2504 tipc_link_send_proto_msg(l_ptr, 2505 STATE_MSG, 0, 0, 0, new_value, 0); 2506 break; 2507 case TIPC_CMD_SET_LINK_WINDOW: 2508 tipc_link_set_queue_limits(l_ptr, new_value); 2509 break; 2510 default: 2511 res = -EINVAL; 2512 break; 2513 } 2514 tipc_node_unlock(node); 2515 return res; 2516 } 2517 2518 b_ptr = tipc_bearer_find(name); 2519 if (b_ptr) { 2520 switch (cmd) { 2521 case TIPC_CMD_SET_LINK_TOL: 2522 b_ptr->tolerance = new_value; 2523 break; 2524 case TIPC_CMD_SET_LINK_PRI: 2525 b_ptr->priority = new_value; 2526 break; 2527 case TIPC_CMD_SET_LINK_WINDOW: 2528 b_ptr->window = new_value; 2529 break; 2530 default: 2531 res = -EINVAL; 2532 break; 2533 } 2534 return res; 2535 } 2536 2537 m_ptr = tipc_media_find(name); 2538 if (!m_ptr) 2539 return -ENODEV; 2540 switch (cmd) { 2541 case TIPC_CMD_SET_LINK_TOL: 2542 m_ptr->tolerance = new_value; 2543 break; 2544 case TIPC_CMD_SET_LINK_PRI: 2545 m_ptr->priority = new_value; 2546 break; 2547 case TIPC_CMD_SET_LINK_WINDOW: 2548 m_ptr->window = new_value; 2549 break; 2550 default: 2551 res = -EINVAL; 2552 break; 2553 } 2554 return res; 2555 } 2556 2557 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, 2558 u16 cmd) 2559 { 2560 struct tipc_link_config *args; 2561 u32 new_value; 2562 int res; 2563 2564 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) 2565 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2566 2567 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area); 2568 new_value = ntohl(args->value); 2569 2570 if (!link_value_is_valid(cmd, new_value)) 2571 return tipc_cfg_reply_error_string( 2572 "cannot change, value invalid"); 2573 2574 if (!strcmp(args->name, tipc_bclink_name)) { 2575 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && 2576 (tipc_bclink_set_queue_limits(new_value) == 0)) 2577 return tipc_cfg_reply_none(); 2578 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 2579 " (cannot change setting on broadcast link)"); 2580 } 2581 2582 read_lock_bh(&tipc_net_lock); 2583 res = link_cmd_set_value(args->name, new_value, cmd); 2584 read_unlock_bh(&tipc_net_lock); 2585 if (res) 2586 return tipc_cfg_reply_error_string("cannot change link setting"); 2587 2588 return tipc_cfg_reply_none(); 2589 } 2590 2591 /** 2592 * link_reset_statistics - reset link statistics 2593 * @l_ptr: pointer to link 2594 */ 2595 static void link_reset_statistics(struct tipc_link *l_ptr) 2596 { 2597 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); 2598 l_ptr->stats.sent_info = l_ptr->next_out_no; 2599 l_ptr->stats.recv_info = l_ptr->next_in_no; 2600 } 2601 2602 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) 2603 { 2604 char *link_name; 2605 struct tipc_link *l_ptr; 2606 struct tipc_node *node; 2607 2608 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2609 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2610 2611 link_name = (char *)TLV_DATA(req_tlv_area); 2612 if (!strcmp(link_name, tipc_bclink_name)) { 2613 if (tipc_bclink_reset_stats()) 2614 return tipc_cfg_reply_error_string("link not found"); 2615 return tipc_cfg_reply_none(); 2616 } 2617 2618 read_lock_bh(&tipc_net_lock); 2619 l_ptr = link_find_link(link_name, &node); 2620 if (!l_ptr) { 2621 
read_unlock_bh(&tipc_net_lock); 2622 return tipc_cfg_reply_error_string("link not found"); 2623 } 2624 2625 tipc_node_lock(node); 2626 link_reset_statistics(l_ptr); 2627 tipc_node_unlock(node); 2628 read_unlock_bh(&tipc_net_lock); 2629 return tipc_cfg_reply_none(); 2630 } 2631 2632 /** 2633 * percent - convert count to a percentage of total (rounding up or down) 2634 */ 2635 static u32 percent(u32 count, u32 total) 2636 { 2637 return (count * 100 + (total / 2)) / total; 2638 } 2639 2640 /** 2641 * tipc_link_stats - print link statistics 2642 * @name: link name 2643 * @buf: print buffer area 2644 * @buf_size: size of print buffer area 2645 * 2646 * Returns length of print buffer data string (or 0 if error) 2647 */ 2648 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) 2649 { 2650 struct tipc_link *l; 2651 struct tipc_stats *s; 2652 struct tipc_node *node; 2653 char *status; 2654 u32 profile_total = 0; 2655 int ret; 2656 2657 if (!strcmp(name, tipc_bclink_name)) 2658 return tipc_bclink_stats(buf, buf_size); 2659 2660 read_lock_bh(&tipc_net_lock); 2661 l = link_find_link(name, &node); 2662 if (!l) { 2663 read_unlock_bh(&tipc_net_lock); 2664 return 0; 2665 } 2666 tipc_node_lock(node); 2667 s = &l->stats; 2668 2669 if (tipc_link_is_active(l)) 2670 status = "ACTIVE"; 2671 else if (tipc_link_is_up(l)) 2672 status = "STANDBY"; 2673 else 2674 status = "DEFUNCT"; 2675 2676 ret = tipc_snprintf(buf, buf_size, "Link <%s>\n" 2677 " %s MTU:%u Priority:%u Tolerance:%u ms" 2678 " Window:%u packets\n", 2679 l->name, status, l->max_pkt, l->priority, 2680 l->tolerance, l->queue_limit[0]); 2681 2682 ret += tipc_snprintf(buf + ret, buf_size - ret, 2683 " RX packets:%u fragments:%u/%u bundles:%u/%u\n", 2684 l->next_in_no - s->recv_info, s->recv_fragments, 2685 s->recv_fragmented, s->recv_bundles, 2686 s->recv_bundled); 2687 2688 ret += tipc_snprintf(buf + ret, buf_size - ret, 2689 " TX packets:%u fragments:%u/%u bundles:%u/%u\n", 2690 l->next_out_no - s->sent_info, s->sent_fragments, 2691 s->sent_fragmented, s->sent_bundles, 2692 s->sent_bundled); 2693 2694 profile_total = s->msg_length_counts; 2695 if (!profile_total) 2696 profile_total = 1; 2697 2698 ret += tipc_snprintf(buf + ret, buf_size - ret, 2699 " TX profile sample:%u packets average:%u octets\n" 2700 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% " 2701 "-16384:%u%% -32768:%u%% -66000:%u%%\n", 2702 s->msg_length_counts, 2703 s->msg_lengths_total / profile_total, 2704 percent(s->msg_length_profile[0], profile_total), 2705 percent(s->msg_length_profile[1], profile_total), 2706 percent(s->msg_length_profile[2], profile_total), 2707 percent(s->msg_length_profile[3], profile_total), 2708 percent(s->msg_length_profile[4], profile_total), 2709 percent(s->msg_length_profile[5], profile_total), 2710 percent(s->msg_length_profile[6], profile_total)); 2711 2712 ret += tipc_snprintf(buf + ret, buf_size - ret, 2713 " RX states:%u probes:%u naks:%u defs:%u" 2714 " dups:%u\n", s->recv_states, s->recv_probes, 2715 s->recv_nacks, s->deferred_recv, s->duplicates); 2716 2717 ret += tipc_snprintf(buf + ret, buf_size - ret, 2718 " TX states:%u probes:%u naks:%u acks:%u" 2719 " dups:%u\n", s->sent_states, s->sent_probes, 2720 s->sent_nacks, s->sent_acks, s->retransmitted); 2721 2722 ret += tipc_snprintf(buf + ret, buf_size - ret, 2723 " Congestion link:%u Send queue" 2724 " max:%u avg:%u\n", s->link_congs, 2725 s->max_queue_sz, s->queue_sz_counts ? 
2726 (s->accu_queue_sz / s->queue_sz_counts) : 0); 2727 2728 tipc_node_unlock(node); 2729 read_unlock_bh(&tipc_net_lock); 2730 return ret; 2731 } 2732 2733 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space) 2734 { 2735 struct sk_buff *buf; 2736 struct tlv_desc *rep_tlv; 2737 int str_len; 2738 int pb_len; 2739 char *pb; 2740 2741 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2742 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2743 2744 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); 2745 if (!buf) 2746 return NULL; 2747 2748 rep_tlv = (struct tlv_desc *)buf->data; 2749 pb = TLV_DATA(rep_tlv); 2750 pb_len = ULTRA_STRING_MAX_LEN; 2751 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area), 2752 pb, pb_len); 2753 if (!str_len) { 2754 kfree_skb(buf); 2755 return tipc_cfg_reply_error_string("link not found"); 2756 } 2757 str_len += 1; /* for "\0" */ 2758 skb_put(buf, TLV_SPACE(str_len)); 2759 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 2760 2761 return buf; 2762 } 2763 2764 /** 2765 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination 2766 * @dest: network address of destination node 2767 * @selector: used to select from set of active links 2768 * 2769 * If no active link can be found, uses default maximum packet size. 2770 */ 2771 u32 tipc_link_get_max_pkt(u32 dest, u32 selector) 2772 { 2773 struct tipc_node *n_ptr; 2774 struct tipc_link *l_ptr; 2775 u32 res = MAX_PKT_DEFAULT; 2776 2777 if (dest == tipc_own_addr) 2778 return MAX_MSG_SIZE; 2779 2780 read_lock_bh(&tipc_net_lock); 2781 n_ptr = tipc_node_find(dest); 2782 if (n_ptr) { 2783 tipc_node_lock(n_ptr); 2784 l_ptr = n_ptr->active_links[selector & 1]; 2785 if (l_ptr) 2786 res = l_ptr->max_pkt; 2787 tipc_node_unlock(n_ptr); 2788 } 2789 read_unlock_bh(&tipc_net_lock); 2790 return res; 2791 } 2792 2793 static void link_print(struct tipc_link *l_ptr, const char *str) 2794 { 2795 pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name); 2796 2797 if (link_working_unknown(l_ptr)) 2798 pr_cont(":WU\n"); 2799 else if (link_reset_reset(l_ptr)) 2800 pr_cont(":RR\n"); 2801 else if (link_reset_unknown(l_ptr)) 2802 pr_cont(":RU\n"); 2803 else if (link_working_working(l_ptr)) 2804 pr_cont(":WW\n"); 2805 else 2806 pr_cont("\n"); 2807 } 2808
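/*
 * Usage sketch (illustrative only; 'destnode', 'selector', 'hdr_sz' and
 * 'data_sz' stand for values a sending port would supply). It shows how
 * the per-destination packet limit above interacts with the 4-byte
 * alignment used throughout this file:
 *
 *	u32 limit = tipc_link_get_max_pkt(destnode, selector);
 *
 *	if (align(hdr_sz + data_sz) > limit)
 *		... message must take the fragmentation path ...
 *
 * The real send paths make this check internally and then derive the
 * fragment size from the link's max_pkt, as in link_send_long_buf().
 */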