/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */
#define STARTING_EVT    856384768	/* link processing trigger */
#define TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
#define TIMEOUT_EVT     560817u	/* link timer expired */
/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u

static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf);
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
static int  tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
				 struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
static int  link_send_sections_long(struct tipc_port *sender,
				    struct iovec const *msg_sect,
				    unsigned int len, u32 destnode);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
static void tipc_link_send_sync(struct tipc_link *l);
static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);

/*
 * Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	u32 max_pkt;

	max_pkt = (l_ptr->b_ptr->mtu & ~3);
	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}

static u32 link_next_sent(struct tipc_link *l_ptr)
{
	if (l_ptr->next_out)
		return buf_seqno(l_ptr->next_out);
	return mod(l_ptr->next_out_no);
}

static u32 link_last_sent(struct tipc_link *l_ptr)
{
	return mod(link_next_sent(l_ptr) - 1);
}

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return (l_ptr->owner->active_links[0] == l_ptr) ||
	       (l_ptr->owner->active_links[1] == l_ptr);
}

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 *
 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
 * with tipc_link_delete(). (There is no risk that the node will be deleted by
 * another thread because tipc_link_delete() always cancels the link timer before
 * tipc_node_delete() is called.)
 */
static void link_timeout(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
	l_ptr->stats.queue_sz_counts++;

	if (l_ptr->first_out) {
		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */

	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_queue(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}
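/*
 * link_set_timer - (re)start the link supervision timer
 *
 * Callers pass the link's continuity interval (or a fraction of it,
 * when probing more aggressively); see link_state_event() below.
 */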
static void link_set_timer(struct tipc_link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= 2) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish third link to %s\n", addr_string);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->b_ptr = b_ptr;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	INIT_LIST_HEAD(&l_ptr->waiting_ports);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	k_init_timer(&l_ptr->timer, (Handler)link_timeout,
		     (unsigned long)l_ptr);
	list_add_tail(&l_ptr->link_list, &b_ptr->links);

	link_state_event(l_ptr, STARTING_EVT);

	return l_ptr;
}

/**
 * tipc_link_delete - delete a link
 * @l_ptr: pointer to link
 *
 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
 * This routine must not grab the node lock until after link timer cancellation
 * to avoid a potential deadlock situation.
 */
void tipc_link_delete(struct tipc_link *l_ptr)
{
	if (!l_ptr) {
		pr_err("Attempt to delete non-existent link\n");
		return;
	}

	k_cancel_timer(&l_ptr->timer);

	tipc_node_lock(l_ptr->owner);
	tipc_link_reset(l_ptr);
	tipc_node_detach_link(l_ptr->owner, l_ptr);
	tipc_link_purge_queues(l_ptr);
	list_del_init(&l_ptr->link_list);
	tipc_node_unlock(l_ptr->owner);
	k_term_timer(&l_ptr->timer);
	kfree(l_ptr);
}


/**
 * link_schedule_port - schedule port for deferred sending
 * @l_ptr: pointer to link
 * @origport: reference to sending port
 * @sz: amount of data to be sent
 *
 * Schedules port for renewed sending of messages after link congestion
 * has abated.
 */
static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
{
	struct tipc_port *p_ptr;

	spin_lock_bh(&tipc_port_list_lock);
	p_ptr = tipc_port_lock(origport);
	if (p_ptr) {
		if (!p_ptr->wakeup)
			goto exit;
		if (!list_empty(&p_ptr->wait_list))
			goto exit;
		p_ptr->congested = 1;
		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
		l_ptr->stats.link_congs++;
exit:
		tipc_port_unlock(p_ptr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	return -ELINKCONG;
}

void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
{
	struct tipc_port *p_ptr;
	struct tipc_port *temp_p_ptr;
	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;

	if (all)
		win = 100000;
	if (win <= 0)
		return;
	if (!spin_trylock_bh(&tipc_port_list_lock))
		return;
	if (link_congested(l_ptr))
		goto exit;
	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
				 wait_list) {
		if (win <= 0)
			break;
		list_del_init(&p_ptr->wait_list);
		spin_lock_bh(p_ptr->lock);
		p_ptr->congested = 0;
		p_ptr->wakeup(p_ptr);
		win -= p_ptr->waiting_pkts;
		spin_unlock_bh(p_ptr->lock);
	}

exit:
	spin_unlock_bh(&tipc_port_list_lock);
}

/**
 * link_release_outqueue - purge link's outbound message queue
 * @l_ptr: pointer to link
 */
static void link_release_outqueue(struct tipc_link *l_ptr)
{
	kfree_skb_list(l_ptr->first_out);
	l_ptr->first_out = NULL;
	l_ptr->out_queue_size = 0;
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_head);
	l_ptr->reasm_head = NULL;
	l_ptr->reasm_tail = NULL;
}
/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	kfree_skb_list(l_ptr->oldest_deferred_in);
	kfree_skb_list(l_ptr->first_out);
	tipc_link_reset_fragments(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
}

void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */
	link_release_outqueue(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	kfree_skb_list(l_ptr->oldest_deferred_in);
	if (!list_empty(&l_ptr->waiting_ports))
		tipc_link_wakeup_ports(l_ptr, 1);

	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}


static void link_activate(struct tipc_link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
}
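/*
 * Link FSM overview (derived from the state/event handling below):
 *
 *   RESET_UNKNOWN   - link is down; we probe with RESET messages
 *   RESET_RESET     - link is down; a peer reset was seen, we answer
 *                     with ACTIVATE messages
 *   WORKING_WORKING - link is up and traffic (or probe replies) keep
 *                     confirming the peer
 *   WORKING_UNKNOWN - link is up but the peer has gone quiet; we probe
 *                     at cont_intv/4 and reset the link once the abort
 *                     limit of unanswered probes is reached
 */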
/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (!l_ptr->started && (event != STARTING_EVT))
		return;		/* Not yet. */

	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer\n", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer while probing\n",
				link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_send_proto_msg(l_ptr, STATE_MSG,
							 1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_warn("%s<%s>, peer not responding\n",
					link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_send_proto_msg(l_ptr, RESET_MSG,
							 0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_send_sync(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->started = 1;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_send_sync(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}
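/*
 * Bundle arithmetic example (derived from align() and the checks below):
 * if a bundle currently holds 121 bytes, align() rounds the append
 * position up to 124, so a 100-byte message needs 3 bytes of padding and
 * grows the bundle to 224 bytes. The append is refused if skb tailroom
 * or the link's max_pkt would be exceeded.
 */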
/*
 * link_bundle_buf(): Append contents of a buffer to
 * the tail of an existing one.
 */
static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
			   struct sk_buff *buf)
{
	struct tipc_msg *bundler_msg = buf_msg(bundler);
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 bundle_size = msg_size(bundler_msg);
	u32 to_pos = align(bundle_size);
	u32 pad = to_pos - bundle_size;

	if (msg_user(bundler_msg) != MSG_BUNDLER)
		return 0;
	if (msg_type(bundler_msg) != OPEN_MSG)
		return 0;
	if (skb_tailroom(bundler) < (pad + size))
		return 0;
	if (l_ptr->max_pkt < (to_pos + size))
		return 0;

	skb_put(bundler, pad + size);
	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
	msg_set_size(bundler_msg, to_pos + size);
	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
	kfree_skb(buf);
	l_ptr->stats.sent_bundled++;
	return 1;
}

static void link_add_to_outqueue(struct tipc_link *l_ptr,
				 struct sk_buff *buf,
				 struct tipc_msg *msg)
{
	u32 ack = mod(l_ptr->next_in_no - 1);
	u32 seqno = mod(l_ptr->next_out_no++);

	msg_set_word(msg, 2, ((ack << 16) | seqno));
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	buf->next = NULL;
	if (l_ptr->first_out) {
		l_ptr->last_out->next = buf;
		l_ptr->last_out = buf;
	} else
		l_ptr->first_out = l_ptr->last_out = buf;

	l_ptr->out_queue_size++;
	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
}

static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
				       struct sk_buff *buf_chain,
				       u32 long_msgno)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!l_ptr->next_out)
		l_ptr->next_out = buf_chain;
	while (buf_chain) {
		buf = buf_chain;
		buf_chain = buf_chain->next;

		msg = buf_msg(buf);
		msg_set_long_msgno(msg, long_msgno);
		link_add_to_outqueue(l_ptr, buf, msg);
	}
}

/*
 * tipc_link_send_buf() is the 'full path' for messages, called from
 * inside TIPC when the 'fast path' in tipc_send_buf
 * has failed, and from link_send()
 */
int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 dsz = msg_data_sz(msg);
	u32 queue_size = l_ptr->out_queue_size;
	u32 imp = tipc_msg_tot_importance(msg);
	u32 queue_limit = l_ptr->queue_limit[imp];
	u32 max_packet = l_ptr->max_pkt;

	/* Match msg importance against queue limits: */
	if (unlikely(queue_size >= queue_limit)) {
		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
			link_schedule_port(l_ptr, msg_origport(msg), size);
			kfree_skb(buf);
			return -ELINKCONG;
		}
		kfree_skb(buf);
		if (imp > CONN_MANAGER) {
			pr_warn("%s<%s>, send queue full", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
		}
		return dsz;
	}

	/* Fragmentation needed ? */
	if (size > max_packet)
		return link_send_long_buf(l_ptr, buf);

	/* Packet can be queued or sent. */
	if (likely(!link_congested(l_ptr))) {
		link_add_to_outqueue(l_ptr, buf, msg);

		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		return dsz;
	}
	/* Congestion: can message be bundled ? */
	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
	    (msg_user(msg) != MSG_FRAGMENTER)) {

		/* Try adding message to an existing bundle */
		if (l_ptr->next_out &&
		    link_bundle_buf(l_ptr, l_ptr->last_out, buf))
			return dsz;

		/* Try creating a new bundle */
		if (size <= max_packet * 2 / 3) {
			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
			struct tipc_msg bundler_hdr;

			if (bundler) {
				tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
					      INT_H_SIZE, l_ptr->addr);
				skb_copy_to_linear_data(bundler, &bundler_hdr,
							INT_H_SIZE);
				skb_trim(bundler, INT_H_SIZE);
				link_bundle_buf(l_ptr, bundler, buf);
				buf = bundler;
				msg = buf_msg(buf);
				l_ptr->stats.sent_bundles++;
			}
		}
	}
	if (!l_ptr->next_out)
		l_ptr->next_out = buf;
	link_add_to_outqueue(l_ptr, buf, msg);
	return dsz;
}

/*
 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
 * not been selected yet, and the owner node is not locked
 * Called by TIPC internal users, e.g. the name distributor
 */
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	int res = -ELINKCONG;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = tipc_link_send_buf(l_ptr, buf);
		else
			kfree_skb(buf);
		tipc_node_unlock(n_ptr);
	} else {
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}
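/*
 * Note on selectors: callers pass an arbitrary selector value (typically
 * the sending port's reference), and 'selector & 1' picks one of the two
 * possible active links, so traffic from a given sender always maps to
 * the same link while both are up.
 */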
/*
 * tipc_link_send_sync - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_send_sync(struct tipc_link *l)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (!buf)
		return;

	msg = buf_msg(buf);
	tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
	msg_set_last_bcast(msg, l->owner->bclink.acked);
	link_add_chain_to_outqueue(l, buf, 0);
	tipc_link_push_queue(l);
}

/*
 * tipc_link_recv_sync - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

/*
 * tipc_link_send_names - send name table entries to new neighbor
 *
 * Send routine for bulk delivery of name table messages when contact
 * with a new neighbor occurs. No link congestion checking is performed
 * because name table messages *must* be delivered. The messages must be
 * small enough not to require fragmentation.
 * Called without any locks held.
 */
void tipc_link_send_names(struct list_head *message_list, u32 dest)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct sk_buff *temp_buf;

	if (list_empty(message_list))
		return;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[0];
		if (l_ptr) {
			/* convert circular list to linear list */
			((struct sk_buff *)message_list->prev)->next = NULL;
			link_add_chain_to_outqueue(l_ptr,
				(struct sk_buff *)message_list->next, 0);
			tipc_link_push_queue(l_ptr);
			INIT_LIST_HEAD(message_list);
		}
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);

	/* discard the messages if they couldn't be sent */
	list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
		list_del((struct list_head *)buf);
		kfree_skb(buf);
	}
}
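/*
 * Note: the buffers on 'message_list' are sk_buffs whose leading
 * (next, prev) members mirror a struct list_head, which is what the
 * pointer casts above rely on when converting the circular list into
 * a linear ->next chain.
 */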
/*
 * link_send_buf_fast: Entry for data messages where the
 * destination link is known and the header is complete,
 * including the total message length. Very time critical.
 * Link is locked. Returns user data length.
 */
static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
			      u32 *used_max_pkt)
{
	struct tipc_msg *msg = buf_msg(buf);
	int res = msg_data_sz(msg);

	if (likely(!link_congested(l_ptr))) {
		if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
			link_add_to_outqueue(l_ptr, buf, msg);
			tipc_bearer_send(l_ptr->b_ptr, buf,
					 &l_ptr->media_addr);
			l_ptr->unacked_window = 0;
			return res;
		} else
			*used_max_pkt = l_ptr->max_pkt;
	}
	return tipc_link_send_buf(l_ptr, buf);	/* All other cases */
}

/*
 * tipc_link_send_sections_fast: Entry for messages where the
 * destination processor is known and the header is complete,
 * except for total message length.
 * Returns user data length or errno.
 */
int tipc_link_send_sections_fast(struct tipc_port *sender,
				 struct iovec const *msg_sect,
				 unsigned int len, u32 destaddr)
{
	struct tipc_msg *hdr = &sender->phdr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct tipc_node *node;
	int res;
	u32 selector = msg_origport(hdr) & 1;

again:
	/*
	 * Try building message using port's max_pkt hint.
	 * (Must not hold any locks while building message.)
	 */
	res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
	/* Exit if build request was invalid */
	if (unlikely(res < 0))
		return res;

	read_lock_bh(&tipc_net_lock);
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[selector];
		if (likely(l_ptr)) {
			if (likely(buf)) {
				res = link_send_buf_fast(l_ptr, buf,
							 &sender->max_pkt);
exit:
				tipc_node_unlock(node);
				read_unlock_bh(&tipc_net_lock);
				return res;
			}

			/* Exit if link (or bearer) is congested */
			if (link_congested(l_ptr)) {
				res = link_schedule_port(l_ptr,
							 sender->ref, res);
				goto exit;
			}

			/*
			 * Message size exceeds max_pkt hint; update hint,
			 * then re-try fast path or fragment the message
			 */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			read_unlock_bh(&tipc_net_lock);


			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
				goto again;

			return link_send_sections_long(sender, msg_sect, len,
						       destaddr);
		}
		tipc_node_unlock(node);
	}
	read_unlock_bh(&tipc_net_lock);

	/* Couldn't find a link to the destination node */
	if (buf)
		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	if (res >= 0)
		return tipc_port_reject_sections(sender, hdr, msg_sect,
						 len, TIPC_ERR_NO_NODE);
	return res;
}
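/*
 * Note on the 'again' retry above: as the caller logic shows, a NULL
 * buffer with a non-negative return from tipc_msg_build() means the
 * message did not fit the current max_pkt hint. Once the hint has been
 * refreshed from the link, the message is rebuilt; only if it still
 * cannot fit in one packet is it handed to link_send_sections_long()
 * for fragmentation.
 */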
/*
 * link_send_sections_long(): Entry for long messages where the
 * destination node is known and the header is complete,
 * including the total message length.
 * Link and bearer congestion status have been checked to be ok,
 * and are ignored if they change.
 *
 * Note that fragments do not use the full link MTU so that they won't have
 * to undergo refragmentation if link changeover causes them to be sent
 * over another link with an additional tunnel header added as prefix.
 * (Refragmentation will still occur if the other link has a smaller MTU.)
 *
 * Returns user data length or errno.
 */
static int link_send_sections_long(struct tipc_port *sender,
				   struct iovec const *msg_sect,
				   unsigned int len, u32 destaddr)
{
	struct tipc_link *l_ptr;
	struct tipc_node *node;
	struct tipc_msg *hdr = &sender->phdr;
	u32 dsz = len;
	u32 max_pkt, fragm_sz, rest;
	struct tipc_msg fragm_hdr;
	struct sk_buff *buf, *buf_chain, *prev;
	u32 fragm_crs, fragm_rest, hsz, sect_rest;
	const unchar __user *sect_crs;
	int curr_sect;
	u32 fragm_no;
	int res = 0;

again:
	fragm_no = 1;
	max_pkt = sender->max_pkt - INT_H_SIZE;
		/* leave room for tunnel header in case of link changeover */
	fragm_sz = max_pkt - INT_H_SIZE;
		/* leave room for fragmentation header in each fragment */
	rest = dsz;
	fragm_crs = 0;
	fragm_rest = 0;
	sect_rest = 0;
	sect_crs = NULL;
	curr_sect = -1;

	/* Prepare reusable fragment header */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(hdr));
	msg_set_size(&fragm_hdr, max_pkt);
	msg_set_fragm_no(&fragm_hdr, 1);

	/* Prepare header of first fragment */
	buf_chain = buf = tipc_buf_acquire(max_pkt);
	if (!buf)
		return -ENOMEM;
	buf->next = NULL;
	skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
	hsz = msg_hdr_sz(hdr);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);

	/* Chop up message */
	fragm_crs = INT_H_SIZE + hsz;
	fragm_rest = fragm_sz - hsz;

	do {		/* For all sections */
		u32 sz;

		if (!sect_rest) {
			sect_rest = msg_sect[++curr_sect].iov_len;
			sect_crs = msg_sect[curr_sect].iov_base;
		}

		if (sect_rest < fragm_rest)
			sz = sect_rest;
		else
			sz = fragm_rest;

		if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
			res = -EFAULT;
error:
			kfree_skb_list(buf_chain);
			return res;
		}
		sect_crs += sz;
		sect_rest -= sz;
		fragm_crs += sz;
		fragm_rest -= sz;
		rest -= sz;

		if (!fragm_rest && rest) {

			/* Initiate new fragment: */
			if (rest <= fragm_sz) {
				fragm_sz = rest;
				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
			} else {
				msg_set_type(&fragm_hdr, FRAGMENT);
			}
			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
			prev = buf;
			buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
			if (!buf) {
				res = -ENOMEM;
				goto error;
			}

			buf->next = NULL;
			prev->next = buf;
			skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
			fragm_crs = INT_H_SIZE;
			fragm_rest = fragm_sz;
		}
	} while (rest > 0);

	/*
	 * Now we have a buffer chain. Select a link and check
	 * that packet size is still OK
	 */
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[sender->ref & 1];
		if (!l_ptr) {
			tipc_node_unlock(node);
			goto reject;
		}
		if (l_ptr->max_pkt < max_pkt) {
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			kfree_skb_list(buf_chain);
			goto again;
		}
	} else {
reject:
		kfree_skb_list(buf_chain);
		return tipc_port_reject_sections(sender, hdr, msg_sect,
						 len, TIPC_ERR_NO_NODE);
	}

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);
	tipc_node_unlock(node);
	return dsz;
}
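/*
 * Sizing example (assuming INT_H_SIZE is 40 bytes): with a max_pkt hint
 * of 1500, max_pkt above becomes 1460 and fragm_sz 1420, i.e. each
 * fragment reserves room for both a fragmentation header and a possible
 * changeover tunnel header on top of its payload.
 */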
/*
 * tipc_link_push_packet: Push one unsent packet to the media
 */
static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any, */
	/* consider that buffers may have been released in meantime */
	if (r_q_size && buf) {
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = buf_seqno(buf);

		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */
	if (r_q_size && buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->retransm_queue_head = mod(++r_q_head);
		l_ptr->retransm_queue_size = --r_q_size;
		l_ptr->stats.retransmitted++;
		return 0;
	}

	/* Send deferred protocol message, if any: */
	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		kfree_skb(buf);
		l_ptr->proto_msg_queue = NULL;
		return 0;
	}

	/* Send one deferred data message, if send window not full: */
	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = buf_seqno(l_ptr->first_out);

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
			if (msg_user(msg) == MSG_BUNDLER)
				msg_set_type(msg, CLOSED_MSG);
			l_ptr->next_out = buf->next;
			return 0;
		}
	}
	return 1;
}

/*
 * push_queue(): push out the unsent messages of a link where
 * congestion has abated. Node is locked
 */
void tipc_link_push_queue(struct tipc_link *l_ptr)
{
	u32 res;

	do {
		res = tipc_link_push_packet(l_ptr);
	} while (!res);
}

static void link_reset_all(unsigned long addr)
{
	struct tipc_node *n_ptr;
	char addr_string[16];
	u32 i;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find((u32)addr);
	if (!n_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return;	/* node no longer exists */
	}

	tipc_node_lock(n_ptr);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, n_ptr->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (n_ptr->links[i]) {
			link_print(n_ptr->links[i], "Resetting link\n");
			tipc_link_reset(n_ptr->links[i]);
		}
	}

	tipc_node_unlock(n_ptr);
	read_unlock_bh(&tipc_net_lock);
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);

	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);

		tipc_node_unlock(n_ptr);

		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!buf)
		return;

	msg = buf_msg(buf);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, buf);
			return;
		}
	} else {
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	while (retransmits && (buf != l_ptr->next_out) && buf) {
		msg = buf_msg(buf);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		buf = buf->next;
		retransmits--;
		l_ptr->stats.retransmitted++;
	}

	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}
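/*
 * Note: a message that heads a retransmit request more than 100 times
 * in a row without the sequence number moving on is treated as a
 * retransmission failure, which resets the link (or, for the broadcast
 * link, schedules a reset of all links to the peer the retransmit was
 * aimed at).
 */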
/**
 * link_insert_deferred_queue - insert deferred messages back into receive chain
 */
static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
						  struct sk_buff *buf)
{
	u32 seq_no;

	if (l_ptr->oldest_deferred_in == NULL)
		return buf;

	seq_no = buf_seqno(l_ptr->oldest_deferred_in);
	if (seq_no == mod(l_ptr->next_in_no)) {
		l_ptr->newest_deferred_in->next = buf;
		buf = l_ptr->oldest_deferred_in;
		l_ptr->oldest_deferred_in = NULL;
		l_ptr->deferred_inqueue_sz = 0;
	}
	return buf;
}

/**
 * link_recv_buf_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
static int link_recv_buf_validate(struct sk_buff *buf)
{
	static u32 min_data_hdr_size[8] = {
		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
		};

	struct tipc_msg *msg;
	u32 tipc_hdr[2];
	u32 size;
	u32 hdr_size;
	u32 min_hdr_size;

	if (unlikely(buf->len < MIN_H_SIZE))
		return 0;

	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
	if (msg == NULL)
		return 0;

	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return 0;

	size = msg_size(msg);
	hdr_size = msg_hdr_sz(msg);
	min_hdr_size = msg_isdata(msg) ?
		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;

	if (unlikely((hdr_size < min_hdr_size) ||
		     (size < hdr_size) ||
		     (buf->len < size) ||
		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
		return 0;

	return pskb_may_pull(buf, hdr_size);
}
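/*
 * Note: min_data_hdr_size[] above is indexed by message type; data
 * message types 0-3 each imply a minimum header length, while the
 * remaining slots demand a full-size header as a conservative bound.
 */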
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @head: pointer to message buffer chain
 * @tb_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
	read_lock_bh(&tipc_net_lock);
	while (head) {
		struct tipc_node *n_ptr;
		struct tipc_link *l_ptr;
		struct sk_buff *crs;
		struct sk_buff *buf = head;
		struct tipc_msg *msg;
		u32 seq_no;
		u32 ackd;
		u32 released = 0;
		int type;

		head = head->next;
		buf->next = NULL;

		/* Ensure bearer is still enabled */
		if (unlikely(!b_ptr->active))
			goto discard;

		/* Ensure message is well-formed */
		if (unlikely(!link_recv_buf_validate(buf)))
			goto discard;

		/* Ensure message data is a single contiguous unit */
		if (unlikely(skb_linearize(buf)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(buf);

		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_recv_msg(buf, b_ptr);
			else
				tipc_bclink_recv_pkt(buf);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
		tipc_node_lock(n_ptr);

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock_discard;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		     msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->block_setup &= ~WAIT_PEER_DOWN;

		if (n_ptr->block_setup)
			goto unlock_discard;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (n_ptr->bclink.recv_permitted)
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		crs = l_ptr->first_out;
		while ((crs != l_ptr->next_out) &&
		       less_eq(buf_seqno(crs), ackd)) {
			struct sk_buff *next = crs->next;

			kfree_skb(crs);
			crs = next;
			released++;
		}
		if (released) {
			l_ptr->first_out = crs;
			l_ptr->out_queue_size -= released;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_queue(l_ptr);
		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			tipc_link_wakeup_ports(l_ptr, 0);
		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

		/* Now (finally!) process the incoming message */
protocol_check:
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				link_recv_proto_msg(l_ptr, buf);
				head = link_insert_deferred_queue(l_ptr, head);
				tipc_node_unlock(n_ptr);
				continue;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				buf->next = head;
				head = buf;
				tipc_node_unlock(n_ptr);
				continue;
			}
			goto unlock_discard;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}
		l_ptr->next_in_no++;
		if (unlikely(l_ptr->oldest_deferred_in))
			head = link_insert_deferred_queue(l_ptr, head);
deliver:
		if (likely(msg_isdata(msg))) {
			tipc_node_unlock(n_ptr);
			tipc_port_recv_msg(buf);
			continue;
		}
		switch (msg_user(msg)) {
			int ret;
		case MSG_BUNDLER:
			l_ptr->stats.recv_bundles++;
			l_ptr->stats.recv_bundled += msg_msgcnt(msg);
			tipc_node_unlock(n_ptr);
			tipc_link_recv_bundle(buf);
			continue;
		case NAME_DISTRIBUTOR:
			n_ptr->bclink.recv_permitted = true;
			tipc_node_unlock(n_ptr);
			tipc_named_recv(buf);
			continue;
		case BCAST_PROTOCOL:
			tipc_link_recv_sync(n_ptr, buf);
			tipc_node_unlock(n_ptr);
			continue;
		case CONN_MANAGER:
			tipc_node_unlock(n_ptr);
			tipc_port_recv_proto_msg(buf);
			continue;
		case MSG_FRAGMENTER:
			l_ptr->stats.recv_fragments++;
			ret = tipc_link_recv_fragment(&l_ptr->reasm_head,
						      &l_ptr->reasm_tail,
						      &buf);
			if (ret == LINK_REASM_COMPLETE) {
				l_ptr->stats.recv_fragmented++;
				msg = buf_msg(buf);
				goto deliver;
			}
			if (ret == LINK_REASM_ERROR)
				tipc_link_reset(l_ptr);
			tipc_node_unlock(n_ptr);
			continue;
		case CHANGEOVER_PROTOCOL:
			type = msg_type(msg);
			if (tipc_link_tunnel_rcv(&l_ptr, &buf)) {
				msg = buf_msg(buf);
				seq_no = msg_seqno(msg);
				if (type == ORIGINAL_MSG)
					goto deliver;
				goto protocol_check;
			}
			break;
		default:
			kfree_skb(buf);
			buf = NULL;
			break;
		}
		tipc_node_unlock(n_ptr);
		tipc_net_route_msg(buf);
		continue;
unlock_discard:

		tipc_node_unlock(n_ptr);
discard:
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
}
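/*
 * Note on deferred reception: out-of-sequence arrivals are parked on a
 * singly linked, seqno-ordered queue (oldest_deferred_in through
 * newest_deferred_in) and spliced back into the processing chain by
 * link_insert_deferred_queue() once the missing packet shows up.
 */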
/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
			struct sk_buff *buf)
{
	struct sk_buff *queue_buf;
	struct sk_buff **prev;
	u32 seq_no = buf_seqno(buf);

	buf->next = NULL;

	/* Empty queue ? */
	if (*head == NULL) {
		*head = *tail = buf;
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(*tail), seq_no)) {
		(*tail)->next = buf;
		*tail = buf;
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	prev = head;
	queue_buf = *head;
	for (;;) {
		u32 curr_seqno = buf_seqno(queue_buf);

		if (seq_no == curr_seqno) {
			kfree_skb(buf);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;

		prev = &queue_buf->next;
		queue_buf = queue_buf->next;
	}

	buf->next = queue_buf;
	*prev = buf;
	return 1;
}

/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		link_recv_proto_msg(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
				&l_ptr->newest_deferred_in, buf)) {
		l_ptr->deferred_inqueue_sz++;
		l_ptr->stats.deferred_recv++;
		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else
		l_ptr->stats.duplicates++;
}
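/*
 * MTU probing note: while the negotiated max_pkt is below max_pkt_target,
 * probe STATE messages are padded to roughly half-way between the two
 * (see tipc_link_send_proto_msg() below); after 10 unanswered probes of
 * one size the target is lowered to just under the probed size, so the
 * discovered MTU converges by bisection.
 */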
/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
			      int probe_msg, u32 gap, u32 tolerance,
			      u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Discard any previous message that was deferred due to congestion */
	if (l_ptr->proto_msg_queue) {
		kfree_skb(l_ptr->proto_msg_queue);
		l_ptr->proto_msg_queue = NULL;
	}

	/* Don't send protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = buf_seqno(l_ptr->next_out);
		msg_set_next_sent(msg, next_sent);
		if (l_ptr->oldest_deferred_in) {
			u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;

	tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
	l_ptr->unacked_window = 0;
	kfree_skb(buf);
}

/*
 * Receive protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
 */
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Discard protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		goto exit;

	/* record unnumbered packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
		if (tipc_own_addr > msg_prevnode(msg))
			l_ptr->b_ptr->net_plane = msg_net_plane(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
						 link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->block_setup = WAIT_NODE_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according to the other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_warn("%s<%s>, priority change %u->%u\n",
				link_rst_msg, l_ptr->name, l_ptr->priority,
				msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, rec_gap, 0, 0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, l_ptr->first_out,
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}


/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *buf;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	buf = tipc_buf_acquire(length + INT_H_SIZE);
	if (!buf) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
	tipc_link_send_buf(tunnel, buf);
}


/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	u32 msgcount = l_ptr->out_queue_size;
	struct sk_buff *crs = l_ptr->first_out;
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (!l_ptr->first_out) {
		struct sk_buff *buf;

		buf = tipc_buf_acquire(INT_H_SIZE);
		if (buf) {
			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			tipc_link_send_buf(tunnel, buf);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	while (crs) {
		struct tipc_msg *msg = buf_msg(crs);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
		crs = crs->next;
	}
}
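/*
 * Illustrative sketch (hypothetical caller, owner node lock assumed
 * held): failover is only meaningful while another link to the same
 * node remains active:
 *
 *	struct tipc_link *standby = l_ptr->owner->active_links[0];
 *
 *	if (standby && standby != l_ptr)
 *		tipc_link_failover_send_queue(l_ptr);
 */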
/* tipc_link_dup_send_queue(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
			      struct tipc_link *tunnel)
{
	struct sk_buff *iter;
	struct tipc_msg tunnel_hdr;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	iter = l_ptr->first_out;
	while (iter) {
		struct sk_buff *outbuf;
		struct tipc_msg *msg = buf_msg(iter);
		u32 length = msg_size(msg);

		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
		if (outbuf == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
					       length);
		tipc_link_send_buf(tunnel, outbuf);
		if (!tipc_link_is_up(l_ptr))
			return;
		iter = iter->next;
	}
}

/**
 * buf_extract - extracts embedded TIPC message from another message
 * @skb: encapsulating message buffer
 * @from_pos: offset to extract from
 *
 * Returns a new message buffer containing an embedded message. The
 * encapsulating message itself is left unchanged.
 */
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
	u32 size = msg_size(msg);
	struct sk_buff *eb;

	eb = tipc_buf_acquire(size);
	if (eb)
		skb_copy_to_linear_data(eb, msg, size);
	return eb;
}
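/*
 * Illustrative sketch: buf_extract() is the common primitive for both
 * tunneled and bundled packets; pulling the first embedded message out
 * of an encapsulating buffer and routing it would look like:
 *
 *	struct sk_buff *eb = buf_extract(encap_buf, INT_H_SIZE);
 *
 *	if (eb)
 *		tipc_net_route_msg(eb);
 */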
/* tipc_link_tunnel_rcv(): Receive a tunneled packet, sent
 * via another link as a result of a failover (ORIGINAL_MSG) or
 * a new active link (DUPLICATE_MSG). Failover packets are
 * returned to the active link for delivery upwards.
 * Owner node is locked.
 */
static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
				struct sk_buff **buf)
{
	struct sk_buff *tunnel_buf = *buf;
	struct tipc_link *dest_link;
	struct tipc_msg *msg;
	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
	u32 msg_typ = msg_type(tunnel_msg);
	u32 msg_count = msg_msgcnt(tunnel_msg);
	u32 bearer_id = msg_bearer_id(tunnel_msg);

	if (bearer_id >= MAX_BEARERS)
		goto exit;
	dest_link = (*l_ptr)->owner->links[bearer_id];
	if (!dest_link)
		goto exit;
	if (dest_link == *l_ptr) {
		pr_err("Unexpected changeover message on link <%s>\n",
		       (*l_ptr)->name);
		goto exit;
	}
	*l_ptr = dest_link;
	msg = msg_get_wrapped(tunnel_msg);

	if (msg_typ == DUPLICATE_MSG) {
		if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
			goto exit;
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf == NULL) {
			pr_warn("%sduplicate msg dropped\n", link_co_err);
			goto exit;
		}
		kfree_skb(tunnel_buf);
		return 1;
	}

	/* First original message? */
	if (tipc_link_is_up(dest_link)) {
		pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
			dest_link->name);
		tipc_link_reset(dest_link);
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	}

	/* Receive original message */
	if (dest_link->exp_msg_count == 0) {
		pr_warn("%sgot too many tunnelled messages\n", link_co_err);
		goto exit;
	}
	dest_link->exp_msg_count--;
	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
		goto exit;
	} else {
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf != NULL) {
			kfree_skb(tunnel_buf);
			return 1;
		} else {
			pr_warn("%soriginal msg dropped\n", link_co_err);
		}
	}
exit:
	*buf = NULL;
	kfree_skb(tunnel_buf);
	return 0;
}

/*
 * Bundler functionality:
 */
void tipc_link_recv_bundle(struct sk_buff *buf)
{
	u32 msgcount = msg_msgcnt(buf_msg(buf));
	u32 pos = INT_H_SIZE;
	struct sk_buff *obuf;

	while (msgcount--) {
		obuf = buf_extract(buf, pos);
		if (obuf == NULL) {
			pr_warn("Link unable to unbundle message(s)\n");
			break;
		}
		pos += align(msg_size(buf_msg(obuf)));
		tipc_net_route_msg(obuf);
	}
	kfree_skb(buf);
}
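/*
 * Bundle buffer layout assumed by tipc_link_recv_bundle(): embedded
 * messages follow the INT_H_SIZE bundle header back to back, each
 * padded to a 4-byte boundary, so successive extraction offsets are
 *
 *	INT_H_SIZE,
 *	INT_H_SIZE + align(size of msg 1),
 *	INT_H_SIZE + align(size of msg 1) + align(size of msg 2), ...
 */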
/*
 * Fragmentation/defragmentation:
 */

/*
 * link_send_long_buf: Entry for buffers needing fragmentation.
 * The buffer is complete, including total message length.
 * Returns user data length.
 */
static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct sk_buff *buf_chain = NULL;
	struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
	struct tipc_msg *inmsg = buf_msg(buf);
	struct tipc_msg fragm_hdr;
	u32 insize = msg_size(inmsg);
	u32 dsz = msg_data_sz(inmsg);
	unchar *crs = buf->data;
	u32 rest = insize;
	u32 pack_sz = l_ptr->max_pkt;
	u32 fragm_sz = pack_sz - INT_H_SIZE;
	u32 fragm_no = 0;
	u32 destaddr;

	if (msg_short(inmsg))
		destaddr = l_ptr->addr;
	else
		destaddr = msg_destnode(inmsg);

	/* Prepare reusable fragment header: */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, destaddr);

	/* Chop up message: */
	while (rest > 0) {
		struct sk_buff *fragm;

		if (rest <= fragm_sz) {
			fragm_sz = rest;
			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
		}
		fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
		if (fragm == NULL) {
			kfree_skb(buf);
			kfree_skb_list(buf_chain);
			return -ENOMEM;
		}
		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
		fragm_no++;
		msg_set_fragm_no(&fragm_hdr, fragm_no);
		skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
					       fragm_sz);
		buf_chain_tail->next = fragm;
		buf_chain_tail = fragm;

		rest -= fragm_sz;
		crs += fragm_sz;
		msg_set_type(&fragm_hdr, FRAGMENT);
	}
	kfree_skb(buf);

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);

	return dsz;
}

/*
 * tipc_link_recv_fragment(): Called with node lock on. Returns
 * the reassembled buffer if message is complete.
 */
int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
			    struct sk_buff **fbuf)
{
	struct sk_buff *frag = *fbuf;
	struct tipc_msg *msg = buf_msg(frag);
	u32 fragid = msg_type(msg);
	bool headstolen;
	int delta;

	skb_pull(frag, msg_hdr_sz(msg));
	if (fragid == FIRST_FRAGMENT) {
		if (*head || skb_unclone(frag, GFP_ATOMIC))
			goto out_free;
		*head = frag;
		skb_frag_list_init(*head);
		return 0;
	} else if (*head &&
		   skb_try_coalesce(*head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		if (!*head)
			goto out_free;
		if (!skb_has_frag_list(*head))
			skb_shinfo(*head)->frag_list = frag;
		else
			(*tail)->next = frag;
		*tail = frag;
		(*head)->truesize += frag->truesize;
	}
	if (fragid == LAST_FRAGMENT) {
		*fbuf = *head;
		*tail = *head = NULL;
		return LINK_REASM_COMPLETE;
	}
	return 0;
out_free:
	pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
	kfree_skb(*fbuf);
	return LINK_REASM_ERROR;
}
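/*
 * Illustrative sketch of the reassembly contract (hypothetical receive
 * loop; 'head' and 'tail' must start out NULL and persist across calls
 * for a given link):
 *
 *	int ret = tipc_link_recv_fragment(&head, &tail, &buf);
 *
 *	if (ret == LINK_REASM_COMPLETE)
 *		tipc_net_route_msg(buf);	(buf is now the full message)
 *	else if (ret == LINK_REASM_ERROR)
 *		tipc_link_reset(l_ptr);
 */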
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
{
	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
		return;

	l_ptr->tolerance = tolerance;
	l_ptr->continuity_interval =
		((tolerance / 4) > 500) ? 500 : tolerance / 4;
	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
}

void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
	/* Data messages from this node, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
	/* Transiting data messages, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
	l_ptr->queue_limit[CONN_MANAGER] = 1200;
	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
	/* FRAGMENT and LAST_FRAGMENT packets */
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}

/**
 * link_find_link - locate link by name
 * @name: ptr to link name string
 * @node: ptr to area to be filled with ptr to associated node
 *
 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
 * this also prevents link deletion.
 *
 * Returns pointer to link (or NULL if invalid link name).
 */
static struct tipc_link *link_find_link(const char *name,
					struct tipc_node **node)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	int i;

	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
			if (l_ptr && !strcmp(l_ptr->name, name))
				goto found;
		}
	}
	l_ptr = NULL;
	n_ptr = NULL;
found:
	*node = n_ptr;
	return l_ptr;
}

/**
 * link_value_is_valid - validate proposed link tolerance/priority/window
 * @cmd: value type (TIPC_CMD_SET_LINK_*)
 * @new_value: the new value
 *
 * Returns 1 if value is within range, 0 if not.
 */
static int link_value_is_valid(u16 cmd, u32 new_value)
{
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		return (new_value >= TIPC_MIN_LINK_TOL) &&
		       (new_value <= TIPC_MAX_LINK_TOL);
	case TIPC_CMD_SET_LINK_PRI:
		return (new_value <= TIPC_MAX_LINK_PRI);
	case TIPC_CMD_SET_LINK_WINDOW:
		return (new_value >= TIPC_MIN_LINK_WIN) &&
		       (new_value <= TIPC_MAX_LINK_WIN);
	}
	return 0;
}
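/*
 * Example (bounds taken from the TIPC config headers): the default
 * link tolerance of 1500 ms lies within
 * [TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL], so
 * link_value_is_valid(TIPC_CMD_SET_LINK_TOL, 1500) returns 1, while a
 * window of 0 fails the TIPC_MIN_LINK_WIN check and returns 0.
 */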
2432 */ 2433 tipc_node_lock(node); 2434 switch (cmd) { 2435 case TIPC_CMD_SET_LINK_TOL: 2436 link_set_supervision_props(l_ptr, new_value); 2437 tipc_link_send_proto_msg(l_ptr, 2438 STATE_MSG, 0, 0, new_value, 0, 0); 2439 break; 2440 case TIPC_CMD_SET_LINK_PRI: 2441 l_ptr->priority = new_value; 2442 tipc_link_send_proto_msg(l_ptr, 2443 STATE_MSG, 0, 0, 0, new_value, 0); 2444 break; 2445 case TIPC_CMD_SET_LINK_WINDOW: 2446 tipc_link_set_queue_limits(l_ptr, new_value); 2447 break; 2448 default: 2449 res = -EINVAL; 2450 break; 2451 } 2452 tipc_node_unlock(node); 2453 return res; 2454 } 2455 2456 b_ptr = tipc_bearer_find(name); 2457 if (b_ptr) { 2458 switch (cmd) { 2459 case TIPC_CMD_SET_LINK_TOL: 2460 b_ptr->tolerance = new_value; 2461 break; 2462 case TIPC_CMD_SET_LINK_PRI: 2463 b_ptr->priority = new_value; 2464 break; 2465 case TIPC_CMD_SET_LINK_WINDOW: 2466 b_ptr->window = new_value; 2467 break; 2468 default: 2469 res = -EINVAL; 2470 break; 2471 } 2472 return res; 2473 } 2474 2475 m_ptr = tipc_media_find(name); 2476 if (!m_ptr) 2477 return -ENODEV; 2478 switch (cmd) { 2479 case TIPC_CMD_SET_LINK_TOL: 2480 m_ptr->tolerance = new_value; 2481 break; 2482 case TIPC_CMD_SET_LINK_PRI: 2483 m_ptr->priority = new_value; 2484 break; 2485 case TIPC_CMD_SET_LINK_WINDOW: 2486 m_ptr->window = new_value; 2487 break; 2488 default: 2489 res = -EINVAL; 2490 break; 2491 } 2492 return res; 2493 } 2494 2495 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, 2496 u16 cmd) 2497 { 2498 struct tipc_link_config *args; 2499 u32 new_value; 2500 int res; 2501 2502 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) 2503 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2504 2505 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area); 2506 new_value = ntohl(args->value); 2507 2508 if (!link_value_is_valid(cmd, new_value)) 2509 return tipc_cfg_reply_error_string( 2510 "cannot change, value invalid"); 2511 2512 if (!strcmp(args->name, tipc_bclink_name)) { 2513 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && 2514 (tipc_bclink_set_queue_limits(new_value) == 0)) 2515 return tipc_cfg_reply_none(); 2516 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 2517 " (cannot change setting on broadcast link)"); 2518 } 2519 2520 read_lock_bh(&tipc_net_lock); 2521 res = link_cmd_set_value(args->name, new_value, cmd); 2522 read_unlock_bh(&tipc_net_lock); 2523 if (res) 2524 return tipc_cfg_reply_error_string("cannot change link setting"); 2525 2526 return tipc_cfg_reply_none(); 2527 } 2528 2529 /** 2530 * link_reset_statistics - reset link statistics 2531 * @l_ptr: pointer to link 2532 */ 2533 static void link_reset_statistics(struct tipc_link *l_ptr) 2534 { 2535 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); 2536 l_ptr->stats.sent_info = l_ptr->next_out_no; 2537 l_ptr->stats.recv_info = l_ptr->next_in_no; 2538 } 2539 2540 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) 2541 { 2542 char *link_name; 2543 struct tipc_link *l_ptr; 2544 struct tipc_node *node; 2545 2546 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2547 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2548 2549 link_name = (char *)TLV_DATA(req_tlv_area); 2550 if (!strcmp(link_name, tipc_bclink_name)) { 2551 if (tipc_bclink_reset_stats()) 2552 return tipc_cfg_reply_error_string("link not found"); 2553 return tipc_cfg_reply_none(); 2554 } 2555 2556 read_lock_bh(&tipc_net_lock); 2557 l_ptr = link_find_link(link_name, &node); 2558 if (!l_ptr) { 2559 
/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}

struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
{
	char *link_name;
	struct tipc_link *l_ptr;
	struct tipc_node *node;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	link_name = (char *)TLV_DATA(req_tlv_area);
	if (!strcmp(link_name, tipc_bclink_name)) {
		if (tipc_bclink_reset_stats())
			return tipc_cfg_reply_error_string("link not found");
		return tipc_cfg_reply_none();
	}

	read_lock_bh(&tipc_net_lock);
	l_ptr = link_find_link(link_name, &node);
	if (!l_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string("link not found");
	}

	tipc_node_lock(node);
	link_reset_statistics(l_ptr);
	tipc_node_unlock(node);
	read_unlock_bh(&tipc_net_lock);
	return tipc_cfg_reply_none();
}

/**
 * percent - convert count to a percentage of total (rounded to nearest)
 */
static u32 percent(u32 count, u32 total)
{
	return (count * 100 + (total / 2)) / total;
}

/**
 * tipc_link_stats - print link statistics
 * @name: link name
 * @buf: print buffer area
 * @buf_size: size of print buffer area
 *
 * Returns length of print buffer data string (or 0 if error)
 */
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
	struct tipc_link *l;
	struct tipc_stats *s;
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;
	int ret;

	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);

	read_lock_bh(&tipc_net_lock);
	l = link_find_link(name, &node);
	if (!l) {
		read_unlock_bh(&tipc_net_lock);
		return 0;
	}
	tipc_node_lock(node);
	s = &l->stats;

	if (tipc_link_is_active(l))
		status = "ACTIVE";
	else if (tipc_link_is_up(l))
		status = "STANDBY";
	else
		status = "DEFUNCT";

	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    " %s MTU:%u Priority:%u Tolerance:%u ms"
			    " Window:%u packets\n",
			    l->name, status, l->max_pkt, l->priority,
			    l->tolerance, l->queue_limit[0]);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_in_no - s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_out_no - s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);

	profile_total = s->msg_length_counts;
	if (!profile_total)
		profile_total = 1;

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX profile sample:%u packets average:%u octets\n"
			     " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
			     "-16384:%u%% -32768:%u%% -66000:%u%%\n",
			     s->msg_length_counts,
			     s->msg_lengths_total / profile_total,
			     percent(s->msg_length_profile[0], profile_total),
			     percent(s->msg_length_profile[1], profile_total),
			     percent(s->msg_length_profile[2], profile_total),
			     percent(s->msg_length_profile[3], profile_total),
			     percent(s->msg_length_profile[4], profile_total),
			     percent(s->msg_length_profile[5], profile_total),
			     percent(s->msg_length_profile[6], profile_total));

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " RX states:%u probes:%u naks:%u defs:%u"
			     " dups:%u\n", s->recv_states, s->recv_probes,
			     s->recv_nacks, s->deferred_recv, s->duplicates);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX states:%u probes:%u naks:%u acks:%u"
			     " dups:%u\n", s->sent_states, s->sent_probes,
			     s->sent_nacks, s->sent_acks, s->retransmitted);

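	/*
	 * Average send-queue depth is accu_queue_sz / queue_sz_counts; the
	 * conditional below avoids dividing by zero when no queue samples
	 * have been recorded yet.
	 */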
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " Congestion link:%u Send queue"
			     " max:%u avg:%u\n", s->link_congs,
			     s->max_queue_sz, s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	tipc_node_unlock(node);
	read_unlock_bh(&tipc_net_lock);
	return ret;
}

struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	int str_len;
	int pb_len;
	char *pb;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
	if (!buf)
		return NULL;

	rep_tlv = (struct tlv_desc *)buf->data;
	pb = TLV_DATA(rep_tlv);
	pb_len = ULTRA_STRING_MAX_LEN;
	str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
				  pb, pb_len);
	if (!str_len) {
		kfree_skb(buf);
		return tipc_cfg_reply_error_string("link not found");
	}
	str_len += 1;	/* for "\0" */
	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}

/**
 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
 * @dest: network address of destination node
 * @selector: used to select from set of active links
 *
 * If no active link can be found, uses default maximum packet size.
 */
u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	u32 res = MAX_PKT_DEFAULT;

	if (dest == tipc_own_addr)
		return MAX_MSG_SIZE;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = l_ptr->max_pkt;
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}

static void link_print(struct tipc_link *l_ptr, const char *str)
{
	pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name);

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}
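/*
 * Illustrative sketch (hypothetical sender): sizing a message against
 * the per-destination MTU before building it; destinations without an
 * active link fall back to MAX_PKT_DEFAULT:
 *
 *	u32 mtu = tipc_link_get_max_pkt(destnode, msg_link_selector(msg));
 *
 *	if (msg_size(msg) > mtu)
 *		... hand the buffer to link_send_long_buf() ...
 */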