xref: /openbmc/linux/net/tipc/link.c (revision 7fe2f639)
1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include "core.h"
38 #include "link.h"
39 #include "port.h"
40 #include "name_distr.h"
41 #include "discover.h"
42 #include "config.h"
43 
44 
45 /*
46  * Out-of-range value for link session numbers
47  */
48 
49 #define INVALID_SESSION 0x10000
50 
51 /*
52  * Link state events:
53  */
54 
55 #define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* traffic msg received from peer */
57 #define  TIMEOUT_EVT     560817u	/* link timer expired */
58 
/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
64 #define OPEN_MSG   0
65 #define CLOSED_MSG 1
66 
67 /*
68  * State value stored in 'exp_msg_count'
69  */
70 
71 #define START_CHANGEOVER 100000u
72 
73 /**
74  * struct link_name - deconstructed link name
75  * @addr_local: network address of node at this end
76  * @if_local: name of interface at this end
77  * @addr_peer: network address of node at far end
78  * @if_peer: name of interface at far end
79  */
80 
struct link_name {
	u32 addr_local;				/* network address of node at this end */
	char if_local[TIPC_MAX_IF_NAME];	/* bearer interface name at this end */
	u32 addr_peer;				/* network address of node at far end */
	char if_peer[TIPC_MAX_IF_NAME];		/* bearer interface name at far end */
};
87 
88 static void link_handle_out_of_seq_msg(struct link *l_ptr,
89 				       struct sk_buff *buf);
90 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
91 static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
92 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
93 static int  link_send_sections_long(struct tipc_port *sender,
94 				    struct iovec const *msg_sect,
95 				    u32 num_sect, unsigned int total_len,
96 				    u32 destnode);
97 static void link_check_defragm_bufs(struct link *l_ptr);
98 static void link_state_event(struct link *l_ptr, u32 event);
99 static void link_reset_statistics(struct link *l_ptr);
100 static void link_print(struct link *l_ptr, const char *str);
101 static void link_start(struct link *l_ptr);
102 static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
103 
104 /*
105  *  Simple link routines
106  */
107 
/* Round a length up to the next multiple of 4 bytes (TIPC word alignment) */
static unsigned int align(unsigned int i)
{
	unsigned int remainder = i % 4u;

	return remainder ? (i + (4u - remainder)) : i;
}
112 
113 static void link_init_max_pkt(struct link *l_ptr)
114 {
115 	u32 max_pkt;
116 
117 	max_pkt = (l_ptr->b_ptr->mtu & ~3);
118 	if (max_pkt > MAX_MSG_SIZE)
119 		max_pkt = MAX_MSG_SIZE;
120 
121 	l_ptr->max_pkt_target = max_pkt;
122 	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
123 		l_ptr->max_pkt = l_ptr->max_pkt_target;
124 	else
125 		l_ptr->max_pkt = MAX_PKT_DEFAULT;
126 
127 	l_ptr->max_pkt_probes = 0;
128 }
129 
130 static u32 link_next_sent(struct link *l_ptr)
131 {
132 	if (l_ptr->next_out)
133 		return msg_seqno(buf_msg(l_ptr->next_out));
134 	return mod(l_ptr->next_out_no);
135 }
136 
137 static u32 link_last_sent(struct link *l_ptr)
138 {
139 	return mod(link_next_sent(l_ptr) - 1);
140 }
141 
142 /*
143  *  Simple non-static link routines (i.e. referenced outside this file)
144  */
145 
146 int tipc_link_is_up(struct link *l_ptr)
147 {
148 	if (!l_ptr)
149 		return 0;
150 	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
151 }
152 
153 int tipc_link_is_active(struct link *l_ptr)
154 {
155 	return	(l_ptr->owner->active_links[0] == l_ptr) ||
156 		(l_ptr->owner->active_links[1] == l_ptr);
157 }
158 
159 /**
160  * link_name_validate - validate & (optionally) deconstruct link name
 * @name: ptr to link name string
 * @name_parts: ptr to area for link name components (or NULL if not needed)
163  *
164  * Returns 1 if link name is valid, otherwise 0.
165  */
166 
static int link_name_validate(const char *name, struct link_name *name_parts)
{
	char name_copy[TIPC_MAX_LINK_NAME];
	char *addr_local;
	char *if_local;
	char *addr_peer;
	char *if_peer;
	char dummy;		/* catches trailing junk after "Z.C.N" */
	u32 z_local, c_local, n_local;
	u32 z_peer, c_peer, n_peer;
	u32 if_local_len;
	u32 if_peer_len;

	/* copy link name & ensure length is OK */

	name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
	/* need above in case non-Posix strncpy() doesn't pad with nulls */
	strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
	if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
		return 0;

	/* ensure all component parts of link name are present */
	/* (expected form: "Z.C.N:if_local-Z.C.N:if_peer"; the copy is cut
	 * into four substrings by overwriting the separators with NULs)
	 */

	addr_local = name_copy;
	if_local = strchr(addr_local, ':');
	if (if_local == NULL)
		return 0;
	*(if_local++) = 0;	/* terminate local address substring */
	addr_peer = strchr(if_local, '-');
	if (addr_peer == NULL)
		return 0;
	*(addr_peer++) = 0;	/* terminate local interface substring */
	if_local_len = addr_peer - if_local;	/* length incl. NUL terminator */
	if_peer = strchr(addr_peer, ':');
	if (if_peer == NULL)
		return 0;
	*(if_peer++) = 0;	/* terminate peer address substring */
	if_peer_len = strlen(if_peer) + 1;	/* length incl. NUL terminator */

	/* validate component parts of link name */
	/* sscanf() must match exactly 3 items (a 4th '%c' match means there
	 * was trailing junk); interface names must be non-empty, fit in
	 * TIPC_MAX_IF_NAME, and use only characters from tipc_alphabet
	 */

	if ((sscanf(addr_local, "%u.%u.%u%c",
		    &z_local, &c_local, &n_local, &dummy) != 3) ||
	    (sscanf(addr_peer, "%u.%u.%u%c",
		    &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
	    (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
	    (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
	    (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
	    (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) ||
	    (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
	    (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
		return 0;

	/* return link name components, if necessary */

	if (name_parts) {
		name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
		strcpy(name_parts->if_local, if_local);
		name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
		strcpy(name_parts->if_peer, if_peer);
	}
	return 1;
}
230 
231 /**
232  * link_timeout - handle expiration of link timer
233  * @l_ptr: pointer to link
234  *
235  * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
236  * with tipc_link_delete().  (There is no risk that the node will be deleted by
237  * another thread because tipc_link_delete() always cancels the link timer before
238  * tipc_node_delete() is called.)
239  */
240 
241 static void link_timeout(struct link *l_ptr)
242 {
243 	tipc_node_lock(l_ptr->owner);
244 
245 	/* update counters used in statistical profiling of send traffic */
246 
247 	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
248 	l_ptr->stats.queue_sz_counts++;
249 
250 	if (l_ptr->first_out) {
251 		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
252 		u32 length = msg_size(msg);
253 
254 		if ((msg_user(msg) == MSG_FRAGMENTER) &&
255 		    (msg_type(msg) == FIRST_FRAGMENT)) {
256 			length = msg_size(msg_get_wrapped(msg));
257 		}
258 		if (length) {
259 			l_ptr->stats.msg_lengths_total += length;
260 			l_ptr->stats.msg_length_counts++;
261 			if (length <= 64)
262 				l_ptr->stats.msg_length_profile[0]++;
263 			else if (length <= 256)
264 				l_ptr->stats.msg_length_profile[1]++;
265 			else if (length <= 1024)
266 				l_ptr->stats.msg_length_profile[2]++;
267 			else if (length <= 4096)
268 				l_ptr->stats.msg_length_profile[3]++;
269 			else if (length <= 16384)
270 				l_ptr->stats.msg_length_profile[4]++;
271 			else if (length <= 32768)
272 				l_ptr->stats.msg_length_profile[5]++;
273 			else
274 				l_ptr->stats.msg_length_profile[6]++;
275 		}
276 	}
277 
278 	/* do all other link processing performed on a periodic basis */
279 
280 	link_check_defragm_bufs(l_ptr);
281 
282 	link_state_event(l_ptr, TIMEOUT_EVT);
283 
284 	if (l_ptr->next_out)
285 		tipc_link_push_queue(l_ptr);
286 
287 	tipc_node_unlock(l_ptr->owner);
288 }
289 
/* (Re)arm the link supervision timer to expire after 'time' ticks */
static void link_set_timer(struct link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}
294 
295 /**
296  * tipc_link_create - create a new link
297  * @n_ptr: pointer to associated node
298  * @b_ptr: pointer to associated bearer
299  * @media_addr: media address to use when sending messages over link
300  *
301  * Returns pointer to link.
302  */
303 
struct link *tipc_link_create(struct tipc_node *n_ptr,
			      struct tipc_bearer *b_ptr,
			      const struct tipc_media_addr *media_addr)
{
	struct link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	/* A node supports at most two links */
	if (n_ptr->link_cnt >= 2) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		err("Attempt to establish third link to %s\n", addr_string);
		return NULL;
	}

	/* Only one link per bearer to a given node */
	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		err("Attempt to establish second link on <%s> to %s\n",
		    b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	/* NOTE(review): assumes bearer name always contains ':'; strchr()
	 * result is not checked — confirm callers guarantee this format
	 */
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f is appended to link name by reset/activate */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->b_ptr = b_ptr;
	link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	/* Pre-build the link protocol message header (RESET until activated) */
	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);

	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	INIT_LIST_HEAD(&l_ptr->waiting_ports);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	/* Arm supervision timer and defer FSM start-up to signal context */
	k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
	list_add_tail(&l_ptr->link_list, &b_ptr->links);
	tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);

	return l_ptr;
}
374 
375 /**
376  * tipc_link_delete - delete a link
377  * @l_ptr: pointer to link
378  *
379  * Note: 'tipc_net_lock' is write_locked, bearer is locked.
380  * This routine must not grab the node lock until after link timer cancellation
381  * to avoid a potential deadlock situation.
382  */
383 
void tipc_link_delete(struct link *l_ptr)
{
	if (!l_ptr) {
		err("Attempt to delete non-existent link\n");
		return;
	}

	/* Cancel timer BEFORE taking the node lock (see deadlock note above) */
	k_cancel_timer(&l_ptr->timer);

	tipc_node_lock(l_ptr->owner);
	tipc_link_reset(l_ptr);
	tipc_node_detach_link(l_ptr->owner, l_ptr);
	tipc_link_stop(l_ptr);		/* purge all queued messages */
	list_del_init(&l_ptr->link_list);
	tipc_node_unlock(l_ptr->owner);
	k_term_timer(&l_ptr->timer);
	kfree(l_ptr);
}
402 
/* Deferred link start-up (signal handler): feed STARTING_EVT to the FSM */
static void link_start(struct link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);
	link_state_event(l_ptr, STARTING_EVT);
	tipc_node_unlock(l_ptr->owner);
}
409 
410 /**
411  * link_schedule_port - schedule port for deferred sending
412  * @l_ptr: pointer to link
413  * @origport: reference to sending port
414  * @sz: amount of data to be sent
415  *
416  * Schedules port for renewed sending of messages after link congestion
417  * has abated.
418  */
419 
420 static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
421 {
422 	struct tipc_port *p_ptr;
423 
424 	spin_lock_bh(&tipc_port_list_lock);
425 	p_ptr = tipc_port_lock(origport);
426 	if (p_ptr) {
427 		if (!p_ptr->wakeup)
428 			goto exit;
429 		if (!list_empty(&p_ptr->wait_list))
430 			goto exit;
431 		p_ptr->congested = 1;
432 		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
433 		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
434 		l_ptr->stats.link_congs++;
435 exit:
436 		tipc_port_unlock(p_ptr);
437 	}
438 	spin_unlock_bh(&tipc_port_list_lock);
439 	return -ELINKCONG;
440 }
441 
442 void tipc_link_wakeup_ports(struct link *l_ptr, int all)
443 {
444 	struct tipc_port *p_ptr;
445 	struct tipc_port *temp_p_ptr;
446 	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
447 
448 	if (all)
449 		win = 100000;
450 	if (win <= 0)
451 		return;
452 	if (!spin_trylock_bh(&tipc_port_list_lock))
453 		return;
454 	if (link_congested(l_ptr))
455 		goto exit;
456 	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
457 				 wait_list) {
458 		if (win <= 0)
459 			break;
460 		list_del_init(&p_ptr->wait_list);
461 		spin_lock_bh(p_ptr->lock);
462 		p_ptr->congested = 0;
463 		p_ptr->wakeup(p_ptr);
464 		win -= p_ptr->waiting_pkts;
465 		spin_unlock_bh(p_ptr->lock);
466 	}
467 
468 exit:
469 	spin_unlock_bh(&tipc_port_list_lock);
470 }
471 
472 /**
473  * link_release_outqueue - purge link's outbound message queue
474  * @l_ptr: pointer to link
475  */
476 
477 static void link_release_outqueue(struct link *l_ptr)
478 {
479 	struct sk_buff *buf = l_ptr->first_out;
480 	struct sk_buff *next;
481 
482 	while (buf) {
483 		next = buf->next;
484 		buf_discard(buf);
485 		buf = next;
486 	}
487 	l_ptr->first_out = NULL;
488 	l_ptr->out_queue_size = 0;
489 }
490 
491 /**
492  * tipc_link_reset_fragments - purge link's inbound message fragments queue
493  * @l_ptr: pointer to link
494  */
495 
496 void tipc_link_reset_fragments(struct link *l_ptr)
497 {
498 	struct sk_buff *buf = l_ptr->defragm_buf;
499 	struct sk_buff *next;
500 
501 	while (buf) {
502 		next = buf->next;
503 		buf_discard(buf);
504 		buf = next;
505 	}
506 	l_ptr->defragm_buf = NULL;
507 }
508 
509 /**
510  * tipc_link_stop - purge all inbound and outbound messages associated with link
511  * @l_ptr: pointer to link
512  */
513 
514 void tipc_link_stop(struct link *l_ptr)
515 {
516 	struct sk_buff *buf;
517 	struct sk_buff *next;
518 
519 	buf = l_ptr->oldest_deferred_in;
520 	while (buf) {
521 		next = buf->next;
522 		buf_discard(buf);
523 		buf = next;
524 	}
525 
526 	buf = l_ptr->first_out;
527 	while (buf) {
528 		next = buf->next;
529 		buf_discard(buf);
530 		buf = next;
531 	}
532 
533 	tipc_link_reset_fragments(l_ptr);
534 
535 	buf_discard(l_ptr->proto_msg_queue);
536 	l_ptr->proto_msg_queue = NULL;
537 }
538 
539 /* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
540 #define link_send_event(fcn, l_ptr, up) do { } while (0)
541 
void tipc_link_reset(struct link *l_ptr)
{
	struct sk_buff *buf;
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);

	/* Start a new session for the next activation */
	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	/* Already reset: nothing further to tear down */
	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);

	/* If a parallel link remains active, set up for traffic changeover */
	if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
	    l_ptr->owner->permit_changeover) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */

	link_release_outqueue(l_ptr);
	buf_discard(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	buf = l_ptr->oldest_deferred_in;
	while (buf) {
		struct sk_buff *next = buf->next;
		buf_discard(buf);
		buf = next;
	}
	/* Release ports stuck waiting for this link's congestion to abate */
	if (!list_empty(&l_ptr->waiting_ports))
		tipc_link_wakeup_ports(l_ptr, 1);

	/* Reset all per-connection link state back to initial values */
	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);

	link_send_event(tipc_cfg_link_event, l_ptr, 0);
	if (!in_own_cluster(l_ptr->addr))
		link_send_event(tipc_disc_link_event, l_ptr, 0);
}
604 
605 
606 static void link_activate(struct link *l_ptr)
607 {
608 	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
609 	tipc_node_link_up(l_ptr->owner, l_ptr);
610 	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
611 	link_send_event(tipc_cfg_link_event, l_ptr, 1);
612 	if (!in_own_cluster(l_ptr->addr))
613 		link_send_event(tipc_disc_link_event, l_ptr, 1);
614 }
615 
616 /**
617  * link_state_event - link finite state machine
618  * @l_ptr: pointer to link
619  * @event: state machine event to process
620  */
621 
static void link_state_event(struct link *l_ptr, unsigned event)
{
	struct link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	/* Ignore all events until the link has been started */
	if (!l_ptr->started && (event != STARTING_EVT))
		return;		/* Not yet. */

	/* While blocked, only keep the supervision timer ticking */
	if (link_blocked(l_ptr)) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;	  /* Changeover going on */
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			/* Traffic arrived since last checkpoint: stay in WW */
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					/* probe flag set: continue max packet
					 * size negotiation
					 */
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			/* No traffic seen: probe the peer at 4x rate */
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			info("Resetting link <%s>, requested by peer\n",
			     l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			err("Unknown link event %u in WW state\n", event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			/* Peer is alive after all: back to fully working */
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			info("Resetting link <%s>, requested by peer "
			     "while probing\n", l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				/* Traffic arrived while probing: recover to WW */
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				/* Still silent: keep probing until abort limit */
				tipc_link_send_proto_msg(l_ptr, STATE_MSG,
							 1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				warn("Resetting link <%s>, peer not responding\n",
				     l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_send_proto_msg(l_ptr, RESET_MSG,
							 0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			err("Unknown link event %u in WU state\n", event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			/* Defer activation while a parallel link is probing */
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->started = 1;
			/* fall through */
		case TIMEOUT_EVT:
			/* Keep soliciting the peer with RESET messages */
			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			err("Unknown link event %u in RU state\n", event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			/* Defer activation while a parallel link is probing */
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			/* Keep soliciting the peer with ACTIVATE messages */
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			err("Unknown link event %u in RR state\n", event);
		}
		break;
	default:
		err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}
790 
791 /*
792  * link_bundle_buf(): Append contents of a buffer to
793  * the tail of an existing one.
794  */
795 
796 static int link_bundle_buf(struct link *l_ptr,
797 			   struct sk_buff *bundler,
798 			   struct sk_buff *buf)
799 {
800 	struct tipc_msg *bundler_msg = buf_msg(bundler);
801 	struct tipc_msg *msg = buf_msg(buf);
802 	u32 size = msg_size(msg);
803 	u32 bundle_size = msg_size(bundler_msg);
804 	u32 to_pos = align(bundle_size);
805 	u32 pad = to_pos - bundle_size;
806 
807 	if (msg_user(bundler_msg) != MSG_BUNDLER)
808 		return 0;
809 	if (msg_type(bundler_msg) != OPEN_MSG)
810 		return 0;
811 	if (skb_tailroom(bundler) < (pad + size))
812 		return 0;
813 	if (l_ptr->max_pkt < (to_pos + size))
814 		return 0;
815 
816 	skb_put(bundler, pad + size);
817 	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
818 	msg_set_size(bundler_msg, to_pos + size);
819 	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
820 	buf_discard(buf);
821 	l_ptr->stats.sent_bundled++;
822 	return 1;
823 }
824 
825 static void link_add_to_outqueue(struct link *l_ptr,
826 				 struct sk_buff *buf,
827 				 struct tipc_msg *msg)
828 {
829 	u32 ack = mod(l_ptr->next_in_no - 1);
830 	u32 seqno = mod(l_ptr->next_out_no++);
831 
832 	msg_set_word(msg, 2, ((ack << 16) | seqno));
833 	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
834 	buf->next = NULL;
835 	if (l_ptr->first_out) {
836 		l_ptr->last_out->next = buf;
837 		l_ptr->last_out = buf;
838 	} else
839 		l_ptr->first_out = l_ptr->last_out = buf;
840 
841 	l_ptr->out_queue_size++;
842 	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
843 		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
844 }
845 
846 static void link_add_chain_to_outqueue(struct link *l_ptr,
847 				       struct sk_buff *buf_chain,
848 				       u32 long_msgno)
849 {
850 	struct sk_buff *buf;
851 	struct tipc_msg *msg;
852 
853 	if (!l_ptr->next_out)
854 		l_ptr->next_out = buf_chain;
855 	while (buf_chain) {
856 		buf = buf_chain;
857 		buf_chain = buf_chain->next;
858 
859 		msg = buf_msg(buf);
860 		msg_set_long_msgno(msg, long_msgno);
861 		link_add_to_outqueue(l_ptr, buf, msg);
862 	}
863 }
864 
865 /*
866  * tipc_link_send_buf() is the 'full path' for messages, called from
867  * inside TIPC when the 'fast path' in tipc_send_buf
868  * has failed, and from link_send()
869  */
870 
871 int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
872 {
873 	struct tipc_msg *msg = buf_msg(buf);
874 	u32 size = msg_size(msg);
875 	u32 dsz = msg_data_sz(msg);
876 	u32 queue_size = l_ptr->out_queue_size;
877 	u32 imp = tipc_msg_tot_importance(msg);
878 	u32 queue_limit = l_ptr->queue_limit[imp];
879 	u32 max_packet = l_ptr->max_pkt;
880 
881 	msg_set_prevnode(msg, tipc_own_addr);	/* If routed message */
882 
883 	/* Match msg importance against queue limits: */
884 
885 	if (unlikely(queue_size >= queue_limit)) {
886 		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
887 			link_schedule_port(l_ptr, msg_origport(msg), size);
888 			buf_discard(buf);
889 			return -ELINKCONG;
890 		}
891 		buf_discard(buf);
892 		if (imp > CONN_MANAGER) {
893 			warn("Resetting link <%s>, send queue full", l_ptr->name);
894 			tipc_link_reset(l_ptr);
895 		}
896 		return dsz;
897 	}
898 
899 	/* Fragmentation needed ? */
900 
901 	if (size > max_packet)
902 		return link_send_long_buf(l_ptr, buf);
903 
904 	/* Packet can be queued or sent: */
905 
906 	if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
907 		   !link_congested(l_ptr))) {
908 		link_add_to_outqueue(l_ptr, buf, msg);
909 
910 		if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
911 			l_ptr->unacked_window = 0;
912 		} else {
913 			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
914 			l_ptr->stats.bearer_congs++;
915 			l_ptr->next_out = buf;
916 		}
917 		return dsz;
918 	}
919 	/* Congestion: can message be bundled ?: */
920 
921 	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
922 	    (msg_user(msg) != MSG_FRAGMENTER)) {
923 
924 		/* Try adding message to an existing bundle */
925 
926 		if (l_ptr->next_out &&
927 		    link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
928 			tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
929 			return dsz;
930 		}
931 
932 		/* Try creating a new bundle */
933 
934 		if (size <= max_packet * 2 / 3) {
935 			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
936 			struct tipc_msg bundler_hdr;
937 
938 			if (bundler) {
939 				tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
940 					 INT_H_SIZE, l_ptr->addr);
941 				skb_copy_to_linear_data(bundler, &bundler_hdr,
942 							INT_H_SIZE);
943 				skb_trim(bundler, INT_H_SIZE);
944 				link_bundle_buf(l_ptr, bundler, buf);
945 				buf = bundler;
946 				msg = buf_msg(buf);
947 				l_ptr->stats.sent_bundles++;
948 			}
949 		}
950 	}
951 	if (!l_ptr->next_out)
952 		l_ptr->next_out = buf;
953 	link_add_to_outqueue(l_ptr, buf, msg);
954 	tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
955 	return dsz;
956 }
957 
958 /*
959  * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
 * not been selected yet, and the owner node is not locked
961  * Called by TIPC internal users, e.g. the name distributor
962  */
963 
964 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
965 {
966 	struct link *l_ptr;
967 	struct tipc_node *n_ptr;
968 	int res = -ELINKCONG;
969 
970 	read_lock_bh(&tipc_net_lock);
971 	n_ptr = tipc_node_find(dest);
972 	if (n_ptr) {
973 		tipc_node_lock(n_ptr);
974 		l_ptr = n_ptr->active_links[selector & 1];
975 		if (l_ptr)
976 			res = tipc_link_send_buf(l_ptr, buf);
977 		else
978 			buf_discard(buf);
979 		tipc_node_unlock(n_ptr);
980 	} else {
981 		buf_discard(buf);
982 	}
983 	read_unlock_bh(&tipc_net_lock);
984 	return res;
985 }
986 
987 /*
988  * link_send_buf_fast: Entry for data messages where the
989  * destination link is known and the header is complete,
990  * inclusive total message length. Very time critical.
991  * Link is locked. Returns user data length.
992  */
993 
994 static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
995 			      u32 *used_max_pkt)
996 {
997 	struct tipc_msg *msg = buf_msg(buf);
998 	int res = msg_data_sz(msg);
999 
1000 	if (likely(!link_congested(l_ptr))) {
1001 		if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
1002 			if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1003 				link_add_to_outqueue(l_ptr, buf, msg);
1004 				if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
1005 							    &l_ptr->media_addr))) {
1006 					l_ptr->unacked_window = 0;
1007 					return res;
1008 				}
1009 				tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1010 				l_ptr->stats.bearer_congs++;
1011 				l_ptr->next_out = buf;
1012 				return res;
1013 			}
1014 		} else
1015 			*used_max_pkt = l_ptr->max_pkt;
1016 	}
1017 	return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
1018 }
1019 
1020 /*
1021  * tipc_send_buf_fast: Entry for data messages where the
1022  * destination node is known and the header is complete,
1023  * inclusive total message length.
1024  * Returns user data length.
1025  */
1026 int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1027 {
1028 	struct link *l_ptr;
1029 	struct tipc_node *n_ptr;
1030 	int res;
1031 	u32 selector = msg_origport(buf_msg(buf)) & 1;
1032 	u32 dummy;
1033 
1034 	if (destnode == tipc_own_addr)
1035 		return tipc_port_recv_msg(buf);
1036 
1037 	read_lock_bh(&tipc_net_lock);
1038 	n_ptr = tipc_node_find(destnode);
1039 	if (likely(n_ptr)) {
1040 		tipc_node_lock(n_ptr);
1041 		l_ptr = n_ptr->active_links[selector];
1042 		if (likely(l_ptr)) {
1043 			res = link_send_buf_fast(l_ptr, buf, &dummy);
1044 			tipc_node_unlock(n_ptr);
1045 			read_unlock_bh(&tipc_net_lock);
1046 			return res;
1047 		}
1048 		tipc_node_unlock(n_ptr);
1049 	}
1050 	read_unlock_bh(&tipc_net_lock);
1051 	res = msg_data_sz(buf_msg(buf));
1052 	tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1053 	return res;
1054 }
1055 
1056 
/*
 * tipc_link_send_sections_fast: Entry for messages where the
 * destination processor is known and the header is complete,
 * except for total message length.
 * Returns user data length or errno.
 */
int tipc_link_send_sections_fast(struct tipc_port *sender,
				 struct iovec const *msg_sect,
				 const u32 num_sect,
				 unsigned int total_len,
				 u32 destaddr)
{
	struct tipc_msg *hdr = &sender->phdr;
	struct link *l_ptr;
	struct sk_buff *buf;
	struct tipc_node *node;
	int res;
	u32 selector = msg_origport(hdr) & 1;	/* select link plane by port parity */

again:
	/*
	 * Try building message using port's max_pkt hint.
	 * (Must not hold any locks while building message.)
	 *
	 * On return, buf != NULL means a complete single-buffer message;
	 * buf == NULL with res < 0 means a build error; buf == NULL with
	 * res >= 0 apparently means "too big for hint" -- see branches
	 * below, which rely on exactly this distinction.
	 */

	res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
			     sender->max_pkt, !sender->user_port, &buf);

	read_lock_bh(&tipc_net_lock);
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[selector];
		if (likely(l_ptr)) {
			if (likely(buf)) {
				/* Hint was good: single-buffer fast path */
				res = link_send_buf_fast(l_ptr, buf,
							 &sender->max_pkt);
exit:
				/* Common unlock-and-return path (node lock held) */
				tipc_node_unlock(node);
				read_unlock_bh(&tipc_net_lock);
				return res;
			}

			/* Exit if build request was invalid */

			if (unlikely(res < 0))
				goto exit;

			/* Exit if link (or bearer) is congested */

			if (link_congested(l_ptr) ||
			    !list_empty(&l_ptr->b_ptr->cong_links)) {
				res = link_schedule_port(l_ptr,
							 sender->ref, res);
				goto exit;
			}

			/*
			 * Message size exceeds max_pkt hint; update hint,
			 * then re-try fast path or fragment the message
			 */

			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			read_unlock_bh(&tipc_net_lock);


			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
				goto again;	/* now fits: rebuild and retry */

			/* Still too big: go down the fragmentation path */
			return link_send_sections_long(sender, msg_sect,
						       num_sect, total_len,
						       destaddr);
		}
		tipc_node_unlock(node);
	}
	read_unlock_bh(&tipc_net_lock);

	/* Couldn't find a link to the destination node */

	if (buf)
		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	if (res >= 0)
		return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
						 total_len, TIPC_ERR_NO_NODE);
	return res;
}
1144 
/*
 * link_send_sections_long(): Entry for long messages where the
 * destination node is known and the header is complete,
 * inclusive total message length.
 * Link and bearer congestion status have been checked to be ok,
 * and are ignored if they change.
 *
 * Note that fragments do not use the full link MTU so that they won't have
 * to undergo refragmentation if link changeover causes them to be sent
 * over another link with an additional tunnel header added as prefix.
 * (Refragmentation will still occur if the other link has a smaller MTU.)
 *
 * Returns user data length or errno.
 */
static int link_send_sections_long(struct tipc_port *sender,
				   struct iovec const *msg_sect,
				   u32 num_sect,
				   unsigned int total_len,
				   u32 destaddr)
{
	struct link *l_ptr;
	struct tipc_node *node;
	struct tipc_msg *hdr = &sender->phdr;
	u32 dsz = total_len;	/* user data length, returned on success */
	u32 max_pkt, fragm_sz, rest;
	struct tipc_msg fragm_hdr;
	struct sk_buff *buf, *buf_chain, *prev;
	u32 fragm_crs, fragm_rest, hsz, sect_rest;
	const unchar *sect_crs;	/* read cursor within current iovec section */
	int curr_sect;
	u32 fragm_no;

again:
	fragm_no = 1;
	max_pkt = sender->max_pkt - INT_H_SIZE;
		/* leave room for tunnel header in case of link changeover */
	fragm_sz = max_pkt - INT_H_SIZE;
		/* leave room for fragmentation header in each fragment */
	rest = dsz;
	fragm_crs = 0;
	fragm_rest = 0;
	sect_rest = 0;
	sect_crs = NULL;
	curr_sect = -1;

	/* Prepare reusable fragment header: */

	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		 INT_H_SIZE, msg_destnode(hdr));
	msg_set_size(&fragm_hdr, max_pkt);
	msg_set_fragm_no(&fragm_hdr, 1);

	/* Prepare header of first fragment: */

	buf_chain = buf = tipc_buf_acquire(max_pkt);
	if (!buf)
		return -ENOMEM;
	buf->next = NULL;
	skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
	hsz = msg_hdr_sz(hdr);
	/* First fragment also carries the original message header */
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);

	/* Chop up message: */

	fragm_crs = INT_H_SIZE + hsz;
	fragm_rest = fragm_sz - hsz;

	do {		/* For all sections */
		u32 sz;

		/* Advance to next iovec section once current is consumed */
		if (!sect_rest) {
			sect_rest = msg_sect[++curr_sect].iov_len;
			sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
		}

		/* Copy no more than the current fragment can hold */
		if (sect_rest < fragm_rest)
			sz = sect_rest;
		else
			sz = fragm_rest;

		if (likely(!sender->user_port)) {
			if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
error:
				/* Release the whole partially built chain */
				for (; buf_chain; buf_chain = buf) {
					buf = buf_chain->next;
					buf_discard(buf_chain);
				}
				return -EFAULT;
			}
		} else
			skb_copy_to_linear_data_offset(buf, fragm_crs,
						       sect_crs, sz);
		sect_crs += sz;
		sect_rest -= sz;
		fragm_crs += sz;
		fragm_rest -= sz;
		rest -= sz;

		if (!fragm_rest && rest) {

			/* Initiate new fragment: */
			if (rest <= fragm_sz) {
				fragm_sz = rest;
				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
			} else {
				msg_set_type(&fragm_hdr, FRAGMENT);
			}
			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
			prev = buf;
			buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
			if (!buf)
				goto error;

			buf->next = NULL;
			prev->next = buf;
			skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
			fragm_crs = INT_H_SIZE;
			fragm_rest = fragm_sz;
		}
	} while (rest > 0);

	/*
	 * Now we have a buffer chain. Select a link and check
	 * that packet size is still OK
	 */
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[sender->ref & 1];
		if (!l_ptr) {
			tipc_node_unlock(node);
			goto reject;
		}
		if (l_ptr->max_pkt < max_pkt) {
			/* Link MTU shrank while we were building the chain:
			 * discard everything and refragment from scratch */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			for (; buf_chain; buf_chain = buf) {
				buf = buf_chain->next;
				buf_discard(buf_chain);
			}
			goto again;
		}
	} else {
reject:
		/* No node or no link: discard chain and reject to sender */
		for (; buf_chain; buf_chain = buf) {
			buf = buf_chain->next;
			buf_discard(buf_chain);
		}
		return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
						 total_len, TIPC_ERR_NO_NODE);
	}

	/* Append chain of fragments to send queue & send them */

	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);
	tipc_node_unlock(node);
	return dsz;
}
1308 
/*
 * tipc_link_push_packet: Push one unsent packet to the media
 *
 * Tries, in priority order: one pending retransmission, the deferred
 * protocol message, then one deferred data message (if the send window
 * permits). Returns 0 if a packet was sent, PUSH_FAILED if the bearer
 * refused it (congestion), or PUSH_FINISHED if there was nothing to push.
 */
u32 tipc_link_push_packet(struct link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any,    */
	/* consider that buffers may have been released in meantime */

	if (r_q_size && buf) {
		/* Clamp end of range to the last packet actually sent */
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = msg_seqno(buf_msg(buf));

		/* Skip packets already acknowledged and released */
		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */

	if (r_q_size && buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
			l_ptr->retransm_queue_head = mod(++r_q_head);
			l_ptr->retransm_queue_size = --r_q_size;
			l_ptr->stats.retransmitted++;
			return 0;
		} else {
			l_ptr->stats.bearer_congs++;
			return PUSH_FAILED;
		}
	}

	/* Send deferred protocol message, if any: */

	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
			l_ptr->unacked_window = 0;
			buf_discard(buf);
			l_ptr->proto_msg_queue = NULL;
			return 0;
		} else {
			l_ptr->stats.bearer_congs++;
			return PUSH_FAILED;
		}
	}

	/* Send one deferred data message, if send window not full: */

	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = msg_seqno(buf_msg(l_ptr->first_out));

		/* queue_limit[0] is the send window for this link */
		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
				if (msg_user(msg) == MSG_BUNDLER)
					msg_set_type(msg, CLOSED_MSG);
				l_ptr->next_out = buf->next;
				return 0;
			} else {
				l_ptr->stats.bearer_congs++;
				return PUSH_FAILED;
			}
		}
	}
	return PUSH_FINISHED;
}
1391 
1392 /*
1393  * push_queue(): push out the unsent messages of a link where
1394  *               congestion has abated. Node is locked
1395  */
1396 void tipc_link_push_queue(struct link *l_ptr)
1397 {
1398 	u32 res;
1399 
1400 	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
1401 		return;
1402 
1403 	do {
1404 		res = tipc_link_push_packet(l_ptr);
1405 	} while (!res);
1406 
1407 	if (res == PUSH_FAILED)
1408 		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1409 }
1410 
1411 static void link_reset_all(unsigned long addr)
1412 {
1413 	struct tipc_node *n_ptr;
1414 	char addr_string[16];
1415 	u32 i;
1416 
1417 	read_lock_bh(&tipc_net_lock);
1418 	n_ptr = tipc_node_find((u32)addr);
1419 	if (!n_ptr) {
1420 		read_unlock_bh(&tipc_net_lock);
1421 		return;	/* node no longer exists */
1422 	}
1423 
1424 	tipc_node_lock(n_ptr);
1425 
1426 	warn("Resetting all links to %s\n",
1427 	     tipc_addr_string_fill(addr_string, n_ptr->addr));
1428 
1429 	for (i = 0; i < MAX_BEARERS; i++) {
1430 		if (n_ptr->links[i]) {
1431 			link_print(n_ptr->links[i], "Resetting link\n");
1432 			tipc_link_reset(n_ptr->links[i]);
1433 		}
1434 	}
1435 
1436 	tipc_node_unlock(n_ptr);
1437 	read_unlock_bh(&tipc_net_lock);
1438 }
1439 
/*
 * link_retransmit_failure - react to repeated retransmission failure
 *
 * A unicast link (l_ptr->addr != 0) is simply reset. For the broadcast
 * link, diagnostic state is dumped and a reset of all links to the
 * lagging node is scheduled via tipc_k_signal() (it cannot be done here
 * directly).
 */
static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {

		/* Handle failure on standard link */

		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);

	} else {

		/* Handle failure on broadcast link */

		struct tipc_node *n_ptr;
		char addr_string[16];

		info("Msg seq number: %u,  ", msg_seqno(msg));
		info("Outstanding acks: %lu\n",
		     (unsigned long) TIPC_SKB_CB(buf)->handle);

		/* Dump broadcast state of the node holding things up */
		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		info("Multicast link info for %s\n", addr_string);
		info("Supported: %d,  ", n_ptr->bclink.supported);
		info("Acked: %u\n", n_ptr->bclink.acked);
		info("Last in: %u,  ", n_ptr->bclink.last_in);
		info("Gap after: %u,  ", n_ptr->bclink.gap_after);
		info("Gap to: %u\n", n_ptr->bclink.gap_to);
		info("Nack sync: %u\n\n", n_ptr->bclink.nack_sync);

		/* Defer the reset to signal context */
		tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);

		tipc_node_unlock(n_ptr);

		l_ptr->stale_count = 0;
	}
}
1483 
1484 void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1485 			  u32 retransmits)
1486 {
1487 	struct tipc_msg *msg;
1488 
1489 	if (!buf)
1490 		return;
1491 
1492 	msg = buf_msg(buf);
1493 
1494 	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1495 		if (l_ptr->retransm_queue_size == 0) {
1496 			l_ptr->retransm_queue_head = msg_seqno(msg);
1497 			l_ptr->retransm_queue_size = retransmits;
1498 		} else {
1499 			err("Unexpected retransmit on link %s (qsize=%d)\n",
1500 			    l_ptr->name, l_ptr->retransm_queue_size);
1501 		}
1502 		return;
1503 	} else {
1504 		/* Detect repeated retransmit failures on uncongested bearer */
1505 
1506 		if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1507 			if (++l_ptr->stale_count > 100) {
1508 				link_retransmit_failure(l_ptr, buf);
1509 				return;
1510 			}
1511 		} else {
1512 			l_ptr->last_retransmitted = msg_seqno(msg);
1513 			l_ptr->stale_count = 1;
1514 		}
1515 	}
1516 
1517 	while (retransmits && (buf != l_ptr->next_out) && buf) {
1518 		msg = buf_msg(buf);
1519 		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1520 		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1521 		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1522 			buf = buf->next;
1523 			retransmits--;
1524 			l_ptr->stats.retransmitted++;
1525 		} else {
1526 			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1527 			l_ptr->stats.bearer_congs++;
1528 			l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1529 			l_ptr->retransm_queue_size = retransmits;
1530 			return;
1531 		}
1532 	}
1533 
1534 	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1535 }
1536 
1537 /**
1538  * link_insert_deferred_queue - insert deferred messages back into receive chain
1539  */
1540 
1541 static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1542 						  struct sk_buff *buf)
1543 {
1544 	u32 seq_no;
1545 
1546 	if (l_ptr->oldest_deferred_in == NULL)
1547 		return buf;
1548 
1549 	seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1550 	if (seq_no == mod(l_ptr->next_in_no)) {
1551 		l_ptr->newest_deferred_in->next = buf;
1552 		buf = l_ptr->oldest_deferred_in;
1553 		l_ptr->oldest_deferred_in = NULL;
1554 		l_ptr->deferred_inqueue_sz = 0;
1555 	}
1556 	return buf;
1557 }
1558 
1559 /**
1560  * link_recv_buf_validate - validate basic format of received message
1561  *
1562  * This routine ensures a TIPC message has an acceptable header, and at least
1563  * as much data as the header indicates it should.  The routine also ensures
1564  * that the entire message header is stored in the main fragment of the message
1565  * buffer, to simplify future access to message header fields.
1566  *
1567  * Note: Having extra info present in the message header or data areas is OK.
1568  * TIPC will ignore the excess, under the assumption that it is optional info
1569  * introduced by a later release of the protocol.
1570  */
1571 
1572 static int link_recv_buf_validate(struct sk_buff *buf)
1573 {
1574 	static u32 min_data_hdr_size[8] = {
1575 		SHORT_H_SIZE, MCAST_H_SIZE, LONG_H_SIZE, DIR_MSG_H_SIZE,
1576 		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1577 		};
1578 
1579 	struct tipc_msg *msg;
1580 	u32 tipc_hdr[2];
1581 	u32 size;
1582 	u32 hdr_size;
1583 	u32 min_hdr_size;
1584 
1585 	if (unlikely(buf->len < MIN_H_SIZE))
1586 		return 0;
1587 
1588 	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1589 	if (msg == NULL)
1590 		return 0;
1591 
1592 	if (unlikely(msg_version(msg) != TIPC_VERSION))
1593 		return 0;
1594 
1595 	size = msg_size(msg);
1596 	hdr_size = msg_hdr_sz(msg);
1597 	min_hdr_size = msg_isdata(msg) ?
1598 		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1599 
1600 	if (unlikely((hdr_size < min_hdr_size) ||
1601 		     (size < hdr_size) ||
1602 		     (buf->len < size) ||
1603 		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1604 		return 0;
1605 
1606 	return pskb_may_pull(buf, hdr_size);
1607 }
1608 
/**
 * tipc_recv_msg - process TIPC messages arriving from off-node
 * @head: pointer to message buffer chain
 * @tb_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held.  Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */

void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
	read_lock_bh(&tipc_net_lock);
	while (head) {
		struct tipc_node *n_ptr;
		struct link *l_ptr;
		struct sk_buff *crs;
		struct sk_buff *buf = head;
		struct tipc_msg *msg;
		u32 seq_no;
		u32 ackd;
		u32 released = 0;
		int type;

		head = head->next;

		/* Ensure bearer is still enabled */

		if (unlikely(!b_ptr->active))
			goto cont;

		/* Ensure message is well-formed */

		if (unlikely(!link_recv_buf_validate(buf)))
			goto cont;

		/* Ensure message data is a single contiguous unit */

		if (unlikely(buf_linearize(buf)))
			goto cont;

		/* Handle arrival of a non-unicast link message */

		msg = buf_msg(buf);

		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) ==  LINK_CONFIG)
				tipc_disc_recv_msg(buf, b_ptr);
			else
				tipc_bclink_recv_pkt(buf);
			continue;
		}

		/* Discard unicast messages carrying another node's address */

		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
			goto cont;

		/* Discard non-routeable messages destined for another node */

		if (unlikely(!msg_isdata(msg) &&
			     (msg_destnode(msg) != tipc_own_addr))) {
			if ((msg_user(msg) != CONN_MANAGER) &&
			    (msg_user(msg) != MSG_FRAGMENTER))
				goto cont;
		}

		/* Locate neighboring node that sent message */

		n_ptr = tipc_node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto cont;
		tipc_node_lock(n_ptr);

		/* Don't talk to neighbor during cleanup after last session */

		if (n_ptr->cleanup_required) {
			tipc_node_unlock(n_ptr);
			goto cont;
		}

		/* Locate unicast link endpoint that should handle message */

		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr)) {
			tipc_node_unlock(n_ptr);
			goto cont;
		}

		/* Validate message sequence number info */

		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */

		if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
			if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
				tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
		}

		/* Free unicast buffers acknowledged by this message */
		crs = l_ptr->first_out;
		while ((crs != l_ptr->next_out) &&
		       less_eq(msg_seqno(buf_msg(crs)), ackd)) {
			struct sk_buff *next = crs->next;

			buf_discard(crs);
			crs = next;
			released++;
		}
		if (released) {
			l_ptr->first_out = crs;
			l_ptr->out_queue_size -= released;
		}

		/* Try sending any messages link endpoint has pending */

		if (unlikely(l_ptr->next_out))
			tipc_link_push_queue(l_ptr);
		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			tipc_link_wakeup_ports(l_ptr, 0);
		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

		/* Now (finally!) process the incoming message */

protocol_check:
		if (likely(link_working_working(l_ptr))) {
			if (likely(seq_no == mod(l_ptr->next_in_no))) {
				l_ptr->next_in_no++;
				if (unlikely(l_ptr->oldest_deferred_in))
					head = link_insert_deferred_queue(l_ptr,
									  head);
				if (likely(msg_is_dest(msg, tipc_own_addr))) {
deliver:
					if (likely(msg_isdata(msg))) {
						tipc_node_unlock(n_ptr);
						tipc_port_recv_msg(buf);
						continue;
					}
					/* Dispatch non-data message by user type */
					switch (msg_user(msg)) {
					case MSG_BUNDLER:
						l_ptr->stats.recv_bundles++;
						l_ptr->stats.recv_bundled +=
							msg_msgcnt(msg);
						tipc_node_unlock(n_ptr);
						tipc_link_recv_bundle(buf);
						continue;
					case NAME_DISTRIBUTOR:
						tipc_node_unlock(n_ptr);
						tipc_named_recv(buf);
						continue;
					case CONN_MANAGER:
						tipc_node_unlock(n_ptr);
						tipc_port_recv_proto_msg(buf);
						continue;
					case MSG_FRAGMENTER:
						l_ptr->stats.recv_fragments++;
						/* Reassembly complete: deliver the full message */
						if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
									    &buf, &msg)) {
							l_ptr->stats.recv_fragmented++;
							goto deliver;
						}
						break;
					case CHANGEOVER_PROTOCOL:
						type = msg_type(msg);
						/* Unwrapped a tunnelled message: reprocess it */
						if (link_recv_changeover_msg(&l_ptr, &buf)) {
							msg = buf_msg(buf);
							seq_no = msg_seqno(msg);
							if (type == ORIGINAL_MSG)
								goto deliver;
							goto protocol_check;
						}
						break;
					default:
						buf_discard(buf);
						buf = NULL;
						break;
					}
				}
				tipc_node_unlock(n_ptr);
				tipc_net_route_msg(buf);
				continue;
			}
			/* Out-of-sequence packet: defer or drop it */
			link_handle_out_of_seq_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}

		/* Link not in WORKING_WORKING state */

		if (msg_user(msg) == LINK_PROTOCOL) {
			link_recv_proto_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);

		if (link_working_working(l_ptr)) {
			/* Re-insert in front of queue */
			buf->next = head;
			head = buf;
			tipc_node_unlock(n_ptr);
			continue;
		}
		tipc_node_unlock(n_ptr);
cont:
		buf_discard(buf);
	}
	read_unlock_bh(&tipc_net_lock);
}
1820 
1821 /*
1822  * link_defer_buf(): Sort a received out-of-sequence packet
1823  *                   into the deferred reception queue.
1824  * Returns the increase of the queue length,i.e. 0 or 1
1825  */
1826 
1827 u32 tipc_link_defer_pkt(struct sk_buff **head,
1828 			struct sk_buff **tail,
1829 			struct sk_buff *buf)
1830 {
1831 	struct sk_buff *prev = NULL;
1832 	struct sk_buff *crs = *head;
1833 	u32 seq_no = msg_seqno(buf_msg(buf));
1834 
1835 	buf->next = NULL;
1836 
1837 	/* Empty queue ? */
1838 	if (*head == NULL) {
1839 		*head = *tail = buf;
1840 		return 1;
1841 	}
1842 
1843 	/* Last ? */
1844 	if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
1845 		(*tail)->next = buf;
1846 		*tail = buf;
1847 		return 1;
1848 	}
1849 
1850 	/* Scan through queue and sort it in */
1851 	do {
1852 		struct tipc_msg *msg = buf_msg(crs);
1853 
1854 		if (less(seq_no, msg_seqno(msg))) {
1855 			buf->next = crs;
1856 			if (prev)
1857 				prev->next = buf;
1858 			else
1859 				*head = buf;
1860 			return 1;
1861 		}
1862 		if (seq_no == msg_seqno(msg))
1863 			break;
1864 		prev = crs;
1865 		crs = crs->next;
1866 	} while (crs);
1867 
1868 	/* Message is a duplicate of an existing message */
1869 
1870 	buf_discard(buf);
1871 	return 0;
1872 }
1873 
1874 /**
1875  * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1876  */
1877 
1878 static void link_handle_out_of_seq_msg(struct link *l_ptr,
1879 				       struct sk_buff *buf)
1880 {
1881 	u32 seq_no = msg_seqno(buf_msg(buf));
1882 
1883 	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1884 		link_recv_proto_msg(l_ptr, buf);
1885 		return;
1886 	}
1887 
1888 	/* Record OOS packet arrival (force mismatch on next timeout) */
1889 
1890 	l_ptr->checkpoint--;
1891 
1892 	/*
1893 	 * Discard packet if a duplicate; otherwise add it to deferred queue
1894 	 * and notify peer of gap as per protocol specification
1895 	 */
1896 
1897 	if (less(seq_no, mod(l_ptr->next_in_no))) {
1898 		l_ptr->stats.duplicates++;
1899 		buf_discard(buf);
1900 		return;
1901 	}
1902 
1903 	if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1904 				&l_ptr->newest_deferred_in, buf)) {
1905 		l_ptr->deferred_inqueue_sz++;
1906 		l_ptr->stats.deferred_recv++;
1907 		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1908 			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1909 	} else
1910 		l_ptr->stats.duplicates++;
1911 }
1912 
/*
 * Send protocol message to the other endpoint.
 *
 * @msg_typ: STATE_MSG, RESET_MSG or ACTIVATE_MSG
 * @probe_msg: non-zero if a STATE_MSG should probe the peer (the message
 *	may then be padded to probe for a larger MTU)
 * @gap: sequence gap to report (recomputed here if packets sit in the
 *	deferred queue)
 * @tolerance, @priority, @ack_mtu: link parameters advertised in a STATE_MSG
 */
void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
			      u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;	/* reusable header template */
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	if (link_blocked(l_ptr))
		return;
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
	msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = msg_seqno(buf_msg(l_ptr->next_out));
		msg_set_next_sent(msg, next_sent);
		/* Report gap up to oldest deferred packet, if any */
		if (l_ptr->oldest_deferred_in) {
			u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			/* Probe for a larger MTU by padding the message to
			 * halfway between current and target size; after 10
			 * unanswered probes, lower the target instead */
			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	/* Tell peer whether we have a redundant working link to it */
	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);

	/* Use pseudo sequence number far outside the valid window, so this
	 * unnumbered message cannot pass for an in-sequence data packet */

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	/* Bearer congested? Park message in single-entry protocol queue */

	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
		if (!l_ptr->proto_msg_queue) {
			l_ptr->proto_msg_queue =
				tipc_buf_acquire(sizeof(l_ptr->proto_msg));
		}
		buf = l_ptr->proto_msg_queue;
		if (!buf)
			return;
		skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
		return;
	}

	/* Message can be sent */

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	msg_set_size(buf_msg(buf), msg_size);

	if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
		l_ptr->unacked_window = 0;
		buf_discard(buf);
		return;
	}

	/* New congestion */
	tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
	l_ptr->proto_msg_queue = buf;
	l_ptr->stats.bearer_congs++;
}
2021 
/*
 * Receive protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
 */

static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	if (link_blocked(l_ptr))
		goto exit;

	/* record unnumbered packet arrival (force mismatch on next timeout) */

	l_ptr->checkpoint--;

	/* Adopt peer's net plane if it has the lower node address */
	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
		if (tipc_own_addr > msg_prevnode(msg))
			l_ptr->b_ptr->net_plane = msg_net_plane(msg);

	l_ptr->owner->permit_changeover = msg_redundant_link(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		/* Ignore duplicate RESET from the same peer session */
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (msg_session(msg) == l_ptr->peer_session)
				break; /* duplicate: ignore */
		}
		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */

		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		/* Adopt the smaller of the two endpoints' MTU targets */
		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}
		l_ptr->owner->bclink.supported = (max_pkt_info != 0);

		link_state_event(l_ptr, msg_type(msg));

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		/* Synchronize broadcast sequence numbers */
		if (!tipc_node_redundant_links(l_ptr->owner))
			l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		/* Priority change requires a link reset to take effect */
		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			warn("Resetting link <%s>, priority change %u->%u\n",
			     l_ptr->name, l_ptr->priority, msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		/* Compute gap between what we have and what peer has sent */
		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		/* Peer confirmed a larger MTU: adopt it, stop probing */
		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			/* Echo received probe size back as MTU acknowledgment */
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */

		tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, rec_gap, 0, 0, max_pkt_ack);
		}
		/* Peer reported a gap: retransmit the missing packets */
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, l_ptr->first_out,
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	buf_discard(buf);
}
2145 
2146 
2147 /*
2148  * tipc_link_tunnel(): Send one message via a link belonging to
2149  * another bearer. Owner node is locked.
2150  */
2151 static void tipc_link_tunnel(struct link *l_ptr,
2152 			     struct tipc_msg *tunnel_hdr,
2153 			     struct tipc_msg  *msg,
2154 			     u32 selector)
2155 {
2156 	struct link *tunnel;
2157 	struct sk_buff *buf;
2158 	u32 length = msg_size(msg);
2159 
2160 	tunnel = l_ptr->owner->active_links[selector & 1];
2161 	if (!tipc_link_is_up(tunnel)) {
2162 		warn("Link changeover error, "
2163 		     "tunnel link no longer available\n");
2164 		return;
2165 	}
2166 	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2167 	buf = tipc_buf_acquire(length + INT_H_SIZE);
2168 	if (!buf) {
2169 		warn("Link changeover error, "
2170 		     "unable to send tunnel msg\n");
2171 		return;
2172 	}
2173 	skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
2174 	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
2175 	tipc_link_send_buf(tunnel, buf);
2176 }
2177 
2178 
2179 
2180 /*
2181  * changeover(): Send whole message queue via the remaining link
2182  *               Owner node is locked.
2183  */
2184 
void tipc_link_changeover(struct link *l_ptr)
{
	u32 msgcount = l_ptr->out_queue_size;
	struct sk_buff *crs = l_ptr->first_out;
	struct link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	int split_bundles;

	if (!tunnel)
		return;

	/* Peer grants permission via the redundant_link bit of its
	 * protocol messages (see permit_changeover update on receive)
	 */
	if (!l_ptr->owner->permit_changeover) {
		warn("Link changeover error, "
		     "peer did not permit changeover\n");
		return;
	}

	/* Reusable tunnel header; msgcnt tells the peer how many
	 * ORIGINAL_MSG packets to expect
	 */
	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	/* Empty send queue: still send a header-only packet (msgcnt == 0)
	 * so the peer learns that changeover has begun
	 */
	if (!l_ptr->first_out) {
		struct sk_buff *buf;

		buf = tipc_buf_acquire(INT_H_SIZE);
		if (buf) {
			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			tipc_link_send_buf(tunnel, buf);
		} else {
			warn("Link changeover error, "
			     "unable to send changeover msg\n");
		}
		return;
	}

	/* Bundles must be split when the node has two distinct active
	 * links, since their members may select different links
	 */
	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	while (crs) {
		struct tipc_msg *msg = buf_msg(crs);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			/* Tunnel each bundled message separately; each one
			 * inherits the bundle's sequence number
			 */
			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
						 msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
					 msg_link_selector(msg));
		}
		crs = crs->next;
	}
}
2247 
/*
 * tipc_link_send_duplicate(): Send a copy of each message in this
 * link's send queue via the tunnel link. Owner node is locked.
 */
void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
{
	struct sk_buff *iter;
	struct tipc_msg tunnel_hdr;

	/* Reusable DUPLICATE_MSG header; bearer id identifies which of
	 * the node's links the copies belong to
	 */
	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	iter = l_ptr->first_out;
	while (iter) {
		struct sk_buff *outbuf;
		struct tipc_msg *msg = buf_msg(iter);
		u32 length = msg_size(msg);

		/* Close open bundles so nothing more is appended to them */
		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
		if (outbuf == NULL) {
			warn("Link changeover error, "
			     "unable to send duplicate msg\n");
			return;
		}
		/* Tunnel header first, then the queued packet verbatim */
		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
					       length);
		tipc_link_send_buf(tunnel, outbuf);
		/* Stop if this link has gone down in the meantime */
		if (!tipc_link_is_up(l_ptr))
			return;
		iter = iter->next;
	}
}
2283 
2284 
2285 
2286 /**
2287  * buf_extract - extracts embedded TIPC message from another message
2288  * @skb: encapsulating message buffer
2289  * @from_pos: offset to extract from
2290  *
2291  * Returns a new message buffer containing an embedded message.  The
2292  * encapsulating message itself is left unchanged.
2293  */
2294 
2295 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2296 {
2297 	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2298 	u32 size = msg_size(msg);
2299 	struct sk_buff *eb;
2300 
2301 	eb = tipc_buf_acquire(size);
2302 	if (eb)
2303 		skb_copy_to_linear_data(eb, msg, size);
2304 	return eb;
2305 }
2306 
2307 /*
2308  *  link_recv_changeover_msg(): Receive tunneled packet sent
2309  *  via other link. Node is locked. Return extracted buffer.
2310  */
2311 
static int link_recv_changeover_msg(struct link **l_ptr,
				    struct sk_buff **buf)
{
	struct sk_buff *tunnel_buf = *buf;
	struct link *dest_link;
	struct tipc_msg *msg;
	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
	u32 msg_typ = msg_type(tunnel_msg);
	u32 msg_count = msg_msgcnt(tunnel_msg);

	/* The tunnel header's bearer id selects which of the node's links
	 * the wrapped packet really belongs to
	 */
	dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
	if (!dest_link)
		goto exit;
	if (dest_link == *l_ptr) {
		err("Unexpected changeover message on link <%s>\n",
		    (*l_ptr)->name);
		goto exit;
	}
	*l_ptr = dest_link;
	msg = msg_get_wrapped(tunnel_msg);

	if (msg_typ == DUPLICATE_MSG) {
		/* Drop duplicates of packets already received in sequence */
		if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
			goto exit;
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf == NULL) {
			warn("Link changeover error, duplicate msg dropped\n");
			goto exit;
		}
		buf_discard(tunnel_buf);
		return 1;
	}

	/* First ORIGINAL_MSG of a changeover?  Record how many tunnelled
	 * messages to expect, resetting the destination link if needed
	 */

	if (tipc_link_is_up(dest_link)) {
		info("Resetting link <%s>, changeover initiated by peer\n",
		     dest_link->name);
		tipc_link_reset(dest_link);
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	}

	/* Receive original message */

	if (dest_link->exp_msg_count == 0) {
		warn("Link switchover error, "
		     "got too many tunnelled messages\n");
		goto exit;
	}
	dest_link->exp_msg_count--;
	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
		/* Already received before the reset: drop silently */
		goto exit;
	} else {
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf != NULL) {
			buf_discard(tunnel_buf);
			return 1;
		} else {
			warn("Link changeover error, original msg dropped\n");
		}
	}
exit:
	/* Nothing extracted: consume the tunnel buffer and report failure */
	*buf = NULL;
	buf_discard(tunnel_buf);
	return 0;
}
2384 
2385 /*
2386  *  Bundler functionality:
2387  */
2388 void tipc_link_recv_bundle(struct sk_buff *buf)
2389 {
2390 	u32 msgcount = msg_msgcnt(buf_msg(buf));
2391 	u32 pos = INT_H_SIZE;
2392 	struct sk_buff *obuf;
2393 
2394 	while (msgcount--) {
2395 		obuf = buf_extract(buf, pos);
2396 		if (obuf == NULL) {
2397 			warn("Link unable to unbundle message(s)\n");
2398 			break;
2399 		}
2400 		pos += align(msg_size(buf_msg(obuf)));
2401 		tipc_net_route_msg(obuf);
2402 	}
2403 	buf_discard(buf);
2404 }
2405 
2406 /*
2407  *  Fragmentation/defragmentation:
2408  */
2409 
2410 
2411 /*
2412  * link_send_long_buf: Entry for buffers needing fragmentation.
2413  * The buffer is complete, inclusive total message length.
2414  * Returns user data length.
2415  */
static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
{
	struct sk_buff *buf_chain = NULL;
	/* Tail initially aliases the head pointer so the first append
	 * sets buf_chain itself
	 */
	struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
	struct tipc_msg *inmsg = buf_msg(buf);
	struct tipc_msg fragm_hdr;
	u32 insize = msg_size(inmsg);
	u32 dsz = msg_data_sz(inmsg);
	unchar *crs = buf->data;
	u32 rest = insize;
	u32 pack_sz = l_ptr->max_pkt;
	u32 fragm_sz = pack_sz - INT_H_SIZE;
	u32 fragm_no = 0;
	u32 destaddr;

	if (msg_short(inmsg))
		destaddr = l_ptr->addr;
	else
		destaddr = msg_destnode(inmsg);

	/* Prepare reusable fragment header: */

	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		 INT_H_SIZE, destaddr);

	/* Chop up message: */

	while (rest > 0) {
		struct sk_buff *fragm;

		if (rest <= fragm_sz) {
			fragm_sz = rest;
			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
		}
		fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
		if (fragm == NULL) {
			/* Allocation failure: release the original message
			 * and every fragment built so far
			 */
			buf_discard(buf);
			while (buf_chain) {
				buf = buf_chain;
				buf_chain = buf_chain->next;
				buf_discard(buf);
			}
			return -ENOMEM;
		}
		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
		fragm_no++;
		msg_set_fragm_no(&fragm_hdr, fragm_no);
		skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
					       fragm_sz);
		buf_chain_tail->next = fragm;
		buf_chain_tail = fragm;

		rest -= fragm_sz;
		crs += fragm_sz;
		/* Any fragment after the first is a plain FRAGMENT */
		msg_set_type(&fragm_hdr, FRAGMENT);
	}
	buf_discard(buf);

	/* Append chain of fragments to send queue & send them */

	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);

	return dsz;
}
2485 
2486 /*
2487  * A pending message being re-assembled must store certain values
2488  * to handle subsequent fragments correctly. The following functions
2489  * help storing these values in unused, available fields in the
2490  * pending message. This makes dynamic memory allocation unnecessary.
2491  */
2492 
2493 static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2494 {
2495 	msg_set_seqno(buf_msg(buf), seqno);
2496 }
2497 
2498 static u32 get_fragm_size(struct sk_buff *buf)
2499 {
2500 	return msg_ack(buf_msg(buf));
2501 }
2502 
2503 static void set_fragm_size(struct sk_buff *buf, u32 sz)
2504 {
2505 	msg_set_ack(buf_msg(buf), sz);
2506 }
2507 
2508 static u32 get_expected_frags(struct sk_buff *buf)
2509 {
2510 	return msg_bcast_ack(buf_msg(buf));
2511 }
2512 
2513 static void set_expected_frags(struct sk_buff *buf, u32 exp)
2514 {
2515 	msg_set_bcast_ack(buf_msg(buf), exp);
2516 }
2517 
2518 static u32 get_timer_cnt(struct sk_buff *buf)
2519 {
2520 	return msg_reroute_cnt(buf_msg(buf));
2521 }
2522 
static void incr_timer_cnt(struct sk_buff *buf)
{
	/* Reassembly age counter is kept in the reroute count field */
	struct tipc_msg *hdr = buf_msg(buf);

	msg_incr_reroute_cnt(hdr);
}
2527 
2528 /*
2529  * tipc_link_recv_fragment(): Called with node lock on. Returns
2530  * the reassembled buffer if message is complete.
2531  */
2532 int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2533 			    struct tipc_msg **m)
2534 {
2535 	struct sk_buff *prev = NULL;
2536 	struct sk_buff *fbuf = *fb;
2537 	struct tipc_msg *fragm = buf_msg(fbuf);
2538 	struct sk_buff *pbuf = *pending;
2539 	u32 long_msg_seq_no = msg_long_msgno(fragm);
2540 
2541 	*fb = NULL;
2542 
2543 	/* Is there an incomplete message waiting for this fragment? */
2544 
2545 	while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) ||
2546 			(msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2547 		prev = pbuf;
2548 		pbuf = pbuf->next;
2549 	}
2550 
2551 	if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2552 		struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2553 		u32 msg_sz = msg_size(imsg);
2554 		u32 fragm_sz = msg_data_sz(fragm);
2555 		u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
2556 		u32 max =  TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE;
2557 		if (msg_type(imsg) == TIPC_MCAST_MSG)
2558 			max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2559 		if (msg_size(imsg) > max) {
2560 			buf_discard(fbuf);
2561 			return 0;
2562 		}
2563 		pbuf = tipc_buf_acquire(msg_size(imsg));
2564 		if (pbuf != NULL) {
2565 			pbuf->next = *pending;
2566 			*pending = pbuf;
2567 			skb_copy_to_linear_data(pbuf, imsg,
2568 						msg_data_sz(fragm));
2569 			/*  Prepare buffer for subsequent fragments. */
2570 
2571 			set_long_msg_seqno(pbuf, long_msg_seq_no);
2572 			set_fragm_size(pbuf, fragm_sz);
2573 			set_expected_frags(pbuf, exp_fragm_cnt - 1);
2574 		} else {
2575 			warn("Link unable to reassemble fragmented message\n");
2576 		}
2577 		buf_discard(fbuf);
2578 		return 0;
2579 	} else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2580 		u32 dsz = msg_data_sz(fragm);
2581 		u32 fsz = get_fragm_size(pbuf);
2582 		u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2583 		u32 exp_frags = get_expected_frags(pbuf) - 1;
2584 		skb_copy_to_linear_data_offset(pbuf, crs,
2585 					       msg_data(fragm), dsz);
2586 		buf_discard(fbuf);
2587 
2588 		/* Is message complete? */
2589 
2590 		if (exp_frags == 0) {
2591 			if (prev)
2592 				prev->next = pbuf->next;
2593 			else
2594 				*pending = pbuf->next;
2595 			msg_reset_reroute_cnt(buf_msg(pbuf));
2596 			*fb = pbuf;
2597 			*m = buf_msg(pbuf);
2598 			return 1;
2599 		}
2600 		set_expected_frags(pbuf, exp_frags);
2601 		return 0;
2602 	}
2603 	buf_discard(fbuf);
2604 	return 0;
2605 }
2606 
2607 /**
2608  * link_check_defragm_bufs - flush stale incoming message fragments
2609  * @l_ptr: pointer to link
2610  */
2611 
2612 static void link_check_defragm_bufs(struct link *l_ptr)
2613 {
2614 	struct sk_buff *prev = NULL;
2615 	struct sk_buff *next = NULL;
2616 	struct sk_buff *buf = l_ptr->defragm_buf;
2617 
2618 	if (!buf)
2619 		return;
2620 	if (!link_working_working(l_ptr))
2621 		return;
2622 	while (buf) {
2623 		u32 cnt = get_timer_cnt(buf);
2624 
2625 		next = buf->next;
2626 		if (cnt < 4) {
2627 			incr_timer_cnt(buf);
2628 			prev = buf;
2629 		} else {
2630 			if (prev)
2631 				prev->next = buf->next;
2632 			else
2633 				l_ptr->defragm_buf = buf->next;
2634 			buf_discard(buf);
2635 		}
2636 		buf = next;
2637 	}
2638 }
2639 
2640 
2641 
2642 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2643 {
2644 	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
2645 		return;
2646 
2647 	l_ptr->tolerance = tolerance;
2648 	l_ptr->continuity_interval =
2649 		((tolerance / 4) > 500) ? 500 : tolerance / 4;
2650 	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2651 }
2652 
2653 
2654 void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
2655 {
2656 	/* Data messages from this node, inclusive FIRST_FRAGM */
2657 	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2658 	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2659 	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2660 	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
2661 	/* Transiting data messages,inclusive FIRST_FRAGM */
2662 	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2663 	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2664 	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2665 	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
2666 	l_ptr->queue_limit[CONN_MANAGER] = 1200;
2667 	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2668 	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2669 	/* FRAGMENT and LAST_FRAGMENT packets */
2670 	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2671 }
2672 
2673 /**
2674  * link_find_link - locate link by name
 * @name: ptr to link name string
 * @node: ptr to area to be filled with ptr to associated node
 *
 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
 * this also prevents link deletion.
 *
 * Returns pointer to link (or NULL if invalid link name).
2682  */
2683 
2684 static struct link *link_find_link(const char *name, struct tipc_node **node)
2685 {
2686 	struct link_name link_name_parts;
2687 	struct tipc_bearer *b_ptr;
2688 	struct link *l_ptr;
2689 
2690 	if (!link_name_validate(name, &link_name_parts))
2691 		return NULL;
2692 
2693 	b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
2694 	if (!b_ptr)
2695 		return NULL;
2696 
2697 	*node = tipc_node_find(link_name_parts.addr_peer);
2698 	if (!*node)
2699 		return NULL;
2700 
2701 	l_ptr = (*node)->links[b_ptr->identity];
2702 	if (!l_ptr || strcmp(l_ptr->name, name))
2703 		return NULL;
2704 
2705 	return l_ptr;
2706 }
2707 
/*
 * tipc_link_cmd_config - handle a "set link tolerance/priority/window"
 * configuration command; returns a reply buffer for the requester.
 */
struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
				     u16 cmd)
{
	struct tipc_link_config *args;
	u32 new_value;
	struct link *l_ptr;
	struct tipc_node *node;
	int res;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
	new_value = ntohl(args->value);

	/* Broadcast link: only the window setting can be changed */
	if (!strcmp(args->name, tipc_bclink_name)) {
		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
		    (tipc_bclink_set_queue_limits(new_value) == 0))
			return tipc_cfg_reply_none();
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (cannot change setting on broadcast link)");
	}

	read_lock_bh(&tipc_net_lock);
	l_ptr = link_find_link(args->name, &node);
	if (!l_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string("link not found");
	}

	/* Apply the setting under the node lock; out-of-range values
	 * leave res == -EINVAL and produce an error reply below
	 */
	tipc_node_lock(node);
	res = -EINVAL;
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		if ((new_value >= TIPC_MIN_LINK_TOL) &&
		    (new_value <= TIPC_MAX_LINK_TOL)) {
			link_set_supervision_props(l_ptr, new_value);
			/* Propagate new tolerance to peer via STATE_MSG */
			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, 0, new_value, 0, 0);
			res = 0;
		}
		break;
	case TIPC_CMD_SET_LINK_PRI:
		if ((new_value >= TIPC_MIN_LINK_PRI) &&
		    (new_value <= TIPC_MAX_LINK_PRI)) {
			l_ptr->priority = new_value;
			/* Propagate new priority to peer via STATE_MSG */
			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, 0, 0, new_value, 0);
			res = 0;
		}
		break;
	case TIPC_CMD_SET_LINK_WINDOW:
		if ((new_value >= TIPC_MIN_LINK_WIN) &&
		    (new_value <= TIPC_MAX_LINK_WIN)) {
			tipc_link_set_queue_limits(l_ptr, new_value);
			res = 0;
		}
		break;
	}
	tipc_node_unlock(node);

	read_unlock_bh(&tipc_net_lock);
	if (res)
		return tipc_cfg_reply_error_string("cannot change link setting");

	return tipc_cfg_reply_none();
}
2775 
2776 /**
2777  * link_reset_statistics - reset link statistics
2778  * @l_ptr: pointer to link
2779  */
2780 
2781 static void link_reset_statistics(struct link *l_ptr)
2782 {
2783 	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2784 	l_ptr->stats.sent_info = l_ptr->next_out_no;
2785 	l_ptr->stats.recv_info = l_ptr->next_in_no;
2786 }
2787 
2788 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2789 {
2790 	char *link_name;
2791 	struct link *l_ptr;
2792 	struct tipc_node *node;
2793 
2794 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2795 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2796 
2797 	link_name = (char *)TLV_DATA(req_tlv_area);
2798 	if (!strcmp(link_name, tipc_bclink_name)) {
2799 		if (tipc_bclink_reset_stats())
2800 			return tipc_cfg_reply_error_string("link not found");
2801 		return tipc_cfg_reply_none();
2802 	}
2803 
2804 	read_lock_bh(&tipc_net_lock);
2805 	l_ptr = link_find_link(link_name, &node);
2806 	if (!l_ptr) {
2807 		read_unlock_bh(&tipc_net_lock);
2808 		return tipc_cfg_reply_error_string("link not found");
2809 	}
2810 
2811 	tipc_node_lock(node);
2812 	link_reset_statistics(l_ptr);
2813 	tipc_node_unlock(node);
2814 	read_unlock_bh(&tipc_net_lock);
2815 	return tipc_cfg_reply_none();
2816 }
2817 
2818 /**
2819  * percent - convert count to a percentage of total (rounding up or down)
2820  */
2821 
2822 static u32 percent(u32 count, u32 total)
2823 {
2824 	return (count * 100 + (total / 2)) / total;
2825 }
2826 
2827 /**
2828  * tipc_link_stats - print link statistics
2829  * @name: link name
2830  * @buf: print buffer area
2831  * @buf_size: size of print buffer area
2832  *
2833  * Returns length of print buffer data string (or 0 if error)
2834  */
2835 
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
	struct print_buf pb;
	struct link *l_ptr;
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;

	/* Broadcast link statistics are produced elsewhere */
	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);

	tipc_printbuf_init(&pb, buf, buf_size);

	read_lock_bh(&tipc_net_lock);
	l_ptr = link_find_link(name, &node);
	if (!l_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return 0;
	}
	tipc_node_lock(node);

	if (tipc_link_is_active(l_ptr))
		status = "ACTIVE";
	else if (tipc_link_is_up(l_ptr))
		status = "STANDBY";
	else
		status = "DEFUNCT";
	tipc_printf(&pb, "Link <%s>\n"
			 "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
			 "  Window:%u packets\n",
		    l_ptr->name, status, l_ptr->max_pkt,
		    l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
	/* RX/TX totals are reported relative to the last statistics reset */
	tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    l_ptr->next_in_no - l_ptr->stats.recv_info,
		    l_ptr->stats.recv_fragments,
		    l_ptr->stats.recv_fragmented,
		    l_ptr->stats.recv_bundles,
		    l_ptr->stats.recv_bundled);
	tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    l_ptr->next_out_no - l_ptr->stats.sent_info,
		    l_ptr->stats.sent_fragments,
		    l_ptr->stats.sent_fragmented,
		    l_ptr->stats.sent_bundles,
		    l_ptr->stats.sent_bundled);
	/* Use 1 as denominator when no samples exist, avoiding div by zero */
	profile_total = l_ptr->stats.msg_length_counts;
	if (!profile_total)
		profile_total = 1;
	tipc_printf(&pb, "  TX profile sample:%u packets  average:%u octets\n"
			 "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
			 "-16354:%u%% -32768:%u%% -66000:%u%%\n",
		    l_ptr->stats.msg_length_counts,
		    l_ptr->stats.msg_lengths_total / profile_total,
		    percent(l_ptr->stats.msg_length_profile[0], profile_total),
		    percent(l_ptr->stats.msg_length_profile[1], profile_total),
		    percent(l_ptr->stats.msg_length_profile[2], profile_total),
		    percent(l_ptr->stats.msg_length_profile[3], profile_total),
		    percent(l_ptr->stats.msg_length_profile[4], profile_total),
		    percent(l_ptr->stats.msg_length_profile[5], profile_total),
		    percent(l_ptr->stats.msg_length_profile[6], profile_total));
	tipc_printf(&pb, "  RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
		    l_ptr->stats.recv_states,
		    l_ptr->stats.recv_probes,
		    l_ptr->stats.recv_nacks,
		    l_ptr->stats.deferred_recv,
		    l_ptr->stats.duplicates);
	tipc_printf(&pb, "  TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
		    l_ptr->stats.sent_states,
		    l_ptr->stats.sent_probes,
		    l_ptr->stats.sent_nacks,
		    l_ptr->stats.sent_acks,
		    l_ptr->stats.retransmitted);
	tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
		    l_ptr->stats.bearer_congs,
		    l_ptr->stats.link_congs,
		    l_ptr->stats.max_queue_sz,
		    l_ptr->stats.queue_sz_counts
		    ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
		    : 0);

	tipc_node_unlock(node);
	read_unlock_bh(&tipc_net_lock);
	return tipc_printbuf_validate(&pb);
}
2919 
2920 #define MAX_LINK_STATS_INFO 2000
2921 
2922 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2923 {
2924 	struct sk_buff *buf;
2925 	struct tlv_desc *rep_tlv;
2926 	int str_len;
2927 
2928 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2929 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2930 
2931 	buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
2932 	if (!buf)
2933 		return NULL;
2934 
2935 	rep_tlv = (struct tlv_desc *)buf->data;
2936 
2937 	str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
2938 				  (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
2939 	if (!str_len) {
2940 		buf_discard(buf);
2941 		return tipc_cfg_reply_error_string("link not found");
2942 	}
2943 
2944 	skb_put(buf, TLV_SPACE(str_len));
2945 	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2946 
2947 	return buf;
2948 }
2949 
2950 /**
2951  * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
2952  * @dest: network address of destination node
2953  * @selector: used to select from set of active links
2954  *
2955  * If no active link can be found, uses default maximum packet size.
2956  */
2957 
2958 u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2959 {
2960 	struct tipc_node *n_ptr;
2961 	struct link *l_ptr;
2962 	u32 res = MAX_PKT_DEFAULT;
2963 
2964 	if (dest == tipc_own_addr)
2965 		return MAX_MSG_SIZE;
2966 
2967 	read_lock_bh(&tipc_net_lock);
2968 	n_ptr = tipc_node_find(dest);
2969 	if (n_ptr) {
2970 		tipc_node_lock(n_ptr);
2971 		l_ptr = n_ptr->active_links[selector & 1];
2972 		if (l_ptr)
2973 			res = l_ptr->max_pkt;
2974 		tipc_node_unlock(n_ptr);
2975 	}
2976 	read_unlock_bh(&tipc_net_lock);
2977 	return res;
2978 }
2979 
2980 static void link_print(struct link *l_ptr, const char *str)
2981 {
2982 	char print_area[256];
2983 	struct print_buf pb;
2984 	struct print_buf *buf = &pb;
2985 
2986 	tipc_printbuf_init(buf, print_area, sizeof(print_area));
2987 
2988 	tipc_printf(buf, str);
2989 	tipc_printf(buf, "Link %x<%s>:",
2990 		    l_ptr->addr, l_ptr->b_ptr->name);
2991 
2992 #ifdef CONFIG_TIPC_DEBUG
2993 	if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
2994 		goto print_state;
2995 
2996 	tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
2997 	tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
2998 	tipc_printf(buf, "SQUE");
2999 	if (l_ptr->first_out) {
3000 		tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
3001 		if (l_ptr->next_out)
3002 			tipc_printf(buf, "%u..",
3003 				    msg_seqno(buf_msg(l_ptr->next_out)));
3004 		tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out)));
3005 		if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
3006 			 msg_seqno(buf_msg(l_ptr->first_out)))
3007 		     != (l_ptr->out_queue_size - 1)) ||
3008 		    (l_ptr->last_out->next != NULL)) {
3009 			tipc_printf(buf, "\nSend queue inconsistency\n");
3010 			tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
3011 			tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
3012 			tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
3013 		}
3014 	} else
3015 		tipc_printf(buf, "[]");
3016 	tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3017 	if (l_ptr->oldest_deferred_in) {
3018 		u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
3019 		u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
3020 		tipc_printf(buf, ":RQUE[%u..%u]", o, n);
3021 		if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3022 			tipc_printf(buf, ":RQSIZ(%u)",
3023 				    l_ptr->deferred_inqueue_sz);
3024 		}
3025 	}
3026 print_state:
3027 #endif
3028 
3029 	if (link_working_unknown(l_ptr))
3030 		tipc_printf(buf, ":WU");
3031 	else if (link_reset_reset(l_ptr))
3032 		tipc_printf(buf, ":RR");
3033 	else if (link_reset_unknown(l_ptr))
3034 		tipc_printf(buf, ":RU");
3035 	else if (link_working_working(l_ptr))
3036 		tipc_printf(buf, ":WW");
3037 	tipc_printf(buf, "\n");
3038 
3039 	tipc_printbuf_validate(buf);
3040 	info("%s", print_area);
3041 }
3042 
3043