xref: /openbmc/linux/net/tipc/link.c (revision 5e29a910)
1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include "core.h"
38 #include "link.h"
39 #include "bcast.h"
40 #include "socket.h"
41 #include "name_distr.h"
42 #include "discover.h"
43 #include "netlink.h"
44 
45 #include <linux/pkt_sched.h>
46 
47 /*
48  * Error message prefixes
49  */
50 static const char *link_co_err = "Link changeover error, ";
51 static const char *link_rst_msg = "Resetting link ";
52 static const char *link_unk_evt = "Unknown link event ";
53 
54 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
55 	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
56 	[TIPC_NLA_LINK_NAME] = {
57 		.type = NLA_STRING,
58 		.len = TIPC_MAX_LINK_NAME
59 	},
60 	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
61 	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
62 	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
63 	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
64 	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
65 	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
66 	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
67 	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
68 };
69 
70 /* Properties valid for media, bearer and link */
71 static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
72 	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
73 	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
74 	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
75 	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
76 };
77 
78 /*
79  * Out-of-range value for link session numbers
80  */
81 #define INVALID_SESSION 0x10000
82 
83 /*
84  * Link state events:
85  */
86 #define  STARTING_EVT    856384768	/* link processing trigger */
87 #define  TRAFFIC_MSG_EVT 560815u	/* traffic message received */
88 #define  TIMEOUT_EVT     560817u	/* link timer expired */
89 
90 /*
91  * The following two 'message types' are really just implementation
92  * data conveniently stored in the message header.
93  * They must not be considered part of the protocol.
94  */
95 #define OPEN_MSG   0
96 #define CLOSED_MSG 1
97 
98 /*
99  * State value stored in 'exp_msg_count'
100  */
101 #define START_CHANGEOVER 100000u
102 
103 static void link_handle_out_of_seq_msg(struct tipc_link *link,
104 				       struct sk_buff *skb);
105 static void tipc_link_proto_rcv(struct tipc_link *link,
106 				struct sk_buff *skb);
107 static int  tipc_link_tunnel_rcv(struct tipc_node *node,
108 				 struct sk_buff **skb);
109 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
110 static void link_state_event(struct tipc_link *l_ptr, u32 event);
111 static void link_reset_statistics(struct tipc_link *l_ptr);
112 static void link_print(struct tipc_link *l_ptr, const char *str);
113 static void tipc_link_sync_xmit(struct tipc_link *l);
114 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
115 static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
116 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
117 
118 /*
119  *  Simple link routines
120  */
121 static unsigned int align(unsigned int i)
122 {
123 	return (i + 3) & ~3u;
124 }
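
/* Example (commentary, not part of the original source): align() rounds
 * a length up to a multiple of 4, e.g. align(5) == 8, align(8) == 8 and
 * align(4) == 4; adding 3 and masking with ~3u clears the two low-order
 * bits, the standard round-up idiom for power-of-two alignment.
 */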
125 
126 static void tipc_link_release(struct kref *kref)
127 {
128 	kfree(container_of(kref, struct tipc_link, ref));
129 }
130 
131 static void tipc_link_get(struct tipc_link *l_ptr)
132 {
133 	kref_get(&l_ptr->ref);
134 }
135 
136 static void tipc_link_put(struct tipc_link *l_ptr)
137 {
138 	kref_put(&l_ptr->ref, tipc_link_release);
139 }
140 
141 static void link_init_max_pkt(struct tipc_link *l_ptr)
142 {
143 	struct tipc_node *node = l_ptr->owner;
144 	struct tipc_net *tn = net_generic(node->net, tipc_net_id);
145 	struct tipc_bearer *b_ptr;
146 	u32 max_pkt;
147 
148 	rcu_read_lock();
149 	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
150 	if (!b_ptr) {
151 		rcu_read_unlock();
152 		return;
153 	}
154 	max_pkt = (b_ptr->mtu & ~3);
155 	rcu_read_unlock();
156 
157 	if (max_pkt > MAX_MSG_SIZE)
158 		max_pkt = MAX_MSG_SIZE;
159 
160 	l_ptr->max_pkt_target = max_pkt;
161 	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
162 		l_ptr->max_pkt = l_ptr->max_pkt_target;
163 	else
164 		l_ptr->max_pkt = MAX_PKT_DEFAULT;
165 
166 	l_ptr->max_pkt_probes = 0;
167 }
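
/* Worked example (commentary): with a bearer MTU of 1500,
 * max_pkt = 1500 & ~3 = 1500, so max_pkt_target becomes 1500 and the
 * link initially runs at min(max_pkt_target, MAX_PKT_DEFAULT); the
 * probing in tipc_link_proto_xmit() later grows max_pkt toward
 * max_pkt_target.
 */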
168 
169 /*
170  *  Simple non-static link routines (i.e. referenced outside this file)
171  */
172 int tipc_link_is_up(struct tipc_link *l_ptr)
173 {
174 	if (!l_ptr)
175 		return 0;
176 	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
177 }
178 
179 int tipc_link_is_active(struct tipc_link *l_ptr)
180 {
181 	return	(l_ptr->owner->active_links[0] == l_ptr) ||
182 		(l_ptr->owner->active_links[1] == l_ptr);
183 }
184 
185 /**
186  * link_timeout - handle expiration of link timer
187  * @data: pointer to the link, cast to unsigned long
188  */
189 static void link_timeout(unsigned long data)
190 {
191 	struct tipc_link *l_ptr = (struct tipc_link *)data;
192 	struct sk_buff *skb;
193 
194 	tipc_node_lock(l_ptr->owner);
195 
196 	/* update counters used in statistical profiling of send traffic */
197 	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
198 	l_ptr->stats.queue_sz_counts++;
199 
200 	skb = skb_peek(&l_ptr->outqueue);
201 	if (skb) {
202 		struct tipc_msg *msg = buf_msg(skb);
203 		u32 length = msg_size(msg);
204 
205 		if ((msg_user(msg) == MSG_FRAGMENTER) &&
206 		    (msg_type(msg) == FIRST_FRAGMENT)) {
207 			length = msg_size(msg_get_wrapped(msg));
208 		}
209 		if (length) {
210 			l_ptr->stats.msg_lengths_total += length;
211 			l_ptr->stats.msg_length_counts++;
212 			if (length <= 64)
213 				l_ptr->stats.msg_length_profile[0]++;
214 			else if (length <= 256)
215 				l_ptr->stats.msg_length_profile[1]++;
216 			else if (length <= 1024)
217 				l_ptr->stats.msg_length_profile[2]++;
218 			else if (length <= 4096)
219 				l_ptr->stats.msg_length_profile[3]++;
220 			else if (length <= 16384)
221 				l_ptr->stats.msg_length_profile[4]++;
222 			else if (length <= 32768)
223 				l_ptr->stats.msg_length_profile[5]++;
224 			else
225 				l_ptr->stats.msg_length_profile[6]++;
226 		}
227 	}
228 
229 	/* do all other link processing performed on a periodic basis */
230 	link_state_event(l_ptr, TIMEOUT_EVT);
231 
232 	if (l_ptr->next_out)
233 		tipc_link_push_packets(l_ptr);
234 
235 	tipc_node_unlock(l_ptr->owner);
236 	tipc_link_put(l_ptr);
237 }
238 
239 static void link_set_timer(struct tipc_link *link, unsigned long time)
240 {
241 	if (!mod_timer(&link->timer, jiffies + time))
242 		tipc_link_get(link);
243 }
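
/* Commentary on the kref/timer pairing above: mod_timer() returns 0
 * when the timer was not already pending, i.e. when arming it creates a
 * new outstanding expiration, and only then is a reference taken. This
 * keeps the invariant of exactly one link reference per armed timer;
 * the matching tipc_link_put() happens in link_timeout() and in the
 * del_timer() path of tipc_link_delete_list().
 */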
244 
245 /**
246  * tipc_link_create - create a new link
247  * @n_ptr: pointer to associated node
248  * @b_ptr: pointer to associated bearer
249  * @media_addr: media address to use when sending messages over link
250  *
251  * Returns pointer to link, or NULL on failure.
252  */
253 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
254 				   struct tipc_bearer *b_ptr,
255 				   const struct tipc_media_addr *media_addr)
256 {
257 	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
258 	struct tipc_link *l_ptr;
259 	struct tipc_msg *msg;
260 	char *if_name;
261 	char addr_string[16];
262 	u32 peer = n_ptr->addr;
263 
264 	if (n_ptr->link_cnt >= MAX_BEARERS) {
265 		tipc_addr_string_fill(addr_string, n_ptr->addr);
266 		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
267 			n_ptr->link_cnt, addr_string, MAX_BEARERS);
268 		return NULL;
269 	}
270 
271 	if (n_ptr->links[b_ptr->identity]) {
272 		tipc_addr_string_fill(addr_string, n_ptr->addr);
273 		pr_err("Attempt to establish second link on <%s> to %s\n",
274 		       b_ptr->name, addr_string);
275 		return NULL;
276 	}
277 
278 	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
279 	if (!l_ptr) {
280 		pr_warn("Link creation failed, no memory\n");
281 		return NULL;
282 	}
283 	kref_init(&l_ptr->ref);
284 	l_ptr->addr = peer;
285 	if_name = strchr(b_ptr->name, ':') + 1;
286 	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
287 		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
288 		tipc_node(tn->own_addr),
289 		if_name,
290 		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
291 		/* note: peer i/f name is updated by reset/activate message */
292 	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
293 	l_ptr->owner = n_ptr;
294 	l_ptr->checkpoint = 1;
295 	l_ptr->peer_session = INVALID_SESSION;
296 	l_ptr->bearer_id = b_ptr->identity;
297 	link_set_supervision_props(l_ptr, b_ptr->tolerance);
298 	l_ptr->state = RESET_UNKNOWN;
299 
300 	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
301 	msg = l_ptr->pmsg;
302 	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
303 		      l_ptr->addr);
304 	msg_set_size(msg, sizeof(l_ptr->proto_msg));
305 	msg_set_session(msg, (tn->random & 0xffff));
306 	msg_set_bearer_id(msg, b_ptr->identity);
307 	strcpy((char *)msg_data(msg), if_name);
308 
309 	l_ptr->priority = b_ptr->priority;
310 	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
311 
312 	l_ptr->net_plane = b_ptr->net_plane;
313 	link_init_max_pkt(l_ptr);
314 
315 	l_ptr->next_out_no = 1;
316 	__skb_queue_head_init(&l_ptr->outqueue);
317 	__skb_queue_head_init(&l_ptr->deferred_queue);
318 	skb_queue_head_init(&l_ptr->wakeupq);
319 	skb_queue_head_init(&l_ptr->inputq);
320 	skb_queue_head_init(&l_ptr->namedq);
321 	link_reset_statistics(l_ptr);
322 	tipc_node_attach_link(n_ptr, l_ptr);
323 	setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
324 	link_state_event(l_ptr, STARTING_EVT);
325 
326 	return l_ptr;
327 }
328 
329 /**
330  * tipc_link_delete - Conditional deletion of link.
331  *               If timer still running, real delete is done when it expires
332  * @link: link to be deleted
333  */
334 void tipc_link_delete(struct tipc_link *link)
335 {
336 	tipc_link_reset_fragments(link);
337 	tipc_node_detach_link(link->owner, link);
338 	tipc_link_put(link);
339 }
340 
341 void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
342 			   bool shutting_down)
343 {
344 	struct tipc_net *tn = net_generic(net, tipc_net_id);
345 	struct tipc_link *link;
346 	struct tipc_node *node;
347 
348 	rcu_read_lock();
349 	list_for_each_entry_rcu(node, &tn->node_list, list) {
350 		tipc_node_lock(node);
351 		link = node->links[bearer_id];
352 		if (!link) {
353 			tipc_node_unlock(node);
354 			continue;
355 		}
356 		tipc_link_reset(link);
357 		if (del_timer(&link->timer))
358 			tipc_link_put(link);
359 		link->flags |= LINK_STOPPED;
360 		/* Delete link now, or when failover is finished: */
361 		if (shutting_down || !tipc_node_is_up(node))
362 			tipc_link_delete(link);
363 		tipc_node_unlock(node);
364 	}
365 	rcu_read_unlock();
366 }
367 
368 /**
369  * link_schedule_user - schedule user for wakeup after congestion
370  * @link: congested link
371  * @oport: sending port
372  * @chain_sz: size of the buffer chain whose transmission was attempted
373  * @imp: importance of the message whose transmission was attempted
374  * Create pseudo msg to send back to user when congestion abates
375  */
376 static bool link_schedule_user(struct tipc_link *link, u32 oport,
377 			       uint chain_sz, uint imp)
378 {
379 	struct sk_buff *buf;
380 
381 	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
382 			      link_own_addr(link), link_own_addr(link),
383 			      oport, 0, 0);
384 	if (!buf)
385 		return false;
386 	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
387 	TIPC_SKB_CB(buf)->chain_imp = imp;
388 	skb_queue_tail(&link->wakeupq, buf);
389 	link->stats.link_congs++;
390 	return true;
391 }
392 
393 /**
394  * link_prepare_wakeup - prepare users for wakeup after congestion
395  * @link: congested link
396  * Move a number of waiting users, as permitted by available space in
397  * the send queue, from link wait queue to node wait queue for wakeup
398  */
399 void link_prepare_wakeup(struct tipc_link *link)
400 {
401 	uint pend_qsz = skb_queue_len(&link->outqueue);
402 	struct sk_buff *skb, *tmp;
403 
404 	skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
405 		if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
406 			break;
407 		pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
408 		skb_unlink(skb, &link->wakeupq);
409 		skb_queue_tail(&link->inputq, skb);
410 		link->owner->inputq = &link->inputq;
411 		link->owner->action_flags |= TIPC_MSG_EVT;
412 	}
413 }
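
/* Congestion handling in outline (commentary, not original code):
 *
 * 1. A sender overruns queue_limit[imp]; tipc_link_cong() calls
 *    link_schedule_user(), which queues a SOCK_WAKEUP pseudo message
 *    on link->wakeupq and returns -ELINKCONG to the sender.
 * 2. Once acknowledgments free enough send queue space,
 *    link_prepare_wakeup() moves as many wakeup messages as now fit
 *    (judged by chain_sz against queue_limit[chain_imp]) from wakeupq
 *    to inputq and raises TIPC_MSG_EVT so the node layer delivers them
 *    to the blocked sockets.
 */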
414 
415 /**
416  * tipc_link_reset_fragments - purge link's inbound message fragments queue
417  * @l_ptr: pointer to link
418  */
419 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
420 {
421 	kfree_skb(l_ptr->reasm_buf);
422 	l_ptr->reasm_buf = NULL;
423 }
424 
425 /**
426  * tipc_link_purge_queues - purge all pkt queues associated with link
427  * @l_ptr: pointer to link
428  */
429 void tipc_link_purge_queues(struct tipc_link *l_ptr)
430 {
431 	__skb_queue_purge(&l_ptr->deferred_queue);
432 	__skb_queue_purge(&l_ptr->outqueue);
433 	tipc_link_reset_fragments(l_ptr);
434 }
435 
436 void tipc_link_reset(struct tipc_link *l_ptr)
437 {
438 	u32 prev_state = l_ptr->state;
439 	u32 checkpoint = l_ptr->next_in_no;
440 	int was_active_link = tipc_link_is_active(l_ptr);
441 	struct tipc_node *owner = l_ptr->owner;
442 
443 	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
444 
445 	/* Link is down, accept any session */
446 	l_ptr->peer_session = INVALID_SESSION;
447 
448 	/* Prepare for max packet size negotiation */
449 	link_init_max_pkt(l_ptr);
450 
451 	l_ptr->state = RESET_UNKNOWN;
452 
453 	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
454 		return;
455 
456 	tipc_node_link_down(l_ptr->owner, l_ptr);
457 	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
458 
459 	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
460 		l_ptr->reset_checkpoint = checkpoint;
461 		l_ptr->exp_msg_count = START_CHANGEOVER;
462 	}
463 
464 	/* Clean up all queues, except inputq: */
465 	__skb_queue_purge(&l_ptr->outqueue);
466 	__skb_queue_purge(&l_ptr->deferred_queue);
467 	skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq);
468 	if (!skb_queue_empty(&l_ptr->inputq))
469 		owner->action_flags |= TIPC_MSG_EVT;
470 	owner->inputq = &l_ptr->inputq;
471 	l_ptr->next_out = NULL;
472 	l_ptr->unacked_window = 0;
473 	l_ptr->checkpoint = 1;
474 	l_ptr->next_out_no = 1;
475 	l_ptr->fsm_msg_cnt = 0;
476 	l_ptr->stale_count = 0;
477 	link_reset_statistics(l_ptr);
478 }
479 
480 void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
481 {
482 	struct tipc_net *tn = net_generic(net, tipc_net_id);
483 	struct tipc_link *l_ptr;
484 	struct tipc_node *n_ptr;
485 
486 	rcu_read_lock();
487 	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
488 		tipc_node_lock(n_ptr);
489 		l_ptr = n_ptr->links[bearer_id];
490 		if (l_ptr)
491 			tipc_link_reset(l_ptr);
492 		tipc_node_unlock(n_ptr);
493 	}
494 	rcu_read_unlock();
495 }
496 
497 static void link_activate(struct tipc_link *link)
498 {
499 	struct tipc_node *node = link->owner;
500 
501 	link->next_in_no = 1;
502 	link->stats.recv_info = 1;
503 	tipc_node_link_up(node, link);
504 	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
505 }
506 
507 /**
508  * link_state_event - link finite state machine
509  * @l_ptr: pointer to link
510  * @event: state machine event to process
511  */
512 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
513 {
514 	struct tipc_link *other;
515 	unsigned long cont_intv = l_ptr->cont_intv;
516 
517 	if (l_ptr->flags & LINK_STOPPED)
518 		return;
519 
520 	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
521 		return;		/* Not yet. */
522 
523 	/* Check whether changeover is going on */
524 	if (l_ptr->exp_msg_count) {
525 		if (event == TIMEOUT_EVT)
526 			link_set_timer(l_ptr, cont_intv);
527 		return;
528 	}
529 
530 	switch (l_ptr->state) {
531 	case WORKING_WORKING:
532 		switch (event) {
533 		case TRAFFIC_MSG_EVT:
534 		case ACTIVATE_MSG:
535 			break;
536 		case TIMEOUT_EVT:
537 			if (l_ptr->next_in_no != l_ptr->checkpoint) {
538 				l_ptr->checkpoint = l_ptr->next_in_no;
539 				if (tipc_bclink_acks_missing(l_ptr->owner)) {
540 					tipc_link_proto_xmit(l_ptr, STATE_MSG,
541 							     0, 0, 0, 0, 0);
542 					l_ptr->fsm_msg_cnt++;
543 				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
544 					tipc_link_proto_xmit(l_ptr, STATE_MSG,
545 							     1, 0, 0, 0, 0);
546 					l_ptr->fsm_msg_cnt++;
547 				}
548 				link_set_timer(l_ptr, cont_intv);
549 				break;
550 			}
551 			l_ptr->state = WORKING_UNKNOWN;
552 			l_ptr->fsm_msg_cnt = 0;
553 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
554 			l_ptr->fsm_msg_cnt++;
555 			link_set_timer(l_ptr, cont_intv / 4);
556 			break;
557 		case RESET_MSG:
558 			pr_debug("%s<%s>, requested by peer\n",
559 				 link_rst_msg, l_ptr->name);
560 			tipc_link_reset(l_ptr);
561 			l_ptr->state = RESET_RESET;
562 			l_ptr->fsm_msg_cnt = 0;
563 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
564 					     0, 0, 0, 0, 0);
565 			l_ptr->fsm_msg_cnt++;
566 			link_set_timer(l_ptr, cont_intv);
567 			break;
568 		default:
569 			pr_debug("%s%u in WW state\n", link_unk_evt, event);
570 		}
571 		break;
572 	case WORKING_UNKNOWN:
573 		switch (event) {
574 		case TRAFFIC_MSG_EVT:
575 		case ACTIVATE_MSG:
576 			l_ptr->state = WORKING_WORKING;
577 			l_ptr->fsm_msg_cnt = 0;
578 			link_set_timer(l_ptr, cont_intv);
579 			break;
580 		case RESET_MSG:
581 			pr_debug("%s<%s>, requested by peer while probing\n",
582 				 link_rst_msg, l_ptr->name);
583 			tipc_link_reset(l_ptr);
584 			l_ptr->state = RESET_RESET;
585 			l_ptr->fsm_msg_cnt = 0;
586 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
587 					     0, 0, 0, 0, 0);
588 			l_ptr->fsm_msg_cnt++;
589 			link_set_timer(l_ptr, cont_intv);
590 			break;
591 		case TIMEOUT_EVT:
592 			if (l_ptr->next_in_no != l_ptr->checkpoint) {
593 				l_ptr->state = WORKING_WORKING;
594 				l_ptr->fsm_msg_cnt = 0;
595 				l_ptr->checkpoint = l_ptr->next_in_no;
596 				if (tipc_bclink_acks_missing(l_ptr->owner)) {
597 					tipc_link_proto_xmit(l_ptr, STATE_MSG,
598 							     0, 0, 0, 0, 0);
599 					l_ptr->fsm_msg_cnt++;
600 				}
601 				link_set_timer(l_ptr, cont_intv);
602 			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
603 				tipc_link_proto_xmit(l_ptr, STATE_MSG,
604 						     1, 0, 0, 0, 0);
605 				l_ptr->fsm_msg_cnt++;
606 				link_set_timer(l_ptr, cont_intv / 4);
607 			} else {	/* Link has failed */
608 				pr_debug("%s<%s>, peer not responding\n",
609 					 link_rst_msg, l_ptr->name);
610 				tipc_link_reset(l_ptr);
611 				l_ptr->state = RESET_UNKNOWN;
612 				l_ptr->fsm_msg_cnt = 0;
613 				tipc_link_proto_xmit(l_ptr, RESET_MSG,
614 						     0, 0, 0, 0, 0);
615 				l_ptr->fsm_msg_cnt++;
616 				link_set_timer(l_ptr, cont_intv);
617 			}
618 			break;
619 		default:
620 			pr_err("%s%u in WU state\n", link_unk_evt, event);
621 		}
622 		break;
623 	case RESET_UNKNOWN:
624 		switch (event) {
625 		case TRAFFIC_MSG_EVT:
626 			break;
627 		case ACTIVATE_MSG:
628 			other = l_ptr->owner->active_links[0];
629 			if (other && link_working_unknown(other))
630 				break;
631 			l_ptr->state = WORKING_WORKING;
632 			l_ptr->fsm_msg_cnt = 0;
633 			link_activate(l_ptr);
634 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
635 			l_ptr->fsm_msg_cnt++;
636 			if (l_ptr->owner->working_links == 1)
637 				tipc_link_sync_xmit(l_ptr);
638 			link_set_timer(l_ptr, cont_intv);
639 			break;
640 		case RESET_MSG:
641 			l_ptr->state = RESET_RESET;
642 			l_ptr->fsm_msg_cnt = 0;
643 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
644 					     1, 0, 0, 0, 0);
645 			l_ptr->fsm_msg_cnt++;
646 			link_set_timer(l_ptr, cont_intv);
647 			break;
648 		case STARTING_EVT:
649 			l_ptr->flags |= LINK_STARTED;
650 			l_ptr->fsm_msg_cnt++;
651 			link_set_timer(l_ptr, cont_intv);
652 			break;
653 		case TIMEOUT_EVT:
654 			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
655 			l_ptr->fsm_msg_cnt++;
656 			link_set_timer(l_ptr, cont_intv);
657 			break;
658 		default:
659 			pr_err("%s%u in RU state\n", link_unk_evt, event);
660 		}
661 		break;
662 	case RESET_RESET:
663 		switch (event) {
664 		case TRAFFIC_MSG_EVT:
665 		case ACTIVATE_MSG:
666 			other = l_ptr->owner->active_links[0];
667 			if (other && link_working_unknown(other))
668 				break;
669 			l_ptr->state = WORKING_WORKING;
670 			l_ptr->fsm_msg_cnt = 0;
671 			link_activate(l_ptr);
672 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
673 			l_ptr->fsm_msg_cnt++;
674 			if (l_ptr->owner->working_links == 1)
675 				tipc_link_sync_xmit(l_ptr);
676 			link_set_timer(l_ptr, cont_intv);
677 			break;
678 		case RESET_MSG:
679 			break;
680 		case TIMEOUT_EVT:
681 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
682 					     0, 0, 0, 0, 0);
683 			l_ptr->fsm_msg_cnt++;
684 			link_set_timer(l_ptr, cont_intv);
685 			break;
686 		default:
687 			pr_err("%s%u in RR state\n", link_unk_evt, event);
688 		}
689 		break;
690 	default:
691 		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
692 	}
693 }
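
/* Transition summary for the state machine above (commentary derived
 * from the switch statement; see the code for the authoritative rules):
 *
 *	State	Event			Action / next state
 *	-----	-----			-------------------
 *	WW	traffic/activate	ignore, stay WW
 *	WW	timeout, traffic seen	optional STATE msg, stay WW
 *	WW	timeout, silent		probe, -> WU (timer cont_intv/4)
 *	WW/WU	RESET_MSG		reset link, send ACTIVATE, -> RR
 *	WU	traffic/activate	-> WW
 *	WU	timeout, silent		probe until abort_limit, then
 *					reset, send RESET, -> RU
 *	RU	ACTIVATE_MSG		activate link, -> WW
 *	RU	RESET_MSG		send ACTIVATE, -> RR
 *	RU/RR	timeout			resend RESET resp. ACTIVATE
 *	RR	traffic/activate	activate link, -> WW
 */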
694 
695 /* tipc_link_cong: determine return value and how to treat the
696  * sent buffer during link congestion.
697  * - For plain, errorless user data messages we keep the buffer and
698  *   return -ELINKCONG.
699  * - For all other messages we discard the buffer and return -EHOSTUNREACH.
700  * - For TIPC internal messages we also reset the link.
701  */
702 static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
703 {
704 	struct sk_buff *skb = skb_peek(list);
705 	struct tipc_msg *msg = buf_msg(skb);
706 	uint imp = tipc_msg_tot_importance(msg);
707 	u32 oport = msg_tot_origport(msg);
708 
709 	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
710 		pr_warn("%s<%s>, send queue full\n", link_rst_msg, link->name);
711 		tipc_link_reset(link);
712 		goto drop;
713 	}
714 	if (unlikely(msg_errcode(msg)))
715 		goto drop;
716 	if (unlikely(msg_reroute_cnt(msg)))
717 		goto drop;
718 	if (TIPC_SKB_CB(skb)->wakeup_pending)
719 		return -ELINKCONG;
720 	if (link_schedule_user(link, oport, skb_queue_len(list), imp))
721 		return -ELINKCONG;
722 drop:
723 	__skb_queue_purge(list);
724 	return -EHOSTUNREACH;
725 }
726 
727 /**
728  * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
729  * @link: link to use
730  * @list: chain of buffers containing message
731  *
732  * Consumes the buffer chain, except when returning -ELINKCONG
733  * Returns 0 on success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
734  * user data messages) or -EHOSTUNREACH (all other messages/senders)
735  * Only the socket functions tipc_send_stream() and tipc_send_packet() need
736  * to act on the return value, since they may need to do more send attempts.
737  */
738 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
739 		     struct sk_buff_head *list)
740 {
741 	struct tipc_msg *msg = buf_msg(skb_peek(list));
742 	uint psz = msg_size(msg);
743 	uint sndlim = link->queue_limit[0];
744 	uint imp = tipc_msg_tot_importance(msg);
745 	uint mtu = link->max_pkt;
746 	uint ack = mod(link->next_in_no - 1);
747 	uint seqno = link->next_out_no;
748 	uint bc_last_in = link->owner->bclink.last_in;
749 	struct tipc_media_addr *addr = &link->media_addr;
750 	struct sk_buff_head *outqueue = &link->outqueue;
751 	struct sk_buff *skb, *tmp;
752 
753 	/* Match queue limits against msg importance: */
754 	if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
755 		return tipc_link_cong(link, list);
756 
757 	/* Has a valid packet limit been used? */
758 	if (unlikely(psz > mtu)) {
759 		__skb_queue_purge(list);
760 		return -EMSGSIZE;
761 	}
762 
763 	/* Prepare each packet for sending, and add to outqueue: */
764 	skb_queue_walk_safe(list, skb, tmp) {
765 		__skb_unlink(skb, list);
766 		msg = buf_msg(skb);
767 		msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
768 		msg_set_bcast_ack(msg, bc_last_in);
769 
770 		if (skb_queue_len(outqueue) < sndlim) {
771 			__skb_queue_tail(outqueue, skb);
772 			tipc_bearer_send(net, link->bearer_id,
773 					 skb, addr);
774 			link->next_out = NULL;
775 			link->unacked_window = 0;
776 		} else if (tipc_msg_bundle(outqueue, skb, mtu)) {
777 			link->stats.sent_bundled++;
778 			continue;
779 		} else if (tipc_msg_make_bundle(outqueue, skb, mtu,
780 						link->addr)) {
781 			link->stats.sent_bundled++;
782 			link->stats.sent_bundles++;
783 			if (!link->next_out)
784 				link->next_out = skb_peek_tail(outqueue);
785 		} else {
786 			__skb_queue_tail(outqueue, skb);
787 			if (!link->next_out)
788 				link->next_out = skb;
789 		}
790 		seqno++;
791 	}
792 	link->next_out_no = seqno;
793 	return 0;
794 }
795 
796 static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
797 {
798 	skb_queue_head_init(list);
799 	__skb_queue_tail(list, skb);
800 }
801 
802 static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
803 {
804 	struct sk_buff_head head;
805 
806 	skb2list(skb, &head);
807 	return __tipc_link_xmit(link->owner->net, link, &head);
808 }
809 
810 int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
811 		       u32 selector)
812 {
813 	struct sk_buff_head head;
814 
815 	skb2list(skb, &head);
816 	return tipc_link_xmit(net, &head, dnode, selector);
817 }
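
/* Usage sketch (hypothetical caller, commentary only): the wrapper
 * builds a one-buffer queue around skb so single-buffer callers need
 * not manage an sk_buff_head themselves:
 *
 *	rc = tipc_link_xmit_skb(net, skb, dnode, selector);
 *	if (rc == -ELINKCONG)
 *		;	// chain kept; wait for SOCK_WAKEUP, then retry
 *	else if (rc)
 *		;	// chain consumed, e.g. -EHOSTUNREACH
 */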
818 
819 /**
820  * tipc_link_xmit() is the general link level function for message sending
821  * @net: the applicable net namespace
822  * @list: chain of buffers containing message
824  * @dnode: address of destination node
825  * @selector: a number used for deterministic link selection
826  * Consumes the buffer chain, except when returning -ELINKCONG
827  * Returns 0 on success, otherwise errno: -ELINKCONG, -EHOSTUNREACH or -EMSGSIZE
828  */
829 int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
830 		   u32 selector)
831 {
832 	struct tipc_link *link = NULL;
833 	struct tipc_node *node;
834 	int rc = -EHOSTUNREACH;
835 
836 	node = tipc_node_find(net, dnode);
837 	if (node) {
838 		tipc_node_lock(node);
839 		link = node->active_links[selector & 1];
840 		if (link)
841 			rc = __tipc_link_xmit(net, link, list);
842 		tipc_node_unlock(node);
843 	}
844 	if (link)
845 		return rc;
846 
847 	if (likely(in_own_node(net, dnode)))
848 		return tipc_sk_rcv(net, list);
849 
850 	__skb_queue_purge(list);
851 	return rc;
852 }
853 
854 /*
855  * tipc_link_sync_xmit - synchronize broadcast link endpoints.
856  *
857  * Give a newly added peer node the sequence number where it should
858  * start receiving and acking broadcast packets.
859  *
860  * Called with node locked
861  */
862 static void tipc_link_sync_xmit(struct tipc_link *link)
863 {
864 	struct sk_buff *skb;
865 	struct tipc_msg *msg;
866 
867 	skb = tipc_buf_acquire(INT_H_SIZE);
868 	if (!skb)
869 		return;
870 
871 	msg = buf_msg(skb);
872 	tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
873 		      INT_H_SIZE, link->addr);
874 	msg_set_last_bcast(msg, link->owner->bclink.acked);
875 	__tipc_link_xmit_skb(link, skb);
876 }
877 
878 /*
879  * tipc_link_sync_rcv - synchronize broadcast link endpoints.
880  * Receive the sequence number where we should start receiving and
881  * acking broadcast packets from a newly added peer node, and open
882  * up for reception of such packets.
883  *
884  * Called with node locked
885  */
886 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
887 {
888 	struct tipc_msg *msg = buf_msg(buf);
889 
890 	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
891 	n->bclink.recv_permitted = true;
892 	kfree_skb(buf);
893 }
894 
895 struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
896 				    const struct sk_buff *skb)
897 {
898 	if (skb_queue_is_last(list, skb))
899 		return NULL;
900 	return skb->next;
901 }
902 
903 /*
904  * tipc_link_push_packets - push unsent packets to bearer
905  *
906  * Push out the unsent messages of a link where congestion
907  * has abated.
908  *
909  * Called with node locked
910  */
911 void tipc_link_push_packets(struct tipc_link *l_ptr)
912 {
913 	struct sk_buff_head *outqueue = &l_ptr->outqueue;
914 	struct sk_buff *skb = l_ptr->next_out;
915 	struct tipc_msg *msg;
916 	u32 next, first;
917 
918 	skb_queue_walk_from(outqueue, skb) {
919 		msg = buf_msg(skb);
920 		next = msg_seqno(msg);
921 		first = buf_seqno(skb_peek(outqueue));
922 
923 		if (mod(next - first) < l_ptr->queue_limit[0]) {
924 			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
925 			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
926 			if (msg_user(msg) == MSG_BUNDLER)
927 				TIPC_SKB_CB(skb)->bundling = false;
928 			tipc_bearer_send(l_ptr->owner->net,
929 					 l_ptr->bearer_id, skb,
930 					 &l_ptr->media_addr);
931 			l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
932 		} else {
933 			break;
934 		}
935 	}
936 }
937 
938 void tipc_link_reset_all(struct tipc_node *node)
939 {
940 	char addr_string[16];
941 	u32 i;
942 
943 	tipc_node_lock(node);
944 
945 	pr_warn("Resetting all links to %s\n",
946 		tipc_addr_string_fill(addr_string, node->addr));
947 
948 	for (i = 0; i < MAX_BEARERS; i++) {
949 		if (node->links[i]) {
950 			link_print(node->links[i], "Resetting link\n");
951 			tipc_link_reset(node->links[i]);
952 		}
953 	}
954 
955 	tipc_node_unlock(node);
956 }
957 
958 static void link_retransmit_failure(struct tipc_link *l_ptr,
959 				    struct sk_buff *buf)
960 {
961 	struct tipc_msg *msg = buf_msg(buf);
962 	struct net *net = l_ptr->owner->net;
963 
964 	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
965 
966 	if (l_ptr->addr) {
967 		/* Handle failure on standard link */
968 		link_print(l_ptr, "Resetting link\n");
969 		tipc_link_reset(l_ptr);
970 
971 	} else {
972 		/* Handle failure on broadcast link */
973 		struct tipc_node *n_ptr;
974 		char addr_string[16];
975 
976 		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
977 		pr_cont("Outstanding acks: %lu\n",
978 			(unsigned long) TIPC_SKB_CB(buf)->handle);
979 
980 		n_ptr = tipc_bclink_retransmit_to(net);
981 		tipc_node_lock(n_ptr);
982 
983 		tipc_addr_string_fill(addr_string, n_ptr->addr);
984 		pr_info("Broadcast link info for %s\n", addr_string);
985 		pr_info("Reception permitted: %d,  Acked: %u\n",
986 			n_ptr->bclink.recv_permitted,
987 			n_ptr->bclink.acked);
988 		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
989 			n_ptr->bclink.last_in,
990 			n_ptr->bclink.oos_state,
991 			n_ptr->bclink.last_sent);
992 
993 		tipc_node_unlock(n_ptr);
994 
995 		tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
996 		l_ptr->stale_count = 0;
997 	}
998 }
999 
1000 void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
1001 			  u32 retransmits)
1002 {
1003 	struct tipc_msg *msg;
1004 
1005 	if (!skb)
1006 		return;
1007 
1008 	msg = buf_msg(skb);
1009 
1010 	/* Detect repeated retransmit failures */
1011 	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1012 		if (++l_ptr->stale_count > 100) {
1013 			link_retransmit_failure(l_ptr, skb);
1014 			return;
1015 		}
1016 	} else {
1017 		l_ptr->last_retransmitted = msg_seqno(msg);
1018 		l_ptr->stale_count = 1;
1019 	}
1020 
1021 	skb_queue_walk_from(&l_ptr->outqueue, skb) {
1022 		if (!retransmits || skb == l_ptr->next_out)
1023 			break;
1024 		msg = buf_msg(skb);
1025 		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1026 		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1027 		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
1028 				 &l_ptr->media_addr);
1029 		retransmits--;
1030 		l_ptr->stats.retransmitted++;
1031 	}
1032 }
1033 
1034 static void link_retrieve_defq(struct tipc_link *link,
1035 			       struct sk_buff_head *list)
1036 {
1037 	u32 seq_no;
1038 
1039 	if (skb_queue_empty(&link->deferred_queue))
1040 		return;
1041 
1042 	seq_no = buf_seqno(skb_peek(&link->deferred_queue));
1043 	if (seq_no == mod(link->next_in_no))
1044 		skb_queue_splice_tail_init(&link->deferred_queue, list);
1045 }
1046 
1047 /**
1048  * link_recv_buf_validate - validate basic format of received message
1049  *
1050  * This routine ensures a TIPC message has an acceptable header, and at least
1051  * as much data as the header indicates it should.  The routine also ensures
1052  * that the entire message header is stored in the main fragment of the message
1053  * buffer, to simplify future access to message header fields.
1054  *
1055  * Note: Having extra info present in the message header or data areas is OK.
1056  * TIPC will ignore the excess, under the assumption that it is optional info
1057  * introduced by a later release of the protocol.
1058  */
1059 static int link_recv_buf_validate(struct sk_buff *buf)
1060 {
1061 	static u32 min_data_hdr_size[8] = {
1062 		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
1063 		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1064 		};
1065 
1066 	struct tipc_msg *msg;
1067 	u32 tipc_hdr[2];
1068 	u32 size;
1069 	u32 hdr_size;
1070 	u32 min_hdr_size;
1071 
1072 	/* If this packet comes from the defer queue, the skb has already
1073 	 * been validated
1074 	 */
1075 	if (unlikely(TIPC_SKB_CB(buf)->deferred))
1076 		return 1;
1077 
1078 	if (unlikely(buf->len < MIN_H_SIZE))
1079 		return 0;
1080 
1081 	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1082 	if (msg == NULL)
1083 		return 0;
1084 
1085 	if (unlikely(msg_version(msg) != TIPC_VERSION))
1086 		return 0;
1087 
1088 	size = msg_size(msg);
1089 	hdr_size = msg_hdr_sz(msg);
1090 	min_hdr_size = msg_isdata(msg) ?
1091 		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1092 
1093 	if (unlikely((hdr_size < min_hdr_size) ||
1094 		     (size < hdr_size) ||
1095 		     (buf->len < size) ||
1096 		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1097 		return 0;
1098 
1099 	return pskb_may_pull(buf, hdr_size);
1100 }
1101 
1102 /**
1103  * tipc_rcv - process TIPC packets/messages arriving from off-node
1104  * @net: the applicable net namespace
1105  * @skb: TIPC packet
1106  * @b_ptr: pointer to the bearer the message arrived on
1107  *
1108  * Invoked with no locks held.  Bearer pointer must point to a valid bearer
1109  * structure (i.e. cannot be NULL), but bearer can be inactive.
1110  */
1111 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
1112 {
1113 	struct tipc_net *tn = net_generic(net, tipc_net_id);
1114 	struct sk_buff_head head;
1115 	struct tipc_node *n_ptr;
1116 	struct tipc_link *l_ptr;
1117 	struct sk_buff *skb1, *tmp;
1118 	struct tipc_msg *msg;
1119 	u32 seq_no;
1120 	u32 ackd;
1121 	u32 released;
1122 
1123 	skb2list(skb, &head);
1124 
1125 	while ((skb = __skb_dequeue(&head))) {
1126 		/* Ensure message is well-formed */
1127 		if (unlikely(!link_recv_buf_validate(skb)))
1128 			goto discard;
1129 
1130 		/* Ensure message data is a single contiguous unit */
1131 		if (unlikely(skb_linearize(skb)))
1132 			goto discard;
1133 
1134 		/* Handle arrival of a non-unicast link message */
1135 		msg = buf_msg(skb);
1136 
1137 		if (unlikely(msg_non_seq(msg))) {
1138 			if (msg_user(msg) ==  LINK_CONFIG)
1139 				tipc_disc_rcv(net, skb, b_ptr);
1140 			else
1141 				tipc_bclink_rcv(net, skb);
1142 			continue;
1143 		}
1144 
1145 		/* Discard unicast link messages destined for another node */
1146 		if (unlikely(!msg_short(msg) &&
1147 			     (msg_destnode(msg) != tn->own_addr)))
1148 			goto discard;
1149 
1150 		/* Locate neighboring node that sent message */
1151 		n_ptr = tipc_node_find(net, msg_prevnode(msg));
1152 		if (unlikely(!n_ptr))
1153 			goto discard;
1154 		tipc_node_lock(n_ptr);
1155 
1156 		/* Locate unicast link endpoint that should handle message */
1157 		l_ptr = n_ptr->links[b_ptr->identity];
1158 		if (unlikely(!l_ptr))
1159 			goto unlock;
1160 
1161 		/* Verify that communication with node is currently allowed */
1162 		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
1163 		    msg_user(msg) == LINK_PROTOCOL &&
1164 		    (msg_type(msg) == RESET_MSG ||
1165 		    msg_type(msg) == ACTIVATE_MSG) &&
1166 		    !msg_redundant_link(msg))
1167 			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
1168 
1169 		if (tipc_node_blocked(n_ptr))
1170 			goto unlock;
1171 
1172 		/* Validate message sequence number info */
1173 		seq_no = msg_seqno(msg);
1174 		ackd = msg_ack(msg);
1175 
1176 		/* Release acked messages */
1177 		if (n_ptr->bclink.recv_permitted)
1178 			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1179 
1180 		released = 0;
1181 		skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
1182 			if (skb1 == l_ptr->next_out ||
1183 			    more(buf_seqno(skb1), ackd))
1184 				break;
1185 			__skb_unlink(skb1, &l_ptr->outqueue);
1186 			kfree_skb(skb1);
1187 			released = 1;
1188 		}
1189 
1190 		/* Try sending any messages link endpoint has pending */
1191 		if (unlikely(l_ptr->next_out))
1192 			tipc_link_push_packets(l_ptr);
1193 
1194 		if (released && !skb_queue_empty(&l_ptr->wakeupq))
1195 			link_prepare_wakeup(l_ptr);
1196 
1197 		/* Process the incoming packet */
1198 		if (unlikely(!link_working_working(l_ptr))) {
1199 			if (msg_user(msg) == LINK_PROTOCOL) {
1200 				tipc_link_proto_rcv(l_ptr, skb);
1201 				link_retrieve_defq(l_ptr, &head);
1202 				skb = NULL;
1203 				goto unlock;
1204 			}
1205 
1206 			/* Traffic message. Conditionally activate link */
1207 			link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1208 
1209 			if (link_working_working(l_ptr)) {
1210 				/* Re-insert buffer in front of queue */
1211 				__skb_queue_head(&head, skb);
1212 				skb = NULL;
1213 				goto unlock;
1214 			}
1215 			goto unlock;
1216 		}
1217 
1218 		/* Link is now in state WORKING_WORKING */
1219 		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
1220 			link_handle_out_of_seq_msg(l_ptr, skb);
1221 			link_retrieve_defq(l_ptr, &head);
1222 			skb = NULL;
1223 			goto unlock;
1224 		}
1225 		l_ptr->next_in_no++;
1226 		if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
1227 			link_retrieve_defq(l_ptr, &head);
1228 
1229 		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1230 			l_ptr->stats.sent_acks++;
1231 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1232 		}
1233 		tipc_link_input(l_ptr, skb);
1234 		skb = NULL;
1235 unlock:
1236 		tipc_node_unlock(n_ptr);
1237 discard:
1238 		if (unlikely(skb))
1239 			kfree_skb(skb);
1240 	}
1241 }
1242 
1243 /* tipc_data_input - deliver data and name distr msgs to upper layer
1244  *
1245  * Consumes buffer if message is of right type
1246  * Node lock must be held
1247  */
1248 static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
1249 {
1250 	struct tipc_node *node = link->owner;
1251 	struct tipc_msg *msg = buf_msg(skb);
1252 	u32 dport = msg_destport(msg);
1253 
1254 	switch (msg_user(msg)) {
1255 	case TIPC_LOW_IMPORTANCE:
1256 	case TIPC_MEDIUM_IMPORTANCE:
1257 	case TIPC_HIGH_IMPORTANCE:
1258 	case TIPC_CRITICAL_IMPORTANCE:
1259 	case CONN_MANAGER:
1260 		if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
1261 			node->inputq = &link->inputq;
1262 			node->action_flags |= TIPC_MSG_EVT;
1263 		}
1264 		return true;
1265 	case NAME_DISTRIBUTOR:
1266 		node->bclink.recv_permitted = true;
1267 		node->namedq = &link->namedq;
1268 		skb_queue_tail(&link->namedq, skb);
1269 		if (skb_queue_len(&link->namedq) == 1)
1270 			node->action_flags |= TIPC_NAMED_MSG_EVT;
1271 		return true;
1272 	case MSG_BUNDLER:
1273 	case CHANGEOVER_PROTOCOL:
1274 	case MSG_FRAGMENTER:
1275 	case BCAST_PROTOCOL:
1276 		return false;
1277 	default:
1278 		pr_warn("Dropping received illegal msg type\n");
1279 		kfree_skb(skb);
1280 		return false;
1281 	}
1282 }
1283 
1284 /* tipc_link_input - process packet that has passed link protocol check
1285  *
1286  * Consumes buffer
1287  * Node lock must be held
1288  */
1289 static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
1290 {
1291 	struct tipc_node *node = link->owner;
1292 	struct tipc_msg *msg = buf_msg(skb);
1293 	struct sk_buff *iskb;
1294 	int pos = 0;
1295 
1296 	if (likely(tipc_data_input(link, skb)))
1297 		return;
1298 
1299 	switch (msg_user(msg)) {
1300 	case CHANGEOVER_PROTOCOL:
1301 		if (!tipc_link_tunnel_rcv(node, &skb))
1302 			break;
1303 		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
1304 			tipc_data_input(link, skb);
1305 			break;
1306 		}
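		/* else: fall through and unbundle the tunnelled bundle */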
1307 	case MSG_BUNDLER:
1308 		link->stats.recv_bundles++;
1309 		link->stats.recv_bundled += msg_msgcnt(msg);
1310 
1311 		while (tipc_msg_extract(skb, &iskb, &pos))
1312 			tipc_data_input(link, iskb);
1313 		break;
1314 	case MSG_FRAGMENTER:
1315 		link->stats.recv_fragments++;
1316 		if (tipc_buf_append(&link->reasm_buf, &skb)) {
1317 			link->stats.recv_fragmented++;
1318 			tipc_data_input(link, skb);
1319 		} else if (!link->reasm_buf) {
1320 			tipc_link_reset(link);
1321 		}
1322 		break;
1323 	case BCAST_PROTOCOL:
1324 		tipc_link_sync_rcv(node, skb);
1325 		break;
1326 	default:
1327 		break;
1328 	}
1329 }
1330 
1331 /**
1332  * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1333  *
1334  * Returns increase in queue length (i.e. 0 or 1)
1335  */
1336 u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
1337 {
1338 	struct sk_buff *skb1;
1339 	u32 seq_no = buf_seqno(skb);
1340 
1341 	/* Empty queue? */
1342 	if (skb_queue_empty(list)) {
1343 		__skb_queue_tail(list, skb);
1344 		return 1;
1345 	}
1346 
1347 	/* Last? */
1348 	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
1349 		__skb_queue_tail(list, skb);
1350 		return 1;
1351 	}
1352 
1353 	/* Locate insertion point in queue, then insert; discard if duplicate */
1354 	skb_queue_walk(list, skb1) {
1355 		u32 curr_seqno = buf_seqno(skb1);
1356 
1357 		if (seq_no == curr_seqno) {
1358 			kfree_skb(skb);
1359 			return 0;
1360 		}
1361 
1362 		if (less(seq_no, curr_seqno))
1363 			break;
1364 	}
1365 
1366 	__skb_queue_before(list, skb1, skb);
1367 	return 1;
1368 }
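
/* Ordering example (commentary): sequence numbers are 16 bits wide and
 * compared with wraparound, so if the deferred queue holds seqnos
 * 0xfffe and 0xffff, a new packet with seqno 0x0001 is appended after
 * them: less(0xffff, 0x0001) holds because mod(0x0001 - 0xffff) == 2,
 * which is below the 32768 wraparound threshold.
 */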
1369 
1370 /*
1371  * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1372  */
1373 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1374 				       struct sk_buff *buf)
1375 {
1376 	u32 seq_no = buf_seqno(buf);
1377 
1378 	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1379 		tipc_link_proto_rcv(l_ptr, buf);
1380 		return;
1381 	}
1382 
1383 	/* Record OOS packet arrival (force mismatch on next timeout) */
1384 	l_ptr->checkpoint--;
1385 
1386 	/*
1387 	 * Discard packet if a duplicate; otherwise add it to deferred queue
1388 	 * and notify peer of gap as per protocol specification
1389 	 */
1390 	if (less(seq_no, mod(l_ptr->next_in_no))) {
1391 		l_ptr->stats.duplicates++;
1392 		kfree_skb(buf);
1393 		return;
1394 	}
1395 
1396 	if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
1397 		l_ptr->stats.deferred_recv++;
1398 		TIPC_SKB_CB(buf)->deferred = true;
1399 		if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
1400 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1401 	} else {
1402 		l_ptr->stats.duplicates++;
1403 	}
1404 }
1405 
1406 /*
1407  * Send protocol message to the other endpoint.
1408  */
1409 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1410 			  u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1411 {
1412 	struct sk_buff *buf = NULL;
1413 	struct tipc_msg *msg = l_ptr->pmsg;
1414 	u32 msg_size = sizeof(l_ptr->proto_msg);
1415 	int r_flag;
1416 
1417 	/* Don't send protocol message during link changeover */
1418 	if (l_ptr->exp_msg_count)
1419 		return;
1420 
1421 	/* Abort non-RESET send if communication with node is prohibited */
1422 	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
1423 		return;
1424 
1425 	/* Create protocol message with "out-of-sequence" sequence number */
1426 	msg_set_type(msg, msg_typ);
1427 	msg_set_net_plane(msg, l_ptr->net_plane);
1428 	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1429 	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
1430 
1431 	if (msg_typ == STATE_MSG) {
1432 		u32 next_sent = mod(l_ptr->next_out_no);
1433 
1434 		if (!tipc_link_is_up(l_ptr))
1435 			return;
1436 		if (l_ptr->next_out)
1437 			next_sent = buf_seqno(l_ptr->next_out);
1438 		msg_set_next_sent(msg, next_sent);
1439 		if (!skb_queue_empty(&l_ptr->deferred_queue)) {
1440 			u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue));
1441 			gap = mod(rec - mod(l_ptr->next_in_no));
1442 		}
1443 		msg_set_seq_gap(msg, gap);
1444 		if (gap)
1445 			l_ptr->stats.sent_nacks++;
1446 		msg_set_link_tolerance(msg, tolerance);
1447 		msg_set_linkprio(msg, priority);
1448 		msg_set_max_pkt(msg, ack_mtu);
1449 		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1450 		msg_set_probe(msg, probe_msg != 0);
1451 		if (probe_msg) {
1452 			u32 mtu = l_ptr->max_pkt;
1453 
1454 			if ((mtu < l_ptr->max_pkt_target) &&
1455 			    link_working_working(l_ptr) &&
1456 			    l_ptr->fsm_msg_cnt) {
1457 				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1458 				if (l_ptr->max_pkt_probes == 10) {
1459 					l_ptr->max_pkt_target = (msg_size - 4);
1460 					l_ptr->max_pkt_probes = 0;
1461 					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1462 				}
1463 				l_ptr->max_pkt_probes++;
1464 			}
1465 
1466 			l_ptr->stats.sent_probes++;
1467 		}
1468 		l_ptr->stats.sent_states++;
1469 	} else {		/* RESET_MSG or ACTIVATE_MSG */
1470 		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1471 		msg_set_seq_gap(msg, 0);
1472 		msg_set_next_sent(msg, 1);
1473 		msg_set_probe(msg, 0);
1474 		msg_set_link_tolerance(msg, l_ptr->tolerance);
1475 		msg_set_linkprio(msg, l_ptr->priority);
1476 		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1477 	}
1478 
1479 	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1480 	msg_set_redundant_link(msg, r_flag);
1481 	msg_set_linkprio(msg, l_ptr->priority);
1482 	msg_set_size(msg, msg_size);
1483 
1484 	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1485 
1486 	buf = tipc_buf_acquire(msg_size);
1487 	if (!buf)
1488 		return;
1489 
1490 	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1491 	buf->priority = TC_PRIO_CONTROL;
1492 
1493 	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
1494 			 &l_ptr->media_addr);
1495 	l_ptr->unacked_window = 0;
1496 	kfree_skb(buf);
1497 }
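
/* Probe size arithmetic, worked example (commentary): with max_pkt ==
 * 1500 and max_pkt_target == 4096, a probe is sized
 * (1500 + (4096 - 1500)/2 + 2) & ~3 == 2800 bytes, i.e. a binary
 * search between the verified and the desired packet size. After 10
 * unanswered probes at one size, max_pkt_target is lowered to 4 bytes
 * below that probe size and the search continues from there.
 */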
1498 
1499 /*
1500  * Receive protocol message:
1501  * Note that the network plane id propagates through the network, and may
1502  * change at any time. The node with the lowest address rules.
1503  */
1504 static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
1505 				struct sk_buff *buf)
1506 {
1507 	u32 rec_gap = 0;
1508 	u32 max_pkt_info;
1509 	u32 max_pkt_ack;
1510 	u32 msg_tol;
1511 	struct tipc_msg *msg = buf_msg(buf);
1512 
1513 	/* Discard protocol message during link changeover */
1514 	if (l_ptr->exp_msg_count)
1515 		goto exit;
1516 
1517 	if (l_ptr->net_plane != msg_net_plane(msg))
1518 		if (link_own_addr(l_ptr) > msg_prevnode(msg))
1519 			l_ptr->net_plane = msg_net_plane(msg);
1520 
1521 	switch (msg_type(msg)) {
1522 
1523 	case RESET_MSG:
1524 		if (!link_working_unknown(l_ptr) &&
1525 		    (l_ptr->peer_session != INVALID_SESSION)) {
1526 			if (less_eq(msg_session(msg), l_ptr->peer_session))
1527 				break; /* duplicate or old reset: ignore */
1528 		}
1529 
1530 		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
1531 				link_working_unknown(l_ptr))) {
1532 			/*
1533 			 * peer has lost contact -- don't allow peer's links
1534 			 * to reactivate before we recognize loss & clean up
1535 			 */
1536 			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
1537 		}
1538 
1539 		link_state_event(l_ptr, RESET_MSG);
1540 
1541 		/* fall through */
1542 	case ACTIVATE_MSG:
1543 		/* Update link settings according to the other endpoint's values */
1544 		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
1545 
1546 		msg_tol = msg_link_tolerance(msg);
1547 		if (msg_tol > l_ptr->tolerance)
1548 			link_set_supervision_props(l_ptr, msg_tol);
1549 
1550 		if (msg_linkprio(msg) > l_ptr->priority)
1551 			l_ptr->priority = msg_linkprio(msg);
1552 
1553 		max_pkt_info = msg_max_pkt(msg);
1554 		if (max_pkt_info) {
1555 			if (max_pkt_info < l_ptr->max_pkt_target)
1556 				l_ptr->max_pkt_target = max_pkt_info;
1557 			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
1558 				l_ptr->max_pkt = l_ptr->max_pkt_target;
1559 		} else {
1560 			l_ptr->max_pkt = l_ptr->max_pkt_target;
1561 		}
1562 
1563 		/* Synchronize broadcast link info, if not done previously */
1564 		if (!tipc_node_is_up(l_ptr->owner)) {
1565 			l_ptr->owner->bclink.last_sent =
1566 				l_ptr->owner->bclink.last_in =
1567 				msg_last_bcast(msg);
1568 			l_ptr->owner->bclink.oos_state = 0;
1569 		}
1570 
1571 		l_ptr->peer_session = msg_session(msg);
1572 		l_ptr->peer_bearer_id = msg_bearer_id(msg);
1573 
1574 		if (msg_type(msg) == ACTIVATE_MSG)
1575 			link_state_event(l_ptr, ACTIVATE_MSG);
1576 		break;
1577 	case STATE_MSG:
1578 
1579 		msg_tol = msg_link_tolerance(msg);
1580 		if (msg_tol)
1581 			link_set_supervision_props(l_ptr, msg_tol);
1582 
1583 		if (msg_linkprio(msg) &&
1584 		    (msg_linkprio(msg) != l_ptr->priority)) {
1585 			pr_debug("%s<%s>, priority change %u->%u\n",
1586 				 link_rst_msg, l_ptr->name,
1587 				 l_ptr->priority, msg_linkprio(msg));
1588 			l_ptr->priority = msg_linkprio(msg);
1589 			tipc_link_reset(l_ptr); /* Enforce change to take effect */
1590 			break;
1591 		}
1592 
1593 		/* Record reception; force mismatch at next timeout: */
1594 		l_ptr->checkpoint--;
1595 
1596 		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1597 		l_ptr->stats.recv_states++;
1598 		if (link_reset_unknown(l_ptr))
1599 			break;
1600 
1601 		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
1602 			rec_gap = mod(msg_next_sent(msg) -
1603 				      mod(l_ptr->next_in_no));
1604 		}
1605 
1606 		max_pkt_ack = msg_max_pkt(msg);
1607 		if (max_pkt_ack > l_ptr->max_pkt) {
1608 			l_ptr->max_pkt = max_pkt_ack;
1609 			l_ptr->max_pkt_probes = 0;
1610 		}
1611 
1612 		max_pkt_ack = 0;
1613 		if (msg_probe(msg)) {
1614 			l_ptr->stats.recv_probes++;
1615 			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
1616 				max_pkt_ack = msg_size(msg);
1617 		}
1618 
1619 		/* Send protocol message before retransmits, to reduce loss risk */
1620 		if (l_ptr->owner->bclink.recv_permitted)
1621 			tipc_bclink_update_link_state(l_ptr->owner,
1622 						      msg_last_bcast(msg));
1623 
1624 		if (rec_gap || (msg_probe(msg))) {
1625 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
1626 					     0, max_pkt_ack);
1627 		}
1628 		if (msg_seq_gap(msg)) {
1629 			l_ptr->stats.recv_nacks++;
1630 			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue),
1631 					     msg_seq_gap(msg));
1632 		}
1633 		break;
1634 	}
1635 exit:
1636 	kfree_skb(buf);
1637 }
1638 
1639 
1640 /* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
1641  * a different bearer. Owner node is locked.
1642  */
1643 static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1644 				  struct tipc_msg *tunnel_hdr,
1645 				  struct tipc_msg *msg,
1646 				  u32 selector)
1647 {
1648 	struct tipc_link *tunnel;
1649 	struct sk_buff *skb;
1650 	u32 length = msg_size(msg);
1651 
1652 	tunnel = l_ptr->owner->active_links[selector & 1];
1653 	if (!tipc_link_is_up(tunnel)) {
1654 		pr_warn("%stunnel link no longer available\n", link_co_err);
1655 		return;
1656 	}
1657 	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
1658 	skb = tipc_buf_acquire(length + INT_H_SIZE);
1659 	if (!skb) {
1660 		pr_warn("%sunable to send tunnel msg\n", link_co_err);
1661 		return;
1662 	}
1663 	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
1664 	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
1665 	__tipc_link_xmit_skb(tunnel, skb);
1666 }
1667 
1668 
1669 /* tipc_link_failover_send_queue(): A link has gone down, but a second
1670  * link is still active. We can do failover. Tunnel the failing link's
1671  * whole send queue via the remaining link. This way, we don't lose
1672  * any packets, and sequence order is preserved for subsequent traffic
1673  * sent over the remaining link. Owner node is locked.
1674  */
1675 void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1676 {
1677 	u32 msgcount = skb_queue_len(&l_ptr->outqueue);
1678 	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
1679 	struct tipc_msg tunnel_hdr;
1680 	struct sk_buff *skb;
1681 	int split_bundles;
1682 
1683 	if (!tunnel)
1684 		return;
1685 
1686 	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
1687 		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
1688 	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1689 	msg_set_msgcnt(&tunnel_hdr, msgcount);
1690 
1691 	if (skb_queue_empty(&l_ptr->outqueue)) {
1692 		skb = tipc_buf_acquire(INT_H_SIZE);
1693 		if (skb) {
1694 			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
1695 			msg_set_size(&tunnel_hdr, INT_H_SIZE);
1696 			__tipc_link_xmit_skb(tunnel, skb);
1697 		} else {
1698 			pr_warn("%sunable to send changeover msg\n",
1699 				link_co_err);
1700 		}
1701 		return;
1702 	}
1703 
1704 	split_bundles = (l_ptr->owner->active_links[0] !=
1705 			 l_ptr->owner->active_links[1]);
1706 
1707 	skb_queue_walk(&l_ptr->outqueue, skb) {
1708 		struct tipc_msg *msg = buf_msg(skb);
1709 
1710 		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
1711 			struct tipc_msg *m = msg_get_wrapped(msg);
1712 			unchar *pos = (unchar *)m;
1713 
1714 			msgcount = msg_msgcnt(msg);
1715 			while (msgcount--) {
1716 				msg_set_seqno(m, msg_seqno(msg));
1717 				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
1718 						      msg_link_selector(m));
1719 				pos += align(msg_size(m));
1720 				m = (struct tipc_msg *)pos;
1721 			}
1722 		} else {
1723 			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
1724 					      msg_link_selector(msg));
1725 		}
1726 	}
1727 }
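
/* Failover in outline (commentary): every packet in the failed link's
 * send queue is wrapped in a CHANGEOVER_PROTOCOL/ORIGINAL_MSG header
 * carrying the failed bearer id and the total message count, so the
 * peer (see tipc_link_failover_rcv()) knows how many tunnelled packets
 * to expect. When the node still has two distinct active links,
 * bundles are unpacked and each member is tunnelled individually so it
 * follows its own link selector ("split_bundles").
 */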
1728 
1729 /* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
1730  * duplicate of the first link's send queue via the new link. This way, we
1731  * are guaranteed that currently queued packets from a socket are delivered
1732  * before future traffic from the same socket, even if this is using the
1733  * new link. The last arriving copy of each duplicate packet is dropped at
1734  * the receiving end by the regular protocol check, so packet cardinality
1735  * and sequence order is preserved per sender/receiver socket pair.
1736  * Owner node is locked.
1737  */
1738 void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
1739 			      struct tipc_link *tunnel)
1740 {
1741 	struct sk_buff *skb;
1742 	struct tipc_msg tunnel_hdr;
1743 
1744 	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
1745 		      DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
1746 	msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
1747 	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1748 	skb_queue_walk(&l_ptr->outqueue, skb) {
1749 		struct sk_buff *outskb;
1750 		struct tipc_msg *msg = buf_msg(skb);
1751 		u32 length = msg_size(msg);
1752 
1753 		if (msg_user(msg) == MSG_BUNDLER)
1754 			msg_set_type(msg, CLOSED_MSG);
1755 	msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Ack last received pkt */
1756 		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1757 		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
1758 		outskb = tipc_buf_acquire(length + INT_H_SIZE);
1759 		if (outskb == NULL) {
1760 			pr_warn("%sunable to send duplicate msg\n",
1761 				link_co_err);
1762 			return;
1763 		}
1764 		skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
1765 		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
1766 					       length);
1767 		__tipc_link_xmit_skb(tunnel, outskb);
1768 		if (!tipc_link_is_up(l_ptr))
1769 			return;
1770 	}
1771 }
1772 
1773 /**
1774  * buf_extract - extracts embedded TIPC message from another message
1775  * @skb: encapsulating message buffer
1776  * @from_pos: offset to extract from
1777  *
1778  * Returns a new message buffer containing the embedded message, or NULL
1779  * if allocation fails. The encapsulating buffer is left unchanged.
1780  */
1781 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
1782 {
1783 	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
1784 	u32 size = msg_size(msg);
1785 	struct sk_buff *eb;
1786 
1787 	eb = tipc_buf_acquire(size);
1788 	if (eb)
1789 		skb_copy_to_linear_data(eb, msg, size);
1790 	return eb;
1791 }
1792 
1793 /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
1794  * Owner node is locked.
1795  */
1796 static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
1797 			      struct sk_buff *t_buf)
1798 {
1799 	struct sk_buff *buf;
1800 
1801 	if (!tipc_link_is_up(l_ptr))
1802 		return;
1803 
1804 	buf = buf_extract(t_buf, INT_H_SIZE);
1805 	if (buf == NULL) {
1806 		pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
1807 		return;
1808 	}
1809 
1810 	/* Add buffer to deferred queue, if applicable: */
1811 	link_handle_out_of_seq_msg(l_ptr, buf);
1812 }
1813 
1814 /*  tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
1815  *  Owner node is locked.
1816  */
1817 static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
1818 					      struct sk_buff *t_buf)
1819 {
1820 	struct tipc_msg *t_msg = buf_msg(t_buf);
1821 	struct sk_buff *buf = NULL;
1822 	struct tipc_msg *msg;
1823 
1824 	if (tipc_link_is_up(l_ptr))
1825 		tipc_link_reset(l_ptr);
1826 
1827 	/* First failover packet? */
1828 	if (l_ptr->exp_msg_count == START_CHANGEOVER)
1829 		l_ptr->exp_msg_count = msg_msgcnt(t_msg);
1830 
1831 	/* Should there be an inner packet? */
1832 	if (l_ptr->exp_msg_count) {
1833 		l_ptr->exp_msg_count--;
1834 		buf = buf_extract(t_buf, INT_H_SIZE);
1835 		if (buf == NULL) {
1836 			pr_warn("%sno inner failover pkt\n", link_co_err);
1837 			goto exit;
1838 		}
1839 		msg = buf_msg(buf);
1840 
1841 		if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
1842 			kfree_skb(buf);
1843 			buf = NULL;
1844 			goto exit;
1845 		}
1846 		if (msg_user(msg) == MSG_FRAGMENTER) {
1847 			l_ptr->stats.recv_fragments++;
1848 			tipc_buf_append(&l_ptr->reasm_buf, &buf);
1849 		}
1850 	}
1851 exit:
1852 	if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
1853 		tipc_link_delete(l_ptr);
1854 	return buf;
1855 }
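
/* Editorial note: exp_msg_count starts at START_CHANGEOVER and is
 * latched to the msgcnt carried by the first ORIGINAL_MSG; it then
 * counts down once per tunnelled packet. Only when it reaches zero is
 * the failover considered complete, at which point a link already
 * flagged LINK_STOPPED can finally be deleted.
 */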
1856 
1857 /*  tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
1858  *  via the other link as a result of a failover (ORIGINAL_MSG) or
1859  *  a new active link (DUPLICATE_MSG). Failover packets are
1860  *  returned to the active link for delivery upwards.
1861  *  Owner node is locked.
1862  */
1863 static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
1864 				struct sk_buff **buf)
1865 {
1866 	struct sk_buff *t_buf = *buf;
1867 	struct tipc_link *l_ptr;
1868 	struct tipc_msg *t_msg = buf_msg(t_buf);
1869 	u32 bearer_id = msg_bearer_id(t_msg);
1870 
1871 	*buf = NULL;
1872 
1873 	if (bearer_id >= MAX_BEARERS)
1874 		goto exit;
1875 
1876 	l_ptr = n_ptr->links[bearer_id];
1877 	if (!l_ptr)
1878 		goto exit;
1879 
1880 	if (msg_type(t_msg) == DUPLICATE_MSG)
1881 		tipc_link_dup_rcv(l_ptr, t_buf);
1882 	else if (msg_type(t_msg) == ORIGINAL_MSG)
1883 		*buf = tipc_link_failover_rcv(l_ptr, t_buf);
1884 	else
1885 		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
1886 exit:
1887 	kfree_skb(t_buf);
1888 	return *buf != NULL;
1889 }
1890 
1891 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
1892 {
1893 	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
1894 
1895 	if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1896 		return;
1897 
1898 	l_ptr->tolerance = tol;
1899 	l_ptr->cont_intv = msecs_to_jiffies(intv);
1900 	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
1901 }
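
/* Worked example (editorial): with tol = 1500 ms the probe interval
 * is capped at min(tol / 4, 500) = 375 ms, and abort_limit becomes
 * 1500 / (375 / 4) = 16 with integer division, i.e. the peer is given
 * up after roughly 16 silent continuity intervals (jiffies rounding
 * may shift these numbers slightly).
 */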
1902 
1903 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
1904 {
1905 	/* Data messages from this node, including FIRST_FRAGM */
1906 	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
1907 	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
1908 	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
1909 	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
1910 	/* Transiting data messages, including FIRST_FRAGM */
1911 	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
1912 	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
1913 	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
1914 	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
1915 	l_ptr->queue_limit[CONN_MANAGER] = 1200;
1916 	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
1917 	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
1918 	/* FRAGMENT and LAST_FRAGMENT packets */
1919 	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
1920 }
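
/* Worked example (editorial): with window = 50 the send-queue limits
 * become 50, 64, 80 and 96 packets for LOW, MEDIUM, HIGH and CRITICAL
 * importance respectively ((window / 3) * N with integer division),
 * while the limits for transiting and protocol messages are fixed
 * constants independent of the configured window.
 */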
1921 
1922 /* tipc_link_find_owner - locate owner node of link by link's name
1923  * @net: the applicable net namespace
1924  * @link_name: pointer to link name string
1925  * @bearer_id: pointer to index in 'node->links' array where the link was found.
1926  *
1927  * Returns pointer to node owning the link, or NULL if no matching link is found.
1928  */
1929 static struct tipc_node *tipc_link_find_owner(struct net *net,
1930 					      const char *link_name,
1931 					      unsigned int *bearer_id)
1932 {
1933 	struct tipc_net *tn = net_generic(net, tipc_net_id);
1934 	struct tipc_link *l_ptr;
1935 	struct tipc_node *n_ptr;
1936 	struct tipc_node *found_node = NULL;
1937 	int i;
1938 
1939 	*bearer_id = 0;
1940 	rcu_read_lock();
1941 	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
1942 		tipc_node_lock(n_ptr);
1943 		for (i = 0; i < MAX_BEARERS; i++) {
1944 			l_ptr = n_ptr->links[i];
1945 			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
1946 				*bearer_id = i;
1947 				found_node = n_ptr;
1948 				break;
1949 			}
1950 		}
1951 		tipc_node_unlock(n_ptr);
1952 		if (found_node)
1953 			break;
1954 	}
1955 	rcu_read_unlock();
1956 
1957 	return found_node;
1958 }
1959 
1960 /**
1961  * link_reset_statistics - reset link statistics
1962  * @l_ptr: pointer to link
1963  */
1964 static void link_reset_statistics(struct tipc_link *l_ptr)
1965 {
1966 	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
1967 	l_ptr->stats.sent_info = l_ptr->next_out_no;
1968 	l_ptr->stats.recv_info = l_ptr->next_in_no;
1969 }
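
/* Editorial note: sent_info/recv_info are re-seeded with the current
 * sequence counters so that traffic since the last reset can later be
 * derived by subtracting them from next_out_no/next_in_no.
 */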
1970 
1971 static void link_print(struct tipc_link *l_ptr, const char *str)
1972 {
1973 	struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
1974 	struct tipc_bearer *b_ptr;
1975 
1976 	rcu_read_lock();
1977 	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
1978 	if (b_ptr)
1979 		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
1980 	rcu_read_unlock();
1981 
1982 	if (link_working_unknown(l_ptr))
1983 		pr_cont(":WU\n");
1984 	else if (link_reset_reset(l_ptr))
1985 		pr_cont(":RR\n");
1986 	else if (link_reset_unknown(l_ptr))
1987 		pr_cont(":RU\n");
1988 	else if (link_working_working(l_ptr))
1989 		pr_cont(":WW\n");
1990 	else
1991 		pr_cont("\n");
1992 }
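
/* Editorial note: the printed suffixes map to the four link FSM
 * states: WW = working/working, WU = working/unknown,
 * RU = reset/unknown, RR = reset/reset.
 */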
1993 
1994 /* Parse and validate nested (link) properties valid for media, bearer and link
1995  */
1996 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1997 {
1998 	int err;
1999 
2000 	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
2001 			       tipc_nl_prop_policy);
2002 	if (err)
2003 		return err;
2004 
2005 	if (props[TIPC_NLA_PROP_PRIO]) {
2006 		u32 prio;
2007 
2008 		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2009 		if (prio > TIPC_MAX_LINK_PRI)
2010 			return -EINVAL;
2011 	}
2012 
2013 	if (props[TIPC_NLA_PROP_TOL]) {
2014 		u32 tol;
2015 
2016 		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2017 		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2018 			return -EINVAL;
2019 	}
2020 
2021 	if (props[TIPC_NLA_PROP_WIN]) {
2022 		u32 win;
2023 
2024 		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2025 		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
2026 			return -EINVAL;
2027 	}
2028 
2029 	return 0;
2030 }
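
/* Minimal usage sketch (editorial; 'attrs' and 'err' are assumed to
 * come from a surrounding genl handler, as in tipc_nl_link_set() below):
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *	u32 win;
 *
 *	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
 *	if (err)
 *		return err;
 *	if (props[TIPC_NLA_PROP_WIN])
 *		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
 */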
2031 
2032 int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
2033 {
2034 	int err;
2035 	int res = 0;
2036 	int bearer_id;
2037 	char *name;
2038 	struct tipc_link *link;
2039 	struct tipc_node *node;
2040 	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2041 	struct net *net = sock_net(skb->sk);
2042 
2043 	if (!info->attrs[TIPC_NLA_LINK])
2044 		return -EINVAL;
2045 
2046 	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
2047 			       info->attrs[TIPC_NLA_LINK],
2048 			       tipc_nl_link_policy);
2049 	if (err)
2050 		return err;
2051 
2052 	if (!attrs[TIPC_NLA_LINK_NAME])
2053 		return -EINVAL;
2054 
2055 	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2056 
2057 	node = tipc_link_find_owner(net, name, &bearer_id);
2058 	if (!node)
2059 		return -EINVAL;
2060 
2061 	tipc_node_lock(node);
2062 
2063 	link = node->links[bearer_id];
2064 	if (!link) {
2065 		res = -EINVAL;
2066 		goto out;
2067 	}
2068 
2069 	if (attrs[TIPC_NLA_LINK_PROP]) {
2070 		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
2071 
2072 		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
2073 					      props);
2074 		if (err) {
2075 			res = err;
2076 			goto out;
2077 		}
2078 
2079 		if (props[TIPC_NLA_PROP_TOL]) {
2080 			u32 tol;
2081 
2082 			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2083 			link_set_supervision_props(link, tol);
2084 			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
2085 		}
2086 		if (props[TIPC_NLA_PROP_PRIO]) {
2087 			u32 prio;
2088 
2089 			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2090 			link->priority = prio;
2091 			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
2092 		}
2093 		if (props[TIPC_NLA_PROP_WIN]) {
2094 			u32 win;
2095 
2096 			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2097 			tipc_link_set_queue_limits(link, win);
2098 		}
2099 	}
2100 
2101 out:
2102 	tipc_node_unlock(node);
2103 
2104 	return res;
2105 }
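
/* Editorial sketch of the request this handler expects; the attribute
 * names are the real TIPC netlink attributes, the values illustrative:
 *
 *	TIPC_NLA_LINK
 *	    TIPC_NLA_LINK_NAME	"1.1.1:eth0-1.1.2:eth0"
 *	    TIPC_NLA_LINK_PROP
 *		TIPC_NLA_PROP_TOL	1500
 *		TIPC_NLA_PROP_PRIO	10
 *		TIPC_NLA_PROP_WIN	50
 *
 * Any subset of the three properties may be supplied; each one present
 * is validated and applied under the owner node's lock.
 */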
2106 
2107 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2108 {
2109 	int i;
2110 	struct nlattr *stats;
2111 
2112 	struct nla_map {
2113 		u32 key;
2114 		u32 val;
2115 	};
2116 
2117 	struct nla_map map[] = {
2118 		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
2119 		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2120 		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2121 		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2122 		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
2123 		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
2124 		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2125 		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2126 		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2127 		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2128 		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2129 			s->msg_length_counts : 1},
2130 		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2131 		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2132 		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2133 		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2134 		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2135 		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2136 		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2137 		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2138 		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2139 		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
2140 		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2141 		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2142 		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2143 		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
2144 		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2145 		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2146 		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2147 		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2148 		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2149 		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2150 		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2151 		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2152 			(s->accu_queue_sz / s->queue_sz_counts) : 0}
2153 	};
2154 
2155 	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
2156 	if (!stats)
2157 		return -EMSGSIZE;
2158 
2159 	for (i = 0; i < ARRAY_SIZE(map); i++)
2160 		if (nla_put_u32(skb, map[i].key, map[i].val))
2161 			goto msg_full;
2162 
2163 	nla_nest_end(skb, stats);
2164 
2165 	return 0;
2166 msg_full:
2167 	nla_nest_cancel(skb, stats);
2168 
2169 	return -EMSGSIZE;
2170 }
2171 
2172 /* Caller should hold appropriate locks to protect the link */
2173 static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2174 			      struct tipc_link *link)
2175 {
2176 	int err;
2177 	void *hdr;
2178 	struct nlattr *attrs;
2179 	struct nlattr *prop;
2180 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2181 
2182 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2183 			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2184 	if (!hdr)
2185 		return -EMSGSIZE;
2186 
2187 	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2188 	if (!attrs)
2189 		goto msg_full;
2190 
2191 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2192 		goto attr_msg_full;
2193 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
2194 			tipc_cluster_mask(tn->own_addr)))
2195 		goto attr_msg_full;
2196 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
2197 		goto attr_msg_full;
2198 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
2199 		goto attr_msg_full;
2200 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
2201 		goto attr_msg_full;
2202 
2203 	if (tipc_link_is_up(link))
2204 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2205 			goto attr_msg_full;
2206 	if (tipc_link_is_active(link))
2207 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2208 			goto attr_msg_full;
2209 
2210 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2211 	if (!prop)
2212 		goto attr_msg_full;
2213 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2214 		goto prop_msg_full;
2215 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2216 		goto prop_msg_full;
2217 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2218 			link->queue_limit[TIPC_LOW_IMPORTANCE]))
2219 		goto prop_msg_full;
2222 	nla_nest_end(msg->skb, prop);
2223 
2224 	err = __tipc_nl_add_stats(msg->skb, &link->stats);
2225 	if (err)
2226 		goto attr_msg_full;
2227 
2228 	nla_nest_end(msg->skb, attrs);
2229 	genlmsg_end(msg->skb, hdr);
2230 
2231 	return 0;
2232 
2233 prop_msg_full:
2234 	nla_nest_cancel(msg->skb, prop);
2235 attr_msg_full:
2236 	nla_nest_cancel(msg->skb, attrs);
2237 msg_full:
2238 	genlmsg_cancel(msg->skb, hdr);
2239 
2240 	return -EMSGSIZE;
2241 }
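
/* Editorial note: the cancel labels above unwind in reverse nesting
 * order (prop, then attrs, then the genlmsg header), so on -EMSGSIZE
 * the partially built message is rolled back completely and the dump
 * can be retried with a fresh skb.
 */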
2242 
2243 /* Caller should hold node lock  */
2244 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2245 				    struct tipc_node *node, u32 *prev_link)
2246 {
2247 	u32 i;
2248 	int err;
2249 
2250 	for (i = *prev_link; i < MAX_BEARERS; i++) {
2251 		*prev_link = i;
2252 
2253 		if (!node->links[i])
2254 			continue;
2255 
2256 		err = __tipc_nl_add_link(net, msg, node->links[i]);
2257 		if (err)
2258 			return err;
2259 	}
2260 	*prev_link = 0;
2261 
2262 	return 0;
2263 }
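
/* Editorial note: *prev_link is the resume cursor for an interrupted
 * dump. It records the bearer index reached when the skb filled up
 * and is reset to 0 only after all of this node's links have been
 * added, so the next node starts again from bearer 0.
 */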
2264 
2265 int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
2266 {
2267 	struct net *net = sock_net(skb->sk);
2268 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2269 	struct tipc_node *node;
2270 	struct tipc_nl_msg msg;
2271 	u32 prev_node = cb->args[0];
2272 	u32 prev_link = cb->args[1];
2273 	int done = cb->args[2];
2274 	int err;
2275 
2276 	if (done)
2277 		return 0;
2278 
2279 	msg.skb = skb;
2280 	msg.portid = NETLINK_CB(cb->skb).portid;
2281 	msg.seq = cb->nlh->nlmsg_seq;
2282 
2283 	rcu_read_lock();
2284 
2285 	if (prev_node) {
2286 		node = tipc_node_find(net, prev_node);
2287 		if (!node) {
2288 			/* We never set seq or call nl_dump_check_consistent(),
2289 			 * so setting prev_seq here will cause the consistency
2290 			 * check to fail in the netlink callback handler,
2291 			 * resulting in the last NLMSG_DONE message having the
2292 			 * NLM_F_DUMP_INTR flag set.
2293 			 */
2294 			cb->prev_seq = 1;
2295 			goto out;
2296 		}
2297 
2298 		list_for_each_entry_continue_rcu(node, &tn->node_list,
2299 						 list) {
2300 			tipc_node_lock(node);
2301 			err = __tipc_nl_add_node_links(net, &msg, node,
2302 						       &prev_link);
2303 			tipc_node_unlock(node);
2304 			if (err)
2305 				goto out;
2306 
2307 			prev_node = node->addr;
2308 		}
2309 	} else {
2310 		err = tipc_nl_add_bc_link(net, &msg);
2311 		if (err)
2312 			goto out;
2313 
2314 		list_for_each_entry_rcu(node, &tn->node_list, list) {
2315 			tipc_node_lock(node);
2316 			err = __tipc_nl_add_node_links(net, &msg, node,
2317 						       &prev_link);
2318 			tipc_node_unlock(node);
2319 			if (err)
2320 				goto out;
2321 
2322 			prev_node = node->addr;
2323 		}
2324 	}
2325 	done = 1;
2326 out:
2327 	rcu_read_unlock();
2328 
2329 	cb->args[0] = prev_node;
2330 	cb->args[1] = prev_link;
2331 	cb->args[2] = done;
2332 
2333 	return skb->len;
2334 }
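
/* Editorial note: cb->args[] persists between dump invocations, so
 * (prev_node, prev_link, done) form a resume cursor. The first pass
 * also emits the broadcast link; later passes pick up at the node and
 * bearer where the previous skb ran out of space, and 'done' makes
 * the final invocation return 0 so netlink can append NLMSG_DONE.
 */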
2335 
2336 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
2337 {
2338 	struct net *net = genl_info_net(info);
2339 	struct sk_buff *ans_skb;
2340 	struct tipc_nl_msg msg;
2341 	struct tipc_link *link;
2342 	struct tipc_node *node;
2343 	char *name;
2344 	int bearer_id;
2345 	int err;
2346 
2347 	if (!info->attrs[TIPC_NLA_LINK_NAME])
2348 		return -EINVAL;
2349 
2350 	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
2351 	node = tipc_link_find_owner(net, name, &bearer_id);
2352 	if (!node)
2353 		return -EINVAL;
2354 
2355 	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2356 	if (!ans_skb)
2357 		return -ENOMEM;
2358 
2359 	msg.skb = ans_skb;
2360 	msg.portid = info->snd_portid;
2361 	msg.seq = info->snd_seq;
2362 
2363 	tipc_node_lock(node);
2364 	link = node->links[bearer_id];
2365 	if (!link) {
2366 		err = -EINVAL;
2367 		goto err_out;
2368 	}
2369 
2370 	err = __tipc_nl_add_link(net, &msg, link);
2371 	if (err)
2372 		goto err_out;
2373 
2374 	tipc_node_unlock(node);
2375 
2376 	return genlmsg_reply(ans_skb, info);
2377 
2378 err_out:
2379 	tipc_node_unlock(node);
2380 	nlmsg_free(ans_skb);
2381 
2382 	return err;
2383 }
2384 
2385 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
2386 {
2387 	int err;
2388 	char *link_name;
2389 	unsigned int bearer_id;
2390 	struct tipc_link *link;
2391 	struct tipc_node *node;
2392 	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2393 	struct net *net = sock_net(skb->sk);
2394 
2395 	if (!info->attrs[TIPC_NLA_LINK])
2396 		return -EINVAL;
2397 
2398 	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
2399 			       info->attrs[TIPC_NLA_LINK],
2400 			       tipc_nl_link_policy);
2401 	if (err)
2402 		return err;
2403 
2404 	if (!attrs[TIPC_NLA_LINK_NAME])
2405 		return -EINVAL;
2406 
2407 	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2408 
2409 	if (strcmp(link_name, tipc_bclink_name) == 0)
2410 		return tipc_bclink_reset_stats(net);
2415 
2416 	node = tipc_link_find_owner(net, link_name, &bearer_id);
2417 	if (!node)
2418 		return -EINVAL;
2419 
2420 	tipc_node_lock(node);
2421 
2422 	link = node->links[bearer_id];
2423 	if (!link) {
2424 		tipc_node_unlock(node);
2425 		return -EINVAL;
2426 	}
2427 
2428 	link_reset_statistics(link);
2429 
2430 	tipc_node_unlock(node);
2431 
2432 	return 0;
2433 }
2434