1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include "core.h"
38 #include "subscr.h"
39 #include "link.h"
40 #include "bcast.h"
41 #include "socket.h"
42 #include "name_distr.h"
43 #include "discover.h"
44 #include "netlink.h"
45 
46 #include <linux/pkt_sched.h>
47 
48 /*
49  * Error message prefixes
50  */
51 static const char *link_co_err = "Link changeover error, ";
52 static const char *link_rst_msg = "Resetting link ";
53 static const char *link_unk_evt = "Unknown link event ";
54 
55 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
56 	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
57 	[TIPC_NLA_LINK_NAME] = {
58 		.type = NLA_STRING,
59 		.len = TIPC_MAX_LINK_NAME
60 	},
61 	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
62 	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
63 	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
64 	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
65 	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
66 	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
67 	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
68 	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
69 };
70 
71 /* Properties valid for media, bearer and link */
72 static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
73 	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
74 	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
75 	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
76 	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
77 };
78 
79 /*
80  * Out-of-range value for link session numbers
81  */
82 #define INVALID_SESSION 0x10000
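/* Session numbers are 16-bit quantities - they are masked with 0xffff
 * wherever they are set (see tipc_link_create() and tipc_link_reset()) -
 * so this 17-bit value can never match a real peer session. A minimal
 * sketch of the invariant this relies on (illustrative only):
 *
 *	BUILD_BUG_ON(INVALID_SESSION <= 0xffff);
 */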
83 
84 /*
85  * Link state events:
86  */
87 #define  STARTING_EVT    856384768	/* link processing trigger */
88 #define  TRAFFIC_MSG_EVT 560815u	/* traffic message received */
89 #define  TIMEOUT_EVT     560817u	/* link timer expired */
90 
91 /*
92  * The following two 'message types' are really just implementation
93  * data conveniently stored in the message header.
94  * They must not be considered part of the protocol.
95  */
96 #define OPEN_MSG   0
97 #define CLOSED_MSG 1
98 
99 /*
100  * State value stored in 'exp_msg_count'
101  */
102 #define START_CHANGEOVER 100000u
103 
104 static void link_handle_out_of_seq_msg(struct tipc_link *link,
105 				       struct sk_buff *skb);
106 static void tipc_link_proto_rcv(struct tipc_link *link,
107 				struct sk_buff *skb);
108 static int  tipc_link_tunnel_rcv(struct tipc_node *node,
109 				 struct sk_buff **skb);
110 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
111 static void link_state_event(struct tipc_link *l_ptr, u32 event);
112 static void link_reset_statistics(struct tipc_link *l_ptr);
113 static void link_print(struct tipc_link *l_ptr, const char *str);
114 static void tipc_link_sync_xmit(struct tipc_link *l);
115 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
116 static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
117 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
118 
119 /*
120  *  Simple link routines
121  */
122 static unsigned int align(unsigned int i)
123 {
124 	return (i + 3) & ~3u;
125 }
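/* align() rounds a length up to the next 4-byte boundary, e.g.
 * align(5) == 8, align(7) == 8, align(8) == 8 and align(0) == 0.
 * A hedged sketch of an equivalent form using the kernel's generic
 * helper from <linux/kernel.h> (illustrative only, not used here):
 *
 *	return ALIGN(i, 4);
 */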
126 
127 static void tipc_link_release(struct kref *kref)
128 {
129 	kfree(container_of(kref, struct tipc_link, ref));
130 }
131 
132 static void tipc_link_get(struct tipc_link *l_ptr)
133 {
134 	kref_get(&l_ptr->ref);
135 }
136 
137 static void tipc_link_put(struct tipc_link *l_ptr)
138 {
139 	kref_put(&l_ptr->ref, tipc_link_release);
140 }
141 
142 static void link_init_max_pkt(struct tipc_link *l_ptr)
143 {
144 	struct tipc_node *node = l_ptr->owner;
145 	struct tipc_net *tn = net_generic(node->net, tipc_net_id);
146 	struct tipc_bearer *b_ptr;
147 	u32 max_pkt;
148 
149 	rcu_read_lock();
150 	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
151 	if (!b_ptr) {
152 		rcu_read_unlock();
153 		return;
154 	}
155 	max_pkt = (b_ptr->mtu & ~3);
156 	rcu_read_unlock();
157 
158 	if (max_pkt > MAX_MSG_SIZE)
159 		max_pkt = MAX_MSG_SIZE;
160 
161 	l_ptr->max_pkt_target = max_pkt;
162 	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
163 		l_ptr->max_pkt = l_ptr->max_pkt_target;
164 	else
165 		l_ptr->max_pkt = MAX_PKT_DEFAULT;
166 
167 	l_ptr->max_pkt_probes = 0;
168 }
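/* Worked example of the MTU clamping above (hypothetical bearer MTU):
 * a bearer MTU of 1497 gives max_pkt_target == 1496 (rounded down to a
 * 4-byte multiple). The link then starts from the smaller of that target
 * and MAX_PKT_DEFAULT, and subsequent probing grows max_pkt toward the
 * target.
 */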
169 
170 /*
171  *  Simple non-static link routines (i.e. referenced outside this file)
172  */
173 int tipc_link_is_up(struct tipc_link *l_ptr)
174 {
175 	if (!l_ptr)
176 		return 0;
177 	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
178 }
179 
180 int tipc_link_is_active(struct tipc_link *l_ptr)
181 {
182 	return	(l_ptr->owner->active_links[0] == l_ptr) ||
183 		(l_ptr->owner->active_links[1] == l_ptr);
184 }
185 
186 /**
187  * link_timeout - handle expiration of link timer
188  * @l_ptr: pointer to link
189  */
190 static void link_timeout(unsigned long data)
191 {
192 	struct tipc_link *l_ptr = (struct tipc_link *)data;
193 	struct sk_buff *skb;
194 
195 	tipc_node_lock(l_ptr->owner);
196 
197 	/* update counters used in statistical profiling of send traffic */
198 	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
199 	l_ptr->stats.queue_sz_counts++;
200 
201 	skb = skb_peek(&l_ptr->transmq);
202 	if (skb) {
203 		struct tipc_msg *msg = buf_msg(skb);
204 		u32 length = msg_size(msg);
205 
206 		if ((msg_user(msg) == MSG_FRAGMENTER) &&
207 		    (msg_type(msg) == FIRST_FRAGMENT)) {
208 			length = msg_size(msg_get_wrapped(msg));
209 		}
210 		if (length) {
211 			l_ptr->stats.msg_lengths_total += length;
212 			l_ptr->stats.msg_length_counts++;
213 			if (length <= 64)
214 				l_ptr->stats.msg_length_profile[0]++;
215 			else if (length <= 256)
216 				l_ptr->stats.msg_length_profile[1]++;
217 			else if (length <= 1024)
218 				l_ptr->stats.msg_length_profile[2]++;
219 			else if (length <= 4096)
220 				l_ptr->stats.msg_length_profile[3]++;
221 			else if (length <= 16384)
222 				l_ptr->stats.msg_length_profile[4]++;
223 			else if (length <= 32768)
224 				l_ptr->stats.msg_length_profile[5]++;
225 			else
226 				l_ptr->stats.msg_length_profile[6]++;
227 		}
228 	}
229 
230 	/* do all other link processing performed on a periodic basis */
231 	link_state_event(l_ptr, TIMEOUT_EVT);
232 
233 	if (skb_queue_len(&l_ptr->backlogq))
234 		tipc_link_push_packets(l_ptr);
235 
236 	tipc_node_unlock(l_ptr->owner);
237 	tipc_link_put(l_ptr);
238 }
239 
240 static void link_set_timer(struct tipc_link *link, unsigned long time)
241 {
242 	if (!mod_timer(&link->timer, jiffies + time))
243 		tipc_link_get(link);
244 }
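/* The running timer owns one reference on the link: mod_timer() returns 0
 * when the timer was not already pending, which is exactly the case where
 * a fresh reference must be taken, since each expiry of link_timeout()
 * drops one via tipc_link_put(). A hedged sketch of the matching teardown
 * side (the same pairing tipc_link_delete_list() uses):
 */
#if 0	/* illustrative sketch */
static void link_stop_timer(struct tipc_link *link)
{
	if (del_timer(&link->timer))	/* timer was still pending ... */
		tipc_link_put(link);	/* ... so drop its reference */
}
#endif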
245 
246 /**
247  * tipc_link_create - create a new link
248  * @n_ptr: pointer to associated node
249  * @b_ptr: pointer to associated bearer
250  * @media_addr: media address to use when sending messages over link
251  *
252  * Returns pointer to link.
253  */
254 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
255 				   struct tipc_bearer *b_ptr,
256 				   const struct tipc_media_addr *media_addr)
257 {
258 	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
259 	struct tipc_link *l_ptr;
260 	struct tipc_msg *msg;
261 	char *if_name;
262 	char addr_string[16];
263 	u32 peer = n_ptr->addr;
264 
265 	if (n_ptr->link_cnt >= MAX_BEARERS) {
266 		tipc_addr_string_fill(addr_string, n_ptr->addr);
267 		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
268 			n_ptr->link_cnt, addr_string, MAX_BEARERS);
269 		return NULL;
270 	}
271 
272 	if (n_ptr->links[b_ptr->identity]) {
273 		tipc_addr_string_fill(addr_string, n_ptr->addr);
274 		pr_err("Attempt to establish second link on <%s> to %s\n",
275 		       b_ptr->name, addr_string);
276 		return NULL;
277 	}
278 
279 	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
280 	if (!l_ptr) {
281 		pr_warn("Link creation failed, no memory\n");
282 		return NULL;
283 	}
284 	kref_init(&l_ptr->ref);
285 	l_ptr->addr = peer;
286 	if_name = strchr(b_ptr->name, ':') + 1;
287 	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
288 		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
289 		tipc_node(tn->own_addr),
290 		if_name,
291 		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
292 		/* note: peer i/f name is updated by reset/activate message */
293 	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
294 	l_ptr->owner = n_ptr;
295 	l_ptr->checkpoint = 1;
296 	l_ptr->peer_session = INVALID_SESSION;
297 	l_ptr->bearer_id = b_ptr->identity;
298 	link_set_supervision_props(l_ptr, b_ptr->tolerance);
299 	l_ptr->state = RESET_UNKNOWN;
300 
301 	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
302 	msg = l_ptr->pmsg;
303 	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
304 		      l_ptr->addr);
305 	msg_set_size(msg, sizeof(l_ptr->proto_msg));
306 	msg_set_session(msg, (tn->random & 0xffff));
307 	msg_set_bearer_id(msg, b_ptr->identity);
308 	strcpy((char *)msg_data(msg), if_name);
309 	l_ptr->net_plane = b_ptr->net_plane;
310 	link_init_max_pkt(l_ptr);
311 	l_ptr->priority = b_ptr->priority;
312 	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
313 
314 	l_ptr->next_out_no = 1;
315 	__skb_queue_head_init(&l_ptr->transmq);
316 	__skb_queue_head_init(&l_ptr->backlogq);
317 	__skb_queue_head_init(&l_ptr->deferdq);
318 	skb_queue_head_init(&l_ptr->wakeupq);
319 	skb_queue_head_init(&l_ptr->inputq);
320 	skb_queue_head_init(&l_ptr->namedq);
321 	link_reset_statistics(l_ptr);
322 	tipc_node_attach_link(n_ptr, l_ptr);
323 	setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
324 	link_state_event(l_ptr, STARTING_EVT);
325 
326 	return l_ptr;
327 }
328 
329 /**
330  * tipc_link_delete - Conditional deletion of link.
331  *               If timer still running, real delete is done when it expires
332  * @link: link to be deleted
333  */
334 void tipc_link_delete(struct tipc_link *link)
335 {
336 	tipc_link_reset_fragments(link);
337 	tipc_node_detach_link(link->owner, link);
338 	tipc_link_put(link);
339 }
340 
341 void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
342 			   bool shutting_down)
343 {
344 	struct tipc_net *tn = net_generic(net, tipc_net_id);
345 	struct tipc_link *link;
346 	struct tipc_node *node;
347 	bool del_link;
348 
349 	rcu_read_lock();
350 	list_for_each_entry_rcu(node, &tn->node_list, list) {
351 		tipc_node_lock(node);
352 		link = node->links[bearer_id];
353 		if (!link) {
354 			tipc_node_unlock(node);
355 			continue;
356 		}
357 		del_link = !tipc_link_is_up(link) && !link->exp_msg_count;
358 		tipc_link_reset(link);
359 		if (del_timer(&link->timer))
360 			tipc_link_put(link);
361 		link->flags |= LINK_STOPPED;
362 		/* Delete link now, or when failover is finished: */
363 		if (shutting_down || !tipc_node_is_up(node) || del_link)
364 			tipc_link_delete(link);
365 		tipc_node_unlock(node);
366 	}
367 	rcu_read_unlock();
368 }
369 
370 /**
371  * link_schedule_user - schedule user for wakeup after congestion
372  * @link: congested link
373  * @oport: sending port
374  * @chain_sz: size of the buffer chain whose transmission was attempted
375  * @imp: importance of the message whose transmission was attempted
376  * Create pseudo msg to send back to user when congestion abates
377  */
378 static bool link_schedule_user(struct tipc_link *link, u32 oport,
379 			       uint chain_sz, uint imp)
380 {
381 	struct sk_buff *buf;
382 
383 	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
384 			      link_own_addr(link), link_own_addr(link),
385 			      oport, 0, 0);
386 	if (!buf)
387 		return false;
388 	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
389 	TIPC_SKB_CB(buf)->chain_imp = imp;
390 	skb_queue_tail(&link->wakeupq, buf);
391 	link->stats.link_congs++;
392 	return true;
393 }
394 
395 /**
396  * link_prepare_wakeup - prepare users for wakeup after congestion
397  * @link: congested link
398  * Move a number of waiting users, as permitted by available space in
399  * the send queue, from link wait queue to node wait queue for wakeup
400  */
401 void link_prepare_wakeup(struct tipc_link *link)
402 {
403 	uint pend_qsz = skb_queue_len(&link->backlogq);
404 	struct sk_buff *skb, *tmp;
405 
406 	skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
407 		if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
408 			break;
409 		pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
410 		skb_unlink(skb, &link->wakeupq);
411 		skb_queue_tail(&link->inputq, skb);
412 		link->owner->inputq = &link->inputq;
413 		link->owner->action_flags |= TIPC_MSG_EVT;
414 	}
415 }
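/* Worked example of the wakeup bookkeeping (hypothetical numbers): with
 * 40 packets in the backlog and queue_limit[TIPC_LOW_IMPORTANCE] == 25,
 * a low-importance waiter stays parked. Once the backlog drains below 25,
 * the pseudo message queued by link_schedule_user() moves to the node
 * input queue and the blocked sender is woken up via TIPC_MSG_EVT.
 */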
416 
417 /**
418  * tipc_link_reset_fragments - purge link's inbound message fragments queue
419  * @l_ptr: pointer to link
420  */
421 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
422 {
423 	kfree_skb(l_ptr->reasm_buf);
424 	l_ptr->reasm_buf = NULL;
425 }
426 
427 /**
428  * tipc_link_purge_queues - purge all pkt queues associated with link
429  * @l_ptr: pointer to link
430  */
431 void tipc_link_purge_queues(struct tipc_link *l_ptr)
432 {
433 	__skb_queue_purge(&l_ptr->deferdq);
434 	__skb_queue_purge(&l_ptr->transmq);
435 	__skb_queue_purge(&l_ptr->backlogq);
436 	tipc_link_reset_fragments(l_ptr);
437 }
438 
439 void tipc_link_reset(struct tipc_link *l_ptr)
440 {
441 	u32 prev_state = l_ptr->state;
442 	u32 checkpoint = l_ptr->next_in_no;
443 	int was_active_link = tipc_link_is_active(l_ptr);
444 	struct tipc_node *owner = l_ptr->owner;
445 
446 	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
447 
448 	/* Link is down, accept any session */
449 	l_ptr->peer_session = INVALID_SESSION;
450 
451 	/* Prepare for max packet size negotiation */
452 	link_init_max_pkt(l_ptr);
453 
454 	l_ptr->state = RESET_UNKNOWN;
455 
456 	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
457 		return;
458 
459 	tipc_node_link_down(l_ptr->owner, l_ptr);
460 	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
461 
462 	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
463 		l_ptr->reset_checkpoint = checkpoint;
464 		l_ptr->exp_msg_count = START_CHANGEOVER;
465 	}
466 
467 	/* Clean up all queues, except inputq: */
468 	__skb_queue_purge(&l_ptr->transmq);
469 	__skb_queue_purge(&l_ptr->backlogq);
470 	__skb_queue_purge(&l_ptr->deferdq);
471 	if (!owner->inputq)
472 		owner->inputq = &l_ptr->inputq;
473 	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
474 	if (!skb_queue_empty(owner->inputq))
475 		owner->action_flags |= TIPC_MSG_EVT;
476 	l_ptr->rcv_unacked = 0;
477 	l_ptr->checkpoint = 1;
478 	l_ptr->next_out_no = 1;
479 	l_ptr->fsm_msg_cnt = 0;
480 	l_ptr->stale_count = 0;
481 	link_reset_statistics(l_ptr);
482 }
483 
484 void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
485 {
486 	struct tipc_net *tn = net_generic(net, tipc_net_id);
487 	struct tipc_link *l_ptr;
488 	struct tipc_node *n_ptr;
489 
490 	rcu_read_lock();
491 	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
492 		tipc_node_lock(n_ptr);
493 		l_ptr = n_ptr->links[bearer_id];
494 		if (l_ptr)
495 			tipc_link_reset(l_ptr);
496 		tipc_node_unlock(n_ptr);
497 	}
498 	rcu_read_unlock();
499 }
500 
501 static void link_activate(struct tipc_link *link)
502 {
503 	struct tipc_node *node = link->owner;
504 
505 	link->next_in_no = 1;
506 	link->stats.recv_info = 1;
507 	tipc_node_link_up(node, link);
508 	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
509 }
510 
511 /**
512  * link_state_event - link finite state machine
513  * @l_ptr: pointer to link
514  * @event: state machine event to process
515  */
516 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
517 {
518 	struct tipc_link *other;
519 	unsigned long cont_intv = l_ptr->cont_intv;
520 
521 	if (l_ptr->flags & LINK_STOPPED)
522 		return;
523 
524 	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
525 		return;		/* Not yet. */
526 
527 	/* Check whether changeover is going on */
528 	if (l_ptr->exp_msg_count) {
529 		if (event == TIMEOUT_EVT)
530 			link_set_timer(l_ptr, cont_intv);
531 		return;
532 	}
533 
534 	switch (l_ptr->state) {
535 	case WORKING_WORKING:
536 		switch (event) {
537 		case TRAFFIC_MSG_EVT:
538 		case ACTIVATE_MSG:
539 			break;
540 		case TIMEOUT_EVT:
541 			if (l_ptr->next_in_no != l_ptr->checkpoint) {
542 				l_ptr->checkpoint = l_ptr->next_in_no;
543 				if (tipc_bclink_acks_missing(l_ptr->owner)) {
544 					tipc_link_proto_xmit(l_ptr, STATE_MSG,
545 							     0, 0, 0, 0, 0);
546 					l_ptr->fsm_msg_cnt++;
547 				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
548 					tipc_link_proto_xmit(l_ptr, STATE_MSG,
549 							     1, 0, 0, 0, 0);
550 					l_ptr->fsm_msg_cnt++;
551 				}
552 				link_set_timer(l_ptr, cont_intv);
553 				break;
554 			}
555 			l_ptr->state = WORKING_UNKNOWN;
556 			l_ptr->fsm_msg_cnt = 0;
557 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
558 			l_ptr->fsm_msg_cnt++;
559 			link_set_timer(l_ptr, cont_intv / 4);
560 			break;
561 		case RESET_MSG:
562 			pr_debug("%s<%s>, requested by peer\n",
563 				 link_rst_msg, l_ptr->name);
564 			tipc_link_reset(l_ptr);
565 			l_ptr->state = RESET_RESET;
566 			l_ptr->fsm_msg_cnt = 0;
567 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
568 					     0, 0, 0, 0, 0);
569 			l_ptr->fsm_msg_cnt++;
570 			link_set_timer(l_ptr, cont_intv);
571 			break;
572 		default:
573 			pr_debug("%s%u in WW state\n", link_unk_evt, event);
574 		}
575 		break;
576 	case WORKING_UNKNOWN:
577 		switch (event) {
578 		case TRAFFIC_MSG_EVT:
579 		case ACTIVATE_MSG:
580 			l_ptr->state = WORKING_WORKING;
581 			l_ptr->fsm_msg_cnt = 0;
582 			link_set_timer(l_ptr, cont_intv);
583 			break;
584 		case RESET_MSG:
585 			pr_debug("%s<%s>, requested by peer while probing\n",
586 				 link_rst_msg, l_ptr->name);
587 			tipc_link_reset(l_ptr);
588 			l_ptr->state = RESET_RESET;
589 			l_ptr->fsm_msg_cnt = 0;
590 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
591 					     0, 0, 0, 0, 0);
592 			l_ptr->fsm_msg_cnt++;
593 			link_set_timer(l_ptr, cont_intv);
594 			break;
595 		case TIMEOUT_EVT:
596 			if (l_ptr->next_in_no != l_ptr->checkpoint) {
597 				l_ptr->state = WORKING_WORKING;
598 				l_ptr->fsm_msg_cnt = 0;
599 				l_ptr->checkpoint = l_ptr->next_in_no;
600 				if (tipc_bclink_acks_missing(l_ptr->owner)) {
601 					tipc_link_proto_xmit(l_ptr, STATE_MSG,
602 							     0, 0, 0, 0, 0);
603 					l_ptr->fsm_msg_cnt++;
604 				}
605 				link_set_timer(l_ptr, cont_intv);
606 			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
607 				tipc_link_proto_xmit(l_ptr, STATE_MSG,
608 						     1, 0, 0, 0, 0);
609 				l_ptr->fsm_msg_cnt++;
610 				link_set_timer(l_ptr, cont_intv / 4);
611 			} else {	/* Link has failed */
612 				pr_debug("%s<%s>, peer not responding\n",
613 					 link_rst_msg, l_ptr->name);
614 				tipc_link_reset(l_ptr);
615 				l_ptr->state = RESET_UNKNOWN;
616 				l_ptr->fsm_msg_cnt = 0;
617 				tipc_link_proto_xmit(l_ptr, RESET_MSG,
618 						     0, 0, 0, 0, 0);
619 				l_ptr->fsm_msg_cnt++;
620 				link_set_timer(l_ptr, cont_intv);
621 			}
622 			break;
623 		default:
624 			pr_err("%s%u in WU state\n", link_unk_evt, event);
625 		}
626 		break;
627 	case RESET_UNKNOWN:
628 		switch (event) {
629 		case TRAFFIC_MSG_EVT:
630 			break;
631 		case ACTIVATE_MSG:
632 			other = l_ptr->owner->active_links[0];
633 			if (other && link_working_unknown(other))
634 				break;
635 			l_ptr->state = WORKING_WORKING;
636 			l_ptr->fsm_msg_cnt = 0;
637 			link_activate(l_ptr);
638 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
639 			l_ptr->fsm_msg_cnt++;
640 			if (l_ptr->owner->working_links == 1)
641 				tipc_link_sync_xmit(l_ptr);
642 			link_set_timer(l_ptr, cont_intv);
643 			break;
644 		case RESET_MSG:
645 			l_ptr->state = RESET_RESET;
646 			l_ptr->fsm_msg_cnt = 0;
647 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
648 					     1, 0, 0, 0, 0);
649 			l_ptr->fsm_msg_cnt++;
650 			link_set_timer(l_ptr, cont_intv);
651 			break;
652 		case STARTING_EVT:
653 			l_ptr->flags |= LINK_STARTED;
654 			l_ptr->fsm_msg_cnt++;
655 			link_set_timer(l_ptr, cont_intv);
656 			break;
657 		case TIMEOUT_EVT:
658 			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
659 			l_ptr->fsm_msg_cnt++;
660 			link_set_timer(l_ptr, cont_intv);
661 			break;
662 		default:
663 			pr_err("%s%u in RU state\n", link_unk_evt, event);
664 		}
665 		break;
666 	case RESET_RESET:
667 		switch (event) {
668 		case TRAFFIC_MSG_EVT:
669 		case ACTIVATE_MSG:
670 			other = l_ptr->owner->active_links[0];
671 			if (other && link_working_unknown(other))
672 				break;
673 			l_ptr->state = WORKING_WORKING;
674 			l_ptr->fsm_msg_cnt = 0;
675 			link_activate(l_ptr);
676 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
677 			l_ptr->fsm_msg_cnt++;
678 			if (l_ptr->owner->working_links == 1)
679 				tipc_link_sync_xmit(l_ptr);
680 			link_set_timer(l_ptr, cont_intv);
681 			break;
682 		case RESET_MSG:
683 			break;
684 		case TIMEOUT_EVT:
685 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
686 					     0, 0, 0, 0, 0);
687 			l_ptr->fsm_msg_cnt++;
688 			link_set_timer(l_ptr, cont_intv);
689 			break;
690 		default:
691 			pr_err("%s%u in RR state\n", link_unk_evt, event);
692 		}
693 		break;
694 	default:
695 		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
696 	}
697 }
698 
699 /* tipc_link_cong: determine return value and how to treat the
700  * sent buffer during link congestion.
701  * - For plain, errorless user data messages we keep the buffer and
702  *   return -ELINKCONG.
703  * - For all other messages we discard the buffer and return -EHOSTUNREACH.
704  * - For TIPC internal messages we also reset the link.
705  */
706 static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
707 {
708 	struct sk_buff *skb = skb_peek(list);
709 	struct tipc_msg *msg = buf_msg(skb);
710 	int imp = msg_importance(msg);
711 	u32 oport = msg_tot_origport(msg);
712 
713 	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
714 		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
715 		tipc_link_reset(link);
716 		goto drop;
717 	}
718 	if (unlikely(msg_errcode(msg)))
719 		goto drop;
720 	if (unlikely(msg_reroute_cnt(msg)))
721 		goto drop;
722 	if (TIPC_SKB_CB(skb)->wakeup_pending)
723 		return -ELINKCONG;
724 	if (link_schedule_user(link, oport, skb_queue_len(list), imp))
725 		return -ELINKCONG;
726 drop:
727 	__skb_queue_purge(list);
728 	return -EHOSTUNREACH;
729 }
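/* A hedged sketch of how a sender is expected to act on these return
 * codes (simplified; wait_for_wakeup() is a hypothetical stand-in for
 * the socket layer's wait on the SOCK_WAKEUP pseudo message):
 */
#if 0	/* illustrative sketch */
static int example_send(struct net *net, struct tipc_link *link,
			struct sk_buff_head *list)
{
	int rc;

	do {	/* -ELINKCONG means the chain was kept; retry after wakeup */
		rc = __tipc_link_xmit(net, link, list);
	} while (rc == -ELINKCONG && wait_for_wakeup() == 0);
	return rc;	/* all other codes already consumed the chain */
}
#endif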
730 
731 /**
732  * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
733  * @link: link to use
734  * @list: chain of buffers containing message
735  *
736  * Consumes the buffer chain, except when returning -ELINKCONG
737  * Returns 0 on success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
738  * user data messages) or -EHOSTUNREACH (all other messages/senders)
739  * Only the socket functions tipc_send_stream() and tipc_send_packet() need
740  * to act on the return value, since they may need to do more send attempts.
741  */
742 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
743 		     struct sk_buff_head *list)
744 {
745 	struct tipc_msg *msg = buf_msg(skb_peek(list));
746 	unsigned int maxwin = link->window;
747 	unsigned int imp = msg_importance(msg);
748 	uint mtu = link->max_pkt;
749 	uint ack = mod(link->next_in_no - 1);
750 	uint seqno = link->next_out_no;
751 	uint bc_last_in = link->owner->bclink.last_in;
752 	struct tipc_media_addr *addr = &link->media_addr;
753 	struct sk_buff_head *transmq = &link->transmq;
754 	struct sk_buff_head *backlogq = &link->backlogq;
755 	struct sk_buff *skb, *tmp;
756 
757 	/* Match queue limit against msg importance: */
758 	if (unlikely(skb_queue_len(backlogq) >= link->queue_limit[imp]))
759 		return tipc_link_cong(link, list);
760 
761 	/* Has valid packet limit been used ? */
762 	if (unlikely(msg_size(msg) > mtu)) {
763 		__skb_queue_purge(list);
764 		return -EMSGSIZE;
765 	}
766 
767 	/* Prepare each packet for sending, and add to relevant queue: */
768 	skb_queue_walk_safe(list, skb, tmp) {
769 		__skb_unlink(skb, list);
770 		msg = buf_msg(skb);
771 		msg_set_seqno(msg, seqno);
772 		msg_set_ack(msg, ack);
773 		msg_set_bcast_ack(msg, bc_last_in);
774 
775 		if (likely(skb_queue_len(transmq) < maxwin)) {
776 			__skb_queue_tail(transmq, skb);
777 			tipc_bearer_send(net, link->bearer_id, skb, addr);
778 			link->rcv_unacked = 0;
779 			seqno++;
780 			continue;
781 		}
782 		if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
783 			link->stats.sent_bundled++;
784 			continue;
785 		}
786 		if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
787 			link->stats.sent_bundled++;
788 			link->stats.sent_bundles++;
789 		}
790 		__skb_queue_tail(backlogq, skb);
791 		seqno++;
792 	}
793 	link->next_out_no = seqno;
794 	return 0;
795 }
796 
797 static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
798 {
799 	skb_queue_head_init(list);
800 	__skb_queue_tail(list, skb);
801 }
802 
803 static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
804 {
805 	struct sk_buff_head head;
806 
807 	skb2list(skb, &head);
808 	return __tipc_link_xmit(link->owner->net, link, &head);
809 }
810 
811 int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
812 		       u32 selector)
813 {
814 	struct sk_buff_head head;
815 
816 	skb2list(skb, &head);
817 	return tipc_link_xmit(net, &head, dnode, selector);
818 }
819 
820 /**
821  * tipc_link_xmit() is the general link level function for message sending
822  * @net: the applicable net namespace
823  * @list: chain of buffers containing message
824  * @dsz: amount of user data to be sent
825  * @dnode: address of destination node
826  * @selector: a number used for deterministic link selection
827  * Consumes the buffer chain, except when returning -ELINKCONG
828  * Returns 0 on success, otherwise errno: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE
829  */
830 int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
831 		   u32 selector)
832 {
833 	struct tipc_link *link = NULL;
834 	struct tipc_node *node;
835 	int rc = -EHOSTUNREACH;
836 
837 	node = tipc_node_find(net, dnode);
838 	if (node) {
839 		tipc_node_lock(node);
840 		link = node->active_links[selector & 1];
841 		if (link)
842 			rc = __tipc_link_xmit(net, link, list);
843 		tipc_node_unlock(node);
844 	}
845 	if (link)
846 		return rc;
847 
848 	if (likely(in_own_node(net, dnode)))
849 		return tipc_sk_rcv(net, list);
850 
851 	__skb_queue_purge(list);
852 	return rc;
853 }
854 
855 /*
856  * tipc_link_sync_xmit - synchronize broadcast link endpoints.
857  *
858  * Give a newly added peer node the sequence number where it should
859  * start receiving and acking broadcast packets.
860  *
861  * Called with node locked
862  */
863 static void tipc_link_sync_xmit(struct tipc_link *link)
864 {
865 	struct sk_buff *skb;
866 	struct tipc_msg *msg;
867 
868 	skb = tipc_buf_acquire(INT_H_SIZE);
869 	if (!skb)
870 		return;
871 
872 	msg = buf_msg(skb);
873 	tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
874 		      INT_H_SIZE, link->addr);
875 	msg_set_last_bcast(msg, link->owner->bclink.acked);
876 	__tipc_link_xmit_skb(link, skb);
877 }
878 
879 /*
880  * tipc_link_sync_rcv - synchronize broadcast link endpoints.
881  * Receive the sequence number where we should start receiving and
882  * acking broadcast packets from a newly added peer node, and open
883  * up for reception of such packets.
884  *
885  * Called with node locked
886  */
887 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
888 {
889 	struct tipc_msg *msg = buf_msg(buf);
890 
891 	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
892 	n->bclink.recv_permitted = true;
893 	kfree_skb(buf);
894 }
895 
896 /*
897  * tipc_link_push_packets - push unsent packets to bearer
898  *
899  * Push out the unsent messages of a link where congestion
900  * has abated.
901  *
902  * Called with node locked
903  */
904 void tipc_link_push_packets(struct tipc_link *link)
905 {
906 	struct sk_buff *skb;
907 	struct tipc_msg *msg;
908 	unsigned int ack = mod(link->next_in_no - 1);
909 
910 	while (skb_queue_len(&link->transmq) < link->window) {
911 		skb = __skb_dequeue(&link->backlogq);
912 		if (!skb)
913 			break;
914 		msg = buf_msg(skb);
915 		msg_set_ack(msg, ack);
916 		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
917 		link->rcv_unacked = 0;
918 		__skb_queue_tail(&link->transmq, skb);
919 		tipc_bearer_send(link->owner->net, link->bearer_id,
920 				 skb, &link->media_addr);
921 	}
922 }
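/* After this loop one of two conditions holds: either the backlog queue is
 * empty, or the transmit queue has been filled back up to the send window.
 * A hedged sketch of that invariant as a helper (illustrative only):
 */
#if 0	/* illustrative sketch */
static bool link_window_invariant(struct tipc_link *link)
{
	return skb_queue_empty(&link->backlogq) ||
	       skb_queue_len(&link->transmq) >= link->window;
}
#endif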
923 
924 void tipc_link_reset_all(struct tipc_node *node)
925 {
926 	char addr_string[16];
927 	u32 i;
928 
929 	tipc_node_lock(node);
930 
931 	pr_warn("Resetting all links to %s\n",
932 		tipc_addr_string_fill(addr_string, node->addr));
933 
934 	for (i = 0; i < MAX_BEARERS; i++) {
935 		if (node->links[i]) {
936 			link_print(node->links[i], "Resetting link\n");
937 			tipc_link_reset(node->links[i]);
938 		}
939 	}
940 
941 	tipc_node_unlock(node);
942 }
943 
944 static void link_retransmit_failure(struct tipc_link *l_ptr,
945 				    struct sk_buff *buf)
946 {
947 	struct tipc_msg *msg = buf_msg(buf);
948 	struct net *net = l_ptr->owner->net;
949 
950 	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
951 
952 	if (l_ptr->addr) {
953 		/* Handle failure on standard link */
954 		link_print(l_ptr, "Resetting link\n");
955 		tipc_link_reset(l_ptr);
956 
957 	} else {
958 		/* Handle failure on broadcast link */
959 		struct tipc_node *n_ptr;
960 		char addr_string[16];
961 
962 		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
963 		pr_cont("Outstanding acks: %lu\n",
964 			(unsigned long) TIPC_SKB_CB(buf)->handle);
965 
966 		n_ptr = tipc_bclink_retransmit_to(net);
967 		tipc_node_lock(n_ptr);
968 
969 		tipc_addr_string_fill(addr_string, n_ptr->addr);
970 		pr_info("Broadcast link info for %s\n", addr_string);
971 		pr_info("Reception permitted: %d,  Acked: %u\n",
972 			n_ptr->bclink.recv_permitted,
973 			n_ptr->bclink.acked);
974 		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
975 			n_ptr->bclink.last_in,
976 			n_ptr->bclink.oos_state,
977 			n_ptr->bclink.last_sent);
978 
979 		tipc_node_unlock(n_ptr);
980 
981 		tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
982 		l_ptr->stale_count = 0;
983 	}
984 }
985 
986 void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
987 			  u32 retransmits)
988 {
989 	struct tipc_msg *msg;
990 
991 	if (!skb)
992 		return;
993 
994 	msg = buf_msg(skb);
995 
996 	/* Detect repeated retransmit failures */
997 	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
998 		if (++l_ptr->stale_count > 100) {
999 			link_retransmit_failure(l_ptr, skb);
1000 			return;
1001 		}
1002 	} else {
1003 		l_ptr->last_retransmitted = msg_seqno(msg);
1004 		l_ptr->stale_count = 1;
1005 	}
1006 
1007 	skb_queue_walk_from(&l_ptr->transmq, skb) {
1008 		if (!retransmits)
1009 			break;
1010 		msg = buf_msg(skb);
1011 		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1012 		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1013 		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
1014 				 &l_ptr->media_addr);
1015 		retransmits--;
1016 		l_ptr->stats.retransmitted++;
1017 	}
1018 }
1019 
1020 static void link_retrieve_defq(struct tipc_link *link,
1021 			       struct sk_buff_head *list)
1022 {
1023 	u32 seq_no;
1024 
1025 	if (skb_queue_empty(&link->deferdq))
1026 		return;
1027 
1028 	seq_no = buf_seqno(skb_peek(&link->deferdq));
1029 	if (seq_no == mod(link->next_in_no))
1030 		skb_queue_splice_tail_init(&link->deferdq, list);
1031 }
1032 
1033 /**
1034  * tipc_rcv - process TIPC packets/messages arriving from off-node
1035  * @net: the applicable net namespace
1036  * @skb: TIPC packet
1037  * @b_ptr: pointer to the bearer the message arrived on
1038  *
1039  * Invoked with no locks held.  Bearer pointer must point to a valid bearer
1040  * structure (i.e. cannot be NULL), but bearer can be inactive.
1041  */
1042 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
1043 {
1044 	struct tipc_net *tn = net_generic(net, tipc_net_id);
1045 	struct sk_buff_head head;
1046 	struct tipc_node *n_ptr;
1047 	struct tipc_link *l_ptr;
1048 	struct sk_buff *skb1, *tmp;
1049 	struct tipc_msg *msg;
1050 	u32 seq_no;
1051 	u32 ackd;
1052 	u32 released;
1053 
1054 	skb2list(skb, &head);
1055 
1056 	while ((skb = __skb_dequeue(&head))) {
1057 		/* Ensure message is well-formed */
1058 		if (unlikely(!tipc_msg_validate(skb)))
1059 			goto discard;
1060 
1061 		/* Handle arrival of a non-unicast link message */
1062 		msg = buf_msg(skb);
1063 		if (unlikely(msg_non_seq(msg))) {
1064 			if (msg_user(msg) ==  LINK_CONFIG)
1065 				tipc_disc_rcv(net, skb, b_ptr);
1066 			else
1067 				tipc_bclink_rcv(net, skb);
1068 			continue;
1069 		}
1070 
1071 		/* Discard unicast link messages destined for another node */
1072 		if (unlikely(!msg_short(msg) &&
1073 			     (msg_destnode(msg) != tn->own_addr)))
1074 			goto discard;
1075 
1076 		/* Locate neighboring node that sent message */
1077 		n_ptr = tipc_node_find(net, msg_prevnode(msg));
1078 		if (unlikely(!n_ptr))
1079 			goto discard;
1080 		tipc_node_lock(n_ptr);
1081 
1082 		/* Locate unicast link endpoint that should handle message */
1083 		l_ptr = n_ptr->links[b_ptr->identity];
1084 		if (unlikely(!l_ptr))
1085 			goto unlock;
1086 
1087 		/* Verify that communication with node is currently allowed */
1088 		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
1089 		    msg_user(msg) == LINK_PROTOCOL &&
1090 		    (msg_type(msg) == RESET_MSG ||
1091 		    msg_type(msg) == ACTIVATE_MSG) &&
1092 		    !msg_redundant_link(msg))
1093 			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
1094 
1095 		if (tipc_node_blocked(n_ptr))
1096 			goto unlock;
1097 
1098 		/* Validate message sequence number info */
1099 		seq_no = msg_seqno(msg);
1100 		ackd = msg_ack(msg);
1101 
1102 		/* Release acked messages */
1103 		if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
1104 			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1105 
1106 		released = 0;
1107 		skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
1108 			if (more(buf_seqno(skb1), ackd))
1109 				break;
1110 			 __skb_unlink(skb1, &l_ptr->transmq);
1111 			 kfree_skb(skb1);
1112 			 released = 1;
1113 		}
1114 
1115 		/* Try sending any messages link endpoint has pending */
1116 		if (unlikely(skb_queue_len(&l_ptr->backlogq)))
1117 			tipc_link_push_packets(l_ptr);
1118 
1119 		if (released && !skb_queue_empty(&l_ptr->wakeupq))
1120 			link_prepare_wakeup(l_ptr);
1121 
1122 		/* Process the incoming packet */
1123 		if (unlikely(!link_working_working(l_ptr))) {
1124 			if (msg_user(msg) == LINK_PROTOCOL) {
1125 				tipc_link_proto_rcv(l_ptr, skb);
1126 				link_retrieve_defq(l_ptr, &head);
1127 				skb = NULL;
1128 				goto unlock;
1129 			}
1130 
1131 			/* Traffic message. Conditionally activate link */
1132 			link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1133 
1134 			if (link_working_working(l_ptr)) {
1135 				/* Re-insert buffer in front of queue */
1136 				__skb_queue_head(&head, skb);
1137 				skb = NULL;
1138 				goto unlock;
1139 			}
1140 			goto unlock;
1141 		}
1142 
1143 		/* Link is now in state WORKING_WORKING */
1144 		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
1145 			link_handle_out_of_seq_msg(l_ptr, skb);
1146 			link_retrieve_defq(l_ptr, &head);
1147 			skb = NULL;
1148 			goto unlock;
1149 		}
1150 		l_ptr->next_in_no++;
1151 		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
1152 			link_retrieve_defq(l_ptr, &head);
1153 		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
1154 			l_ptr->stats.sent_acks++;
1155 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1156 		}
1157 		tipc_link_input(l_ptr, skb);
1158 		skb = NULL;
1159 unlock:
1160 		tipc_node_unlock(n_ptr);
1161 discard:
1162 		if (unlikely(skb))
1163 			kfree_skb(skb);
1164 	}
1165 }
1166 
1167 /* tipc_data_input - deliver data and name distr msgs to upper layer
1168  *
1169  * Consumes buffer if message is of right type
1170  * Node lock must be held
1171  */
1172 static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
1173 {
1174 	struct tipc_node *node = link->owner;
1175 	struct tipc_msg *msg = buf_msg(skb);
1176 	u32 dport = msg_destport(msg);
1177 
1178 	switch (msg_user(msg)) {
1179 	case TIPC_LOW_IMPORTANCE:
1180 	case TIPC_MEDIUM_IMPORTANCE:
1181 	case TIPC_HIGH_IMPORTANCE:
1182 	case TIPC_CRITICAL_IMPORTANCE:
1183 	case CONN_MANAGER:
1184 		if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
1185 			node->inputq = &link->inputq;
1186 			node->action_flags |= TIPC_MSG_EVT;
1187 		}
1188 		return true;
1189 	case NAME_DISTRIBUTOR:
1190 		node->bclink.recv_permitted = true;
1191 		node->namedq = &link->namedq;
1192 		skb_queue_tail(&link->namedq, skb);
1193 		if (skb_queue_len(&link->namedq) == 1)
1194 			node->action_flags |= TIPC_NAMED_MSG_EVT;
1195 		return true;
1196 	case MSG_BUNDLER:
1197 	case CHANGEOVER_PROTOCOL:
1198 	case MSG_FRAGMENTER:
1199 	case BCAST_PROTOCOL:
1200 		return false;
1201 	default:
1202 		pr_warn("Dropping received illegal msg type\n");
1203 		kfree_skb(skb);
1204 		return false;
1205 	}
1206 }
1207 
1208 /* tipc_link_input - process packet that has passed link protocol check
1209  *
1210  * Consumes buffer
1211  * Node lock must be held
1212  */
1213 static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
1214 {
1215 	struct tipc_node *node = link->owner;
1216 	struct tipc_msg *msg = buf_msg(skb);
1217 	struct sk_buff *iskb;
1218 	int pos = 0;
1219 
1220 	if (likely(tipc_data_input(link, skb)))
1221 		return;
1222 
1223 	switch (msg_user(msg)) {
1224 	case CHANGEOVER_PROTOCOL:
1225 		if (!tipc_link_tunnel_rcv(node, &skb))
1226 			break;
1227 		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
1228 			tipc_data_input(link, skb);
1229 			break;
1230 		}
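	/* fall through: the tunnelled payload was itself a bundle */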
1231 	case MSG_BUNDLER:
1232 		link->stats.recv_bundles++;
1233 		link->stats.recv_bundled += msg_msgcnt(msg);
1234 
1235 		while (tipc_msg_extract(skb, &iskb, &pos))
1236 			tipc_data_input(link, iskb);
1237 		break;
1238 	case MSG_FRAGMENTER:
1239 		link->stats.recv_fragments++;
1240 		if (tipc_buf_append(&link->reasm_buf, &skb)) {
1241 			link->stats.recv_fragmented++;
1242 			tipc_data_input(link, skb);
1243 		} else if (!link->reasm_buf) {
1244 			tipc_link_reset(link);
1245 		}
1246 		break;
1247 	case BCAST_PROTOCOL:
1248 		tipc_link_sync_rcv(node, skb);
1249 		break;
1250 	default:
1251 		break;
1252 	}
1253 }
1254 
1255 /**
1256  * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1257  *
1258  * Returns increase in queue length (i.e. 0 or 1)
1259  */
1260 u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
1261 {
1262 	struct sk_buff *skb1;
1263 	u32 seq_no = buf_seqno(skb);
1264 
1265 	/* Empty queue? */
1266 	if (skb_queue_empty(list)) {
1267 		__skb_queue_tail(list, skb);
1268 		return 1;
1269 	}
1270 
1271 	/* Highest seqno so far? Add at tail */
1272 	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
1273 		__skb_queue_tail(list, skb);
1274 		return 1;
1275 	}
1276 
1277 	/* Locate insertion point in queue, then insert; discard if duplicate */
1278 	skb_queue_walk(list, skb1) {
1279 		u32 curr_seqno = buf_seqno(skb1);
1280 
1281 		if (seq_no == curr_seqno) {
1282 			kfree_skb(skb);
1283 			return 0;
1284 		}
1285 
1286 		if (less(seq_no, curr_seqno))
1287 			break;
1288 	}
1289 
1290 	__skb_queue_before(list, skb1, skb);
1291 	return 1;
1292 }
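/* Worked example (hypothetical sequence numbers): with the deferred queue
 * holding {5, 6, 9}, inserting 7 yields {5, 6, 7, 9} and returns 1;
 * inserting 6 again frees the duplicate and returns 0; inserting 10 is
 * appended at the tail and returns 1.
 */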
1293 
1294 /*
1295  * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1296  */
1297 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1298 				       struct sk_buff *buf)
1299 {
1300 	u32 seq_no = buf_seqno(buf);
1301 
1302 	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1303 		tipc_link_proto_rcv(l_ptr, buf);
1304 		return;
1305 	}
1306 
1307 	/* Record OOS packet arrival (force mismatch on next timeout) */
1308 	l_ptr->checkpoint--;
1309 
1310 	/*
1311 	 * Discard packet if a duplicate; otherwise add it to deferred queue
1312 	 * and notify peer of gap as per protocol specification
1313 	 */
1314 	if (less(seq_no, mod(l_ptr->next_in_no))) {
1315 		l_ptr->stats.duplicates++;
1316 		kfree_skb(buf);
1317 		return;
1318 	}
1319 
1320 	if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
1321 		l_ptr->stats.deferred_recv++;
1322 		if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
1323 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1324 	} else {
1325 		l_ptr->stats.duplicates++;
1326 	}
1327 }
1328 
1329 /*
1330  * Send protocol message to the other endpoint.
1331  */
1332 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1333 			  u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1334 {
1335 	struct sk_buff *buf = NULL;
1336 	struct tipc_msg *msg = l_ptr->pmsg;
1337 	u32 msg_size = sizeof(l_ptr->proto_msg);
1338 	int r_flag;
1339 
1340 	/* Don't send protocol message during link changeover */
1341 	if (l_ptr->exp_msg_count)
1342 		return;
1343 
1344 	/* Abort non-RESET send if communication with node is prohibited */
1345 	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
1346 		return;
1347 
1348 	/* Create protocol message with "out-of-sequence" sequence number */
1349 	msg_set_type(msg, msg_typ);
1350 	msg_set_net_plane(msg, l_ptr->net_plane);
1351 	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1352 	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
1353 
1354 	if (msg_typ == STATE_MSG) {
1355 		u32 next_sent = mod(l_ptr->next_out_no);
1356 
1357 		if (!tipc_link_is_up(l_ptr))
1358 			return;
1359 		if (skb_queue_len(&l_ptr->backlogq))
1360 			next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
1361 		msg_set_next_sent(msg, next_sent);
1362 		if (!skb_queue_empty(&l_ptr->deferdq)) {
1363 			u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
1364 			gap = mod(rec - mod(l_ptr->next_in_no));
1365 		}
1366 		msg_set_seq_gap(msg, gap);
1367 		if (gap)
1368 			l_ptr->stats.sent_nacks++;
1369 		msg_set_link_tolerance(msg, tolerance);
1370 		msg_set_linkprio(msg, priority);
1371 		msg_set_max_pkt(msg, ack_mtu);
1372 		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1373 		msg_set_probe(msg, probe_msg != 0);
1374 		if (probe_msg) {
1375 			u32 mtu = l_ptr->max_pkt;
1376 
1377 			if ((mtu < l_ptr->max_pkt_target) &&
1378 			    link_working_working(l_ptr) &&
1379 			    l_ptr->fsm_msg_cnt) {
1380 				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1381 				if (l_ptr->max_pkt_probes == 10) {
1382 					l_ptr->max_pkt_target = (msg_size - 4);
1383 					l_ptr->max_pkt_probes = 0;
1384 					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1385 				}
1386 				l_ptr->max_pkt_probes++;
1387 			}
1388 
1389 			l_ptr->stats.sent_probes++;
1390 		}
1391 		l_ptr->stats.sent_states++;
1392 	} else {		/* RESET_MSG or ACTIVATE_MSG */
1393 		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1394 		msg_set_seq_gap(msg, 0);
1395 		msg_set_next_sent(msg, 1);
1396 		msg_set_probe(msg, 0);
1397 		msg_set_link_tolerance(msg, l_ptr->tolerance);
1398 		msg_set_linkprio(msg, l_ptr->priority);
1399 		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1400 	}
1401 
1402 	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1403 	msg_set_redundant_link(msg, r_flag);
1404 	msg_set_linkprio(msg, l_ptr->priority);
1405 	msg_set_size(msg, msg_size);
1406 
1407 	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1408 
1409 	buf = tipc_buf_acquire(msg_size);
1410 	if (!buf)
1411 		return;
1412 
1413 	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1414 	buf->priority = TC_PRIO_CONTROL;
1415 	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
1416 			 &l_ptr->media_addr);
1417 	l_ptr->rcv_unacked = 0;
1418 	kfree_skb(buf);
1419 }
1420 
1421 /*
1422  * Receive protocol message.
1423  * Note that network plane id propagates through the network, and may
1424  * change at any time. The node with the lowest address rules.
1425  */
1426 static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
1427 				struct sk_buff *buf)
1428 {
1429 	u32 rec_gap = 0;
1430 	u32 max_pkt_info;
1431 	u32 max_pkt_ack;
1432 	u32 msg_tol;
1433 	struct tipc_msg *msg = buf_msg(buf);
1434 
1435 	/* Discard protocol message during link changeover */
1436 	if (l_ptr->exp_msg_count)
1437 		goto exit;
1438 
1439 	if (l_ptr->net_plane != msg_net_plane(msg))
1440 		if (link_own_addr(l_ptr) > msg_prevnode(msg))
1441 			l_ptr->net_plane = msg_net_plane(msg);
1442 
1443 	switch (msg_type(msg)) {
1444 
1445 	case RESET_MSG:
1446 		if (!link_working_unknown(l_ptr) &&
1447 		    (l_ptr->peer_session != INVALID_SESSION)) {
1448 			if (less_eq(msg_session(msg), l_ptr->peer_session))
1449 				break; /* duplicate or old reset: ignore */
1450 		}
1451 
1452 		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
1453 				link_working_unknown(l_ptr))) {
1454 			/*
1455 			 * peer has lost contact -- don't allow peer's links
1456 			 * to reactivate before we recognize loss & clean up
1457 			 */
1458 			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
1459 		}
1460 
1461 		link_state_event(l_ptr, RESET_MSG);
1462 
1463 		/* fall through */
1464 	case ACTIVATE_MSG:
1465 		/* Update link settings according to the other endpoint's values */
1466 		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
1467 
1468 		msg_tol = msg_link_tolerance(msg);
1469 		if (msg_tol > l_ptr->tolerance)
1470 			link_set_supervision_props(l_ptr, msg_tol);
1471 
1472 		if (msg_linkprio(msg) > l_ptr->priority)
1473 			l_ptr->priority = msg_linkprio(msg);
1474 
1475 		max_pkt_info = msg_max_pkt(msg);
1476 		if (max_pkt_info) {
1477 			if (max_pkt_info < l_ptr->max_pkt_target)
1478 				l_ptr->max_pkt_target = max_pkt_info;
1479 			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
1480 				l_ptr->max_pkt = l_ptr->max_pkt_target;
1481 		} else {
1482 			l_ptr->max_pkt = l_ptr->max_pkt_target;
1483 		}
1484 
1485 		/* Synchronize broadcast link info, if not done previously */
1486 		if (!tipc_node_is_up(l_ptr->owner)) {
1487 			l_ptr->owner->bclink.last_sent =
1488 				l_ptr->owner->bclink.last_in =
1489 				msg_last_bcast(msg);
1490 			l_ptr->owner->bclink.oos_state = 0;
1491 		}
1492 
1493 		l_ptr->peer_session = msg_session(msg);
1494 		l_ptr->peer_bearer_id = msg_bearer_id(msg);
1495 
1496 		if (msg_type(msg) == ACTIVATE_MSG)
1497 			link_state_event(l_ptr, ACTIVATE_MSG);
1498 		break;
1499 	case STATE_MSG:
1500 
1501 		msg_tol = msg_link_tolerance(msg);
1502 		if (msg_tol)
1503 			link_set_supervision_props(l_ptr, msg_tol);
1504 
1505 		if (msg_linkprio(msg) &&
1506 		    (msg_linkprio(msg) != l_ptr->priority)) {
1507 			pr_debug("%s<%s>, priority change %u->%u\n",
1508 				 link_rst_msg, l_ptr->name,
1509 				 l_ptr->priority, msg_linkprio(msg));
1510 			l_ptr->priority = msg_linkprio(msg);
1511 			tipc_link_reset(l_ptr); /* Enforce change to take effect */
1512 			break;
1513 		}
1514 
1515 		/* Record reception; force mismatch at next timeout: */
1516 		l_ptr->checkpoint--;
1517 
1518 		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1519 		l_ptr->stats.recv_states++;
1520 		if (link_reset_unknown(l_ptr))
1521 			break;
1522 
1523 		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
1524 			rec_gap = mod(msg_next_sent(msg) -
1525 				      mod(l_ptr->next_in_no));
1526 		}
1527 
1528 		max_pkt_ack = msg_max_pkt(msg);
1529 		if (max_pkt_ack > l_ptr->max_pkt) {
1530 			l_ptr->max_pkt = max_pkt_ack;
1531 			l_ptr->max_pkt_probes = 0;
1532 		}
1533 
1534 		max_pkt_ack = 0;
1535 		if (msg_probe(msg)) {
1536 			l_ptr->stats.recv_probes++;
1537 			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
1538 				max_pkt_ack = msg_size(msg);
1539 		}
1540 
1541 		/* Protocol message before retransmits, reduce loss risk */
1542 		if (l_ptr->owner->bclink.recv_permitted)
1543 			tipc_bclink_update_link_state(l_ptr->owner,
1544 						      msg_last_bcast(msg));
1545 
1546 		if (rec_gap || (msg_probe(msg))) {
1547 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
1548 					     0, max_pkt_ack);
1549 		}
1550 		if (msg_seq_gap(msg)) {
1551 			l_ptr->stats.recv_nacks++;
1552 			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
1553 					     msg_seq_gap(msg));
1554 		}
1555 		break;
1556 	}
1557 exit:
1558 	kfree_skb(buf);
1559 }
1560 
1561 
1562 /* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
1563  * a different bearer. Owner node is locked.
1564  */
1565 static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1566 				  struct tipc_msg *tunnel_hdr,
1567 				  struct tipc_msg *msg,
1568 				  u32 selector)
1569 {
1570 	struct tipc_link *tunnel;
1571 	struct sk_buff *skb;
1572 	u32 length = msg_size(msg);
1573 
1574 	tunnel = l_ptr->owner->active_links[selector & 1];
1575 	if (!tipc_link_is_up(tunnel)) {
1576 		pr_warn("%stunnel link no longer available\n", link_co_err);
1577 		return;
1578 	}
1579 	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
1580 	skb = tipc_buf_acquire(length + INT_H_SIZE);
1581 	if (!skb) {
1582 		pr_warn("%sunable to send tunnel msg\n", link_co_err);
1583 		return;
1584 	}
1585 	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
1586 	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
1587 	__tipc_link_xmit_skb(tunnel, skb);
1588 }
1589 
1590 
1591 /* tipc_link_failover_send_queue(): A link has gone down, but a second
1592  * link is still active. We can do failover. Tunnel the failing link's
1593  * whole send queue via the remaining link. This way, we don't lose
1594  * any packets, and sequence order is preserved for subsequent traffic
1595  * sent over the remaining link. Owner node is locked.
1596  */
1597 void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1598 {
1599 	int msgcount;
1600 	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
1601 	struct tipc_msg tunnel_hdr;
1602 	struct sk_buff *skb;
1603 	int split_bundles;
1604 
1605 	if (!tunnel)
1606 		return;
1607 
1608 	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
1609 		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
1610 	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
1611 	msgcount = skb_queue_len(&l_ptr->transmq);
1612 	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1613 	msg_set_msgcnt(&tunnel_hdr, msgcount);
1614 
1615 	if (skb_queue_empty(&l_ptr->transmq)) {
1616 		skb = tipc_buf_acquire(INT_H_SIZE);
1617 		if (skb) {
1618 			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
1619 			msg_set_size(&tunnel_hdr, INT_H_SIZE);
1620 			__tipc_link_xmit_skb(tunnel, skb);
1621 		} else {
1622 			pr_warn("%sunable to send changeover msg\n",
1623 				link_co_err);
1624 		}
1625 		return;
1626 	}
1627 
1628 	split_bundles = (l_ptr->owner->active_links[0] !=
1629 			 l_ptr->owner->active_links[1]);
1630 
1631 	skb_queue_walk(&l_ptr->transmq, skb) {
1632 		struct tipc_msg *msg = buf_msg(skb);
1633 
1634 		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
1635 			struct tipc_msg *m = msg_get_wrapped(msg);
1636 			unchar *pos = (unchar *)m;
1637 
1638 			msgcount = msg_msgcnt(msg);
1639 			while (msgcount--) {
1640 				msg_set_seqno(m, msg_seqno(msg));
1641 				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
1642 						      msg_link_selector(m));
1643 				pos += align(msg_size(m));
1644 				m = (struct tipc_msg *)pos;
1645 			}
1646 		} else {
1647 			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
1648 					      msg_link_selector(msg));
1649 		}
1650 	}
1651 }
1652 
1653 /* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
1654  * duplicate of the first link's send queue via the new link. This way, we
1655  * are guaranteed that currently queued packets from a socket are delivered
1656  * before future traffic from the same socket, even if this is using the
1657  * new link. The last arriving copy of each duplicate packet is dropped at
1658  * the receiving end by the regular protocol check, so packet cardinality
1659  * and sequence order is preserved per sender/receiver socket pair.
1660  * Owner node is locked.
1661  */
1662 void tipc_link_dup_queue_xmit(struct tipc_link *link,
1663 			      struct tipc_link *tnl)
1664 {
1665 	struct sk_buff *skb;
1666 	struct tipc_msg tnl_hdr;
1667 	struct sk_buff_head *queue = &link->transmq;
1668 	int mcnt;
1669 
1670 	tipc_msg_init(link_own_addr(link), &tnl_hdr, CHANGEOVER_PROTOCOL,
1671 		      DUPLICATE_MSG, INT_H_SIZE, link->addr);
1672 	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
1673 	msg_set_msgcnt(&tnl_hdr, mcnt);
1674 	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
1675 
1676 tunnel_queue:
1677 	skb_queue_walk(queue, skb) {
1678 		struct sk_buff *outskb;
1679 		struct tipc_msg *msg = buf_msg(skb);
1680 		u32 len = msg_size(msg);
1681 
1682 		msg_set_ack(msg, mod(link->next_in_no - 1));
1683 		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
1684 		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
1685 		outskb = tipc_buf_acquire(len + INT_H_SIZE);
1686 		if (outskb == NULL) {
1687 			pr_warn("%sunable to send duplicate msg\n",
1688 				link_co_err);
1689 			return;
1690 		}
1691 		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
1692 		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
1693 					       skb->data, len);
1694 		__tipc_link_xmit_skb(tnl, outskb);
1695 		if (!tipc_link_is_up(link))
1696 			return;
1697 	}
1698 	if (queue == &link->backlogq)
1699 		return;
1700 	queue = &link->backlogq;
1701 	goto tunnel_queue;
1702 }
1703 
1704 /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
1705  * Owner node is locked.
1706  */
1707 static void tipc_link_dup_rcv(struct tipc_link *link,
1708 			      struct sk_buff *skb)
1709 {
1710 	struct sk_buff *iskb;
1711 	int pos = 0;
1712 
1713 	if (!tipc_link_is_up(link))
1714 		return;
1715 
1716 	if (!tipc_msg_extract(skb, &iskb, &pos)) {
1717 		pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
1718 		return;
1719 	}
1720 	/* Append buffer to deferred queue, if applicable: */
1721 	link_handle_out_of_seq_msg(link, iskb);
1722 }
1723 
1724 /*  tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
1725  *  Owner node is locked.
1726  */
1727 static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
1728 					      struct sk_buff *t_buf)
1729 {
1730 	struct tipc_msg *t_msg = buf_msg(t_buf);
1731 	struct sk_buff *buf = NULL;
1732 	struct tipc_msg *msg;
1733 	int pos = 0;
1734 
1735 	if (tipc_link_is_up(l_ptr))
1736 		tipc_link_reset(l_ptr);
1737 
1738 	/* First failover packet? */
1739 	if (l_ptr->exp_msg_count == START_CHANGEOVER)
1740 		l_ptr->exp_msg_count = msg_msgcnt(t_msg);
1741 
1742 	/* Should there be an inner packet? */
1743 	if (l_ptr->exp_msg_count) {
1744 		l_ptr->exp_msg_count--;
1745 		if (!tipc_msg_extract(t_buf, &buf, &pos)) {
1746 			pr_warn("%sno inner failover pkt\n", link_co_err);
1747 			goto exit;
1748 		}
1749 		msg = buf_msg(buf);
1750 
1751 		if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
1752 			kfree_skb(buf);
1753 			buf = NULL;
1754 			goto exit;
1755 		}
1756 		if (msg_user(msg) == MSG_FRAGMENTER) {
1757 			l_ptr->stats.recv_fragments++;
1758 			tipc_buf_append(&l_ptr->reasm_buf, &buf);
1759 		}
1760 	}
1761 exit:
1762 	if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
1763 		tipc_link_delete(l_ptr);
1764 	return buf;
1765 }
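/* Editor's note (illustrative): if the failed link had, say, three packets
 * outstanding, the first tunnelled ORIGINAL_MSG carries msgcnt == 3;
 * exp_msg_count is set to 3 on that first packet and decremented once per
 * tunnelled packet thereafter, so the receiver knows the failover is
 * complete when exp_msg_count reaches zero.
 */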
1766 
1767 /* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent via another
1768  * link as the result of a failover (ORIGINAL_MSG) or of a second link
1769  * becoming active (DUPLICATE_MSG). Failover packets are returned to the
1770  * active link for delivery upwards.
1771  * Owner node is locked.
1772  */
1773 static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
1774 				struct sk_buff **buf)
1775 {
1776 	struct sk_buff *t_buf = *buf;
1777 	struct tipc_link *l_ptr;
1778 	struct tipc_msg *t_msg = buf_msg(t_buf);
1779 	u32 bearer_id = msg_bearer_id(t_msg);
1780 
1781 	*buf = NULL;
1782 
1783 	if (bearer_id >= MAX_BEARERS)
1784 		goto exit;
1785 
1786 	l_ptr = n_ptr->links[bearer_id];
1787 	if (!l_ptr)
1788 		goto exit;
1789 
1790 	if (msg_type(t_msg) == DUPLICATE_MSG)
1791 		tipc_link_dup_rcv(l_ptr, t_buf);
1792 	else if (msg_type(t_msg) == ORIGINAL_MSG)
1793 		*buf = tipc_link_failover_rcv(l_ptr, t_buf);
1794 	else
1795 		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
1796 exit:
1797 	kfree_skb(t_buf);
1798 	return *buf != NULL;
1799 }
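/* Editor's sketch (hypothetical caller, not in the original source): the
 * tunnel skb is always consumed by tipc_link_tunnel_rcv(); only a
 * reconstructed failover packet survives in *buf for upward delivery.
 */
static inline void example_tunnel_dispatch(struct tipc_node *n_ptr,
					   struct sk_buff *skb)
{
	/* On return, skb is either NULL or points to the extracted packet */
	if (tipc_link_tunnel_rcv(n_ptr, &skb))
		pr_info("inner failover packet ready for delivery\n");
}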
1800 
1801 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
1802 {
1803 	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
1804 
1805 	if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1806 		return;
1807 
1808 	l_ptr->tolerance = tol;
1809 	l_ptr->cont_intv = msecs_to_jiffies(intv);
1810 	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
1811 }
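/* Editor's worked example (illustrative, assuming the millisecond value
 * survives the jiffies round trip): for tol = 3000 ms the continuity
 * interval is capped at 500 ms (3000 / 4 = 750 > 500), so cont_intv becomes
 * 500 ms and abort_limit = 3000 / (500 / 4) = 24 unanswered probes; for
 * tol = 1500 ms the interval is 375 ms and abort_limit is 16.
 */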
1812 
1813 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1814 {
1815 	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->max_pkt / ITEM_SIZE);
1816 
1817 	l->window = win;
1818 	l->queue_limit[TIPC_LOW_IMPORTANCE]      = win / 2;
1819 	l->queue_limit[TIPC_MEDIUM_IMPORTANCE]   = win;
1820 	l->queue_limit[TIPC_HIGH_IMPORTANCE]     = win / 2 * 3;
1821 	l->queue_limit[TIPC_CRITICAL_IMPORTANCE] = win * 2;
1822 	l->queue_limit[TIPC_SYSTEM_IMPORTANCE]   = max_bulk;
1823 }
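/* Editor's worked example (illustrative): with win = 50 the per-importance
 * queue limits become 25 (LOW), 50 (MEDIUM), 75 (HIGH) and 100 (CRITICAL),
 * while SYSTEM traffic is capped at max_bulk, the number of max_pkt-sized
 * packets needed to carry TIPC_MAX_PUBLICATIONS name table items of
 * ITEM_SIZE each.
 */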
1824 
1825 /* tipc_link_find_owner - locate owner node of link by link's name
1826  * @net: the applicable net namespace
1827  * @name: pointer to link name string
1828  * @bearer_id: pointer to index in 'node->links' array where the link was found.
1829  *
1830  * Returns pointer to node owning the link, or NULL if no matching link is found.
1831  */
1832 static struct tipc_node *tipc_link_find_owner(struct net *net,
1833 					      const char *link_name,
1834 					      unsigned int *bearer_id)
1835 {
1836 	struct tipc_net *tn = net_generic(net, tipc_net_id);
1837 	struct tipc_link *l_ptr;
1838 	struct tipc_node *n_ptr;
1839 	struct tipc_node *found_node = NULL;
1840 	int i;
1841 
1842 	*bearer_id = 0;
1843 	rcu_read_lock();
1844 	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
1845 		tipc_node_lock(n_ptr);
1846 		for (i = 0; i < MAX_BEARERS; i++) {
1847 			l_ptr = n_ptr->links[i];
1848 			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
1849 				*bearer_id = i;
1850 				found_node = n_ptr;
1851 				break;
1852 			}
1853 		}
1854 		tipc_node_unlock(n_ptr);
1855 		if (found_node)
1856 			break;
1857 	}
1858 	rcu_read_unlock();
1859 
1860 	return found_node;
1861 }
1862 
1863 /**
1864  * link_reset_statistics - reset link statistics
1865  * @l_ptr: pointer to link
1866  */
1867 static void link_reset_statistics(struct tipc_link *l_ptr)
1868 {
1869 	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
1870 	l_ptr->stats.sent_info = l_ptr->next_out_no;
1871 	l_ptr->stats.recv_info = l_ptr->next_in_no;
1872 }
1873 
1874 static void link_print(struct tipc_link *l_ptr, const char *str)
1875 {
1876 	struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
1877 	struct tipc_bearer *b_ptr;
1878 
1879 	rcu_read_lock();
1880 	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
1881 	if (b_ptr)
1882 		pr_info("%s Link %x<%s>", str, l_ptr->addr, b_ptr->name);
1883 	rcu_read_unlock();
1884 
1885 	if (link_working_unknown(l_ptr))
1886 		pr_cont(":WU\n");
1887 	else if (link_reset_reset(l_ptr))
1888 		pr_cont(":RR\n");
1889 	else if (link_reset_unknown(l_ptr))
1890 		pr_cont(":RU\n");
1891 	else if (link_working_working(l_ptr))
1892 		pr_cont(":WW\n");
1893 	else
1894 		pr_cont("\n");
1895 }
1896 
1897 /* Parse and validate nested properties, common to media, bearer and link
1898  */
1899 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1900 {
1901 	int err;
1902 
1903 	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1904 			       tipc_nl_prop_policy);
1905 	if (err)
1906 		return err;
1907 
1908 	if (props[TIPC_NLA_PROP_PRIO]) {
1909 		u32 prio;
1910 
1911 		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1912 		if (prio > TIPC_MAX_LINK_PRI)
1913 			return -EINVAL;
1914 	}
1915 
1916 	if (props[TIPC_NLA_PROP_TOL]) {
1917 		u32 tol;
1918 
1919 		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1920 		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1921 			return -EINVAL;
1922 	}
1923 
1924 	if (props[TIPC_NLA_PROP_WIN]) {
1925 		u32 win;
1926 
1927 		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1928 		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1929 			return -EINVAL;
1930 	}
1931 
1932 	return 0;
1933 }
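/* Editor's usage sketch (hypothetical caller, not in the original source):
 * extracting one validated property after a successful parse. Values that
 * reach the caller have already passed the range checks above.
 */
static inline int example_get_link_window(struct nlattr *link_prop, u32 *win)
{
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
	int err = tipc_nl_parse_link_prop(link_prop, props);

	if (err)
		return err;
	if (!props[TIPC_NLA_PROP_WIN])
		return -ENOENT;
	*win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
	return 0;
}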
1934 
1935 int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
1936 {
1937 	int err;
1938 	int res = 0;
1939 	unsigned int bearer_id;
1940 	char *name;
1941 	struct tipc_link *link;
1942 	struct tipc_node *node;
1943 	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1944 	struct net *net = sock_net(skb->sk);
1945 
1946 	if (!info->attrs[TIPC_NLA_LINK])
1947 		return -EINVAL;
1948 
1949 	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1950 			       info->attrs[TIPC_NLA_LINK],
1951 			       tipc_nl_link_policy);
1952 	if (err)
1953 		return err;
1954 
1955 	if (!attrs[TIPC_NLA_LINK_NAME])
1956 		return -EINVAL;
1957 
1958 	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1959 
1960 	node = tipc_link_find_owner(net, name, &bearer_id);
1961 	if (!node)
1962 		return -EINVAL;
1963 
1964 	tipc_node_lock(node);
1965 
1966 	link = node->links[bearer_id];
1967 	if (!link) {
1968 		res = -EINVAL;
1969 		goto out;
1970 	}
1971 
1972 	if (attrs[TIPC_NLA_LINK_PROP]) {
1973 		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1974 
1975 		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
1976 					      props);
1977 		if (err) {
1978 			res = err;
1979 			goto out;
1980 		}
1981 
1982 		if (props[TIPC_NLA_PROP_TOL]) {
1983 			u32 tol;
1984 
1985 			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1986 			link_set_supervision_props(link, tol);
1987 			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
1988 		}
1989 		if (props[TIPC_NLA_PROP_PRIO]) {
1990 			u32 prio;
1991 
1992 			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1993 			link->priority = prio;
1994 			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
1995 		}
1996 		if (props[TIPC_NLA_PROP_WIN]) {
1997 			u32 win;
1998 
1999 			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2000 			tipc_link_set_queue_limits(link, win);
2001 		}
2002 	}
2003 
2004 out:
2005 	tipc_node_unlock(node);
2006 
2007 	return res;
2008 }
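/* Editor's note (illustrative): the setter above expects a request of the
 * shape TIPC_NLA_LINK { TIPC_NLA_LINK_NAME, TIPC_NLA_LINK_PROP {
 * TIPC_NLA_PROP_TOL and/or TIPC_NLA_PROP_PRIO and/or TIPC_NLA_PROP_WIN } }.
 * Tolerance and priority changes are also advertised to the peer via a
 * STATE_MSG, while a window change is purely local.
 */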
2009 
2010 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2011 {
2012 	int i;
2013 	struct nlattr *stats;
2014 
2015 	struct nla_map {
2016 		u32 key;
2017 		u32 val;
2018 	};
2019 
2020 	struct nla_map map[] = {
2021 		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
2022 		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2023 		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2024 		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2025 		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
2026 		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
2027 		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2028 		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2029 		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2030 		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2031 		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2032 			s->msg_length_counts : 1},
2033 		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2034 		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2035 		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2036 		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2037 		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2038 		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2039 		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2040 		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2041 		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2042 		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
2043 		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2044 		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2045 		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2046 		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
2047 		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2048 		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2049 		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2050 		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2051 		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2052 		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2053 		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2054 		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2055 			(s->accu_queue_sz / s->queue_sz_counts) : 0}
2056 	};
2057 
2058 	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
2059 	if (!stats)
2060 		return -EMSGSIZE;
2061 
2062 	for (i = 0; i < ARRAY_SIZE(map); i++)
2063 		if (nla_put_u32(skb, map[i].key, map[i].val))
2064 			goto msg_full;
2065 
2066 	nla_nest_end(skb, stats);
2067 
2068 	return 0;
2069 msg_full:
2070 	nla_nest_cancel(skb, stats);
2071 
2072 	return -EMSGSIZE;
2073 }
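/* Editor's sketch (illustrative only): the start/put/end-or-cancel pattern
 * used by every nested-attribute writer in this file, reduced to its
 * minimal form. The function name is hypothetical.
 */
static inline int example_put_one_stat(struct sk_buff *skb, u32 key, u32 val)
{
	struct nlattr *nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);

	if (!nest)
		return -EMSGSIZE;
	if (nla_put_u32(skb, key, val)) {
		nla_nest_cancel(skb, nest);	/* roll back the partial nest */
		return -EMSGSIZE;
	}
	nla_nest_end(skb, nest);
	return 0;
}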
2074 
2075 /* Caller should hold appropriate locks to protect the link */
2076 static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2077 			      struct tipc_link *link)
2078 {
2079 	int err;
2080 	void *hdr;
2081 	struct nlattr *attrs;
2082 	struct nlattr *prop;
2083 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2084 
2085 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2086 			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2087 	if (!hdr)
2088 		return -EMSGSIZE;
2089 
2090 	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2091 	if (!attrs)
2092 		goto msg_full;
2093 
2094 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2095 		goto attr_msg_full;
2096 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
2097 			tipc_cluster_mask(tn->own_addr)))
2098 		goto attr_msg_full;
2099 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
2100 		goto attr_msg_full;
2101 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
2102 		goto attr_msg_full;
2103 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
2104 		goto attr_msg_full;
2105 
2106 	if (tipc_link_is_up(link))
2107 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2108 			goto attr_msg_full;
2109 	if (tipc_link_is_active(link))
2110 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2111 			goto attr_msg_full;
2112 
2113 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2114 	if (!prop)
2115 		goto attr_msg_full;
2116 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2117 		goto prop_msg_full;
2118 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2119 		goto prop_msg_full;
2120 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2121 			link->queue_limit[TIPC_LOW_IMPORTANCE]))
2122 		goto prop_msg_full;
2125 	nla_nest_end(msg->skb, prop);
2126 
2127 	err = __tipc_nl_add_stats(msg->skb, &link->stats);
2128 	if (err)
2129 		goto attr_msg_full;
2130 
2131 	nla_nest_end(msg->skb, attrs);
2132 	genlmsg_end(msg->skb, hdr);
2133 
2134 	return 0;
2135 
2136 prop_msg_full:
2137 	nla_nest_cancel(msg->skb, prop);
2138 attr_msg_full:
2139 	nla_nest_cancel(msg->skb, attrs);
2140 msg_full:
2141 	genlmsg_cancel(msg->skb, hdr);
2142 
2143 	return -EMSGSIZE;
2144 }
2145 
2146 /* Caller should hold node lock */
2147 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2148 				    struct tipc_node *node, u32 *prev_link)
2149 {
2150 	u32 i;
2151 	int err;
2152 
2153 	for (i = *prev_link; i < MAX_BEARERS; i++) {
2154 		*prev_link = i;
2155 
2156 		if (!node->links[i])
2157 			continue;
2158 
2159 		err = __tipc_nl_add_link(net, msg, node->links[i]);
2160 		if (err)
2161 			return err;
2162 	}
2163 	*prev_link = 0;
2164 
2165 	return 0;
2166 }
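/* Editor's note (illustrative): if the dump skb fills while bearer 2 of a
 * node is being added, *prev_link stays at 2 and the next dump pass resumes
 * at i = 2 for the same node; only after all MAX_BEARERS slots have been
 * visited is *prev_link reset to 0, so the following node starts from the
 * first bearer.
 */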
2167 
2168 int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
2169 {
2170 	struct net *net = sock_net(skb->sk);
2171 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2172 	struct tipc_node *node;
2173 	struct tipc_nl_msg msg;
2174 	u32 prev_node = cb->args[0];
2175 	u32 prev_link = cb->args[1];
2176 	int done = cb->args[2];
2177 	int err;
2178 
2179 	if (done)
2180 		return 0;
2181 
2182 	msg.skb = skb;
2183 	msg.portid = NETLINK_CB(cb->skb).portid;
2184 	msg.seq = cb->nlh->nlmsg_seq;
2185 
2186 	rcu_read_lock();
2187 
2188 	if (prev_node) {
2189 		node = tipc_node_find(net, prev_node);
2190 		if (!node) {
2191 			/* We never set seq or call nl_dump_check_consistent(),
2192 			 * which means that setting prev_seq here will cause
2193 			 * the consistency check to fail in the netlink
2194 			 * callback handler, resulting in the last NLMSG_DONE
2195 			 * message having the NLM_F_DUMP_INTR flag set.
2196 			 */
2197 			cb->prev_seq = 1;
2198 			goto out;
2199 		}
2200 
2201 		list_for_each_entry_continue_rcu(node, &tn->node_list,
2202 						 list) {
2203 			tipc_node_lock(node);
2204 			err = __tipc_nl_add_node_links(net, &msg, node,
2205 						       &prev_link);
2206 			tipc_node_unlock(node);
2207 			if (err)
2208 				goto out;
2209 
2210 			prev_node = node->addr;
2211 		}
2212 	} else {
2213 		err = tipc_nl_add_bc_link(net, &msg);
2214 		if (err)
2215 			goto out;
2216 
2217 		list_for_each_entry_rcu(node, &tn->node_list, list) {
2218 			tipc_node_lock(node);
2219 			err = __tipc_nl_add_node_links(net, &msg, node,
2220 						       &prev_link);
2221 			tipc_node_unlock(node);
2222 			if (err)
2223 				goto out;
2224 
2225 			prev_node = node->addr;
2226 		}
2227 	}
2228 	done = 1;
2229 out:
2230 	rcu_read_unlock();
2231 
2232 	cb->args[0] = prev_node;
2233 	cb->args[1] = prev_link;
2234 	cb->args[2] = done;
2235 
2236 	return skb->len;
2237 }
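/* Editor's note (illustrative): the dump is resumable because its cursor
 * lives in cb->args[]: args[0] holds the last fully visited node address,
 * args[1] the bearer index to resume from within that node, and args[2] a
 * done flag, so a follow-up call after a filled skb continues exactly where
 * the previous one stopped.
 */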
2238 
2239 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
2240 {
2241 	struct net *net = genl_info_net(info);
2242 	struct sk_buff *ans_skb;
2243 	struct tipc_nl_msg msg;
2244 	struct tipc_link *link;
2245 	struct tipc_node *node;
2246 	char *name;
2247 	unsigned int bearer_id;
2248 	int err;
2249 
2250 	if (!info->attrs[TIPC_NLA_LINK_NAME])
2251 		return -EINVAL;
2252 
2253 	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
2254 	node = tipc_link_find_owner(net, name, &bearer_id);
2255 	if (!node)
2256 		return -EINVAL;
2257 
2258 	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2259 	if (!ans_skb)
2260 		return -ENOMEM;
2261 
2262 	msg.skb = ans_skb;
2263 	msg.portid = info->snd_portid;
2264 	msg.seq = info->snd_seq;
2265 
2266 	tipc_node_lock(node);
2267 	link = node->links[bearer_id];
2268 	if (!link) {
2269 		err = -EINVAL;
2270 		goto err_out;
2271 	}
2272 
2273 	err = __tipc_nl_add_link(net, &msg, link);
2274 	if (err)
2275 		goto err_out;
2276 
2277 	tipc_node_unlock(node);
2278 
2279 	return genlmsg_reply(ans_skb, info);
2280 
2281 err_out:
2282 	tipc_node_unlock(node);
2283 	nlmsg_free(ans_skb);
2284 
2285 	return err;
2286 }
2287 
2288 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
2289 {
2290 	int err;
2291 	char *link_name;
2292 	unsigned int bearer_id;
2293 	struct tipc_link *link;
2294 	struct tipc_node *node;
2295 	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2296 	struct net *net = sock_net(skb->sk);
2297 
2298 	if (!info->attrs[TIPC_NLA_LINK])
2299 		return -EINVAL;
2300 
2301 	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
2302 			       info->attrs[TIPC_NLA_LINK],
2303 			       tipc_nl_link_policy);
2304 	if (err)
2305 		return err;
2306 
2307 	if (!attrs[TIPC_NLA_LINK_NAME])
2308 		return -EINVAL;
2309 
2310 	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2311 
2312 	if (strcmp(link_name, tipc_bclink_name) == 0)
2313 		return tipc_bclink_reset_stats(net);
2318 
2319 	node = tipc_link_find_owner(net, link_name, &bearer_id);
2320 	if (!node)
2321 		return -EINVAL;
2322 
2323 	tipc_node_lock(node);
2324 
2325 	link = node->links[bearer_id];
2326 	if (!link) {
2327 		tipc_node_unlock(node);
2328 		return -EINVAL;
2329 	}
2330 
2331 	link_reset_statistics(link);
2332 
2333 	tipc_node_unlock(node);
2334 
2335 	return 0;
2336 }
2337