xref: /openbmc/linux/net/tipc/link.c (revision a8da474e)
1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include "core.h"
38 #include "subscr.h"
39 #include "link.h"
40 #include "bcast.h"
41 #include "socket.h"
42 #include "name_distr.h"
43 #include "discover.h"
44 #include "netlink.h"
45 
46 #include <linux/pkt_sched.h>
47 
48 /*
49  * Error message prefixes
50  */
51 static const char *link_co_err = "Link tunneling error, ";
52 static const char *link_rst_msg = "Resetting link ";
53 static const char tipc_bclink_name[] = "broadcast-link";
54 
55 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
56 	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
57 	[TIPC_NLA_LINK_NAME] = {
58 		.type = NLA_STRING,
59 		.len = TIPC_MAX_LINK_NAME
60 	},
61 	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
62 	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
63 	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
64 	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
65 	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
66 	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
67 	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
68 	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
69 };
70 
71 /* Properties valid for media, bearer and link */
72 static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
73 	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
74 	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
75 	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
76 	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
77 };
78 
79 /* Send states for broadcast NACKs
80  */
81 enum {
82 	BC_NACK_SND_CONDITIONAL,
83 	BC_NACK_SND_UNCONDITIONAL,
84 	BC_NACK_SND_SUPPRESS,
85 };
86 
87 /*
88  * Interval between NACKs when packets arrive out of order
89  */
90 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
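/* Example: with the default TIPC_MIN_LINK_WIN of 50 (from link.h) this
 * works out to a NACK on the first out-of-order arrival (deferdq length
 * just became 1) and then on every 100th deferred packet, see
 * tipc_link_build_nack_msg(). Illustrative count sequence:
 *
 *	deferred_recv == 1    -> NACK sent
 *	deferred_recv == 100  -> NACK sent (100 % TIPC_NACK_INTV == 0)
 *	deferred_recv == 200  -> NACK sent, and so on
 */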
91 /*
92  * Out-of-range value for link session numbers
93  */
94 #define WILDCARD_SESSION 0x10000
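/* Session numbers on the wire are 16 bit wide, so 0x10000 can never match
 * a real session. tipc_link_reset() stores WILDCARD_SESSION in
 * l->peer_session, which makes the duplicate-RESET check in
 * tipc_link_proto_rcv() accept any incoming session number.
 */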
95 
96 /* Link FSM states:
97  */
98 enum {
99 	LINK_ESTABLISHED     = 0xe,
100 	LINK_ESTABLISHING    = 0xe  << 4,
101 	LINK_RESET           = 0x1  << 8,
102 	LINK_RESETTING       = 0x2  << 12,
103 	LINK_PEER_RESET      = 0xd  << 16,
104 	LINK_FAILINGOVER     = 0xf  << 20,
105 	LINK_SYNCHING        = 0xc  << 24
106 };
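/* Each state value above occupies its own nibble of the state word, so a
 * set of states can be tested with a single bitwise AND instead of a
 * chain of comparisons, e.g.:
 *
 *	l->state & (LINK_ESTABLISHED | LINK_SYNCHING)	- traffic allowed?
 *	l->state & (LINK_RESETTING | LINK_PEER_RESET |
 *		    LINK_FAILINGOVER)			- protocol blocked?
 *
 * as done by link_is_up() and tipc_link_is_blocked() below.
 */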
107 
108 /* Link FSM state checking routines
109  */
110 static int link_is_up(struct tipc_link *l)
111 {
112 	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
113 }
114 
115 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
116 			       struct sk_buff_head *xmitq);
117 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
118 				      u16 rcvgap, int tolerance, int priority,
119 				      struct sk_buff_head *xmitq);
120 static void link_reset_statistics(struct tipc_link *l_ptr);
121 static void link_print(struct tipc_link *l_ptr, const char *str);
122 static void tipc_link_build_nack_msg(struct tipc_link *l,
123 				     struct sk_buff_head *xmitq);
124 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
125 					struct sk_buff_head *xmitq);
126 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
127 
128 /*
129  *  Simple non-static link routines (i.e. referenced outside this file)
130  */
131 bool tipc_link_is_up(struct tipc_link *l)
132 {
133 	return link_is_up(l);
134 }
135 
136 bool tipc_link_peer_is_down(struct tipc_link *l)
137 {
138 	return l->state == LINK_PEER_RESET;
139 }
140 
141 bool tipc_link_is_reset(struct tipc_link *l)
142 {
143 	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
144 }
145 
146 bool tipc_link_is_establishing(struct tipc_link *l)
147 {
148 	return l->state == LINK_ESTABLISHING;
149 }
150 
151 bool tipc_link_is_synching(struct tipc_link *l)
152 {
153 	return l->state == LINK_SYNCHING;
154 }
155 
156 bool tipc_link_is_failingover(struct tipc_link *l)
157 {
158 	return l->state == LINK_FAILINGOVER;
159 }
160 
161 bool tipc_link_is_blocked(struct tipc_link *l)
162 {
163 	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
164 }
165 
166 static bool link_is_bc_sndlink(struct tipc_link *l)
167 {
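	/* The broadcast send link is the only link with no bc_sndlink set */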
168 	return !l->bc_sndlink;
169 }
170 
171 static bool link_is_bc_rcvlink(struct tipc_link *l)
172 {
173 	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
174 }
175 
176 int tipc_link_is_active(struct tipc_link *l)
177 {
178 	return l->active;
179 }
180 
181 void tipc_link_set_active(struct tipc_link *l, bool active)
182 {
183 	l->active = active;
184 }
185 
186 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
187 			   struct tipc_link *uc_l,
188 			   struct sk_buff_head *xmitq)
189 {
190 	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
191 
192 	snd_l->ackers++;
193 	rcv_l->acked = snd_l->snd_nxt - 1;
194 	tipc_link_build_bc_init_msg(uc_l, xmitq);
195 }
196 
197 void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
198 			      struct tipc_link *rcv_l,
199 			      struct sk_buff_head *xmitq)
200 {
201 	u16 ack = snd_l->snd_nxt - 1;
202 
203 	snd_l->ackers--;
204 	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
205 	tipc_link_reset(rcv_l);
206 	rcv_l->state = LINK_RESET;
207 	if (!snd_l->ackers) {
208 		tipc_link_reset(snd_l);
209 		__skb_queue_purge(xmitq);
210 	}
211 }
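/* The ackers count acts as a per-packet reference count on the broadcast
 * send link: TIPC_SKB_CB(skb)->ackers is set from snd_l->ackers when a
 * packet enters the transmit queue, and tipc_link_bc_ack_rcv() frees the
 * packet only when the last peer has acknowledged it. Illustrative
 * sequence with two peers:
 *
 *	add peer A, add peer B		-> snd_l->ackers == 2
 *	xmit pkt 17			-> TIPC_SKB_CB(pkt)->ackers == 2
 *	ack from A covering 17		-> count drops to 1, pkt kept
 *	ack from B covering 17		-> count hits 0, pkt freed
 */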
212 
213 int tipc_link_bc_peers(struct tipc_link *l)
214 {
215 	return l->ackers;
216 }
217 
218 void tipc_link_set_mtu(struct tipc_link *l, int mtu)
219 {
220 	l->mtu = mtu;
221 }
222 
223 int tipc_link_mtu(struct tipc_link *l)
224 {
225 	return l->mtu;
226 }
227 
228 static u32 link_own_addr(struct tipc_link *l)
229 {
230 	return msg_prevnode(l->pmsg);
231 }
232 
233 /**
234  * tipc_link_create - create a new link
235  * @net: the applicable net namespace
236  * @if_name: associated interface name
237  * @bearer_id: id (index) of associated bearer
238  * @tolerance: link tolerance to be used by link
239  * @net_plane: network plane (A,B,C..) this link belongs to
240  * @mtu: mtu to be advertised by link
241  * @priority: priority to be used by link
242  * @window: send window to be used by link
243  * @session: session to be used by link
244  * @ownnode: identity of own node
245  * @peer: node id of peer node
246  * @peer_caps: bitmap describing peer node capabilities
247  * @bc_sndlink: the namespace global link used for broadcast sending
248  * @bc_rcvlink: the peer specific link used for broadcast reception
249  * @inputq: queue to put messages ready for delivery
250  * @namedq: queue to put binding table update messages ready for delivery
251  * @link: return value, pointer to put the created link
252  *
253  * Returns true if link was created, otherwise false
254  */
255 bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
256 		      int tolerance, char net_plane, u32 mtu, int priority,
257 		      int window, u32 session, u32 ownnode, u32 peer,
258 		      u16 peer_caps,
259 		      struct tipc_link *bc_sndlink,
260 		      struct tipc_link *bc_rcvlink,
261 		      struct sk_buff_head *inputq,
262 		      struct sk_buff_head *namedq,
263 		      struct tipc_link **link)
264 {
265 	struct tipc_link *l;
266 	struct tipc_msg *hdr;
267 
268 	l = kzalloc(sizeof(*l), GFP_ATOMIC);
269 	if (!l)
270 		return false;
271 	*link = l;
272 	l->pmsg = (struct tipc_msg *)&l->proto_msg;
273 	hdr = l->pmsg;
274 	tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
275 	msg_set_size(hdr, sizeof(l->proto_msg));
276 	msg_set_session(hdr, session);
277 	msg_set_bearer_id(hdr, l->bearer_id);
278 
279 	/* Note: peer i/f name is completed by reset/activate message */
280 	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
281 		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
282 		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
283 	strcpy((char *)msg_data(hdr), if_name);
284 
285 	l->addr = peer;
286 	l->peer_caps = peer_caps;
287 	l->net = net;
288 	l->peer_session = WILDCARD_SESSION;
289 	l->bearer_id = bearer_id;
290 	l->tolerance = tolerance;
291 	l->net_plane = net_plane;
292 	l->advertised_mtu = mtu;
293 	l->mtu = mtu;
294 	l->priority = priority;
295 	tipc_link_set_queue_limits(l, window);
296 	l->ackers = 1;
297 	l->bc_sndlink = bc_sndlink;
298 	l->bc_rcvlink = bc_rcvlink;
299 	l->inputq = inputq;
300 	l->namedq = namedq;
301 	l->state = LINK_RESETTING;
302 	__skb_queue_head_init(&l->transmq);
303 	__skb_queue_head_init(&l->backlogq);
304 	__skb_queue_head_init(&l->deferdq);
305 	skb_queue_head_init(&l->wakeupq);
306 	skb_queue_head_init(l->inputq);
307 	return true;
308 }
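/* Illustrative caller sketch with hypothetical values; the real caller is
 * tipc_node_check_dest() in node.c:
 *
 *	struct tipc_link *l;
 *
 *	if (!tipc_link_create(net, "eth0", bearer_id, TIPC_DEF_LINK_TOL,
 *			      'A', mtu, TIPC_DEF_LINK_PRI, TIPC_DEF_LINK_WIN,
 *			      session, ownnode, peer, peer_caps, bc_sndlink,
 *			      bc_rcvlink, inputq, namedq, &l))
 *		return false;
 */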
309 
310 /**
311  * tipc_link_bc_create - create new link to be used for broadcast
312  * @net: the applicable net namespace
313  * @mtu: mtu to be used
314  * @window: send window to be used
315  * @inputq: queue to put messages ready for delivery
316  * @namedq: queue to put binding table update messages ready for delivery
317  * @link: return value, pointer to put the created link
318  *
319  * Returns true if link was created, otherwise false
320  */
321 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
322 			 int mtu, int window, u16 peer_caps,
323 			 struct sk_buff_head *inputq,
324 			 struct sk_buff_head *namedq,
325 			 struct tipc_link *bc_sndlink,
326 			 struct tipc_link **link)
327 {
328 	struct tipc_link *l;
329 
330 	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
331 			      0, ownnode, peer, peer_caps, bc_sndlink,
332 			      NULL, inputq, namedq, link))
333 		return false;
334 
335 	l = *link;
336 	strcpy(l->name, tipc_bclink_name);
337 	tipc_link_reset(l);
338 	l->state = LINK_RESET;
339 	l->ackers = 0;
340 	l->bc_rcvlink = l;
341 
342 	/* Broadcast send link is always up */
343 	if (link_is_bc_sndlink(l))
344 		l->state = LINK_ESTABLISHED;
345 
346 	return true;
347 }
348 
349 /**
350  * tipc_link_fsm_evt - link finite state machine
351  * @l: pointer to link
352  * @evt: state machine event to be processed
353  */
354 int tipc_link_fsm_evt(struct tipc_link *l, int evt)
355 {
356 	int rc = 0;
357 
358 	switch (l->state) {
359 	case LINK_RESETTING:
360 		switch (evt) {
361 		case LINK_PEER_RESET_EVT:
362 			l->state = LINK_PEER_RESET;
363 			break;
364 		case LINK_RESET_EVT:
365 			l->state = LINK_RESET;
366 			break;
367 		case LINK_FAILURE_EVT:
368 		case LINK_FAILOVER_BEGIN_EVT:
369 		case LINK_ESTABLISH_EVT:
370 		case LINK_FAILOVER_END_EVT:
371 		case LINK_SYNCH_BEGIN_EVT:
372 		case LINK_SYNCH_END_EVT:
373 		default:
374 			goto illegal_evt;
375 		}
376 		break;
377 	case LINK_RESET:
378 		switch (evt) {
379 		case LINK_PEER_RESET_EVT:
380 			l->state = LINK_ESTABLISHING;
381 			break;
382 		case LINK_FAILOVER_BEGIN_EVT:
383 			l->state = LINK_FAILINGOVER;
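		/* fall thru' */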
384 		case LINK_FAILURE_EVT:
385 		case LINK_RESET_EVT:
386 		case LINK_ESTABLISH_EVT:
387 		case LINK_FAILOVER_END_EVT:
388 			break;
389 		case LINK_SYNCH_BEGIN_EVT:
390 		case LINK_SYNCH_END_EVT:
391 		default:
392 			goto illegal_evt;
393 		}
394 		break;
395 	case LINK_PEER_RESET:
396 		switch (evt) {
397 		case LINK_RESET_EVT:
398 			l->state = LINK_ESTABLISHING;
399 			break;
400 		case LINK_PEER_RESET_EVT:
401 		case LINK_ESTABLISH_EVT:
402 		case LINK_FAILURE_EVT:
403 			break;
404 		case LINK_SYNCH_BEGIN_EVT:
405 		case LINK_SYNCH_END_EVT:
406 		case LINK_FAILOVER_BEGIN_EVT:
407 		case LINK_FAILOVER_END_EVT:
408 		default:
409 			goto illegal_evt;
410 		}
411 		break;
412 	case LINK_FAILINGOVER:
413 		switch (evt) {
414 		case LINK_FAILOVER_END_EVT:
415 			l->state = LINK_RESET;
416 			break;
417 		case LINK_PEER_RESET_EVT:
418 		case LINK_RESET_EVT:
419 		case LINK_ESTABLISH_EVT:
420 		case LINK_FAILURE_EVT:
421 			break;
422 		case LINK_FAILOVER_BEGIN_EVT:
423 		case LINK_SYNCH_BEGIN_EVT:
424 		case LINK_SYNCH_END_EVT:
425 		default:
426 			goto illegal_evt;
427 		}
428 		break;
429 	case LINK_ESTABLISHING:
430 		switch (evt) {
431 		case LINK_ESTABLISH_EVT:
432 			l->state = LINK_ESTABLISHED;
433 			break;
434 		case LINK_FAILOVER_BEGIN_EVT:
435 			l->state = LINK_FAILINGOVER;
436 			break;
437 		case LINK_RESET_EVT:
438 			l->state = LINK_RESET;
439 			break;
440 		case LINK_FAILURE_EVT:
441 		case LINK_PEER_RESET_EVT:
442 		case LINK_SYNCH_BEGIN_EVT:
443 		case LINK_FAILOVER_END_EVT:
444 			break;
445 		case LINK_SYNCH_END_EVT:
446 		default:
447 			goto illegal_evt;
448 		}
449 		break;
450 	case LINK_ESTABLISHED:
451 		switch (evt) {
452 		case LINK_PEER_RESET_EVT:
453 			l->state = LINK_PEER_RESET;
454 			rc |= TIPC_LINK_DOWN_EVT;
455 			break;
456 		case LINK_FAILURE_EVT:
457 			l->state = LINK_RESETTING;
458 			rc |= TIPC_LINK_DOWN_EVT;
459 			break;
460 		case LINK_RESET_EVT:
461 			l->state = LINK_RESET;
462 			break;
463 		case LINK_ESTABLISH_EVT:
464 		case LINK_SYNCH_END_EVT:
465 			break;
466 		case LINK_SYNCH_BEGIN_EVT:
467 			l->state = LINK_SYNCHING;
468 			break;
469 		case LINK_FAILOVER_BEGIN_EVT:
470 		case LINK_FAILOVER_END_EVT:
471 		default:
472 			goto illegal_evt;
473 		}
474 		break;
475 	case LINK_SYNCHING:
476 		switch (evt) {
477 		case LINK_PEER_RESET_EVT:
478 			l->state = LINK_PEER_RESET;
479 			rc |= TIPC_LINK_DOWN_EVT;
480 			break;
481 		case LINK_FAILURE_EVT:
482 			l->state = LINK_RESETTING;
483 			rc |= TIPC_LINK_DOWN_EVT;
484 			break;
485 		case LINK_RESET_EVT:
486 			l->state = LINK_RESET;
487 			break;
488 		case LINK_ESTABLISH_EVT:
489 		case LINK_SYNCH_BEGIN_EVT:
490 			break;
491 		case LINK_SYNCH_END_EVT:
492 			l->state = LINK_ESTABLISHED;
493 			break;
494 		case LINK_FAILOVER_BEGIN_EVT:
495 		case LINK_FAILOVER_END_EVT:
496 		default:
497 			goto illegal_evt;
498 		}
499 		break;
500 	default:
501 		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
502 	}
503 	return rc;
504 illegal_evt:
505 	pr_err("Illegal FSM event %x in state %x on link %s\n",
506 	       evt, l->state, l->name);
507 	return rc;
508 }
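/* Typical event sequence when a peer endpoint restarts, as encoded above:
 *
 *	ESTABLISHED  -- LINK_FAILURE_EVT    -> RESETTING (+TIPC_LINK_DOWN_EVT)
 *	RESETTING    -- LINK_RESET_EVT      -> RESET
 *	RESET        -- LINK_PEER_RESET_EVT -> ESTABLISHING
 *	ESTABLISHING -- LINK_ESTABLISH_EVT  -> ESTABLISHED
 */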
509 
510 /* link_profile_stats - update statistical profiling of traffic
511  */
512 static void link_profile_stats(struct tipc_link *l)
513 {
514 	struct sk_buff *skb;
515 	struct tipc_msg *msg;
516 	int length;
517 
518 	/* Update counters used in statistical profiling of send traffic */
519 	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
520 	l->stats.queue_sz_counts++;
521 
522 	skb = skb_peek(&l->transmq);
523 	if (!skb)
524 		return;
525 	msg = buf_msg(skb);
526 	length = msg_size(msg);
527 
528 	if (msg_user(msg) == MSG_FRAGMENTER) {
529 		if (msg_type(msg) != FIRST_FRAGMENT)
530 			return;
531 		length = msg_size(msg_get_wrapped(msg));
532 	}
533 	l->stats.msg_lengths_total += length;
534 	l->stats.msg_length_counts++;
535 	if (length <= 64)
536 		l->stats.msg_length_profile[0]++;
537 	else if (length <= 256)
538 		l->stats.msg_length_profile[1]++;
539 	else if (length <= 1024)
540 		l->stats.msg_length_profile[2]++;
541 	else if (length <= 4096)
542 		l->stats.msg_length_profile[3]++;
543 	else if (length <= 16384)
544 		l->stats.msg_length_profile[4]++;
545 	else if (length <= 32768)
546 		l->stats.msg_length_profile[5]++;
547 	else
548 		l->stats.msg_length_profile[6]++;
549 }
550 
551 /* tipc_link_timeout - perform periodic task as instructed from node timeout
552  */
555 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
556 {
557 	int rc = 0;
558 	int mtyp = STATE_MSG;
559 	bool xmit = false;
560 	bool prb = false;
561 	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
562 	u16 bc_acked = l->bc_rcvlink->acked;
563 	bool bc_up = link_is_up(l->bc_rcvlink);
564 
565 	link_profile_stats(l);
566 
567 	switch (l->state) {
568 	case LINK_ESTABLISHED:
569 	case LINK_SYNCHING:
570 		if (!l->silent_intv_cnt) {
571 			if (bc_up && (bc_acked != bc_snt))
572 				xmit = true;
573 		} else if (l->silent_intv_cnt <= l->abort_limit) {
574 			xmit = true;
575 			prb = true;
576 		} else {
577 			rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
578 		}
579 		l->silent_intv_cnt++;
580 		break;
581 	case LINK_RESET:
582 		xmit = true;
583 		mtyp = RESET_MSG;
584 		break;
585 	case LINK_ESTABLISHING:
586 		xmit = true;
587 		mtyp = ACTIVATE_MSG;
588 		break;
589 	case LINK_PEER_RESET:
590 	case LINK_RESETTING:
591 	case LINK_FAILINGOVER:
592 		break;
593 	default:
594 		break;
595 	}
596 
597 	if (xmit)
598 		tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);
599 
600 	return rc;
601 }
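/* Probing timeline for an established link, one call per node timer tick:
 *
 *	tick N:   packet was received since last tick
 *		  -> silent_intv_cnt == 0, no probe needed
 *	tick N+1: nothing received
 *		  -> silent_intv_cnt <= abort_limit, send probing STATE_MSG
 *	...
 *	tick N+m: silent_intv_cnt > abort_limit
 *		  -> LINK_FAILURE_EVT, link is taken down
 *
 * Any accepted packet resets silent_intv_cnt in tipc_link_rcv().
 */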
602 
603 /**
604  * link_schedule_user - schedule a message sender for wakeup after congestion
605  * @link: congested link
606  * @list: buffer chain of the message that could not be sent
607  * Create pseudo msg to send back to user when congestion abates
608  * Does not consume buffer list
609  */
610 static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
611 {
612 	struct tipc_msg *msg = buf_msg(skb_peek(list));
613 	int imp = msg_importance(msg);
614 	u32 oport = msg_origport(msg);
615 	u32 addr = link_own_addr(link);
616 	struct sk_buff *skb;
617 
618 	/* This really cannot happen...  */
619 	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
620 		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
621 		return -ENOBUFS;
622 	}
623 	/* Non-blocking sender: */
624 	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
625 		return -ELINKCONG;
626 
627 	/* Create and schedule wakeup pseudo message */
628 	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
629 			      addr, addr, oport, 0, 0);
630 	if (!skb)
631 		return -ENOBUFS;
632 	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
633 	TIPC_SKB_CB(skb)->chain_imp = imp;
634 	skb_queue_tail(&link->wakeupq, skb);
635 	link->stats.link_congs++;
636 	return -ELINKCONG;
637 }
638 
639 /**
640  * link_prepare_wakeup - prepare users for wakeup after congestion
641  * @l: congested link
642  * Move a number of waiting users, as permitted by available space in
643  * the send queue, from link wait queue to node wait queue for wakeup
644  */
645 void link_prepare_wakeup(struct tipc_link *l)
646 {
647 	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
648 	int imp, lim;
649 	struct sk_buff *skb, *tmp;
650 
651 	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
652 		imp = TIPC_SKB_CB(skb)->chain_imp;
653 		lim = l->window + l->backlog[imp].limit;
654 		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
655 		if ((pnd[imp] + l->backlog[imp].len) >= lim)
656 			break;
657 		skb_unlink(skb, &l->wakeupq);
658 		skb_queue_tail(l->inputq, skb);
659 	}
660 }
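/* Worked example of the test above, assuming window 50 and the LOW
 * importance backlog limit of 25 set by tipc_link_set_queue_limits():
 *
 *	lim = 50 + 25 = 75
 *	backlog[LOW].len == 70, first waiting sender queued 10 packets
 *	pnd[LOW] = 10; 10 + 70 >= 75 -> stop, leave the sender sleeping
 *
 * A sender is only woken when its whole pending chain fits under the
 * limit, so it is not woken up just to block again immediately.
 */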
661 
662 void tipc_link_reset(struct tipc_link *l)
663 {
664 	/* Link is down, accept any session */
665 	l->peer_session = WILDCARD_SESSION;
666 
667 	/* If peer is up, it only accepts an incremented session number */
668 	msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);
669 
670 	/* Prepare for renewed mtu size negotiation */
671 	l->mtu = l->advertised_mtu;
672 
673 	/* Clean up all queues and counters: */
674 	__skb_queue_purge(&l->transmq);
675 	__skb_queue_purge(&l->deferdq);
676 	skb_queue_splice_init(&l->wakeupq, l->inputq);
677 	__skb_queue_purge(&l->backlogq);
678 	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
679 	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
680 	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
681 	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
682 	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
683 	kfree_skb(l->reasm_buf);
684 	kfree_skb(l->failover_reasm_skb);
685 	l->reasm_buf = NULL;
686 	l->failover_reasm_skb = NULL;
687 	l->rcv_unacked = 0;
688 	l->snd_nxt = 1;
689 	l->rcv_nxt = 1;
690 	l->acked = 0;
691 	l->silent_intv_cnt = 0;
692 	l->stats.recv_info = 0;
693 	l->stale_count = 0;
694 	l->bc_peer_is_up = false;
695 	link_reset_statistics(l);
696 }
697 
698 /**
699  * tipc_link_xmit(): enqueue buffer list according to queue situation
700  * @link: link to use
701  * @list: chain of buffers containing message
702  * @xmitq: returned list of packets to be sent by caller
703  *
704  * Consumes the buffer chain, except when returning -ELINKCONG,
705  * since the caller then may want to make more send attempts.
706  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
707  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
708  */
709 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
710 		   struct sk_buff_head *xmitq)
711 {
712 	struct tipc_msg *hdr = buf_msg(skb_peek(list));
713 	unsigned int maxwin = l->window;
714 	unsigned int i, imp = msg_importance(hdr);
715 	unsigned int mtu = l->mtu;
716 	u16 ack = l->rcv_nxt - 1;
717 	u16 seqno = l->snd_nxt;
718 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
719 	struct sk_buff_head *transmq = &l->transmq;
720 	struct sk_buff_head *backlogq = &l->backlogq;
721 	struct sk_buff *skb, *_skb, *bskb;
722 
723 	/* Match msg importance against this and all higher backlog limits: */
724 	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
725 		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
726 			return link_schedule_user(l, list);
727 	}
728 	if (unlikely(msg_size(hdr) > mtu))
729 		return -EMSGSIZE;
730 
731 	/* Prepare each packet for sending, and add to relevant queue: */
732 	while (skb_queue_len(list)) {
733 		skb = skb_peek(list);
734 		hdr = buf_msg(skb);
735 		msg_set_seqno(hdr, seqno);
736 		msg_set_ack(hdr, ack);
737 		msg_set_bcast_ack(hdr, bc_ack);
738 
739 		if (likely(skb_queue_len(transmq) < maxwin)) {
740 			_skb = skb_clone(skb, GFP_ATOMIC);
741 			if (!_skb)
742 				return -ENOBUFS;
743 			__skb_dequeue(list);
744 			__skb_queue_tail(transmq, skb);
745 			__skb_queue_tail(xmitq, _skb);
746 			TIPC_SKB_CB(skb)->ackers = l->ackers;
747 			l->rcv_unacked = 0;
748 			seqno++;
749 			continue;
750 		}
751 		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
752 			kfree_skb(__skb_dequeue(list));
753 			l->stats.sent_bundled++;
754 			continue;
755 		}
756 		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
757 			kfree_skb(__skb_dequeue(list));
758 			__skb_queue_tail(backlogq, bskb);
759 			l->backlog[msg_importance(buf_msg(bskb))].len++;
760 			l->stats.sent_bundled++;
761 			l->stats.sent_bundles++;
762 			continue;
763 		}
764 		l->backlog[imp].len += skb_queue_len(list);
765 		skb_queue_splice_tail_init(list, backlogq);
766 	}
767 	l->snd_nxt = seqno;
768 	return 0;
769 }
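/* Illustrative send path sketch (hypothetical wrapper; the real callers
 * live in node.c and socket.c):
 *
 *	struct sk_buff_head xmitq;
 *	int rc;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_xmit(l, &list, &xmitq);	 (under the node/link lock)
 *	if (rc == -ELINKCONG)
 *		wait for wakeup pseudo message;	 (list was not consumed)
 *	tipc_bearer_xmit(net, bearer_id, &xmitq, addr);	 (outside the lock)
 */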
770 
771 void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
772 {
773 	struct sk_buff *skb, *_skb;
774 	struct tipc_msg *hdr;
775 	u16 seqno = l->snd_nxt;
776 	u16 ack = l->rcv_nxt - 1;
777 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
778 
779 	while (skb_queue_len(&l->transmq) < l->window) {
780 		skb = skb_peek(&l->backlogq);
781 		if (!skb)
782 			break;
783 		_skb = skb_clone(skb, GFP_ATOMIC);
784 		if (!_skb)
785 			break;
786 		__skb_dequeue(&l->backlogq);
787 		hdr = buf_msg(skb);
788 		l->backlog[msg_importance(hdr)].len--;
789 		__skb_queue_tail(&l->transmq, skb);
790 		__skb_queue_tail(xmitq, _skb);
791 		TIPC_SKB_CB(skb)->ackers = l->ackers;
792 		msg_set_seqno(hdr, seqno);
793 		msg_set_ack(hdr, ack);
794 		msg_set_bcast_ack(hdr, bc_ack);
795 		l->rcv_unacked = 0;
796 		seqno++;
797 	}
798 	l->snd_nxt = seqno;
799 }
800 
801 static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
802 {
803 	struct tipc_msg *hdr = buf_msg(skb);
804 
805 	pr_warn("Retransmission failure on link <%s>\n", l->name);
806 	link_print(l, "Resetting link ");
807 	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
808 		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
809 	pr_info("sqno %u, prev: %x, src: %x\n",
810 		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
811 }
812 
813 int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
814 		      struct sk_buff_head *xmitq)
815 {
816 	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
817 	struct tipc_msg *hdr;
818 	u16 ack = l->rcv_nxt - 1;
819 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
820 
821 	if (!skb)
822 		return 0;
823 
824 	/* Detect repeated retransmit failures on same packet */
825 	if (likely(l->last_retransm != buf_seqno(skb))) {
826 		l->last_retransm = buf_seqno(skb);
827 		l->stale_count = 1;
828 	} else if (++l->stale_count > 100) {
829 		link_retransmit_failure(l, skb);
830 		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
831 	}
832 
833 	/* Move forward to where retransmission should start */
834 	skb_queue_walk(&l->transmq, skb) {
835 		if (!less(buf_seqno(skb), from))
836 			break;
837 	}
838 
839 	skb_queue_walk_from(&l->transmq, skb) {
840 		if (more(buf_seqno(skb), to))
841 			break;
842 		hdr = buf_msg(skb);
843 		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
844 		if (!_skb)
845 			return 0;
846 		hdr = buf_msg(_skb);
847 		msg_set_ack(hdr, ack);
848 		msg_set_bcast_ack(hdr, bc_ack);
849 		_skb->priority = TC_PRIO_CONTROL;
850 		__skb_queue_tail(xmitq, _skb);
851 		l->stats.retransmitted++;
852 	}
853 	return 0;
854 }
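/* Example: a STATE_MSG arrives carrying ack == 112 and seq_gap == 3.
 * tipc_link_proto_rcv() releases everything up to 112 and then calls
 * tipc_link_retrans(l, 113, 115, xmitq), so exactly the three packets the
 * peer reported missing are copied back onto the wire with updated ack
 * fields and control priority.
 */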
855 
856 /* tipc_data_input - deliver data and name distr msgs to upper layer
857  *
858  * Consumes buffer if message is of right type
859  * Node lock must be held
860  */
861 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
862 			    struct sk_buff_head *inputq)
863 {
864 	switch (msg_user(buf_msg(skb))) {
865 	case TIPC_LOW_IMPORTANCE:
866 	case TIPC_MEDIUM_IMPORTANCE:
867 	case TIPC_HIGH_IMPORTANCE:
868 	case TIPC_CRITICAL_IMPORTANCE:
869 	case CONN_MANAGER:
870 		skb_queue_tail(inputq, skb);
871 		return true;
872 	case NAME_DISTRIBUTOR:
873 		l->bc_rcvlink->state = LINK_ESTABLISHED;
874 		skb_queue_tail(l->namedq, skb);
875 		return true;
876 	case MSG_BUNDLER:
877 	case TUNNEL_PROTOCOL:
878 	case MSG_FRAGMENTER:
879 	case BCAST_PROTOCOL:
880 		return false;
881 	default:
882 		pr_warn("Dropping received illegal msg type\n");
883 		kfree_skb(skb);
884 		return false;
885 	}
886 }
887 
888 /* tipc_link_input - process packet that has passed link protocol check
889  *
890  * Consumes buffer
891  */
892 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
893 			   struct sk_buff_head *inputq)
894 {
895 	struct tipc_msg *hdr = buf_msg(skb);
896 	struct sk_buff **reasm_skb = &l->reasm_buf;
897 	struct sk_buff *iskb;
898 	struct sk_buff_head tmpq;
899 	int usr = msg_user(hdr);
900 	int rc = 0;
901 	int pos = 0;
902 	int ipos = 0;
903 
904 	if (unlikely(usr == TUNNEL_PROTOCOL)) {
905 		if (msg_type(hdr) == SYNCH_MSG) {
906 			__skb_queue_purge(&l->deferdq);
907 			goto drop;
908 		}
909 		if (!tipc_msg_extract(skb, &iskb, &ipos))
910 			return rc;
911 		kfree_skb(skb);
912 		skb = iskb;
913 		hdr = buf_msg(skb);
914 		if (less(msg_seqno(hdr), l->drop_point))
915 			goto drop;
916 		if (tipc_data_input(l, skb, inputq))
917 			return rc;
918 		usr = msg_user(hdr);
919 		reasm_skb = &l->failover_reasm_skb;
920 	}
921 
922 	if (usr == MSG_BUNDLER) {
923 		skb_queue_head_init(&tmpq);
924 		l->stats.recv_bundles++;
925 		l->stats.recv_bundled += msg_msgcnt(hdr);
926 		while (tipc_msg_extract(skb, &iskb, &pos))
927 			tipc_data_input(l, iskb, &tmpq);
928 		tipc_skb_queue_splice_tail(&tmpq, inputq);
929 		return 0;
930 	} else if (usr == MSG_FRAGMENTER) {
931 		l->stats.recv_fragments++;
932 		if (tipc_buf_append(reasm_skb, &skb)) {
933 			l->stats.recv_fragmented++;
934 			tipc_data_input(l, skb, inputq);
935 		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
936 			pr_warn_ratelimited("Unable to build fragment list\n");
937 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
938 		}
939 		return 0;
940 	} else if (usr == BCAST_PROTOCOL) {
941 		tipc_bcast_lock(l->net);
942 		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
943 		tipc_bcast_unlock(l->net);
944 	}
945 drop:
946 	kfree_skb(skb);
947 	return 0;
948 }
949 
950 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
951 {
952 	bool released = false;
953 	struct sk_buff *skb, *tmp;
954 
955 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
956 		if (more(buf_seqno(skb), acked))
957 			break;
958 		__skb_unlink(skb, &l->transmq);
959 		kfree_skb(skb);
960 		released = true;
961 	}
962 	return released;
963 }
964 
965 /* tipc_link_build_ack_msg: prepare link acknowledge message for transmission
966  *
967  * Note that sending of broadcast ack is coordinated among nodes, to reduce
968  * risk of ack storms towards the sender
969  */
970 int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
971 {
972 	if (!l)
973 		return 0;
974 
975 	/* Broadcast ACK must be sent via a unicast link => defer to caller */
976 	if (link_is_bc_rcvlink(l)) {
977 		if (((l->rcv_nxt ^ link_own_addr(l)) & 0xf) != 0xf)
978 			return 0;
979 		l->rcv_unacked = 0;
980 		return TIPC_LINK_SND_BC_ACK;
981 	}
982 
983 	/* Unicast ACK */
984 	l->rcv_unacked = 0;
985 	l->stats.sent_acks++;
986 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
987 	return 0;
988 }
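/* The XOR test above staggers broadcast acks: a node acks only when
 * (rcv_nxt ^ own address) has all four low bits set, i.e. for any given
 * rcv_nxt about one node in sixteen acks, and each node gets a turn
 * within every window of 16 packets. Example: a node whose address ends
 * in nibble 0x3 acks when (rcv_nxt & 0xf) == 0xc, since 0x3 ^ 0xc == 0xf.
 */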
989 
990 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
991  */
992 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
993 {
994 	int mtyp = RESET_MSG;
995 
996 	if (l->state == LINK_ESTABLISHING)
997 		mtyp = ACTIVATE_MSG;
998 
999 	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
1000 }
1001 
1002 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1003  */
1004 static void tipc_link_build_nack_msg(struct tipc_link *l,
1005 				     struct sk_buff_head *xmitq)
1006 {
1007 	u32 def_cnt = ++l->stats.deferred_recv;
1008 
1009 	if (link_is_bc_rcvlink(l))
1010 		return;
1011 
1012 	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
1013 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
1014 }
1015 
1016 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1017  * @l: the link that should handle the message
1018  * @skb: TIPC packet
1019  * @xmitq: queue to place packets to be sent after this call
1020  */
1021 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1022 		  struct sk_buff_head *xmitq)
1023 {
1024 	struct sk_buff_head *defq = &l->deferdq;
1025 	struct tipc_msg *hdr;
1026 	u16 seqno, rcv_nxt, win_lim;
1027 	int rc = 0;
1028 
1029 	do {
1030 		hdr = buf_msg(skb);
1031 		seqno = msg_seqno(hdr);
1032 		rcv_nxt = l->rcv_nxt;
1033 		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1034 
1035 		/* Verify and update link state */
1036 		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1037 			return tipc_link_proto_rcv(l, skb, xmitq);
1038 
1039 		if (unlikely(!link_is_up(l))) {
1040 			if (l->state == LINK_ESTABLISHING)
1041 				rc = TIPC_LINK_UP_EVT;
1042 			goto drop;
1043 		}
1044 
1045 		/* Don't send probe at next timeout expiration */
1046 		l->silent_intv_cnt = 0;
1047 
1048 		/* Drop if outside receive window */
1049 		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1050 			l->stats.duplicates++;
1051 			goto drop;
1052 		}
1053 
1054 		/* Forward queues and wake up waiting users */
1055 		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1056 			tipc_link_advance_backlog(l, xmitq);
1057 			if (unlikely(!skb_queue_empty(&l->wakeupq)))
1058 				link_prepare_wakeup(l);
1059 		}
1060 
1061 		/* Defer delivery if sequence gap */
1062 		if (unlikely(seqno != rcv_nxt)) {
1063 			__tipc_skb_queue_sorted(defq, seqno, skb);
1064 			tipc_link_build_nack_msg(l, xmitq);
1065 			break;
1066 		}
1067 
1068 		/* Deliver packet */
1069 		l->rcv_nxt++;
1070 		l->stats.recv_info++;
1071 		if (!tipc_data_input(l, skb, l->inputq))
1072 			rc |= tipc_link_input(l, skb, l->inputq);
1073 		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1074 			rc |= tipc_link_build_ack_msg(l, xmitq);
1075 		if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
1076 			break;
1077 	} while ((skb = __skb_dequeue(defq)));
1078 
1079 	return rc;
1080 drop:
1081 	kfree_skb(skb);
1082 	return rc;
1083 }
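/* Example of the deferred delivery loop above, starting at rcv_nxt == 10:
 *
 *	pkt 12 arrives -> gap, sorted into defq, NACK may be built
 *	pkt 11 arrives -> still a gap, sorted into defq
 *	pkt 10 arrives -> delivered, rcv_nxt == 11; the loop then dequeues
 *			  11 and 12 from defq and delivers them in order,
 *			  leaving rcv_nxt == 13
 */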
1084 
1085 /*
1086  * Send protocol message to the other endpoint.
1087  */
1088 void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
1089 			  u32 gap, u32 tolerance, u32 priority)
1090 {
1091 	struct sk_buff *skb = NULL;
1092 	struct sk_buff_head xmitq;
1093 
1094 	__skb_queue_head_init(&xmitq);
1095 	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
1096 				  tolerance, priority, &xmitq);
1097 	skb = __skb_dequeue(&xmitq);
1098 	if (!skb)
1099 		return;
1100 	tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr);
1101 	l->rcv_unacked = 0;
1102 }
1103 
1104 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1105 				      u16 rcvgap, int tolerance, int priority,
1106 				      struct sk_buff_head *xmitq)
1107 {
1108 	struct sk_buff *skb = NULL;
1109 	struct tipc_msg *hdr = l->pmsg;
1110 	bool node_up = link_is_up(l->bc_rcvlink);
1111 
1112 	/* Don't send protocol message during reset or link failover */
1113 	if (tipc_link_is_blocked(l))
1114 		return;
1115 
1116 	msg_set_type(hdr, mtyp);
1117 	msg_set_net_plane(hdr, l->net_plane);
1118 	msg_set_next_sent(hdr, l->snd_nxt);
1119 	msg_set_ack(hdr, l->rcv_nxt - 1);
1120 	msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
1121 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1122 	msg_set_link_tolerance(hdr, tolerance);
1123 	msg_set_linkprio(hdr, priority);
1124 	msg_set_redundant_link(hdr, node_up);
1125 	msg_set_seq_gap(hdr, 0);
1126 
1127 	/* Compatibility: created msg must not be in sequence with pkt flow */
1128 	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1129 
1130 	if (mtyp == STATE_MSG) {
1131 		if (!tipc_link_is_up(l))
1132 			return;
1133 
1134 		/* Override rcvgap if there are packets in deferred queue */
1135 		if (!skb_queue_empty(&l->deferdq))
1136 			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - l->rcv_nxt;
1137 		if (rcvgap) {
1138 			msg_set_seq_gap(hdr, rcvgap);
1139 			l->stats.sent_nacks++;
1140 		}
1141 		msg_set_probe(hdr, probe);
1142 		if (probe)
1143 			l->stats.sent_probes++;
1144 		l->stats.sent_states++;
1145 		l->rcv_unacked = 0;
1146 	} else {
1147 		/* RESET_MSG or ACTIVATE_MSG */
1148 		msg_set_max_pkt(hdr, l->advertised_mtu);
1149 		msg_set_ack(hdr, l->rcv_nxt - 1);
1150 		msg_set_next_sent(hdr, 1);
1151 	}
1152 	skb = tipc_buf_acquire(msg_size(hdr));
1153 	if (!skb)
1154 		return;
1155 	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
1156 	skb->priority = TC_PRIO_CONTROL;
1157 	__skb_queue_tail(xmitq, skb);
1158 }
1159 
1160 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1161  * with contents of the link's transmit and backlog queues.
1162  */
1163 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1164 			   int mtyp, struct sk_buff_head *xmitq)
1165 {
1166 	struct sk_buff *skb, *tnlskb;
1167 	struct tipc_msg *hdr, tnlhdr;
1168 	struct sk_buff_head *queue = &l->transmq;
1169 	struct sk_buff_head tmpxq, tnlq;
1170 	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1171 
1172 	if (!tnl)
1173 		return;
1174 
1175 	skb_queue_head_init(&tnlq);
1176 	skb_queue_head_init(&tmpxq);
1177 
1178 	/* At least one packet required for safe algorithm => add dummy */
1179 	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1180 			      BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
1181 			      0, 0, TIPC_ERR_NO_PORT);
1182 	if (!skb) {
1183 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1184 		return;
1185 	}
1186 	skb_queue_tail(&tnlq, skb);
1187 	tipc_link_xmit(l, &tnlq, &tmpxq);
1188 	__skb_queue_purge(&tmpxq);
1189 
1190 	/* Initialize reusable tunnel packet header */
1191 	tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
1192 		      mtyp, INT_H_SIZE, l->addr);
1193 	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
1194 	msg_set_msgcnt(&tnlhdr, pktcnt);
1195 	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1196 tnl:
1197 	/* Wrap each packet into a tunnel packet */
1198 	skb_queue_walk(queue, skb) {
1199 		hdr = buf_msg(skb);
1200 		if (queue == &l->backlogq)
1201 			msg_set_seqno(hdr, seqno++);
1202 		pktlen = msg_size(hdr);
1203 		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1204 		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
1205 		if (!tnlskb) {
1206 			pr_warn("%sunable to send packet\n", link_co_err);
1207 			return;
1208 		}
1209 		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1210 		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1211 		__skb_queue_tail(&tnlq, tnlskb);
1212 	}
1213 	if (queue != &l->backlogq) {
1214 		queue = &l->backlogq;
1215 		goto tnl;
1216 	}
1217 
1218 	tipc_link_xmit(tnl, &tnlq, xmitq);
1219 
1220 	if (mtyp == FAILOVER_MSG) {
1221 		tnl->drop_point = l->rcv_nxt;
1222 		tnl->failover_reasm_skb = l->reasm_buf;
1223 		l->reasm_buf = NULL;
1224 	}
1225 }
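/* Failover example: a link fails with packets 20..24 still in its
 * transmit and backlog queues. Each of them is wrapped in a
 * TUNNEL_PROTOCOL/FAILOVER_MSG header (msgcnt == 5) and sent via the
 * remaining link with tipc_link_xmit(). On reception, tipc_link_input()
 * unwraps them and drops any packet whose original sequence number lies
 * below drop_point, i.e. one that was already delivered before the
 * failover started.
 */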
1226 
1227 /* tipc_link_proto_rcv(): receive link level protocol message:
1228  * Note that network plane id propagates through the network, and may
1229  * change at any time. The node with lowest numerical id determines
1230  * network plane
1231  */
1232 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1233 			       struct sk_buff_head *xmitq)
1234 {
1235 	struct tipc_msg *hdr = buf_msg(skb);
1236 	u16 rcvgap = 0;
1237 	u16 ack = msg_ack(hdr);
1238 	u16 gap = msg_seq_gap(hdr);
1239 	u16 peers_snd_nxt =  msg_next_sent(hdr);
1240 	u16 peers_tol = msg_link_tolerance(hdr);
1241 	u16 peers_prio = msg_linkprio(hdr);
1242 	u16 rcv_nxt = l->rcv_nxt;
1243 	int mtyp = msg_type(hdr);
1244 	char *if_name;
1245 	int rc = 0;
1246 
1247 	if (tipc_link_is_blocked(l) || !xmitq)
1248 		goto exit;
1249 
1250 	if (link_own_addr(l) > msg_prevnode(hdr))
1251 		l->net_plane = msg_net_plane(hdr);
1252 
1253 	switch (mtyp) {
1254 	case RESET_MSG:
1255 
1256 		/* Ignore duplicate RESET with old session number */
1257 		if ((less_eq(msg_session(hdr), l->peer_session)) &&
1258 		    (l->peer_session != WILDCARD_SESSION))
1259 			break;
1260 		/* fall thru' */
1261 
1262 	case ACTIVATE_MSG:
1263 
1264 		/* Complete own link name with peer's interface name */
1265 		if_name =  strrchr(l->name, ':') + 1;
1266 		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1267 			break;
1268 		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1269 			break;
1270 		strncpy(if_name, msg_data(hdr),	TIPC_MAX_IF_NAME);
1271 
1272 		/* Update own tolerance if peer indicates a non-zero value */
1273 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1274 			l->tolerance = peers_tol;
1275 
1276 		/* Update own priority if peer's priority is higher */
1277 		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1278 			l->priority = peers_prio;
1279 
1280 		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1281 		if ((mtyp == RESET_MSG) || !link_is_up(l))
1282 			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1283 
1284 		/* ACTIVATE_MSG takes up link if it was already locally reset */
1285 		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1286 			rc = TIPC_LINK_UP_EVT;
1287 
1288 		l->peer_session = msg_session(hdr);
1289 		l->peer_bearer_id = msg_bearer_id(hdr);
1290 		if (l->mtu > msg_max_pkt(hdr))
1291 			l->mtu = msg_max_pkt(hdr);
1292 		break;
1293 
1294 	case STATE_MSG:
1295 
1296 		/* Update own tolerance if peer indicates a non-zero value */
1297 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1298 			l->tolerance = peers_tol;
1299 
1300 		l->silent_intv_cnt = 0;
1301 		l->stats.recv_states++;
1302 		if (msg_probe(hdr))
1303 			l->stats.recv_probes++;
1304 
1305 		if (!link_is_up(l)) {
1306 			if (l->state == LINK_ESTABLISHING)
1307 				rc = TIPC_LINK_UP_EVT;
1308 			break;
1309 		}
1310 
1311 		/* Send NACK if peer has sent pkts we haven't received yet */
1312 		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
1313 			rcvgap = peers_snd_nxt - l->rcv_nxt;
1314 		if (rcvgap || (msg_probe(hdr)))
1315 			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
1316 						  0, 0, xmitq);
1317 		tipc_link_release_pkts(l, ack);
1318 
1319 		/* If NACK, retransmit will now start at right position */
1320 		if (gap) {
1321 			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
1322 			l->stats.recv_nacks++;
1323 		}
1324 
1325 		tipc_link_advance_backlog(l, xmitq);
1326 		if (unlikely(!skb_queue_empty(&l->wakeupq)))
1327 			link_prepare_wakeup(l);
1328 	}
1329 exit:
1330 	kfree_skb(skb);
1331 	return rc;
1332 }
1333 
1334 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1335  */
1336 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1337 					 u16 peers_snd_nxt,
1338 					 struct sk_buff_head *xmitq)
1339 {
1340 	struct sk_buff *skb;
1341 	struct tipc_msg *hdr;
1342 	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1343 	u16 ack = l->rcv_nxt - 1;
1344 	u16 gap_to = peers_snd_nxt - 1;
1345 
1346 	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
1347 			      0, l->addr, link_own_addr(l), 0, 0, 0);
1348 	if (!skb)
1349 		return false;
1350 	hdr = buf_msg(skb);
1351 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1352 	msg_set_bcast_ack(hdr, ack);
1353 	msg_set_bcgap_after(hdr, ack);
1354 	if (dfrd_skb)
1355 		gap_to = buf_seqno(dfrd_skb) - 1;
1356 	msg_set_bcgap_to(hdr, gap_to);
1357 	msg_set_non_seq(hdr, bcast);
1358 	__skb_queue_tail(xmitq, skb);
1359 	return true;
1360 }
1361 
1362 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1363  *
1364  * Give a newly added peer node the sequence number where it should
1365  * start receiving and acking broadcast packets.
1366  */
1367 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1368 					struct sk_buff_head *xmitq)
1369 {
1370 	struct sk_buff_head list;
1371 
1372 	__skb_queue_head_init(&list);
1373 	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1374 		return;
1375 	tipc_link_xmit(l, &list, xmitq);
1376 }
1377 
1378 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
1379  */
1380 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1381 {
1382 	int mtyp = msg_type(hdr);
1383 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1384 
1385 	if (link_is_up(l))
1386 		return;
1387 
1388 	if (msg_user(hdr) == BCAST_PROTOCOL) {
1389 		l->rcv_nxt = peers_snd_nxt;
1390 		l->state = LINK_ESTABLISHED;
1391 		return;
1392 	}
1393 
1394 	if (l->peer_caps & TIPC_BCAST_SYNCH)
1395 		return;
1396 
1397 	if (msg_peer_node_is_up(hdr))
1398 		return;
1399 
1400 	/* Compatibility: accept older, less safe initial synch data */
1401 	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
1402 		l->rcv_nxt = peers_snd_nxt;
1403 }
1404 
1405 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
1406  */
1407 void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
1408 			   struct sk_buff_head *xmitq)
1409 {
1410 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1411 
1412 	if (!link_is_up(l))
1413 		return;
1414 
1415 	if (!msg_peer_node_is_up(hdr))
1416 		return;
1417 
1418 	l->bc_peer_is_up = true;
1419 
1420 	/* Ignore if peers_snd_nxt goes beyond receive window */
1421 	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
1422 		return;
1423 
1424 	if (!more(peers_snd_nxt, l->rcv_nxt)) {
1425 		l->nack_state = BC_NACK_SND_CONDITIONAL;
1426 		return;
1427 	}
1428 
1429 	/* Don't NACK if one was recently sent or peeked */
1430 	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
1431 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1432 		return;
1433 	}
1434 
1435 	/* Conditionally delay NACK sending until next synch rcv */
1436 	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
1437 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1438 		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
1439 			return;
1440 	}
1441 
1442 	/* Send NACK now but suppress next one */
1443 	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
1444 	l->nack_state = BC_NACK_SND_SUPPRESS;
1445 }
1446 
1447 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
1448 			  struct sk_buff_head *xmitq)
1449 {
1450 	struct sk_buff *skb, *tmp;
1451 	struct tipc_link *snd_l = l->bc_sndlink;
1452 
1453 	if (!link_is_up(l) || !l->bc_peer_is_up)
1454 		return;
1455 
1456 	if (!more(acked, l->acked))
1457 		return;
1458 
1459 	/* Skip over packets peer has already acked */
1460 	skb_queue_walk(&snd_l->transmq, skb) {
1461 		if (more(buf_seqno(skb), l->acked))
1462 			break;
1463 	}
1464 
1465 	/* Update/release the packets peer is acking now */
1466 	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
1467 		if (more(buf_seqno(skb), acked))
1468 			break;
1469 		if (!--TIPC_SKB_CB(skb)->ackers) {
1470 			__skb_unlink(skb, &snd_l->transmq);
1471 			kfree_skb(skb);
1472 		}
1473 	}
1474 	l->acked = acked;
1475 	tipc_link_advance_backlog(snd_l, xmitq);
1476 	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
1477 		link_prepare_wakeup(snd_l);
1478 }
1479 
1480 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
1481  */
1482 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
1483 			  struct sk_buff_head *xmitq)
1484 {
1485 	struct tipc_msg *hdr = buf_msg(skb);
1486 	u32 dnode = msg_destnode(hdr);
1487 	int mtyp = msg_type(hdr);
1488 	u16 acked = msg_bcast_ack(hdr);
1489 	u16 from = acked + 1;
1490 	u16 to = msg_bcgap_to(hdr);
1491 	u16 peers_snd_nxt = to + 1;
1492 	int rc = 0;
1493 
1494 	kfree_skb(skb);
1495 
1496 	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
1497 		return 0;
1498 
1499 	if (mtyp != STATE_MSG)
1500 		return 0;
1501 
1502 	if (dnode == link_own_addr(l)) {
1503 		tipc_link_bc_ack_rcv(l, acked, xmitq);
1504 		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
1505 		l->stats.recv_nacks++;
1506 		return rc;
1507 	}
1508 
1509 	/* Msg for other node => suppress own NACK at next sync if applicable */
1510 	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
1511 		l->nack_state = BC_NACK_SND_SUPPRESS;
1512 
1513 	return 0;
1514 }
1515 
1516 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1517 {
1518 	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
1519 
1520 	l->window = win;
1521 	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
1522 	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
1523 	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
1524 	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
1525 	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
1526 }
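/* Worked example with win == 50 (the default link window):
 *
 *	LOW       limit = 25	(win / 2)
 *	MEDIUM    limit = 50	(win)
 *	HIGH      limit = 75	(win / 2 * 3)
 *	CRITICAL  limit = 100	(win * 2)
 *	SYSTEM    limit = max_bulk
 *
 * max_bulk is sized from TIPC_MAX_PUBLICATIONS and the number of binding
 * table items per packet, so that a full binding table distribution can
 * always be accepted into the backlog in one bulk operation.
 */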
1527 
1528 /* tipc_link_find_owner - locate owner node of link by link's name
1529  * @net: the applicable net namespace
1530  * @name: pointer to link name string
1531  * @bearer_id: pointer to index in 'node->links' array where the link was found.
1532  *
1533  * Returns pointer to node owning the link, or NULL if no matching link is found.
1534  */
1535 static struct tipc_node *tipc_link_find_owner(struct net *net,
1536 					      const char *link_name,
1537 					      unsigned int *bearer_id)
1538 {
1539 	struct tipc_net *tn = net_generic(net, tipc_net_id);
1540 	struct tipc_link *l_ptr;
1541 	struct tipc_node *n_ptr;
1542 	struct tipc_node *found_node = NULL;
1543 	int i;
1544 
1545 	*bearer_id = 0;
1546 	rcu_read_lock();
1547 	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
1548 		tipc_node_lock(n_ptr);
1549 		for (i = 0; i < MAX_BEARERS; i++) {
1550 			l_ptr = n_ptr->links[i].link;
1551 			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
1552 				*bearer_id = i;
1553 				found_node = n_ptr;
1554 				break;
1555 			}
1556 		}
1557 		tipc_node_unlock(n_ptr);
1558 		if (found_node)
1559 			break;
1560 	}
1561 	rcu_read_unlock();
1562 
1563 	return found_node;
1564 }
1565 
1566 /**
1567  * link_reset_statistics - reset link statistics
1568  * @l_ptr: pointer to link
1569  */
1570 static void link_reset_statistics(struct tipc_link *l_ptr)
1571 {
1572 	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
1573 	l_ptr->stats.sent_info = l_ptr->snd_nxt;
1574 	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
1575 }
1576 
1577 static void link_print(struct tipc_link *l, const char *str)
1578 {
1579 	struct sk_buff *hskb = skb_peek(&l->transmq);
1580 	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
1581 	u16 tail = l->snd_nxt - 1;
1582 
1583 	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
1584 	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1585 		skb_queue_len(&l->transmq), head, tail,
1586 		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
1587 }
1588 
1589 /* Parse and validate nested (link) properties valid for media, bearer and link
1590  */
1591 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1592 {
1593 	int err;
1594 
1595 	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1596 			       tipc_nl_prop_policy);
1597 	if (err)
1598 		return err;
1599 
1600 	if (props[TIPC_NLA_PROP_PRIO]) {
1601 		u32 prio;
1602 
1603 		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1604 		if (prio > TIPC_MAX_LINK_PRI)
1605 			return -EINVAL;
1606 	}
1607 
1608 	if (props[TIPC_NLA_PROP_TOL]) {
1609 		u32 tol;
1610 
1611 		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1612 		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1613 			return -EINVAL;
1614 	}
1615 
1616 	if (props[TIPC_NLA_PROP_WIN]) {
1617 		u32 win;
1618 
1619 		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1620 		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1621 			return -EINVAL;
1622 	}
1623 
1624 	return 0;
1625 }
1626 
1627 int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
1628 {
1629 	int err;
1630 	int res = 0;
1631 	int bearer_id;
1632 	char *name;
1633 	struct tipc_link *link;
1634 	struct tipc_node *node;
1635 	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1636 	struct net *net = sock_net(skb->sk);
1637 
1638 	if (!info->attrs[TIPC_NLA_LINK])
1639 		return -EINVAL;
1640 
1641 	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1642 			       info->attrs[TIPC_NLA_LINK],
1643 			       tipc_nl_link_policy);
1644 	if (err)
1645 		return err;
1646 
1647 	if (!attrs[TIPC_NLA_LINK_NAME])
1648 		return -EINVAL;
1649 
1650 	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1651 
1652 	if (strcmp(name, tipc_bclink_name) == 0)
1653 		return tipc_nl_bc_link_set(net, attrs);
1654 
1655 	node = tipc_link_find_owner(net, name, &bearer_id);
1656 	if (!node)
1657 		return -EINVAL;
1658 
1659 	tipc_node_lock(node);
1660 
1661 	link = node->links[bearer_id].link;
1662 	if (!link) {
1663 		res = -EINVAL;
1664 		goto out;
1665 	}
1666 
1667 	if (attrs[TIPC_NLA_LINK_PROP]) {
1668 		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1669 
1670 		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
1671 					      props);
1672 		if (err) {
1673 			res = err;
1674 			goto out;
1675 		}
1676 
1677 		if (props[TIPC_NLA_PROP_TOL]) {
1678 			u32 tol;
1679 
1680 			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1681 			link->tolerance = tol;
1682 			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
1683 		}
1684 		if (props[TIPC_NLA_PROP_PRIO]) {
1685 			u32 prio;
1686 
1687 			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1688 			link->priority = prio;
1689 			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
1690 		}
1691 		if (props[TIPC_NLA_PROP_WIN]) {
1692 			u32 win;
1693 
1694 			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1695 			tipc_link_set_queue_limits(link, win);
1696 		}
1697 	}
1698 
1699 out:
1700 	tipc_node_unlock(node);
1701 
1702 	return res;
1703 }
1704 
1705 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
1706 {
1707 	int i;
1708 	struct nlattr *stats;
1709 
1710 	struct nla_map {
1711 		u32 key;
1712 		u32 val;
1713 	};
1714 
1715 	struct nla_map map[] = {
1716 		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
1717 		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1718 		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1719 		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1720 		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1721 		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
1722 		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1723 		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1724 		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1725 		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1726 		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1727 			s->msg_length_counts : 1},
1728 		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1729 		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1730 		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1731 		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1732 		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1733 		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1734 		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
1735 		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
1736 		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
1737 		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
1738 		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
1739 		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
1740 		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
1741 		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
1742 		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
1743 		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
1744 		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
1745 		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
1746 		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
1747 		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
1748 		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
1749 		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
1750 			(s->accu_queue_sz / s->queue_sz_counts) : 0}
1751 	};
1752 
1753 	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1754 	if (!stats)
1755 		return -EMSGSIZE;
1756 
1757 	for (i = 0; i <  ARRAY_SIZE(map); i++)
1758 		if (nla_put_u32(skb, map[i].key, map[i].val))
1759 			goto msg_full;
1760 
1761 	nla_nest_end(skb, stats);
1762 
1763 	return 0;
1764 msg_full:
1765 	nla_nest_cancel(skb, stats);
1766 
1767 	return -EMSGSIZE;
1768 }
1769 
1770 /* Caller should hold appropriate locks to protect the link */
1771 static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
1772 			      struct tipc_link *link, int nlflags)
1773 {
1774 	int err;
1775 	void *hdr;
1776 	struct nlattr *attrs;
1777 	struct nlattr *prop;
1778 	struct tipc_net *tn = net_generic(net, tipc_net_id);
1779 
1780 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1781 			  nlflags, TIPC_NL_LINK_GET);
1782 	if (!hdr)
1783 		return -EMSGSIZE;
1784 
1785 	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1786 	if (!attrs)
1787 		goto msg_full;
1788 
1789 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
1790 		goto attr_msg_full;
1791 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
1792 			tipc_cluster_mask(tn->own_addr)))
1793 		goto attr_msg_full;
1794 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
1795 		goto attr_msg_full;
1796 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
1797 		goto attr_msg_full;
1798 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
1799 		goto attr_msg_full;
1800 
1801 	if (tipc_link_is_up(link))
1802 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1803 			goto attr_msg_full;
1804 	if (link->active)
1805 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
1806 			goto attr_msg_full;
1807 
1808 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1809 	if (!prop)
1810 		goto attr_msg_full;
1811 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1812 		goto prop_msg_full;
1813 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
1814 		goto prop_msg_full;
1815 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
1816 			link->window))
1817 		goto prop_msg_full;
1820 	nla_nest_end(msg->skb, prop);
1821 
1822 	err = __tipc_nl_add_stats(msg->skb, &link->stats);
1823 	if (err)
1824 		goto attr_msg_full;
1825 
1826 	nla_nest_end(msg->skb, attrs);
1827 	genlmsg_end(msg->skb, hdr);
1828 
1829 	return 0;
1830 
1831 prop_msg_full:
1832 	nla_nest_cancel(msg->skb, prop);
1833 attr_msg_full:
1834 	nla_nest_cancel(msg->skb, attrs);
1835 msg_full:
1836 	genlmsg_cancel(msg->skb, hdr);
1837 
1838 	return -EMSGSIZE;
1839 }
1840 
1841 /* Caller should hold node lock  */
1842 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
1843 				    struct tipc_node *node, u32 *prev_link)
1844 {
1845 	u32 i;
1846 	int err;
1847 
1848 	for (i = *prev_link; i < MAX_BEARERS; i++) {
1849 		*prev_link = i;
1850 
1851 		if (!node->links[i].link)
1852 			continue;
1853 
1854 		err = __tipc_nl_add_link(net, msg,
1855 					 node->links[i].link, NLM_F_MULTI);
1856 		if (err)
1857 			return err;
1858 	}
1859 	*prev_link = 0;
1860 
1861 	return 0;
1862 }
1863 
1864 int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
1865 {
1866 	struct net *net = sock_net(skb->sk);
1867 	struct tipc_net *tn = net_generic(net, tipc_net_id);
1868 	struct tipc_node *node;
1869 	struct tipc_nl_msg msg;
1870 	u32 prev_node = cb->args[0];
1871 	u32 prev_link = cb->args[1];
1872 	int done = cb->args[2];
1873 	int err;
1874 
1875 	if (done)
1876 		return 0;
1877 
1878 	msg.skb = skb;
1879 	msg.portid = NETLINK_CB(cb->skb).portid;
1880 	msg.seq = cb->nlh->nlmsg_seq;
1881 
1882 	rcu_read_lock();
1883 	if (prev_node) {
1884 		node = tipc_node_find(net, prev_node);
1885 		if (!node) {
1886 			/* We never set seq or call nl_dump_check_consistent(),
1887 			 * which means that setting prev_seq here will cause the
1888 			 * consistency check to fail in the netlink callback
1889 			 * handler, resulting in the last NLMSG_DONE message
1890 			 * having the NLM_F_DUMP_INTR flag set.
1891 			 */
1892 			cb->prev_seq = 1;
1893 			goto out;
1894 		}
1895 		tipc_node_put(node);
1896 
1897 		list_for_each_entry_continue_rcu(node, &tn->node_list,
1898 						 list) {
1899 			tipc_node_lock(node);
1900 			err = __tipc_nl_add_node_links(net, &msg, node,
1901 						       &prev_link);
1902 			tipc_node_unlock(node);
1903 			if (err)
1904 				goto out;
1905 
1906 			prev_node = node->addr;
1907 		}
1908 	} else {
1909 		err = tipc_nl_add_bc_link(net, &msg);
1910 		if (err)
1911 			goto out;
1912 
1913 		list_for_each_entry_rcu(node, &tn->node_list, list) {
1914 			tipc_node_lock(node);
1915 			err = __tipc_nl_add_node_links(net, &msg, node,
1916 						       &prev_link);
1917 			tipc_node_unlock(node);
1918 			if (err)
1919 				goto out;
1920 
1921 			prev_node = node->addr;
1922 		}
1923 	}
1924 	done = 1;
1925 out:
1926 	rcu_read_unlock();
1927 
1928 	cb->args[0] = prev_node;
1929 	cb->args[1] = prev_link;
1930 	cb->args[2] = done;
1931 
1932 	return skb->len;
1933 }
1934 
1935 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
1936 {
1937 	struct net *net = genl_info_net(info);
1938 	struct tipc_nl_msg msg;
1939 	char *name;
1940 	int err;
1941 
1942 	msg.portid = info->snd_portid;
1943 	msg.seq = info->snd_seq;
1944 
1945 	if (!info->attrs[TIPC_NLA_LINK_NAME])
1946 		return -EINVAL;
1947 	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
1948 
1949 	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1950 	if (!msg.skb)
1951 		return -ENOMEM;
1952 
1953 	if (strcmp(name, tipc_bclink_name) == 0) {
1954 		err = tipc_nl_add_bc_link(net, &msg);
1955 		if (err) {
1956 			nlmsg_free(msg.skb);
1957 			return err;
1958 		}
1959 	} else {
1960 		int bearer_id;
1961 		struct tipc_node *node;
1962 		struct tipc_link *link;
1963 
1964 		node = tipc_link_find_owner(net, name, &bearer_id);
1965 		if (!node)
1966 			return -EINVAL;
1967 
1968 		tipc_node_lock(node);
1969 		link = node->links[bearer_id].link;
1970 		if (!link) {
1971 			tipc_node_unlock(node);
1972 			nlmsg_free(msg.skb);
1973 			return -EINVAL;
1974 		}
1975 
1976 		err = __tipc_nl_add_link(net, &msg, link, 0);
1977 		tipc_node_unlock(node);
1978 		if (err) {
1979 			nlmsg_free(msg.skb);
1980 			return err;
1981 		}
1982 	}
1983 
1984 	return genlmsg_reply(msg.skb, info);
1985 }
1986 
1987 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
1988 {
1989 	int err;
1990 	char *link_name;
1991 	unsigned int bearer_id;
1992 	struct tipc_link *link;
1993 	struct tipc_node *node;
1994 	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1995 	struct net *net = sock_net(skb->sk);
1996 
1997 	if (!info->attrs[TIPC_NLA_LINK])
1998 		return -EINVAL;
1999 
2000 	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
2001 			       info->attrs[TIPC_NLA_LINK],
2002 			       tipc_nl_link_policy);
2003 	if (err)
2004 		return err;
2005 
2006 	if (!attrs[TIPC_NLA_LINK_NAME])
2007 		return -EINVAL;
2008 
2009 	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2010 
2011 	if (strcmp(link_name, tipc_bclink_name) == 0) {
2012 		err = tipc_bclink_reset_stats(net);
2013 		if (err)
2014 			return err;
2015 		return 0;
2016 	}
2017 
2018 	node = tipc_link_find_owner(net, link_name, &bearer_id);
2019 	if (!node)
2020 		return -EINVAL;
2021 
2022 	tipc_node_lock(node);
2023 
2024 	link = node->links[bearer_id].link;
2025 	if (!link) {
2026 		tipc_node_unlock(node);
2027 		return -EINVAL;
2028 	}
2029 
2030 	link_reset_statistics(link);
2031 
2032 	tipc_node_unlock(node);
2033 
2034 	return 0;
2035 }
2036