xref: /openbmc/linux/net/tipc/link.c (revision fb960bd2)
1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include "core.h"
38 #include "subscr.h"
39 #include "link.h"
40 #include "bcast.h"
41 #include "socket.h"
42 #include "name_distr.h"
43 #include "discover.h"
44 #include "netlink.h"
45 #include "monitor.h"
46 
47 #include <linux/pkt_sched.h>
48 
49 struct tipc_stats {
50 	u32 sent_pkts;
51 	u32 recv_pkts;
52 	u32 sent_states;
53 	u32 recv_states;
54 	u32 sent_probes;
55 	u32 recv_probes;
56 	u32 sent_nacks;
57 	u32 recv_nacks;
58 	u32 sent_acks;
59 	u32 sent_bundled;
60 	u32 sent_bundles;
61 	u32 recv_bundled;
62 	u32 recv_bundles;
63 	u32 retransmitted;
64 	u32 sent_fragmented;
65 	u32 sent_fragments;
66 	u32 recv_fragmented;
67 	u32 recv_fragments;
68 	u32 link_congs;		/* # port sends blocked by congestion */
69 	u32 deferred_recv;
70 	u32 duplicates;
71 	u32 max_queue_sz;	/* send queue size high water mark */
72 	u32 accu_queue_sz;	/* used for send queue size profiling */
73 	u32 queue_sz_counts;	/* used for send queue size profiling */
74 	u32 msg_length_counts;	/* used for message length profiling */
75 	u32 msg_lengths_total;	/* used for message length profiling */
76 	u32 msg_length_profile[7]; /* used for msg. length profiling */
77 };
78 
79 /**
80  * struct tipc_link - TIPC link data structure
81  * @addr: network address of link's peer node
82  * @name: link name character string
83  * @media_addr: media address to use when sending messages over link
84  * @timer: link timer
85  * @net: pointer to namespace struct
86  * @refcnt: reference counter for permanent references (owner node & timer)
87  * @peer_session: link session # being used by peer end of link
88  * @peer_bearer_id: bearer id used by link's peer endpoint
89  * @bearer_id: local bearer id used by link
90  * @tolerance: minimum link continuity loss needed to reset link [in ms]
91  * @abort_limit: # of unacknowledged continuity probes needed to reset link
92  * @state: current state of link FSM
93  * @peer_caps: bitmap describing capabilities of peer node
94  * @silent_intv_cnt: # of timer intervals without any reception from peer
95  * @proto_msg: template for control messages generated by link
96  * @pmsg: convenience pointer to "proto_msg" field
97  * @priority: current link priority
98  * @net_plane: current link network plane ('A' through 'H')
99  * @mon_state: cookie with information needed by link monitor
100  * @backlog: backlog queue lengths and congestion thresholds (indexed by importance)
101  * @exp_msg_count: # of tunnelled messages expected during link changeover
102  * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
103  * @mtu: current maximum packet size for this link
104  * @advertised_mtu: advertised own mtu when link is being established
105  * @transmq: queue for sent, non-acked messages
106  * @backlogq: queue for messages waiting to be sent
107  * @snd_nxt: next sequence number to use for outbound messages
108  * @last_retransm: sequence number of most recently retransmitted message
109  * @stale_count: # of identical retransmit requests made by peer
110  * @ackers: # of peers that need to ack each packet before it can be released
111  * @acked: seqno of last packet acked by a certain peer. Used for broadcast.
112  * @rcv_nxt: next sequence number to expect for inbound messages
113  * @deferdq: deferred queue of out-of-sequence messages received from peer
114  * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
115  * @inputq: buffer queue for messages to be delivered upwards
116  * @namedq: buffer queue for name table messages to be delivered upwards
117  * @next_out: ptr to first unsent outbound message in queue
118  * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
119  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
120  * @reasm_buf: head of partially reassembled inbound message fragments
121  * @bc_rcvr: marks that this is a broadcast receiver link
122  * @stats: collects statistics regarding link activity
123  */
124 struct tipc_link {
125 	u32 addr;
126 	char name[TIPC_MAX_LINK_NAME];
127 	struct net *net;
128 
129 	/* Management and link supervision data */
130 	u32 peer_session;
131 	u32 session;
132 	u32 peer_bearer_id;
133 	u32 bearer_id;
134 	u32 tolerance;
135 	u32 abort_limit;
136 	u32 state;
137 	u16 peer_caps;
138 	bool active;
139 	u32 silent_intv_cnt;
140 	char if_name[TIPC_MAX_IF_NAME];
141 	u32 priority;
142 	char net_plane;
143 	struct tipc_mon_state mon_state;
144 	u16 rst_cnt;
145 
146 	/* Failover/synch */
147 	u16 drop_point;
148 	struct sk_buff *failover_reasm_skb;
149 
150 	/* Max packet negotiation */
151 	u16 mtu;
152 	u16 advertised_mtu;
153 
154 	/* Sending */
155 	struct sk_buff_head transmq;
156 	struct sk_buff_head backlogq;
157 	struct {
158 		u16 len;
159 		u16 limit;
160 	} backlog[5];
161 	u16 snd_nxt;
162 	u16 last_retransm;
163 	u16 window;
164 	u32 stale_count;
165 
166 	/* Reception */
167 	u16 rcv_nxt;
168 	u32 rcv_unacked;
169 	struct sk_buff_head deferdq;
170 	struct sk_buff_head *inputq;
171 	struct sk_buff_head *namedq;
172 
173 	/* Congestion handling */
174 	struct sk_buff_head wakeupq;
175 
176 	/* Fragmentation/reassembly */
177 	struct sk_buff *reasm_buf;
178 
179 	/* Broadcast */
180 	u16 ackers;
181 	u16 acked;
182 	struct tipc_link *bc_rcvlink;
183 	struct tipc_link *bc_sndlink;
184 	unsigned long prev_retr;
185 	u16 prev_from;
186 	u16 prev_to;
187 	u8 nack_state;
188 	bool bc_peer_is_up;
189 
190 	/* Statistics */
191 	struct tipc_stats stats;
192 };
193 
194 /*
195  * Error message prefixes
196  */
197 static const char *link_co_err = "Link tunneling error, ";
198 static const char *link_rst_msg = "Resetting link ";
199 
200 /* Send states for broadcast NACKs
201  */
202 enum {
203 	BC_NACK_SND_CONDITIONAL,
204 	BC_NACK_SND_UNCONDITIONAL,
205 	BC_NACK_SND_SUPPRESS,
206 };
207 
208 #define TIPC_BC_RETR_LIMIT 10   /* [ms] */
209 
210 /*
211  * Interval between NACKs when packets arrive out of order
212  */
213 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
214 
215 /* Wildcard value for link session numbers. When it is known that
216  * peer endpoint is down, any session number must be accepted.
217  */
218 #define ANY_SESSION 0x10000
219 
220 /* Link FSM states:
221  */
222 enum {
223 	LINK_ESTABLISHED     = 0xe,
224 	LINK_ESTABLISHING    = 0xe  << 4,
225 	LINK_RESET           = 0x1  << 8,
226 	LINK_RESETTING       = 0x2  << 12,
227 	LINK_PEER_RESET      = 0xd  << 16,
228 	LINK_FAILINGOVER     = 0xf  << 20,
229 	LINK_SYNCHING        = 0xc  << 24
230 };
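/* Each state value occupies its own hex digit, so membership of a set of
 * states can be tested with a single bitwise AND, as in link_is_up() and
 * tipc_link_is_blocked() below.
 */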
231 
232 /* Link FSM state checking routines
233  */
234 static int link_is_up(struct tipc_link *l)
235 {
236 	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
237 }
238 
239 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
240 			       struct sk_buff_head *xmitq);
241 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
242 				      bool probe_reply, u16 rcvgap,
243 				      int tolerance, int priority,
244 				      struct sk_buff_head *xmitq);
245 static void link_print(struct tipc_link *l, const char *str);
246 static int tipc_link_build_nack_msg(struct tipc_link *l,
247 				    struct sk_buff_head *xmitq);
248 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
249 					struct sk_buff_head *xmitq);
250 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
251 
252 /*
253  *  Simple non-static link routines (i.e. referenced outside this file)
254  */
255 bool tipc_link_is_up(struct tipc_link *l)
256 {
257 	return link_is_up(l);
258 }
259 
260 bool tipc_link_peer_is_down(struct tipc_link *l)
261 {
262 	return l->state == LINK_PEER_RESET;
263 }
264 
265 bool tipc_link_is_reset(struct tipc_link *l)
266 {
267 	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
268 }
269 
270 bool tipc_link_is_establishing(struct tipc_link *l)
271 {
272 	return l->state == LINK_ESTABLISHING;
273 }
274 
275 bool tipc_link_is_synching(struct tipc_link *l)
276 {
277 	return l->state == LINK_SYNCHING;
278 }
279 
280 bool tipc_link_is_failingover(struct tipc_link *l)
281 {
282 	return l->state == LINK_FAILINGOVER;
283 }
284 
285 bool tipc_link_is_blocked(struct tipc_link *l)
286 {
287 	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
288 }
289 
290 static bool link_is_bc_sndlink(struct tipc_link *l)
291 {
292 	return !l->bc_sndlink;
293 }
294 
295 static bool link_is_bc_rcvlink(struct tipc_link *l)
296 {
297 	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
298 }
299 
300 int tipc_link_is_active(struct tipc_link *l)
301 {
302 	return l->active;
303 }
304 
305 void tipc_link_set_active(struct tipc_link *l, bool active)
306 {
307 	l->active = active;
308 }
309 
310 u32 tipc_link_id(struct tipc_link *l)
311 {
312 	return l->peer_bearer_id << 16 | l->bearer_id;
313 }
314 
315 int tipc_link_window(struct tipc_link *l)
316 {
317 	return l->window;
318 }
319 
320 int tipc_link_prio(struct tipc_link *l)
321 {
322 	return l->priority;
323 }
324 
325 unsigned long tipc_link_tolerance(struct tipc_link *l)
326 {
327 	return l->tolerance;
328 }
329 
330 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
331 {
332 	return l->inputq;
333 }
334 
335 char tipc_link_plane(struct tipc_link *l)
336 {
337 	return l->net_plane;
338 }
339 
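/* tipc_link_add_bc_peer - register one more peer on the broadcast send link.
 * The new peer starts acking from the current send position, so packets
 * already in flight are not held back waiting for it.
 */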
340 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
341 			   struct tipc_link *uc_l,
342 			   struct sk_buff_head *xmitq)
343 {
344 	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
345 
346 	snd_l->ackers++;
347 	rcv_l->acked = snd_l->snd_nxt - 1;
348 	snd_l->state = LINK_ESTABLISHED;
349 	tipc_link_build_bc_init_msg(uc_l, xmitq);
350 }
351 
352 void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
353 			      struct tipc_link *rcv_l,
354 			      struct sk_buff_head *xmitq)
355 {
356 	u16 ack = snd_l->snd_nxt - 1;
357 
358 	snd_l->ackers--;
359 	rcv_l->bc_peer_is_up = true;
360 	rcv_l->state = LINK_ESTABLISHED;
361 	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
362 	tipc_link_reset(rcv_l);
363 	rcv_l->state = LINK_RESET;
364 	if (!snd_l->ackers) {
365 		tipc_link_reset(snd_l);
366 		snd_l->state = LINK_RESET;
367 		__skb_queue_purge(xmitq);
368 	}
369 }
370 
371 int tipc_link_bc_peers(struct tipc_link *l)
372 {
373 	return l->ackers;
374 }
375 
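/* link_bc_rcv_gap - number of broadcast packets the peer is known to have
 * sent but that we have not yet received in sequence; if out-of-sequence
 * packets sit on the defer queue, the gap ends at the first of those.
 */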
376 u16 link_bc_rcv_gap(struct tipc_link *l)
377 {
378 	struct sk_buff *skb = skb_peek(&l->deferdq);
379 	u16 gap = 0;
380 
381 	if (more(l->snd_nxt, l->rcv_nxt))
382 		gap = l->snd_nxt - l->rcv_nxt;
383 	if (skb)
384 		gap = buf_seqno(skb) - l->rcv_nxt;
385 	return gap;
386 }
387 
388 void tipc_link_set_mtu(struct tipc_link *l, int mtu)
389 {
390 	l->mtu = mtu;
391 }
392 
393 int tipc_link_mtu(struct tipc_link *l)
394 {
395 	return l->mtu;
396 }
397 
398 u16 tipc_link_rcv_nxt(struct tipc_link *l)
399 {
400 	return l->rcv_nxt;
401 }
402 
403 u16 tipc_link_acked(struct tipc_link *l)
404 {
405 	return l->acked;
406 }
407 
408 char *tipc_link_name(struct tipc_link *l)
409 {
410 	return l->name;
411 }
412 
413 /**
414  * tipc_link_create - create a new link
415  * @net: pointer to associated network namespace
416  * @if_name: associated interface name
417  * @bearer_id: id (index) of associated bearer
418  * @tolerance: link tolerance to be used by link
419  * @net_plane: network plane (A, B, C, ...) this link belongs to
420  * @mtu: mtu to be advertised by link
421  * @priority: priority to be used by link
422  * @window: send window to be used by link
423  * @session: session to be used by link
424  * @ownnode: identity of own node
425  * @peer: node id of peer node
426  * @peer_caps: bitmap describing peer node capabilities
427  * @bc_sndlink: the namespace global link used for broadcast sending
428  * @bc_rcvlink: the peer specific link used for broadcast reception
429  * @inputq: queue to put messages ready for delivery
430  * @namedq: queue to put binding table update messages ready for delivery
431  * @link: return value, pointer to put the created link
432  *
433  * Returns true if link was created, otherwise false
434  */
435 bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
436 		      int tolerance, char net_plane, u32 mtu, int priority,
437 		      int window, u32 session, u32 ownnode, u32 peer,
438 		      u16 peer_caps,
439 		      struct tipc_link *bc_sndlink,
440 		      struct tipc_link *bc_rcvlink,
441 		      struct sk_buff_head *inputq,
442 		      struct sk_buff_head *namedq,
443 		      struct tipc_link **link)
444 {
445 	struct tipc_link *l;
446 
447 	l = kzalloc(sizeof(*l), GFP_ATOMIC);
448 	if (!l)
449 		return false;
450 	*link = l;
451 	l->session = session;
452 
453 	/* Note: peer i/f name is completed by reset/activate message */
454 	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
455 		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
456 		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
457 	strcpy(l->if_name, if_name);
458 	l->addr = peer;
459 	l->peer_caps = peer_caps;
460 	l->net = net;
461 	l->peer_session = ANY_SESSION;
462 	l->bearer_id = bearer_id;
463 	l->tolerance = tolerance;
464 	l->net_plane = net_plane;
465 	l->advertised_mtu = mtu;
466 	l->mtu = mtu;
467 	l->priority = priority;
468 	tipc_link_set_queue_limits(l, window);
469 	l->ackers = 1;
470 	l->bc_sndlink = bc_sndlink;
471 	l->bc_rcvlink = bc_rcvlink;
472 	l->inputq = inputq;
473 	l->namedq = namedq;
474 	l->state = LINK_RESETTING;
475 	__skb_queue_head_init(&l->transmq);
476 	__skb_queue_head_init(&l->backlogq);
477 	__skb_queue_head_init(&l->deferdq);
478 	skb_queue_head_init(&l->wakeupq);
479 	skb_queue_head_init(l->inputq);
480 	return true;
481 }
482 
483 /**
484  * tipc_link_bc_create - create new link to be used for broadcast
485  * @net: pointer to associated network namespace
486  * @mtu: mtu to be used
487  * @window: send window to be used
488  * @inputq: queue to put messages ready for delivery
489  * @namedq: queue to put binding table update messages ready for delivery
490  * @link: return value, pointer to put the created link
491  *
492  * Returns true if link was created, otherwise false
493  */
494 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
495 			 int mtu, int window, u16 peer_caps,
496 			 struct sk_buff_head *inputq,
497 			 struct sk_buff_head *namedq,
498 			 struct tipc_link *bc_sndlink,
499 			 struct tipc_link **link)
500 {
501 	struct tipc_link *l;
502 
503 	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
504 			      0, ownnode, peer, peer_caps, bc_sndlink,
505 			      NULL, inputq, namedq, link))
506 		return false;
507 
508 	l = *link;
509 	strcpy(l->name, tipc_bclink_name);
510 	tipc_link_reset(l);
511 	l->state = LINK_RESET;
512 	l->ackers = 0;
513 	l->bc_rcvlink = l;
514 
515 	/* Broadcast send link is always up */
516 	if (link_is_bc_sndlink(l))
517 		l->state = LINK_ESTABLISHED;
518 
519 	/* Disable replicast if even a single peer doesn't support it */
520 	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
521 		tipc_bcast_disable_rcast(net);
522 
523 	return true;
524 }
525 
526 /**
527  * tipc_link_fsm_evt - link finite state machine
528  * @l: pointer to link
529  * @evt: state machine event to be processed
530  */
531 int tipc_link_fsm_evt(struct tipc_link *l, int evt)
532 {
533 	int rc = 0;
534 
535 	switch (l->state) {
536 	case LINK_RESETTING:
537 		switch (evt) {
538 		case LINK_PEER_RESET_EVT:
539 			l->state = LINK_PEER_RESET;
540 			break;
541 		case LINK_RESET_EVT:
542 			l->state = LINK_RESET;
543 			break;
544 		case LINK_FAILURE_EVT:
545 		case LINK_FAILOVER_BEGIN_EVT:
546 		case LINK_ESTABLISH_EVT:
547 		case LINK_FAILOVER_END_EVT:
548 		case LINK_SYNCH_BEGIN_EVT:
549 		case LINK_SYNCH_END_EVT:
550 		default:
551 			goto illegal_evt;
552 		}
553 		break;
554 	case LINK_RESET:
555 		switch (evt) {
556 		case LINK_PEER_RESET_EVT:
557 			l->state = LINK_ESTABLISHING;
558 			break;
559 		case LINK_FAILOVER_BEGIN_EVT:
560 			l->state = LINK_FAILINGOVER;
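			/* fall through */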
561 		case LINK_FAILURE_EVT:
562 		case LINK_RESET_EVT:
563 		case LINK_ESTABLISH_EVT:
564 		case LINK_FAILOVER_END_EVT:
565 			break;
566 		case LINK_SYNCH_BEGIN_EVT:
567 		case LINK_SYNCH_END_EVT:
568 		default:
569 			goto illegal_evt;
570 		}
571 		break;
572 	case LINK_PEER_RESET:
573 		switch (evt) {
574 		case LINK_RESET_EVT:
575 			l->state = LINK_ESTABLISHING;
576 			break;
577 		case LINK_PEER_RESET_EVT:
578 		case LINK_ESTABLISH_EVT:
579 		case LINK_FAILURE_EVT:
580 			break;
581 		case LINK_SYNCH_BEGIN_EVT:
582 		case LINK_SYNCH_END_EVT:
583 		case LINK_FAILOVER_BEGIN_EVT:
584 		case LINK_FAILOVER_END_EVT:
585 		default:
586 			goto illegal_evt;
587 		}
588 		break;
589 	case LINK_FAILINGOVER:
590 		switch (evt) {
591 		case LINK_FAILOVER_END_EVT:
592 			l->state = LINK_RESET;
593 			break;
594 		case LINK_PEER_RESET_EVT:
595 		case LINK_RESET_EVT:
596 		case LINK_ESTABLISH_EVT:
597 		case LINK_FAILURE_EVT:
598 			break;
599 		case LINK_FAILOVER_BEGIN_EVT:
600 		case LINK_SYNCH_BEGIN_EVT:
601 		case LINK_SYNCH_END_EVT:
602 		default:
603 			goto illegal_evt;
604 		}
605 		break;
606 	case LINK_ESTABLISHING:
607 		switch (evt) {
608 		case LINK_ESTABLISH_EVT:
609 			l->state = LINK_ESTABLISHED;
610 			break;
611 		case LINK_FAILOVER_BEGIN_EVT:
612 			l->state = LINK_FAILINGOVER;
613 			break;
614 		case LINK_RESET_EVT:
615 			l->state = LINK_RESET;
616 			break;
617 		case LINK_FAILURE_EVT:
618 		case LINK_PEER_RESET_EVT:
619 		case LINK_SYNCH_BEGIN_EVT:
620 		case LINK_FAILOVER_END_EVT:
621 			break;
622 		case LINK_SYNCH_END_EVT:
623 		default:
624 			goto illegal_evt;
625 		}
626 		break;
627 	case LINK_ESTABLISHED:
628 		switch (evt) {
629 		case LINK_PEER_RESET_EVT:
630 			l->state = LINK_PEER_RESET;
631 			rc |= TIPC_LINK_DOWN_EVT;
632 			break;
633 		case LINK_FAILURE_EVT:
634 			l->state = LINK_RESETTING;
635 			rc |= TIPC_LINK_DOWN_EVT;
636 			break;
637 		case LINK_RESET_EVT:
638 			l->state = LINK_RESET;
639 			break;
640 		case LINK_ESTABLISH_EVT:
641 		case LINK_SYNCH_END_EVT:
642 			break;
643 		case LINK_SYNCH_BEGIN_EVT:
644 			l->state = LINK_SYNCHING;
645 			break;
646 		case LINK_FAILOVER_BEGIN_EVT:
647 		case LINK_FAILOVER_END_EVT:
648 		default:
649 			goto illegal_evt;
650 		}
651 		break;
652 	case LINK_SYNCHING:
653 		switch (evt) {
654 		case LINK_PEER_RESET_EVT:
655 			l->state = LINK_PEER_RESET;
656 			rc |= TIPC_LINK_DOWN_EVT;
657 			break;
658 		case LINK_FAILURE_EVT:
659 			l->state = LINK_RESETTING;
660 			rc |= TIPC_LINK_DOWN_EVT;
661 			break;
662 		case LINK_RESET_EVT:
663 			l->state = LINK_RESET;
664 			break;
665 		case LINK_ESTABLISH_EVT:
666 		case LINK_SYNCH_BEGIN_EVT:
667 			break;
668 		case LINK_SYNCH_END_EVT:
669 			l->state = LINK_ESTABLISHED;
670 			break;
671 		case LINK_FAILOVER_BEGIN_EVT:
672 		case LINK_FAILOVER_END_EVT:
673 		default:
674 			goto illegal_evt;
675 		}
676 		break;
677 	default:
678 		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
679 	}
680 	return rc;
681 illegal_evt:
682 	pr_err("Illegal FSM event %x in state %x on link %s\n",
683 	       evt, l->state, l->name);
684 	return rc;
685 }
686 
687 /* link_profile_stats - update statistical profiling of traffic
688  */
689 static void link_profile_stats(struct tipc_link *l)
690 {
691 	struct sk_buff *skb;
692 	struct tipc_msg *msg;
693 	int length;
694 
695 	/* Update counters used in statistical profiling of send traffic */
696 	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
697 	l->stats.queue_sz_counts++;
698 
699 	skb = skb_peek(&l->transmq);
700 	if (!skb)
701 		return;
702 	msg = buf_msg(skb);
703 	length = msg_size(msg);
704 
705 	if (msg_user(msg) == MSG_FRAGMENTER) {
706 		if (msg_type(msg) != FIRST_FRAGMENT)
707 			return;
708 		length = msg_size(msg_get_wrapped(msg));
709 	}
710 	l->stats.msg_lengths_total += length;
711 	l->stats.msg_length_counts++;
712 	if (length <= 64)
713 		l->stats.msg_length_profile[0]++;
714 	else if (length <= 256)
715 		l->stats.msg_length_profile[1]++;
716 	else if (length <= 1024)
717 		l->stats.msg_length_profile[2]++;
718 	else if (length <= 4096)
719 		l->stats.msg_length_profile[3]++;
720 	else if (length <= 16384)
721 		l->stats.msg_length_profile[4]++;
722 	else if (length <= 32768)
723 		l->stats.msg_length_profile[5]++;
724 	else
725 		l->stats.msg_length_profile[6]++;
726 }
727 
728 /* tipc_link_timeout - perform periodic task as instructed from node timeout
729  */
730 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
731 {
732 	int mtyp = 0;
733 	int rc = 0;
734 	bool state = false;
735 	bool probe = false;
736 	bool setup = false;
737 	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
738 	u16 bc_acked = l->bc_rcvlink->acked;
739 	struct tipc_mon_state *mstate = &l->mon_state;
740 
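	/* Decide what, if anything, must be sent at this timeout:
	 * - state: the peer is owed an acknowledge/state update
	 * - probe: a continuity probe is due
	 * - setup: a RESET/ACTIVATE message is due while the link is down
	 */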
741 	switch (l->state) {
742 	case LINK_ESTABLISHED:
743 	case LINK_SYNCHING:
744 		mtyp = STATE_MSG;
745 		link_profile_stats(l);
746 		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
747 		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
748 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
749 		state = bc_acked != bc_snt;
750 		state |= l->bc_rcvlink->rcv_unacked;
751 		state |= l->rcv_unacked;
752 		state |= !skb_queue_empty(&l->transmq);
753 		state |= !skb_queue_empty(&l->deferdq);
754 		probe = mstate->probing;
755 		probe |= l->silent_intv_cnt;
756 		if (probe || mstate->monitoring)
757 			l->silent_intv_cnt++;
758 		break;
759 	case LINK_RESET:
760 		setup = l->rst_cnt++ <= 4;
761 		setup |= !(l->rst_cnt % 16);
762 		mtyp = RESET_MSG;
763 		break;
764 	case LINK_ESTABLISHING:
765 		setup = true;
766 		mtyp = ACTIVATE_MSG;
767 		break;
768 	case LINK_PEER_RESET:
769 	case LINK_RESETTING:
770 	case LINK_FAILINGOVER:
771 		break;
772 	default:
773 		break;
774 	}
775 
776 	if (state || probe || setup)
777 		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
778 
779 	return rc;
780 }
781 
782 /**
783  * link_schedule_user - schedule a message sender for wakeup after congestion
784  * @l: congested link
785  * @hdr: header of message that is being sent
786  * Create pseudo msg to send back to user when congestion abates
787  */
788 static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
789 {
790 	u32 dnode = tipc_own_addr(l->net);
791 	u32 dport = msg_origport(hdr);
792 	struct sk_buff *skb;
793 
794 	/* Create and schedule wakeup pseudo message */
795 	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
796 			      dnode, l->addr, dport, 0, 0);
797 	if (!skb)
798 		return -ENOBUFS;
799 	msg_set_dest_droppable(buf_msg(skb), true);
800 	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
801 	skb_queue_tail(&l->wakeupq, skb);
802 	l->stats.link_congs++;
803 	return -ELINKCONG;
804 }
805 
806 /**
807  * link_prepare_wakeup - prepare users for wakeup after congestion
808  * @l: congested link
809  * Wake up a number of waiting users, as permitted by available space
810  * in the send queue
811  */
812 void link_prepare_wakeup(struct tipc_link *l)
813 {
814 	struct sk_buff *skb, *tmp;
815 	int imp, i = 0;
816 
817 	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
818 		imp = TIPC_SKB_CB(skb)->chain_imp;
819 		if (l->backlog[imp].len < l->backlog[imp].limit) {
820 			skb_unlink(skb, &l->wakeupq);
821 			skb_queue_tail(l->inputq, skb);
822 		} else if (i++ > 10) {
823 			break;
824 		}
825 	}
826 }
827 
828 void tipc_link_reset(struct tipc_link *l)
829 {
830 	l->peer_session = ANY_SESSION;
831 	l->session++;
832 	l->mtu = l->advertised_mtu;
833 	__skb_queue_purge(&l->transmq);
834 	__skb_queue_purge(&l->deferdq);
835 	skb_queue_splice_init(&l->wakeupq, l->inputq);
836 	__skb_queue_purge(&l->backlogq);
837 	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
838 	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
839 	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
840 	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
841 	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
842 	kfree_skb(l->reasm_buf);
843 	kfree_skb(l->failover_reasm_skb);
844 	l->reasm_buf = NULL;
845 	l->failover_reasm_skb = NULL;
846 	l->rcv_unacked = 0;
847 	l->snd_nxt = 1;
848 	l->rcv_nxt = 1;
849 	l->acked = 0;
850 	l->silent_intv_cnt = 0;
851 	l->rst_cnt = 0;
852 	l->stale_count = 0;
853 	l->bc_peer_is_up = false;
854 	memset(&l->mon_state, 0, sizeof(l->mon_state));
855 	tipc_link_reset_stats(l);
856 }
857 
858 /**
859  * tipc_link_xmit(): enqueue buffer list according to queue situation
860  * @l: link to use
861  * @list: chain of buffers containing message
862  * @xmitq: returned list of packets to be sent by caller
863  *
864  * Consumes the buffer chain.
865  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
866  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
867  */
868 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
869 		   struct sk_buff_head *xmitq)
870 {
871 	struct tipc_msg *hdr = buf_msg(skb_peek(list));
872 	unsigned int maxwin = l->window;
873 	int imp = msg_importance(hdr);
874 	unsigned int mtu = l->mtu;
875 	u16 ack = l->rcv_nxt - 1;
876 	u16 seqno = l->snd_nxt;
877 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
878 	struct sk_buff_head *transmq = &l->transmq;
879 	struct sk_buff_head *backlogq = &l->backlogq;
880 	struct sk_buff *skb, *_skb, *bskb;
881 	int pkt_cnt = skb_queue_len(list);
882 	int rc = 0;
883 
884 	if (unlikely(msg_size(hdr) > mtu)) {
885 		skb_queue_purge(list);
886 		return -EMSGSIZE;
887 	}
888 
889 	/* Allow oversubscription of one data msg per source at congestion */
890 	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
891 		if (imp == TIPC_SYSTEM_IMPORTANCE) {
892 			pr_warn("%s<%s>, link overflow\n", link_rst_msg, l->name);
893 			return -ENOBUFS;
894 		}
895 		rc = link_schedule_user(l, hdr);
896 	}
897 
898 	if (pkt_cnt > 1) {
899 		l->stats.sent_fragmented++;
900 		l->stats.sent_fragments += pkt_cnt;
901 	}
902 
903 	/* Prepare each packet for sending, and add to relevant queue: */
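	/* Each packet takes one of three paths:
	 * 1) send window open: clone onto xmitq, park the original on transmq
	 * 2) window closed: bundle it into the newest backlog packet, or start
	 *    a new bundle for it on the backlog queue
	 * 3) otherwise: splice the whole remainder of the list onto backlogq
	 */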
904 	while (skb_queue_len(list)) {
905 		skb = skb_peek(list);
906 		hdr = buf_msg(skb);
907 		msg_set_seqno(hdr, seqno);
908 		msg_set_ack(hdr, ack);
909 		msg_set_bcast_ack(hdr, bc_ack);
910 
911 		if (likely(skb_queue_len(transmq) < maxwin)) {
912 			_skb = skb_clone(skb, GFP_ATOMIC);
913 			if (!_skb) {
914 				skb_queue_purge(list);
915 				return -ENOBUFS;
916 			}
917 			__skb_dequeue(list);
918 			__skb_queue_tail(transmq, skb);
919 			__skb_queue_tail(xmitq, _skb);
920 			TIPC_SKB_CB(skb)->ackers = l->ackers;
921 			l->rcv_unacked = 0;
922 			l->stats.sent_pkts++;
923 			seqno++;
924 			continue;
925 		}
926 		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
927 			kfree_skb(__skb_dequeue(list));
928 			l->stats.sent_bundled++;
929 			continue;
930 		}
931 		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
932 			kfree_skb(__skb_dequeue(list));
933 			__skb_queue_tail(backlogq, bskb);
934 			l->backlog[msg_importance(buf_msg(bskb))].len++;
935 			l->stats.sent_bundled++;
936 			l->stats.sent_bundles++;
937 			continue;
938 		}
939 		l->backlog[imp].len += skb_queue_len(list);
940 		skb_queue_splice_tail_init(list, backlogq);
941 	}
942 	l->snd_nxt = seqno;
943 	return rc;
944 }
945 
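/* tipc_link_advance_backlog - move packets from the backlog queue to the
 * transmit queue for as long as the send window allows, cloning each one
 * onto xmitq for actual transmission.
 */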
946 void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
947 {
948 	struct sk_buff *skb, *_skb;
949 	struct tipc_msg *hdr;
950 	u16 seqno = l->snd_nxt;
951 	u16 ack = l->rcv_nxt - 1;
952 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
953 
954 	while (skb_queue_len(&l->transmq) < l->window) {
955 		skb = skb_peek(&l->backlogq);
956 		if (!skb)
957 			break;
958 		_skb = skb_clone(skb, GFP_ATOMIC);
959 		if (!_skb)
960 			break;
961 		__skb_dequeue(&l->backlogq);
962 		hdr = buf_msg(skb);
963 		l->backlog[msg_importance(hdr)].len--;
964 		__skb_queue_tail(&l->transmq, skb);
965 		__skb_queue_tail(xmitq, _skb);
966 		TIPC_SKB_CB(skb)->ackers = l->ackers;
967 		msg_set_seqno(hdr, seqno);
968 		msg_set_ack(hdr, ack);
969 		msg_set_bcast_ack(hdr, bc_ack);
970 		l->rcv_unacked = 0;
971 		l->stats.sent_pkts++;
972 		seqno++;
973 	}
974 	l->snd_nxt = seqno;
975 }
976 
977 static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
978 {
979 	struct tipc_msg *hdr = buf_msg(skb);
980 
981 	pr_warn("Retransmission failure on link <%s>\n", l->name);
982 	link_print(l, "State of link ");
983 	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
984 		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
985 	pr_info("sqno %u, prev: %x, src: %x\n",
986 		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
987 }
988 
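/* tipc_link_retrans - retransmit the packets in range [from, to]
 * @l: link whose transmit queue holds the packets
 * @nacker: link on which the retransmit request arrived; used to detect
 *          repeated requests for the same packet (link failure after ~100)
 * @xmitq: queue where copies of the retransmitted packets are placed
 */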
989 int tipc_link_retrans(struct tipc_link *l, struct tipc_link *nacker,
990 		      u16 from, u16 to, struct sk_buff_head *xmitq)
991 {
992 	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
993 	struct tipc_msg *hdr;
994 	u16 ack = l->rcv_nxt - 1;
995 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
996 
997 	if (!skb)
998 		return 0;
999 
1000 	/* Detect repeated retransmit failures on same packet */
1001 	if (nacker->last_retransm != buf_seqno(skb)) {
1002 		nacker->last_retransm = buf_seqno(skb);
1003 		nacker->stale_count = 1;
1004 	} else if (++nacker->stale_count > 100) {
1005 		link_retransmit_failure(l, skb);
1006 		nacker->stale_count = 0;
1007 		if (link_is_bc_sndlink(l))
1008 			return TIPC_LINK_DOWN_EVT;
1009 		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1010 	}
1011 
1012 	/* Move forward to where retransmission should start */
1013 	skb_queue_walk(&l->transmq, skb) {
1014 		if (!less(buf_seqno(skb), from))
1015 			break;
1016 	}
1017 
1018 	skb_queue_walk_from(&l->transmq, skb) {
1019 		if (more(buf_seqno(skb), to))
1020 			break;
1021 		hdr = buf_msg(skb);
1022 		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1023 		if (!_skb)
1024 			return 0;
1025 		hdr = buf_msg(_skb);
1026 		msg_set_ack(hdr, ack);
1027 		msg_set_bcast_ack(hdr, bc_ack);
1028 		_skb->priority = TC_PRIO_CONTROL;
1029 		__skb_queue_tail(xmitq, _skb);
1030 		l->stats.retransmitted++;
1031 	}
1032 	return 0;
1033 }
1034 
1035 /* tipc_data_input - deliver data and name distr msgs to upper layer
1036  *
1037  * Consumes buffer if message is of right type
1038  * Node lock must be held
1039  */
1040 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1041 			    struct sk_buff_head *inputq)
1042 {
1043 	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
1044 	struct tipc_msg *hdr = buf_msg(skb);
1045 
1046 	switch (msg_user(hdr)) {
1047 	case TIPC_LOW_IMPORTANCE:
1048 	case TIPC_MEDIUM_IMPORTANCE:
1049 	case TIPC_HIGH_IMPORTANCE:
1050 	case TIPC_CRITICAL_IMPORTANCE:
1051 		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
1052 			skb_queue_tail(mc_inputq, skb);
1053 			return true;
1054 		}
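		/* fall through */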
1055 	case CONN_MANAGER:
1056 		skb_queue_tail(inputq, skb);
1057 		return true;
1058 	case GROUP_PROTOCOL:
1059 		skb_queue_tail(mc_inputq, skb);
1060 		return true;
1061 	case NAME_DISTRIBUTOR:
1062 		l->bc_rcvlink->state = LINK_ESTABLISHED;
1063 		skb_queue_tail(l->namedq, skb);
1064 		return true;
1065 	case MSG_BUNDLER:
1066 	case TUNNEL_PROTOCOL:
1067 	case MSG_FRAGMENTER:
1068 	case BCAST_PROTOCOL:
1069 		return false;
1070 	default:
1071 		pr_warn("Dropping received illegal msg type\n");
1072 		kfree_skb(skb);
1073 		return false;
1074 	}
1075 }
1076 
1077 /* tipc_link_input - process packet that has passed link protocol check
1078  *
1079  * Consumes buffer
1080  */
1081 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1082 			   struct sk_buff_head *inputq)
1083 {
1084 	struct tipc_msg *hdr = buf_msg(skb);
1085 	struct sk_buff **reasm_skb = &l->reasm_buf;
1086 	struct sk_buff *iskb;
1087 	struct sk_buff_head tmpq;
1088 	int usr = msg_user(hdr);
1089 	int rc = 0;
1090 	int pos = 0;
1091 	int ipos = 0;
1092 
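	/* Messages tunnelled during failover/synch are unwrapped first; an
	 * inner packet older than the failover drop point is discarded.
	 */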
1093 	if (unlikely(usr == TUNNEL_PROTOCOL)) {
1094 		if (msg_type(hdr) == SYNCH_MSG) {
1095 			__skb_queue_purge(&l->deferdq);
1096 			goto drop;
1097 		}
1098 		if (!tipc_msg_extract(skb, &iskb, &ipos))
1099 			return rc;
1100 		kfree_skb(skb);
1101 		skb = iskb;
1102 		hdr = buf_msg(skb);
1103 		if (less(msg_seqno(hdr), l->drop_point))
1104 			goto drop;
1105 		if (tipc_data_input(l, skb, inputq))
1106 			return rc;
1107 		usr = msg_user(hdr);
1108 		reasm_skb = &l->failover_reasm_skb;
1109 	}
1110 
1111 	if (usr == MSG_BUNDLER) {
1112 		skb_queue_head_init(&tmpq);
1113 		l->stats.recv_bundles++;
1114 		l->stats.recv_bundled += msg_msgcnt(hdr);
1115 		while (tipc_msg_extract(skb, &iskb, &pos))
1116 			tipc_data_input(l, iskb, &tmpq);
1117 		tipc_skb_queue_splice_tail(&tmpq, inputq);
1118 		return 0;
1119 	} else if (usr == MSG_FRAGMENTER) {
1120 		l->stats.recv_fragments++;
1121 		if (tipc_buf_append(reasm_skb, &skb)) {
1122 			l->stats.recv_fragmented++;
1123 			tipc_data_input(l, skb, inputq);
1124 		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1125 			pr_warn_ratelimited("Unable to build fragment list\n");
1126 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1127 		}
1128 		return 0;
1129 	} else if (usr == BCAST_PROTOCOL) {
1130 		tipc_bcast_lock(l->net);
1131 		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1132 		tipc_bcast_unlock(l->net);
1133 	}
1134 drop:
1135 	kfree_skb(skb);
1136 	return 0;
1137 }
1138 
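/* tipc_link_release_pkts - free packets from the transmit queue up to and
 * including sequence number 'acked'; returns true if at least one packet
 * was released.
 */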
1139 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1140 {
1141 	bool released = false;
1142 	struct sk_buff *skb, *tmp;
1143 
1144 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1145 		if (more(buf_seqno(skb), acked))
1146 			break;
1147 		__skb_unlink(skb, &l->transmq);
1148 		kfree_skb(skb);
1149 		released = true;
1150 	}
1151 	return released;
1152 }
1153 
1154 /* tipc_link_build_state_msg: prepare link state message for transmission
1155  *
1156  * Note that sending of broadcast ack is coordinated among nodes, to reduce
1157  * risk of ack storms towards the sender
1158  */
1159 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1160 {
1161 	if (!l)
1162 		return 0;
1163 
1164 	/* Broadcast ACK must be sent via a unicast link => defer to caller */
1165 	if (link_is_bc_rcvlink(l)) {
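		/* Only ack when the low nibble of rcv_nxt is the complement of
		 * the low nibble of the own node address; this staggers acks
		 * so each node acks roughly one in sixteen broadcast packets.
		 */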
1166 		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1167 			return 0;
1168 		l->rcv_unacked = 0;
1169 
1170 		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1171 		l->snd_nxt = l->rcv_nxt;
1172 		return TIPC_LINK_SND_STATE;
1173 	}
1174 
1175 	/* Unicast ACK */
1176 	l->rcv_unacked = 0;
1177 	l->stats.sent_acks++;
1178 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1179 	return 0;
1180 }
1181 
1182 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1183  */
1184 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1185 {
1186 	int mtyp = RESET_MSG;
1187 	struct sk_buff *skb;
1188 
1189 	if (l->state == LINK_ESTABLISHING)
1190 		mtyp = ACTIVATE_MSG;
1191 
1192 	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1193 
1194 	/* Inform peer that this endpoint is going down if applicable */
1195 	skb = skb_peek_tail(xmitq);
1196 	if (skb && (l->state == LINK_RESET))
1197 		msg_set_peer_stopping(buf_msg(skb), 1);
1198 }
1199 
1200 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1201  * Note that sending of broadcast NACK is coordinated among nodes, to
1202  * reduce the risk of NACK storms towards the sender
1203  */
1204 static int tipc_link_build_nack_msg(struct tipc_link *l,
1205 				    struct sk_buff_head *xmitq)
1206 {
1207 	u32 def_cnt = ++l->stats.deferred_recv;
1208 	int match1, match2;
1209 
1210 	if (link_is_bc_rcvlink(l)) {
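		/* Stagger broadcast NACKs among receivers: only signal a send
		 * when the deferred-packet count hits a node-specific value.
		 */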
1211 		match1 = def_cnt & 0xf;
1212 		match2 = tipc_own_addr(l->net) & 0xf;
1213 		if (match1 == match2)
1214 			return TIPC_LINK_SND_STATE;
1215 		return 0;
1216 	}
1217 
1218 	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
1219 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1220 	return 0;
1221 }
1222 
1223 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1224  * @l: the link that should handle the message
1225  * @skb: TIPC packet
1226  * @xmitq: queue to place packets to be sent after this call
1227  */
1228 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1229 		  struct sk_buff_head *xmitq)
1230 {
1231 	struct sk_buff_head *defq = &l->deferdq;
1232 	struct tipc_msg *hdr;
1233 	u16 seqno, rcv_nxt, win_lim;
1234 	int rc = 0;
1235 
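	/* Process the arriving packet, then keep pulling from the defer queue
	 * to deliver any previously out-of-sequence packets that are now in
	 * order.
	 */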
1236 	do {
1237 		hdr = buf_msg(skb);
1238 		seqno = msg_seqno(hdr);
1239 		rcv_nxt = l->rcv_nxt;
1240 		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1241 
1242 		/* Verify and update link state */
1243 		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1244 			return tipc_link_proto_rcv(l, skb, xmitq);
1245 
1246 		if (unlikely(!link_is_up(l))) {
1247 			if (l->state == LINK_ESTABLISHING)
1248 				rc = TIPC_LINK_UP_EVT;
1249 			goto drop;
1250 		}
1251 
1252 		/* Don't send probe at next timeout expiration */
1253 		l->silent_intv_cnt = 0;
1254 
1255 		/* Drop if outside receive window */
1256 		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1257 			l->stats.duplicates++;
1258 			goto drop;
1259 		}
1260 
1261 		/* Forward queues and wake up waiting users */
1262 		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1263 			tipc_link_advance_backlog(l, xmitq);
1264 			if (unlikely(!skb_queue_empty(&l->wakeupq)))
1265 				link_prepare_wakeup(l);
1266 		}
1267 
1268 		/* Defer delivery if sequence gap */
1269 		if (unlikely(seqno != rcv_nxt)) {
1270 			__tipc_skb_queue_sorted(defq, seqno, skb);
1271 			rc |= tipc_link_build_nack_msg(l, xmitq);
1272 			break;
1273 		}
1274 
1275 		/* Deliver packet */
1276 		l->rcv_nxt++;
1277 		l->stats.recv_pkts++;
1278 		if (!tipc_data_input(l, skb, l->inputq))
1279 			rc |= tipc_link_input(l, skb, l->inputq);
1280 		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1281 			rc |= tipc_link_build_state_msg(l, xmitq);
1282 		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1283 			break;
1284 	} while ((skb = __skb_dequeue(defq)));
1285 
1286 	return rc;
1287 drop:
1288 	kfree_skb(skb);
1289 	return rc;
1290 }
1291 
1292 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1293 				      bool probe_reply, u16 rcvgap,
1294 				      int tolerance, int priority,
1295 				      struct sk_buff_head *xmitq)
1296 {
1297 	struct tipc_link *bcl = l->bc_rcvlink;
1298 	struct sk_buff *skb;
1299 	struct tipc_msg *hdr;
1300 	struct sk_buff_head *dfq = &l->deferdq;
1301 	bool node_up = link_is_up(bcl);
1302 	struct tipc_mon_state *mstate = &l->mon_state;
1303 	int dlen = 0;
1304 	void *data;
1305 
1306 	/* Don't send protocol message during reset or link failover */
1307 	if (tipc_link_is_blocked(l))
1308 		return;
1309 
1310 	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1311 		return;
1312 
1313 	if (!skb_queue_empty(dfq))
1314 		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1315 
1316 	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1317 			      tipc_max_domain_size, l->addr,
1318 			      tipc_own_addr(l->net), 0, 0, 0);
1319 	if (!skb)
1320 		return;
1321 
1322 	hdr = buf_msg(skb);
1323 	data = msg_data(hdr);
1324 	msg_set_session(hdr, l->session);
1325 	msg_set_bearer_id(hdr, l->bearer_id);
1326 	msg_set_net_plane(hdr, l->net_plane);
1327 	msg_set_next_sent(hdr, l->snd_nxt);
1328 	msg_set_ack(hdr, l->rcv_nxt - 1);
1329 	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1330 	msg_set_bc_ack_invalid(hdr, !node_up);
1331 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1332 	msg_set_link_tolerance(hdr, tolerance);
1333 	msg_set_linkprio(hdr, priority);
1334 	msg_set_redundant_link(hdr, node_up);
1335 	msg_set_seq_gap(hdr, 0);
1336 	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
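	/* Protocol messages are not sequenced like data packets; the dummy
	 * seqno set above lies far outside the current receive window.
	 */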
1337 
1338 	if (mtyp == STATE_MSG) {
1339 		msg_set_seq_gap(hdr, rcvgap);
1340 		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1341 		msg_set_probe(hdr, probe);
1342 		msg_set_is_keepalive(hdr, probe || probe_reply);
1343 		tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
1344 		msg_set_size(hdr, INT_H_SIZE + dlen);
1345 		skb_trim(skb, INT_H_SIZE + dlen);
1346 		l->stats.sent_states++;
1347 		l->rcv_unacked = 0;
1348 	} else {
1349 		/* RESET_MSG or ACTIVATE_MSG */
1350 		msg_set_max_pkt(hdr, l->advertised_mtu);
1351 		strcpy(data, l->if_name);
1352 		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1353 		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1354 	}
1355 	if (probe)
1356 		l->stats.sent_probes++;
1357 	if (rcvgap)
1358 		l->stats.sent_nacks++;
1359 	skb->priority = TC_PRIO_CONTROL;
1360 	__skb_queue_tail(xmitq, skb);
1361 }
1362 
1363 /* tipc_link_tnl_prepare(): build a list of tunnel packets wrapping the
1364  * contents of the link's transmit and backlog queues, and send it via tnl.
1365  */
1366 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1367 			   int mtyp, struct sk_buff_head *xmitq)
1368 {
1369 	struct sk_buff *skb, *tnlskb;
1370 	struct tipc_msg *hdr, tnlhdr;
1371 	struct sk_buff_head *queue = &l->transmq;
1372 	struct sk_buff_head tmpxq, tnlq;
1373 	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1374 
1375 	if (!tnl)
1376 		return;
1377 
1378 	skb_queue_head_init(&tnlq);
1379 	skb_queue_head_init(&tmpxq);
1380 
1381 	/* At least one packet required for safe algorithm => add dummy */
1382 	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1383 			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1384 			      0, 0, TIPC_ERR_NO_PORT);
1385 	if (!skb) {
1386 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1387 		return;
1388 	}
1389 	skb_queue_tail(&tnlq, skb);
1390 	tipc_link_xmit(l, &tnlq, &tmpxq);
1391 	__skb_queue_purge(&tmpxq);
1392 
1393 	/* Initialize reusable tunnel packet header */
1394 	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1395 		      mtyp, INT_H_SIZE, l->addr);
1396 	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
1397 	msg_set_msgcnt(&tnlhdr, pktcnt);
1398 	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
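	/* The loop below runs twice: first over the transmit queue, then over
	 * the backlog queue, wrapping each queued packet into a tunnel packet.
	 */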
1399 tnl:
1400 	/* Wrap each packet into a tunnel packet */
1401 	skb_queue_walk(queue, skb) {
1402 		hdr = buf_msg(skb);
1403 		if (queue == &l->backlogq)
1404 			msg_set_seqno(hdr, seqno++);
1405 		pktlen = msg_size(hdr);
1406 		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1407 		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
1408 		if (!tnlskb) {
1409 			pr_warn("%sunable to send packet\n", link_co_err);
1410 			return;
1411 		}
1412 		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1413 		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1414 		__skb_queue_tail(&tnlq, tnlskb);
1415 	}
1416 	if (queue != &l->backlogq) {
1417 		queue = &l->backlogq;
1418 		goto tnl;
1419 	}
1420 
1421 	tipc_link_xmit(tnl, &tnlq, xmitq);
1422 
1423 	if (mtyp == FAILOVER_MSG) {
1424 		tnl->drop_point = l->rcv_nxt;
1425 		tnl->failover_reasm_skb = l->reasm_buf;
1426 		l->reasm_buf = NULL;
1427 	}
1428 }
1429 
1430 /* tipc_link_proto_rcv(): receive link level protocol message:
1431  * Note that network plane id propagates through the network, and may
1432  * change at any time. The node with lowest numerical id determines
1433  * network plane
1434  */
1435 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1436 			       struct sk_buff_head *xmitq)
1437 {
1438 	struct tipc_msg *hdr = buf_msg(skb);
1439 	u16 rcvgap = 0;
1440 	u16 ack = msg_ack(hdr);
1441 	u16 gap = msg_seq_gap(hdr);
1442 	u16 peers_snd_nxt =  msg_next_sent(hdr);
1443 	u16 peers_tol = msg_link_tolerance(hdr);
1444 	u16 peers_prio = msg_linkprio(hdr);
1445 	u16 rcv_nxt = l->rcv_nxt;
1446 	u16 dlen = msg_data_sz(hdr);
1447 	int mtyp = msg_type(hdr);
1448 	bool reply = msg_probe(hdr);
1449 	void *data;
1450 	char *if_name;
1451 	int rc = 0;
1452 
1453 	if (tipc_link_is_blocked(l) || !xmitq)
1454 		goto exit;
1455 
1456 	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1457 		l->net_plane = msg_net_plane(hdr);
1458 
1459 	skb_linearize(skb);
1460 	hdr = buf_msg(skb);
1461 	data = msg_data(hdr);
1462 
1463 	switch (mtyp) {
1464 	case RESET_MSG:
1465 
1466 		/* Ignore duplicate RESET with old session number */
1467 		if ((less_eq(msg_session(hdr), l->peer_session)) &&
1468 		    (l->peer_session != ANY_SESSION))
1469 			break;
1470 		/* fall thru' */
1471 
1472 	case ACTIVATE_MSG:
1473 
1474 		/* Complete own link name with peer's interface name */
1475 		if_name =  strrchr(l->name, ':') + 1;
1476 		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1477 			break;
1478 		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1479 			break;
1480 		strncpy(if_name, data, TIPC_MAX_IF_NAME);
1481 
1482 		/* Update own tolerance if peer indicates a non-zero value */
1483 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1484 			l->tolerance = peers_tol;
1485 
1486 		/* Update own priority if peer's priority is higher */
1487 		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1488 			l->priority = peers_prio;
1489 
1490 		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1491 		if (msg_peer_stopping(hdr))
1492 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1493 		else if ((mtyp == RESET_MSG) || !link_is_up(l))
1494 			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1495 
1496 		/* ACTIVATE_MSG takes up link if it was already locally reset */
1497 		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1498 			rc = TIPC_LINK_UP_EVT;
1499 
1500 		l->peer_session = msg_session(hdr);
1501 		l->peer_bearer_id = msg_bearer_id(hdr);
1502 		if (l->mtu > msg_max_pkt(hdr))
1503 			l->mtu = msg_max_pkt(hdr);
1504 		break;
1505 
1506 	case STATE_MSG:
1507 
1508 		/* Update own tolerance if peer indicates a non-zero value */
1509 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1510 			l->tolerance = peers_tol;
1511 
1512 		/* Update own prio if peer indicates a different value */
1513 		if ((peers_prio != l->priority) &&
1514 		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
1515 			l->priority = peers_prio;
1516 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1517 		}
1518 
1519 		l->silent_intv_cnt = 0;
1520 		l->stats.recv_states++;
1521 		if (msg_probe(hdr))
1522 			l->stats.recv_probes++;
1523 
1524 		if (!link_is_up(l)) {
1525 			if (l->state == LINK_ESTABLISHING)
1526 				rc = TIPC_LINK_UP_EVT;
1527 			break;
1528 		}
1529 		tipc_mon_rcv(l->net, data, dlen, l->addr,
1530 			     &l->mon_state, l->bearer_id);
1531 
1532 		/* Send NACK if peer has sent pkts we haven't received yet */
1533 		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
1534 			rcvgap = peers_snd_nxt - l->rcv_nxt;
1535 		if (rcvgap || reply)
1536 			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
1537 						  rcvgap, 0, 0, xmitq);
1538 		tipc_link_release_pkts(l, ack);
1539 
1540 		/* If NACK, retransmit will now start at right position */
1541 		if (gap) {
1542 			rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq);
1543 			l->stats.recv_nacks++;
1544 		}
1545 
1546 		tipc_link_advance_backlog(l, xmitq);
1547 		if (unlikely(!skb_queue_empty(&l->wakeupq)))
1548 			link_prepare_wakeup(l);
1549 	}
1550 exit:
1551 	kfree_skb(skb);
1552 	return rc;
1553 }
1554 
1555 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1556  */
1557 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1558 					 u16 peers_snd_nxt,
1559 					 struct sk_buff_head *xmitq)
1560 {
1561 	struct sk_buff *skb;
1562 	struct tipc_msg *hdr;
1563 	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1564 	u16 ack = l->rcv_nxt - 1;
1565 	u16 gap_to = peers_snd_nxt - 1;
1566 
1567 	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
1568 			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
1569 	if (!skb)
1570 		return false;
1571 	hdr = buf_msg(skb);
1572 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1573 	msg_set_bcast_ack(hdr, ack);
1574 	msg_set_bcgap_after(hdr, ack);
1575 	if (dfrd_skb)
1576 		gap_to = buf_seqno(dfrd_skb) - 1;
1577 	msg_set_bcgap_to(hdr, gap_to);
1578 	msg_set_non_seq(hdr, bcast);
1579 	__skb_queue_tail(xmitq, skb);
1580 	return true;
1581 }
1582 
1583 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1584  *
1585  * Give a newly added peer node the sequence number where it should
1586  * start receiving and acking broadcast packets.
1587  */
1588 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1589 					struct sk_buff_head *xmitq)
1590 {
1591 	struct sk_buff_head list;
1592 
1593 	__skb_queue_head_init(&list);
1594 	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1595 		return;
1596 	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
1597 	tipc_link_xmit(l, &list, xmitq);
1598 }
1599 
1600 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
1601  */
1602 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1603 {
1604 	int mtyp = msg_type(hdr);
1605 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1606 
1607 	if (link_is_up(l))
1608 		return;
1609 
1610 	if (msg_user(hdr) == BCAST_PROTOCOL) {
1611 		l->rcv_nxt = peers_snd_nxt;
1612 		l->state = LINK_ESTABLISHED;
1613 		return;
1614 	}
1615 
1616 	if (l->peer_caps & TIPC_BCAST_SYNCH)
1617 		return;
1618 
1619 	if (msg_peer_node_is_up(hdr))
1620 		return;
1621 
1622 	/* Compatibility: accept older, less safe initial synch data */
1623 	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
1624 		l->rcv_nxt = peers_snd_nxt;
1625 }
1626 
1627 /* link_bc_retr_eval() - check if the indicated range can be retransmitted now
1628  * - Adjust permitted range if there is overlap with previous retransmission
1629  */
1630 static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
1631 {
1632 	unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);
1633 
1634 	if (less(*to, *from))
1635 		return false;
1636 
1637 	/* New retransmission request */
1638 	if ((elapsed > TIPC_BC_RETR_LIMIT) ||
1639 	    less(*to, l->prev_from) || more(*from, l->prev_to)) {
1640 		l->prev_from = *from;
1641 		l->prev_to = *to;
1642 		l->prev_retr = jiffies;
1643 		return true;
1644 	}
1645 
1646 	/* Inside range of previous retransmit */
1647 	if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
1648 		return false;
1649 
1650 	/* Fully or partially outside previous range => exclude overlap */
1651 	if (less(*from, l->prev_from)) {
1652 		*to = l->prev_from - 1;
1653 		l->prev_from = *from;
1654 	}
1655 	if (more(*to, l->prev_to)) {
1656 		*from = l->prev_to + 1;
1657 		l->prev_to = *to;
1658 	}
1659 	l->prev_retr = jiffies;
1660 	return true;
1661 }
1662 
1663 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
1664  */
1665 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
1666 			  struct sk_buff_head *xmitq)
1667 {
1668 	struct tipc_link *snd_l = l->bc_sndlink;
1669 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1670 	u16 from = msg_bcast_ack(hdr) + 1;
1671 	u16 to = from + msg_bc_gap(hdr) - 1;
1672 	int rc = 0;
1673 
1674 	if (!link_is_up(l))
1675 		return rc;
1676 
1677 	if (!msg_peer_node_is_up(hdr))
1678 		return rc;
1679 
1680 	/* Open when peer acknowledges our bcast init msg (pkt #1) */
1681 	if (msg_ack(hdr))
1682 		l->bc_peer_is_up = true;
1683 
1684 	if (!l->bc_peer_is_up)
1685 		return rc;
1686 
1687 	l->stats.recv_nacks++;
1688 
1689 	/* Ignore if peers_snd_nxt goes beyond receive window */
1690 	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
1691 		return rc;
1692 
1693 	if (link_bc_retr_eval(snd_l, &from, &to))
1694 		rc = tipc_link_retrans(snd_l, l, from, to, xmitq);
1695 
1696 	l->snd_nxt = peers_snd_nxt;
1697 	if (link_bc_rcv_gap(l))
1698 		rc |= TIPC_LINK_SND_STATE;
1699 
1700 	/* Return now if sender supports nack via STATE messages */
1701 	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
1702 		return rc;
1703 
1704 	/* Otherwise, be backwards compatible */
1705 
1706 	if (!more(peers_snd_nxt, l->rcv_nxt)) {
1707 		l->nack_state = BC_NACK_SND_CONDITIONAL;
1708 		return 0;
1709 	}
1710 
1711 	/* Don't NACK if one was recently sent or peeked */
1712 	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
1713 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1714 		return 0;
1715 	}
1716 
1717 	/* Conditionally delay NACK sending until next synch rcv */
1718 	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
1719 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1720 		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
1721 			return 0;
1722 	}
1723 
1724 	/* Send NACK now but suppress next one */
1725 	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
1726 	l->nack_state = BC_NACK_SND_SUPPRESS;
1727 	return 0;
1728 }
1729 
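/* tipc_link_bc_ack_rcv - process a broadcast ack from a peer
 * A packet on the broadcast send link is released only once every
 * registered peer (tracked by the per-packet 'ackers' count) has acked it.
 */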
1730 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
1731 			  struct sk_buff_head *xmitq)
1732 {
1733 	struct sk_buff *skb, *tmp;
1734 	struct tipc_link *snd_l = l->bc_sndlink;
1735 
1736 	if (!link_is_up(l) || !l->bc_peer_is_up)
1737 		return;
1738 
1739 	if (!more(acked, l->acked))
1740 		return;
1741 
1742 	/* Skip over packets peer has already acked */
1743 	skb_queue_walk(&snd_l->transmq, skb) {
1744 		if (more(buf_seqno(skb), l->acked))
1745 			break;
1746 	}
1747 
1748 	/* Update/release the packets peer is acking now */
1749 	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
1750 		if (more(buf_seqno(skb), acked))
1751 			break;
1752 		if (!--TIPC_SKB_CB(skb)->ackers) {
1753 			__skb_unlink(skb, &snd_l->transmq);
1754 			kfree_skb(skb);
1755 		}
1756 	}
1757 	l->acked = acked;
1758 	tipc_link_advance_backlog(snd_l, xmitq);
1759 	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
1760 		link_prepare_wakeup(snd_l);
1761 }
1762 
1763 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
1764  * This function is here for backwards compatibility, since
1765  * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
1766  */
1767 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
1768 			  struct sk_buff_head *xmitq)
1769 {
1770 	struct tipc_msg *hdr = buf_msg(skb);
1771 	u32 dnode = msg_destnode(hdr);
1772 	int mtyp = msg_type(hdr);
1773 	u16 acked = msg_bcast_ack(hdr);
1774 	u16 from = acked + 1;
1775 	u16 to = msg_bcgap_to(hdr);
1776 	u16 peers_snd_nxt = to + 1;
1777 	int rc = 0;
1778 
1779 	kfree_skb(skb);
1780 
1781 	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
1782 		return 0;
1783 
1784 	if (mtyp != STATE_MSG)
1785 		return 0;
1786 
1787 	if (dnode == tipc_own_addr(l->net)) {
1788 		tipc_link_bc_ack_rcv(l, acked, xmitq);
1789 		rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq);
1790 		l->stats.recv_nacks++;
1791 		return rc;
1792 	}
1793 
1794 	/* Msg for other node => suppress own NACK at next sync if applicable */
1795 	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
1796 		l->nack_state = BC_NACK_SND_SUPPRESS;
1797 
1798 	return 0;
1799 }
1800 
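/* tipc_link_set_queue_limits - set send window and per-importance backlog
 * limits; the SYSTEM importance limit is sized to hold a full bulk name
 * table distribution (TIPC_MAX_PUBLICATIONS items) regardless of window.
 */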
1801 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1802 {
1803 	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
1804 
1805 	l->window = win;
1806 	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
1807 	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
1808 	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
1809 	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
1810 	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
1811 }
1812 
1813 /**
1814  * tipc_link_reset_stats - reset link statistics
1815  * @l: pointer to link
1816  */
1817 void tipc_link_reset_stats(struct tipc_link *l)
1818 {
1819 	memset(&l->stats, 0, sizeof(l->stats));
1820 }
1821 
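/* Debug helper: log the link name, FSM state, transmit/backlog queue
 * depths and the current send/receive sequence numbers.
 */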
1822 static void link_print(struct tipc_link *l, const char *str)
1823 {
1824 	struct sk_buff *hskb = skb_peek(&l->transmq);
1825 	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
1826 	u16 tail = l->snd_nxt - 1;
1827 
1828 	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
1829 	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1830 		skb_queue_len(&l->transmq), head, tail,
1831 		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
1832 }
1833 
1834 /* Parse and validate nested (link) properties valid for media, bearer and link
1835  */
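/* Returns 0 when parsing succeeds and every supplied property is in range,
 * the nla_parse_nested() error code on a malformed attribute, or -EINVAL
 * for an out-of-range priority, tolerance or window value.
 */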
1836 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1837 {
1838 	int err;
1839 
1840 	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1841 			       tipc_nl_prop_policy, NULL);
1842 	if (err)
1843 		return err;
1844 
1845 	if (props[TIPC_NLA_PROP_PRIO]) {
1846 		u32 prio;
1847 
1848 		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1849 		if (prio > TIPC_MAX_LINK_PRI)
1850 			return -EINVAL;
1851 	}
1852 
1853 	if (props[TIPC_NLA_PROP_TOL]) {
1854 		u32 tol;
1855 
1856 		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1857 		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1858 			return -EINVAL;
1859 	}
1860 
1861 	if (props[TIPC_NLA_PROP_WIN]) {
1862 		u32 win;
1863 
1864 		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1865 		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1866 			return -EINVAL;
1867 	}
1868 
1869 	return 0;
1870 }
1871 
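/* Fill a nested TIPC_NLA_LINK_STATS attribute from the per-link counters.
 * The attribute/value pairs are collected in a local map so they can be
 * emitted in a single loop; note the guards that keep the message-profile
 * total non-zero and avoid dividing by zero for the average queue size.
 */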
1872 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
1873 {
1874 	int i;
1875 	struct nlattr *stats;
1876 
1877 	struct nla_map {
1878 		u32 key;
1879 		u32 val;
1880 	};
1881 
1882 	struct nla_map map[] = {
1883 		{TIPC_NLA_STATS_RX_INFO, 0},
1884 		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1885 		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1886 		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1887 		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1888 		{TIPC_NLA_STATS_TX_INFO, 0},
1889 		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1890 		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1891 		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1892 		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1893 		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1894 			s->msg_length_counts : 1},
1895 		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1896 		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1897 		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1898 		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1899 		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1900 		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1901 		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
1902 		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
1903 		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
1904 		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
1905 		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
1906 		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
1907 		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
1908 		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
1909 		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
1910 		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
1911 		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
1912 		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
1913 		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
1914 		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
1915 		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
1916 		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
1917 			(s->accu_queue_sz / s->queue_sz_counts) : 0}
1918 	};
1919 
1920 	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1921 	if (!stats)
1922 		return -EMSGSIZE;
1923 
1924 	for (i = 0; i <  ARRAY_SIZE(map); i++)
1925 		if (nla_put_u32(skb, map[i].key, map[i].val))
1926 			goto msg_full;
1927 
1928 	nla_nest_end(skb, stats);
1929 
1930 	return 0;
1931 msg_full:
1932 	nla_nest_cancel(skb, stats);
1933 
1934 	return -EMSGSIZE;
1935 }
1936 
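/* Build one TIPC_NL_LINK_GET message for a unicast link: name, peer
 * cluster, MTU, RX/TX counters and the up/active flags go directly under
 * TIPC_NLA_LINK, the priority/tolerance/window properties are nested in
 * TIPC_NLA_LINK_PROP, and the statistics are appended via
 * __tipc_nl_add_stats().  On any overflow the nests are cancelled and
 * -EMSGSIZE is returned.
 */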
1937 /* Caller should hold appropriate locks to protect the link */
1938 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
1939 		       struct tipc_link *link, int nlflags)
1940 {
1941 	int err;
1942 	void *hdr;
1943 	struct nlattr *attrs;
1944 	struct nlattr *prop;
1945 	struct tipc_net *tn = net_generic(net, tipc_net_id);
1946 
1947 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1948 			  nlflags, TIPC_NL_LINK_GET);
1949 	if (!hdr)
1950 		return -EMSGSIZE;
1951 
1952 	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1953 	if (!attrs)
1954 		goto msg_full;
1955 
1956 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
1957 		goto attr_msg_full;
1958 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
1959 			tipc_cluster_mask(tn->own_addr)))
1960 		goto attr_msg_full;
1961 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
1962 		goto attr_msg_full;
1963 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
1964 		goto attr_msg_full;
1965 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
1966 		goto attr_msg_full;
1967 
1968 	if (tipc_link_is_up(link))
1969 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1970 			goto attr_msg_full;
1971 	if (link->active)
1972 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
1973 			goto attr_msg_full;
1974 
1975 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1976 	if (!prop)
1977 		goto attr_msg_full;
1978 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1979 		goto prop_msg_full;
1980 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
1981 		goto prop_msg_full;
1982 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
1983 			link->window))
1984 		goto prop_msg_full;
1985 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1986 		goto prop_msg_full;
1987 	nla_nest_end(msg->skb, prop);
1988 
1989 	err = __tipc_nl_add_stats(msg->skb, &link->stats);
1990 	if (err)
1991 		goto attr_msg_full;
1992 
1993 	nla_nest_end(msg->skb, attrs);
1994 	genlmsg_end(msg->skb, hdr);
1995 
1996 	return 0;
1997 
1998 prop_msg_full:
1999 	nla_nest_cancel(msg->skb, prop);
2000 attr_msg_full:
2001 	nla_nest_cancel(msg->skb, attrs);
2002 msg_full:
2003 	genlmsg_cancel(msg->skb, hdr);
2004 
2005 	return -EMSGSIZE;
2006 }
2007 
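/* Broadcast-link variant of __tipc_nl_add_stats(): same nested
 * TIPC_NLA_LINK_STATS layout, but the RX/TX info attributes carry the
 * real packet counters instead of zero.
 */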
2008 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2009 				      struct tipc_stats *stats)
2010 {
2011 	int i;
2012 	struct nlattr *nest;
2013 
2014 	struct nla_map {
2015 		__u32 key;
2016 		__u32 val;
2017 	};
2018 
2019 	struct nla_map map[] = {
2020 		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2021 		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2022 		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2023 		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2024 		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2025 		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2026 		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2027 		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2028 		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2029 		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2030 		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2031 		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2032 		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2033 		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2034 		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2035 		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2036 		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2037 		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2038 		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2039 			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2040 	};
2041 
2042 	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
2043 	if (!nest)
2044 		return -EMSGSIZE;
2045 
2046 	for (i = 0; i <  ARRAY_SIZE(map); i++)
2047 		if (nla_put_u32(skb, map[i].key, map[i].val))
2048 			goto msg_full;
2049 
2050 	nla_nest_end(skb, nest);
2051 
2052 	return 0;
2053 msg_full:
2054 	nla_nest_cancel(skb, nest);
2055 
2056 	return -EMSGSIZE;
2057 }
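/* Add a TIPC_NL_LINK_GET entry for the broadcast link to a dump.  The
 * broadcast link is reported as always up, the top-level RX/TX attributes
 * are left at zero (the real counters live in the nested statistics), and
 * the whole message is assembled under tipc_bcast_lock().  Returns 0 if
 * the broadcast link does not exist.
 */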
2058 
2059 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2060 {
2061 	int err;
2062 	void *hdr;
2063 	struct nlattr *attrs;
2064 	struct nlattr *prop;
2065 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2066 	struct tipc_link *bcl = tn->bcl;
2067 
2068 	if (!bcl)
2069 		return 0;
2070 
2071 	tipc_bcast_lock(net);
2072 
2073 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2074 			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2075 	if (!hdr) {
2076 		tipc_bcast_unlock(net);
2077 		return -EMSGSIZE;
2078 	}
2079 
2080 	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2081 	if (!attrs)
2082 		goto msg_full;
2083 
2084 	/* The broadcast link is always up */
2085 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2086 		goto attr_msg_full;
2087 
2088 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2089 		goto attr_msg_full;
2090 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2091 		goto attr_msg_full;
2092 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2093 		goto attr_msg_full;
2094 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2095 		goto attr_msg_full;
2096 
2097 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2098 	if (!prop)
2099 		goto attr_msg_full;
2100 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2101 		goto prop_msg_full;
2102 	nla_nest_end(msg->skb, prop);
2103 
2104 	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2105 	if (err)
2106 		goto attr_msg_full;
2107 
2108 	tipc_bcast_unlock(net);
2109 	nla_nest_end(msg->skb, attrs);
2110 	genlmsg_end(msg->skb, hdr);
2111 
2112 	return 0;
2113 
2114 prop_msg_full:
2115 	nla_nest_cancel(msg->skb, prop);
2116 attr_msg_full:
2117 	nla_nest_cancel(msg->skb, attrs);
2118 msg_full:
2119 	tipc_bcast_unlock(net);
2120 	genlmsg_cancel(msg->skb, hdr);
2121 
2122 	return -EMSGSIZE;
2123 }
2124 
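/* Runtime property setters: updating tolerance or priority also queues an
 * immediate STATE protocol message on @xmitq so the peer endpoint learns
 * the new value right away; the abort limit is a purely local setting.
 */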
2125 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2126 			     struct sk_buff_head *xmitq)
2127 {
2128 	l->tolerance = tol;
2129 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
2130 }
2131 
2132 void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2133 			struct sk_buff_head *xmitq)
2134 {
2135 	l->priority = prio;
2136 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
2137 }
2138 
2139 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2140 {
2141 	l->abort_limit = limit;
2142 }
2143