xref: /openbmc/linux/net/tipc/link.c (revision 4b0aaacee51eb6592a03fdefd5ce97558518e291)
1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include "core.h"
38 #include "subscr.h"
39 #include "link.h"
40 #include "bcast.h"
41 #include "socket.h"
42 #include "name_distr.h"
43 #include "discover.h"
44 #include "netlink.h"
45 #include "monitor.h"
46 
47 #include <linux/pkt_sched.h>
48 
49 struct tipc_stats {
50 	u32 sent_pkts;
51 	u32 recv_pkts;
52 	u32 sent_states;
53 	u32 recv_states;
54 	u32 sent_probes;
55 	u32 recv_probes;
56 	u32 sent_nacks;
57 	u32 recv_nacks;
58 	u32 sent_acks;
59 	u32 sent_bundled;
60 	u32 sent_bundles;
61 	u32 recv_bundled;
62 	u32 recv_bundles;
63 	u32 retransmitted;
64 	u32 sent_fragmented;
65 	u32 sent_fragments;
66 	u32 recv_fragmented;
67 	u32 recv_fragments;
68 	u32 link_congs;		/* # port sends blocked by congestion */
69 	u32 deferred_recv;
70 	u32 duplicates;
71 	u32 max_queue_sz;	/* send queue size high water mark */
72 	u32 accu_queue_sz;	/* used for send queue size profiling */
73 	u32 queue_sz_counts;	/* used for send queue size profiling */
74 	u32 msg_length_counts;	/* used for message length profiling */
75 	u32 msg_lengths_total;	/* used for message length profiling */
76 	u32 msg_length_profile[7]; /* used for msg. length profiling */
77 };
78 
79 /**
80  * struct tipc_link - TIPC link data structure
81  * @addr: network address of link's peer node
82  * @name: link name character string
83  * @media_addr: media address to use when sending messages over link
84  * @timer: link timer
85  * @net: pointer to namespace struct
86  * @refcnt: reference counter for permanent references (owner node & timer)
87  * @peer_session: link session # being used by peer end of link
88  * @peer_bearer_id: bearer id used by link's peer endpoint
89  * @bearer_id: local bearer id used by link
90  * @tolerance: minimum link continuity loss needed to reset link [in ms]
91  * @abort_limit: # of unacknowledged continuity probes needed to reset link
92  * @state: current state of link FSM
93  * @peer_caps: bitmap describing capabilities of peer node
94  * @silent_intv_cnt: # of timer intervals without any reception from peer
95  * @proto_msg: template for control messages generated by link
96  * @pmsg: convenience pointer to "proto_msg" field
97  * @priority: current link priority
98  * @net_plane: current link network plane ('A' through 'H')
99  * @mon_state: cookie with information needed by link monitor
100  * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
101  * @exp_msg_count: # of tunnelled messages expected during link changeover
102  * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
103  * @mtu: current maximum packet size for this link
104  * @advertised_mtu: advertised own mtu when link is being established
105  * @transmq: queue for sent, non-acked messages
106  * @backlogq: queue for messages waiting to be sent
107  * @snd_nxt: next sequence number to use for outbound messages
108  * @last_retransm: sequence number of most recently retransmitted message
109  * @stale_cnt: counter for number of identical retransmit attempts
110  * @stale_limit: time when repeated identical retransmits must force link reset
111  * @ackers: # of peers that need to ack each packet before it can be released
112  * @acked: seqno of last packet acked by a certain peer. Used for broadcast.
113  * @rcv_nxt: next sequence number to expect for inbound messages
114  * @deferdq: deferred queue for out-of-sequence messages received from peer
115  * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
116  * @inputq: buffer queue for messages to be delivered upwards
117  * @namedq: buffer queue for name table messages to be delivered upwards
118  * @next_out: ptr to first unsent outbound message in queue
119  * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
120  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
121  * @reasm_buf: head of partially reassembled inbound message fragments
122  * @bc_rcvr: marks that this is a broadcast receiver link
123  * @stats: collects statistics regarding link activity
124  */
125 struct tipc_link {
126 	u32 addr;
127 	char name[TIPC_MAX_LINK_NAME];
128 	struct net *net;
129 
130 	/* Management and link supervision data */
131 	u16 peer_session;
132 	u16 session;
133 	u16 snd_nxt_state;
134 	u16 rcv_nxt_state;
135 	u32 peer_bearer_id;
136 	u32 bearer_id;
137 	u32 tolerance;
138 	u32 abort_limit;
139 	u32 state;
140 	u16 peer_caps;
141 	bool in_session;
142 	bool active;
143 	u32 silent_intv_cnt;
144 	char if_name[TIPC_MAX_IF_NAME];
145 	u32 priority;
146 	char net_plane;
147 	struct tipc_mon_state mon_state;
148 	u16 rst_cnt;
149 
150 	/* Failover/synch */
151 	u16 drop_point;
152 	struct sk_buff *failover_reasm_skb;
153 
154 	/* Max packet negotiation */
155 	u16 mtu;
156 	u16 advertised_mtu;
157 
158 	/* Sending */
159 	struct sk_buff_head transmq;
160 	struct sk_buff_head backlogq;
161 	struct {
162 		u16 len;
163 		u16 limit;
164 	} backlog[5];
165 	u16 snd_nxt;
166 	u16 last_retransm;
167 	u16 window;
168 	u16 stale_cnt;
169 	unsigned long stale_limit;
170 
171 	/* Reception */
172 	u16 rcv_nxt;
173 	u32 rcv_unacked;
174 	struct sk_buff_head deferdq;
175 	struct sk_buff_head *inputq;
176 	struct sk_buff_head *namedq;
177 
178 	/* Congestion handling */
179 	struct sk_buff_head wakeupq;
180 
181 	/* Fragmentation/reassembly */
182 	struct sk_buff *reasm_buf;
183 
184 	/* Broadcast */
185 	u16 ackers;
186 	u16 acked;
187 	struct tipc_link *bc_rcvlink;
188 	struct tipc_link *bc_sndlink;
189 	unsigned long prev_retr;
190 	u16 prev_from;
191 	u16 prev_to;
192 	u8 nack_state;
193 	bool bc_peer_is_up;
194 
195 	/* Statistics */
196 	struct tipc_stats stats;
197 };
198 
199 /*
200  * Error message prefixes
201  */
202 static const char *link_co_err = "Link tunneling error, ";
203 static const char *link_rst_msg = "Resetting link ";
204 
205 /* Send states for broadcast NACKs
206  */
207 enum {
208 	BC_NACK_SND_CONDITIONAL,
209 	BC_NACK_SND_UNCONDITIONAL,
210 	BC_NACK_SND_SUPPRESS,
211 };
212 
213 #define TIPC_BC_RETR_LIMIT 10   /* [ms] */
214 
215 /*
216  * Interval between NACKs when packets arrive out of order
217  */
218 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
219 
220 /* Link FSM states:
221  */
222 enum {
223 	LINK_ESTABLISHED     = 0xe,
224 	LINK_ESTABLISHING    = 0xe  << 4,
225 	LINK_RESET           = 0x1  << 8,
226 	LINK_RESETTING       = 0x2  << 12,
227 	LINK_PEER_RESET      = 0xd  << 16,
228 	LINK_FAILINGOVER     = 0xf  << 20,
229 	LINK_SYNCHING        = 0xc  << 24
230 };
231 
232 /* Link FSM state checking routines
233  */
234 static int link_is_up(struct tipc_link *l)
235 {
236 	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
237 }
238 
239 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
240 			       struct sk_buff_head *xmitq);
241 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
242 				      bool probe_reply, u16 rcvgap,
243 				      int tolerance, int priority,
244 				      struct sk_buff_head *xmitq);
245 static void link_print(struct tipc_link *l, const char *str);
246 static int tipc_link_build_nack_msg(struct tipc_link *l,
247 				    struct sk_buff_head *xmitq);
248 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
249 					struct sk_buff_head *xmitq);
250 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
251 
252 /*
253  *  Simple non-static link routines (i.e. referenced outside this file)
254  */
255 bool tipc_link_is_up(struct tipc_link *l)
256 {
257 	return link_is_up(l);
258 }
259 
260 bool tipc_link_peer_is_down(struct tipc_link *l)
261 {
262 	return l->state == LINK_PEER_RESET;
263 }
264 
265 bool tipc_link_is_reset(struct tipc_link *l)
266 {
267 	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
268 }
269 
270 bool tipc_link_is_establishing(struct tipc_link *l)
271 {
272 	return l->state == LINK_ESTABLISHING;
273 }
274 
275 bool tipc_link_is_synching(struct tipc_link *l)
276 {
277 	return l->state == LINK_SYNCHING;
278 }
279 
280 bool tipc_link_is_failingover(struct tipc_link *l)
281 {
282 	return l->state == LINK_FAILINGOVER;
283 }
284 
285 bool tipc_link_is_blocked(struct tipc_link *l)
286 {
287 	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
288 }
289 
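/* A link with no bc_sndlink back-pointer is itself the broadcast send link;
 * a broadcast receive link is recognized by pointing to itself via bc_rcvlink.
 */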
290 static bool link_is_bc_sndlink(struct tipc_link *l)
291 {
292 	return !l->bc_sndlink;
293 }
294 
295 static bool link_is_bc_rcvlink(struct tipc_link *l)
296 {
297 	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
298 }
299 
300 void tipc_link_set_active(struct tipc_link *l, bool active)
301 {
302 	l->active = active;
303 }
304 
305 u32 tipc_link_id(struct tipc_link *l)
306 {
307 	return l->peer_bearer_id << 16 | l->bearer_id;
308 }
309 
310 int tipc_link_window(struct tipc_link *l)
311 {
312 	return l->window;
313 }
314 
315 int tipc_link_prio(struct tipc_link *l)
316 {
317 	return l->priority;
318 }
319 
320 unsigned long tipc_link_tolerance(struct tipc_link *l)
321 {
322 	return l->tolerance;
323 }
324 
325 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
326 {
327 	return l->inputq;
328 }
329 
330 char tipc_link_plane(struct tipc_link *l)
331 {
332 	return l->net_plane;
333 }
334 
335 void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
336 {
337 	l->peer_caps = capabilities;
338 }
339 
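/* tipc_link_add_bc_peer - register a new unicast peer as broadcast receiver:
 * bump the send link's 'ackers' count, initialize the peer's acked position,
 * and send a broadcast init message over the unicast link.
 */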
340 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
341 			   struct tipc_link *uc_l,
342 			   struct sk_buff_head *xmitq)
343 {
344 	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
345 
346 	snd_l->ackers++;
347 	rcv_l->acked = snd_l->snd_nxt - 1;
348 	snd_l->state = LINK_ESTABLISHED;
349 	tipc_link_build_bc_init_msg(uc_l, xmitq);
350 }
351 
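/* tipc_link_remove_bc_peer - unregister a broadcast receiver: treat all
 * packets sent so far as acked by the departing peer, reset its receive
 * link, and reset the send link too once no ackers remain.
 */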
352 void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
353 			      struct tipc_link *rcv_l,
354 			      struct sk_buff_head *xmitq)
355 {
356 	u16 ack = snd_l->snd_nxt - 1;
357 
358 	snd_l->ackers--;
359 	rcv_l->bc_peer_is_up = true;
360 	rcv_l->state = LINK_ESTABLISHED;
361 	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
362 	tipc_link_reset(rcv_l);
363 	rcv_l->state = LINK_RESET;
364 	if (!snd_l->ackers) {
365 		tipc_link_reset(snd_l);
366 		snd_l->state = LINK_RESET;
367 		__skb_queue_purge(xmitq);
368 	}
369 }
370 
371 int tipc_link_bc_peers(struct tipc_link *l)
372 {
373 	return l->ackers;
374 }
375 
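/* link_bc_rcv_gap - number of broadcast packets the peer reports as sent
 * but not yet received here, or up to the first deferred packet if any.
 */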
376 static u16 link_bc_rcv_gap(struct tipc_link *l)
377 {
378 	struct sk_buff *skb = skb_peek(&l->deferdq);
379 	u16 gap = 0;
380 
381 	if (more(l->snd_nxt, l->rcv_nxt))
382 		gap = l->snd_nxt - l->rcv_nxt;
383 	if (skb)
384 		gap = buf_seqno(skb) - l->rcv_nxt;
385 	return gap;
386 }
387 
388 void tipc_link_set_mtu(struct tipc_link *l, int mtu)
389 {
390 	l->mtu = mtu;
391 }
392 
393 int tipc_link_mtu(struct tipc_link *l)
394 {
395 	return l->mtu;
396 }
397 
398 u16 tipc_link_rcv_nxt(struct tipc_link *l)
399 {
400 	return l->rcv_nxt;
401 }
402 
403 u16 tipc_link_acked(struct tipc_link *l)
404 {
405 	return l->acked;
406 }
407 
408 char *tipc_link_name(struct tipc_link *l)
409 {
410 	return l->name;
411 }
412 
413 u32 tipc_link_state(struct tipc_link *l)
414 {
415 	return l->state;
416 }
417 
418 /**
419  * tipc_link_create - create a new link
420  * @net: pointer to namespace struct
421  * @if_name: associated interface name
422  * @bearer_id: id (index) of associated bearer
423  * @tolerance: link tolerance to be used by link
424  * @net_plane: network plane (A,B,C..) this link belongs to
425  * @mtu: mtu to be advertised by link
426  * @priority: priority to be used by link
427  * @window: send window to be used by link
428  * @session: session to be used by link
429  * @self: own node address
430  * @peer: node id of peer node
431  * @peer_caps: bitmap describing peer node capabilities
432  * @bc_sndlink: the namespace global link used for broadcast sending
433  * @bc_rcvlink: the peer specific link used for broadcast reception
434  * @inputq: queue to put messages ready for delivery
435  * @namedq: queue to put binding table update messages ready for delivery
436  * @link: return value, pointer to put the created link
437  *
438  * Returns true if link was created, otherwise false
439  */
440 bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
441 		      int tolerance, char net_plane, u32 mtu, int priority,
442 		      int window, u32 session, u32 self,
443 		      u32 peer, u8 *peer_id, u16 peer_caps,
444 		      struct tipc_link *bc_sndlink,
445 		      struct tipc_link *bc_rcvlink,
446 		      struct sk_buff_head *inputq,
447 		      struct sk_buff_head *namedq,
448 		      struct tipc_link **link)
449 {
450 	char peer_str[NODE_ID_STR_LEN] = {0,};
451 	char self_str[NODE_ID_STR_LEN] = {0,};
452 	struct tipc_link *l;
453 
454 	l = kzalloc(sizeof(*l), GFP_ATOMIC);
455 	if (!l)
456 		return false;
457 	*link = l;
458 	l->session = session;
459 
460 	/* Set link name for unicast links only */
461 	if (peer_id) {
462 		tipc_nodeid2string(self_str, tipc_own_id(net));
463 		if (strlen(self_str) > 16)
464 			sprintf(self_str, "%x", self);
465 		tipc_nodeid2string(peer_str, peer_id);
466 		if (strlen(peer_str) > 16)
467 			sprintf(peer_str, "%x", peer);
468 	}
469 	/* Peer i/f name will be completed by reset/activate message */
470 	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
471 		 self_str, if_name, peer_str);
472 
473 	strcpy(l->if_name, if_name);
474 	l->addr = peer;
475 	l->peer_caps = peer_caps;
476 	l->net = net;
477 	l->in_session = false;
478 	l->bearer_id = bearer_id;
479 	l->tolerance = tolerance;
480 	l->net_plane = net_plane;
481 	l->advertised_mtu = mtu;
482 	l->mtu = mtu;
483 	l->priority = priority;
484 	tipc_link_set_queue_limits(l, window);
485 	l->ackers = 1;
486 	l->bc_sndlink = bc_sndlink;
487 	l->bc_rcvlink = bc_rcvlink;
488 	l->inputq = inputq;
489 	l->namedq = namedq;
490 	l->state = LINK_RESETTING;
491 	__skb_queue_head_init(&l->transmq);
492 	__skb_queue_head_init(&l->backlogq);
493 	__skb_queue_head_init(&l->deferdq);
494 	skb_queue_head_init(&l->wakeupq);
495 	skb_queue_head_init(l->inputq);
496 	return true;
497 }
498 
499 /**
500  * tipc_link_bc_create - create new link to be used for broadcast
501  * @net: pointer to namespace struct
502  * @mtu: mtu to be used initially if no peers
503  * @window: send window to be used
504  * @inputq: queue to put messages ready for delivery
505  * @namedq: queue to put binding table update messages ready for delivery
506  * @link: return value, pointer to put the created link
507  *
508  * Returns true if link was created, otherwise false
509  */
510 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
511 			 int mtu, int window, u16 peer_caps,
512 			 struct sk_buff_head *inputq,
513 			 struct sk_buff_head *namedq,
514 			 struct tipc_link *bc_sndlink,
515 			 struct tipc_link **link)
516 {
517 	struct tipc_link *l;
518 
519 	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
520 			      0, ownnode, peer, NULL, peer_caps, bc_sndlink,
521 			      NULL, inputq, namedq, link))
522 		return false;
523 
524 	l = *link;
525 	strcpy(l->name, tipc_bclink_name);
526 	tipc_link_reset(l);
527 	l->state = LINK_RESET;
528 	l->ackers = 0;
529 	l->bc_rcvlink = l;
530 
531 	/* Broadcast send link is always up */
532 	if (link_is_bc_sndlink(l))
533 		l->state = LINK_ESTABLISHED;
534 
535 	/* Disable replicast if even a single peer doesn't support it */
536 	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
537 		tipc_bcast_disable_rcast(net);
538 
539 	return true;
540 }
541 
542 /**
543  * tipc_link_fsm_evt - link finite state machine
544  * @l: pointer to link
545  * @evt: state machine event to be processed
546  */
547 int tipc_link_fsm_evt(struct tipc_link *l, int evt)
548 {
549 	int rc = 0;
550 
551 	switch (l->state) {
552 	case LINK_RESETTING:
553 		switch (evt) {
554 		case LINK_PEER_RESET_EVT:
555 			l->state = LINK_PEER_RESET;
556 			break;
557 		case LINK_RESET_EVT:
558 			l->state = LINK_RESET;
559 			break;
560 		case LINK_FAILURE_EVT:
561 		case LINK_FAILOVER_BEGIN_EVT:
562 		case LINK_ESTABLISH_EVT:
563 		case LINK_FAILOVER_END_EVT:
564 		case LINK_SYNCH_BEGIN_EVT:
565 		case LINK_SYNCH_END_EVT:
566 		default:
567 			goto illegal_evt;
568 		}
569 		break;
570 	case LINK_RESET:
571 		switch (evt) {
572 		case LINK_PEER_RESET_EVT:
573 			l->state = LINK_ESTABLISHING;
574 			break;
575 		case LINK_FAILOVER_BEGIN_EVT:
576 			l->state = LINK_FAILINGOVER;
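			/* fall through */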
577 		case LINK_FAILURE_EVT:
578 		case LINK_RESET_EVT:
579 		case LINK_ESTABLISH_EVT:
580 		case LINK_FAILOVER_END_EVT:
581 			break;
582 		case LINK_SYNCH_BEGIN_EVT:
583 		case LINK_SYNCH_END_EVT:
584 		default:
585 			goto illegal_evt;
586 		}
587 		break;
588 	case LINK_PEER_RESET:
589 		switch (evt) {
590 		case LINK_RESET_EVT:
591 			l->state = LINK_ESTABLISHING;
592 			break;
593 		case LINK_PEER_RESET_EVT:
594 		case LINK_ESTABLISH_EVT:
595 		case LINK_FAILURE_EVT:
596 			break;
597 		case LINK_SYNCH_BEGIN_EVT:
598 		case LINK_SYNCH_END_EVT:
599 		case LINK_FAILOVER_BEGIN_EVT:
600 		case LINK_FAILOVER_END_EVT:
601 		default:
602 			goto illegal_evt;
603 		}
604 		break;
605 	case LINK_FAILINGOVER:
606 		switch (evt) {
607 		case LINK_FAILOVER_END_EVT:
608 			l->state = LINK_RESET;
609 			break;
610 		case LINK_PEER_RESET_EVT:
611 		case LINK_RESET_EVT:
612 		case LINK_ESTABLISH_EVT:
613 		case LINK_FAILURE_EVT:
614 			break;
615 		case LINK_FAILOVER_BEGIN_EVT:
616 		case LINK_SYNCH_BEGIN_EVT:
617 		case LINK_SYNCH_END_EVT:
618 		default:
619 			goto illegal_evt;
620 		}
621 		break;
622 	case LINK_ESTABLISHING:
623 		switch (evt) {
624 		case LINK_ESTABLISH_EVT:
625 			l->state = LINK_ESTABLISHED;
626 			break;
627 		case LINK_FAILOVER_BEGIN_EVT:
628 			l->state = LINK_FAILINGOVER;
629 			break;
630 		case LINK_RESET_EVT:
631 			l->state = LINK_RESET;
632 			break;
633 		case LINK_FAILURE_EVT:
634 		case LINK_PEER_RESET_EVT:
635 		case LINK_SYNCH_BEGIN_EVT:
636 		case LINK_FAILOVER_END_EVT:
637 			break;
638 		case LINK_SYNCH_END_EVT:
639 		default:
640 			goto illegal_evt;
641 		}
642 		break;
643 	case LINK_ESTABLISHED:
644 		switch (evt) {
645 		case LINK_PEER_RESET_EVT:
646 			l->state = LINK_PEER_RESET;
647 			rc |= TIPC_LINK_DOWN_EVT;
648 			break;
649 		case LINK_FAILURE_EVT:
650 			l->state = LINK_RESETTING;
651 			rc |= TIPC_LINK_DOWN_EVT;
652 			break;
653 		case LINK_RESET_EVT:
654 			l->state = LINK_RESET;
655 			break;
656 		case LINK_ESTABLISH_EVT:
657 		case LINK_SYNCH_END_EVT:
658 			break;
659 		case LINK_SYNCH_BEGIN_EVT:
660 			l->state = LINK_SYNCHING;
661 			break;
662 		case LINK_FAILOVER_BEGIN_EVT:
663 		case LINK_FAILOVER_END_EVT:
664 		default:
665 			goto illegal_evt;
666 		}
667 		break;
668 	case LINK_SYNCHING:
669 		switch (evt) {
670 		case LINK_PEER_RESET_EVT:
671 			l->state = LINK_PEER_RESET;
672 			rc |= TIPC_LINK_DOWN_EVT;
673 			break;
674 		case LINK_FAILURE_EVT:
675 			l->state = LINK_RESETTING;
676 			rc |= TIPC_LINK_DOWN_EVT;
677 			break;
678 		case LINK_RESET_EVT:
679 			l->state = LINK_RESET;
680 			break;
681 		case LINK_ESTABLISH_EVT:
682 		case LINK_SYNCH_BEGIN_EVT:
683 			break;
684 		case LINK_SYNCH_END_EVT:
685 			l->state = LINK_ESTABLISHED;
686 			break;
687 		case LINK_FAILOVER_BEGIN_EVT:
688 		case LINK_FAILOVER_END_EVT:
689 		default:
690 			goto illegal_evt;
691 		}
692 		break;
693 	default:
694 		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
695 	}
696 	return rc;
697 illegal_evt:
698 	pr_err("Illegal FSM event %x in state %x on link %s\n",
699 	       evt, l->state, l->name);
700 	return rc;
701 }
702 
703 /* link_profile_stats - update statistical profiling of traffic
704  */
705 static void link_profile_stats(struct tipc_link *l)
706 {
707 	struct sk_buff *skb;
708 	struct tipc_msg *msg;
709 	int length;
710 
711 	/* Update counters used in statistical profiling of send traffic */
712 	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
713 	l->stats.queue_sz_counts++;
714 
715 	skb = skb_peek(&l->transmq);
716 	if (!skb)
717 		return;
718 	msg = buf_msg(skb);
719 	length = msg_size(msg);
720 
721 	if (msg_user(msg) == MSG_FRAGMENTER) {
722 		if (msg_type(msg) != FIRST_FRAGMENT)
723 			return;
724 		length = msg_size(msg_get_wrapped(msg));
725 	}
726 	l->stats.msg_lengths_total += length;
727 	l->stats.msg_length_counts++;
728 	if (length <= 64)
729 		l->stats.msg_length_profile[0]++;
730 	else if (length <= 256)
731 		l->stats.msg_length_profile[1]++;
732 	else if (length <= 1024)
733 		l->stats.msg_length_profile[2]++;
734 	else if (length <= 4096)
735 		l->stats.msg_length_profile[3]++;
736 	else if (length <= 16384)
737 		l->stats.msg_length_profile[4]++;
738 	else if (length <= 32768)
739 		l->stats.msg_length_profile[5]++;
740 	else
741 		l->stats.msg_length_profile[6]++;
742 }
743 
744 /* tipc_link_timeout - perform periodic task as instructed from node timeout
745  */
746 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
747 {
748 	int mtyp = 0;
749 	int rc = 0;
750 	bool state = false;
751 	bool probe = false;
752 	bool setup = false;
753 	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
754 	u16 bc_acked = l->bc_rcvlink->acked;
755 	struct tipc_mon_state *mstate = &l->mon_state;
756 
757 	switch (l->state) {
758 	case LINK_ESTABLISHED:
759 	case LINK_SYNCHING:
760 		mtyp = STATE_MSG;
761 		link_profile_stats(l);
762 		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
763 		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
764 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
765 		state = bc_acked != bc_snt;
766 		state |= l->bc_rcvlink->rcv_unacked;
767 		state |= l->rcv_unacked;
768 		state |= !skb_queue_empty(&l->transmq);
769 		state |= !skb_queue_empty(&l->deferdq);
770 		probe = mstate->probing;
771 		probe |= l->silent_intv_cnt;
772 		if (probe || mstate->monitoring)
773 			l->silent_intv_cnt++;
774 		break;
775 	case LINK_RESET:
776 		setup = l->rst_cnt++ <= 4;
777 		setup |= !(l->rst_cnt % 16);
778 		mtyp = RESET_MSG;
779 		break;
780 	case LINK_ESTABLISHING:
781 		setup = true;
782 		mtyp = ACTIVATE_MSG;
783 		break;
784 	case LINK_PEER_RESET:
785 	case LINK_RESETTING:
786 	case LINK_FAILINGOVER:
787 		break;
788 	default:
789 		break;
790 	}
791 
792 	if (state || probe || setup)
793 		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
794 
795 	return rc;
796 }
797 
798 /**
799  * link_schedule_user - schedule a message sender for wakeup after congestion
800  * @l: congested link
801  * @hdr: header of message that is being sent
802  * Create pseudo msg to send back to user when congestion abates
803  */
804 static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
805 {
806 	u32 dnode = tipc_own_addr(l->net);
807 	u32 dport = msg_origport(hdr);
808 	struct sk_buff *skb;
809 
810 	/* Create and schedule wakeup pseudo message */
811 	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
812 			      dnode, l->addr, dport, 0, 0);
813 	if (!skb)
814 		return -ENOBUFS;
815 	msg_set_dest_droppable(buf_msg(skb), true);
816 	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
817 	skb_queue_tail(&l->wakeupq, skb);
818 	l->stats.link_congs++;
819 	return -ELINKCONG;
820 }
821 
822 /**
823  * link_prepare_wakeup - prepare users for wakeup after congestion
824  * @l: congested link
825  * Wake up a number of waiting users, as permitted by available space
826  * in the send queue
827  */
828 static void link_prepare_wakeup(struct tipc_link *l)
829 {
830 	struct sk_buff *skb, *tmp;
831 	int imp, i = 0;
832 
833 	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
834 		imp = TIPC_SKB_CB(skb)->chain_imp;
835 		if (l->backlog[imp].len < l->backlog[imp].limit) {
836 			skb_unlink(skb, &l->wakeupq);
837 			skb_queue_tail(l->inputq, skb);
838 		} else if (i++ > 10) {
839 			break;
840 		}
841 	}
842 }
843 
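/* tipc_link_reset - flush the link: move pending wakeup messages to the
 * input queue, purge transmit/backlog/deferred queues and reassembly
 * buffers, bump the session number, and reinitialize sequence numbers,
 * counters and monitor state.
 */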
844 void tipc_link_reset(struct tipc_link *l)
845 {
846 	l->in_session = false;
847 	l->session++;
848 	l->mtu = l->advertised_mtu;
849 	spin_lock_bh(&l->wakeupq.lock);
850 	spin_lock_bh(&l->inputq->lock);
851 	skb_queue_splice_init(&l->wakeupq, l->inputq);
852 	spin_unlock_bh(&l->inputq->lock);
853 	spin_unlock_bh(&l->wakeupq.lock);
854 
855 	__skb_queue_purge(&l->transmq);
856 	__skb_queue_purge(&l->deferdq);
857 	__skb_queue_purge(&l->backlogq);
858 	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
859 	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
860 	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
861 	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
862 	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
863 	kfree_skb(l->reasm_buf);
864 	kfree_skb(l->failover_reasm_skb);
865 	l->reasm_buf = NULL;
866 	l->failover_reasm_skb = NULL;
867 	l->rcv_unacked = 0;
868 	l->snd_nxt = 1;
869 	l->rcv_nxt = 1;
870 	l->snd_nxt_state = 1;
871 	l->rcv_nxt_state = 1;
872 	l->acked = 0;
873 	l->silent_intv_cnt = 0;
874 	l->rst_cnt = 0;
875 	l->stale_cnt = 0;
876 	l->bc_peer_is_up = false;
877 	memset(&l->mon_state, 0, sizeof(l->mon_state));
878 	tipc_link_reset_stats(l);
879 }
880 
881 /**
882  * tipc_link_xmit(): enqueue buffer list according to queue situation
883  * @link: link to use
884  * @list: chain of buffers containing message
885  * @xmitq: returned list of packets to be sent by caller
886  *
887  * Consumes the buffer chain.
888  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
889  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
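 *
 * A typical caller (e.g. the node layer) invokes this while holding the
 * relevant link/node lock, then hands the accumulated @xmitq to the bearer,
 * for instance via tipc_bearer_xmit().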
890  */
891 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
892 		   struct sk_buff_head *xmitq)
893 {
894 	struct tipc_msg *hdr = buf_msg(skb_peek(list));
895 	unsigned int maxwin = l->window;
896 	int imp = msg_importance(hdr);
897 	unsigned int mtu = l->mtu;
898 	u16 ack = l->rcv_nxt - 1;
899 	u16 seqno = l->snd_nxt;
900 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
901 	struct sk_buff_head *transmq = &l->transmq;
902 	struct sk_buff_head *backlogq = &l->backlogq;
903 	struct sk_buff *skb, *_skb, *bskb;
904 	int pkt_cnt = skb_queue_len(list);
905 	int rc = 0;
906 
907 	if (unlikely(msg_size(hdr) > mtu)) {
908 		skb_queue_purge(list);
909 		return -EMSGSIZE;
910 	}
911 
912 	/* Allow oversubscription of one data msg per source at congestion */
913 	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
914 		if (imp == TIPC_SYSTEM_IMPORTANCE) {
915 			pr_warn("%s<%s>, link overflow\n", link_rst_msg, l->name);
916 			return -ENOBUFS;
917 		}
918 		rc = link_schedule_user(l, hdr);
919 	}
920 
921 	if (pkt_cnt > 1) {
922 		l->stats.sent_fragmented++;
923 		l->stats.sent_fragments += pkt_cnt;
924 	}
925 
926 	/* Prepare each packet for sending, and add to relevant queue: */
927 	while (skb_queue_len(list)) {
928 		skb = skb_peek(list);
929 		hdr = buf_msg(skb);
930 		msg_set_seqno(hdr, seqno);
931 		msg_set_ack(hdr, ack);
932 		msg_set_bcast_ack(hdr, bc_ack);
933 
934 		if (likely(skb_queue_len(transmq) < maxwin)) {
935 			_skb = skb_clone(skb, GFP_ATOMIC);
936 			if (!_skb) {
937 				skb_queue_purge(list);
938 				return -ENOBUFS;
939 			}
940 			__skb_dequeue(list);
941 			__skb_queue_tail(transmq, skb);
942 			__skb_queue_tail(xmitq, _skb);
943 			TIPC_SKB_CB(skb)->ackers = l->ackers;
944 			l->rcv_unacked = 0;
945 			l->stats.sent_pkts++;
946 			seqno++;
947 			continue;
948 		}
949 		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
950 			kfree_skb(__skb_dequeue(list));
951 			l->stats.sent_bundled++;
952 			continue;
953 		}
954 		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
955 			kfree_skb(__skb_dequeue(list));
956 			__skb_queue_tail(backlogq, bskb);
957 			l->backlog[msg_importance(buf_msg(bskb))].len++;
958 			l->stats.sent_bundled++;
959 			l->stats.sent_bundles++;
960 			continue;
961 		}
962 		l->backlog[imp].len += skb_queue_len(list);
963 		skb_queue_splice_tail_init(list, backlogq);
964 	}
965 	l->snd_nxt = seqno;
966 	return rc;
967 }
968 
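/* tipc_link_advance_backlog - move packets from the backlog queue to the
 * transmit queue as long as the send window permits, stamping each packet
 * with current seqno/ack values and queueing a clone on @xmitq.
 */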
969 static void tipc_link_advance_backlog(struct tipc_link *l,
970 				      struct sk_buff_head *xmitq)
971 {
972 	struct sk_buff *skb, *_skb;
973 	struct tipc_msg *hdr;
974 	u16 seqno = l->snd_nxt;
975 	u16 ack = l->rcv_nxt - 1;
976 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
977 
978 	while (skb_queue_len(&l->transmq) < l->window) {
979 		skb = skb_peek(&l->backlogq);
980 		if (!skb)
981 			break;
982 		_skb = skb_clone(skb, GFP_ATOMIC);
983 		if (!_skb)
984 			break;
985 		__skb_dequeue(&l->backlogq);
986 		hdr = buf_msg(skb);
987 		l->backlog[msg_importance(hdr)].len--;
988 		__skb_queue_tail(&l->transmq, skb);
989 		__skb_queue_tail(xmitq, _skb);
990 		TIPC_SKB_CB(skb)->ackers = l->ackers;
991 		msg_set_seqno(hdr, seqno);
992 		msg_set_ack(hdr, ack);
993 		msg_set_bcast_ack(hdr, bc_ack);
994 		l->rcv_unacked = 0;
995 		l->stats.sent_pkts++;
996 		seqno++;
997 	}
998 	l->snd_nxt = seqno;
999 }
1000 
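/* link_retransmit_failure - log the link state and the packet that could
 * not be delivered despite repeated retransmit attempts.
 */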
1001 static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
1002 {
1003 	struct tipc_msg *hdr = buf_msg(skb);
1004 
1005 	pr_warn("Retransmission failure on link <%s>\n", l->name);
1006 	link_print(l, "State of link ");
1007 	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1008 		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
1009 	pr_info("sqno %u, prev: %x, src: %x\n",
1010 		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
1011 }
1012 
1013 /* tipc_link_retrans() - retransmit one or more packets
1014  * @l: the link to transmit on
1015  * @r: the receiving link ordering the retransmit. Same as l if unicast
1016  * @from: retransmit from (inclusive) this sequence number
1017  * @to: retransmit to (inclusive) this sequence number
1018  * @xmitq: queue for accumulating the retransmitted packets
1019  */
1020 static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
1021 			     u16 from, u16 to, struct sk_buff_head *xmitq)
1022 {
1023 	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
1024 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1025 	u16 ack = l->rcv_nxt - 1;
1026 	struct tipc_msg *hdr;
1027 
1028 	if (!skb)
1029 		return 0;
1030 
1031 	/* Detect repeated retransmit failures on same packet */
1032 	if (r->last_retransm != buf_seqno(skb)) {
1033 		r->last_retransm = buf_seqno(skb);
1034 		r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance);
1035 	} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
1036 		link_retransmit_failure(l, skb);
1037 		if (link_is_bc_sndlink(l))
1038 			return TIPC_LINK_DOWN_EVT;
1039 		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1040 	}
1041 
1042 	skb_queue_walk(&l->transmq, skb) {
1043 		hdr = buf_msg(skb);
1044 		if (less(msg_seqno(hdr), from))
1045 			continue;
1046 		if (more(msg_seqno(hdr), to))
1047 			break;
1048 		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1049 		if (!_skb)
1050 			return 0;
1051 		hdr = buf_msg(_skb);
1052 		msg_set_ack(hdr, ack);
1053 		msg_set_bcast_ack(hdr, bc_ack);
1054 		_skb->priority = TC_PRIO_CONTROL;
1055 		__skb_queue_tail(xmitq, _skb);
1056 		l->stats.retransmitted++;
1057 	}
1058 	return 0;
1059 }
1060 
1061 /* tipc_data_input - deliver data and name distr msgs to upper layer
1062  *
1063  * Consumes buffer if message is of right type
1064  * Node lock must be held
1065  */
1066 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1067 			    struct sk_buff_head *inputq)
1068 {
1069 	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
1070 	struct tipc_msg *hdr = buf_msg(skb);
1071 
1072 	switch (msg_user(hdr)) {
1073 	case TIPC_LOW_IMPORTANCE:
1074 	case TIPC_MEDIUM_IMPORTANCE:
1075 	case TIPC_HIGH_IMPORTANCE:
1076 	case TIPC_CRITICAL_IMPORTANCE:
1077 		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
1078 			skb_queue_tail(mc_inputq, skb);
1079 			return true;
1080 		}
1081 		/* else: fall through */
1082 	case CONN_MANAGER:
1083 		skb_queue_tail(inputq, skb);
1084 		return true;
1085 	case GROUP_PROTOCOL:
1086 		skb_queue_tail(mc_inputq, skb);
1087 		return true;
1088 	case NAME_DISTRIBUTOR:
1089 		l->bc_rcvlink->state = LINK_ESTABLISHED;
1090 		skb_queue_tail(l->namedq, skb);
1091 		return true;
1092 	case MSG_BUNDLER:
1093 	case TUNNEL_PROTOCOL:
1094 	case MSG_FRAGMENTER:
1095 	case BCAST_PROTOCOL:
1096 		return false;
1097 	default:
1098 		pr_warn("Dropping received illegal msg type\n");
1099 		kfree_skb(skb);
1100 		return false;
1101 	}
1102 }
1103 
1104 /* tipc_link_input - process packet that has passed link protocol check
1105  *
1106  * Consumes buffer
1107  */
1108 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1109 			   struct sk_buff_head *inputq)
1110 {
1111 	struct tipc_msg *hdr = buf_msg(skb);
1112 	struct sk_buff **reasm_skb = &l->reasm_buf;
1113 	struct sk_buff *iskb;
1114 	struct sk_buff_head tmpq;
1115 	int usr = msg_user(hdr);
1116 	int rc = 0;
1117 	int pos = 0;
1118 	int ipos = 0;
1119 
1120 	if (unlikely(usr == TUNNEL_PROTOCOL)) {
1121 		if (msg_type(hdr) == SYNCH_MSG) {
1122 			__skb_queue_purge(&l->deferdq);
1123 			goto drop;
1124 		}
1125 		if (!tipc_msg_extract(skb, &iskb, &ipos))
1126 			return rc;
1127 		kfree_skb(skb);
1128 		skb = iskb;
1129 		hdr = buf_msg(skb);
1130 		if (less(msg_seqno(hdr), l->drop_point))
1131 			goto drop;
1132 		if (tipc_data_input(l, skb, inputq))
1133 			return rc;
1134 		usr = msg_user(hdr);
1135 		reasm_skb = &l->failover_reasm_skb;
1136 	}
1137 
1138 	if (usr == MSG_BUNDLER) {
1139 		skb_queue_head_init(&tmpq);
1140 		l->stats.recv_bundles++;
1141 		l->stats.recv_bundled += msg_msgcnt(hdr);
1142 		while (tipc_msg_extract(skb, &iskb, &pos))
1143 			tipc_data_input(l, iskb, &tmpq);
1144 		tipc_skb_queue_splice_tail(&tmpq, inputq);
1145 		return 0;
1146 	} else if (usr == MSG_FRAGMENTER) {
1147 		l->stats.recv_fragments++;
1148 		if (tipc_buf_append(reasm_skb, &skb)) {
1149 			l->stats.recv_fragmented++;
1150 			tipc_data_input(l, skb, inputq);
1151 		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1152 			pr_warn_ratelimited("Unable to build fragment list\n");
1153 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1154 		}
1155 		return 0;
1156 	} else if (usr == BCAST_PROTOCOL) {
1157 		tipc_bcast_lock(l->net);
1158 		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1159 		tipc_bcast_unlock(l->net);
1160 	}
1161 drop:
1162 	kfree_skb(skb);
1163 	return 0;
1164 }
1165 
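/* tipc_link_release_pkts - free all packets in the transmit queue up to and
 * including the @acked sequence number; returns true if anything was released.
 */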
1166 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1167 {
1168 	bool released = false;
1169 	struct sk_buff *skb, *tmp;
1170 
1171 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1172 		if (more(buf_seqno(skb), acked))
1173 			break;
1174 		__skb_unlink(skb, &l->transmq);
1175 		kfree_skb(skb);
1176 		released = true;
1177 	}
1178 	return released;
1179 }
1180 
1181 /* tipc_link_build_state_msg: prepare link state message for transmission
1182  *
1183  * Note that sending of broadcast ack is coordinated among nodes, to reduce
1184  * risk of ack storms towards the sender
1185  */
1186 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1187 {
1188 	if (!l)
1189 		return 0;
1190 
1191 	/* Broadcast ACK must be sent via a unicast link => defer to caller */
1192 	if (link_is_bc_rcvlink(l)) {
1193 		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1194 			return 0;
1195 		l->rcv_unacked = 0;
1196 
1197 		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1198 		l->snd_nxt = l->rcv_nxt;
1199 		return TIPC_LINK_SND_STATE;
1200 	}
1201 
1202 	/* Unicast ACK */
1203 	l->rcv_unacked = 0;
1204 	l->stats.sent_acks++;
1205 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1206 	return 0;
1207 }
1208 
1209 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1210  */
1211 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1212 {
1213 	int mtyp = RESET_MSG;
1214 	struct sk_buff *skb;
1215 
1216 	if (l->state == LINK_ESTABLISHING)
1217 		mtyp = ACTIVATE_MSG;
1218 
1219 	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1220 
1221 	/* Inform peer that this endpoint is going down if applicable */
1222 	skb = skb_peek_tail(xmitq);
1223 	if (skb && (l->state == LINK_RESET))
1224 		msg_set_peer_stopping(buf_msg(skb), 1);
1225 }
1226 
1227 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1228  * Note that sending of broadcast NACK is coordinated among nodes, to
1229  * reduce the risk of NACK storms towards the sender
1230  */
1231 static int tipc_link_build_nack_msg(struct tipc_link *l,
1232 				    struct sk_buff_head *xmitq)
1233 {
1234 	u32 def_cnt = ++l->stats.deferred_recv;
1235 	int match1, match2;
1236 
1237 	if (link_is_bc_rcvlink(l)) {
1238 		match1 = def_cnt & 0xf;
1239 		match2 = tipc_own_addr(l->net) & 0xf;
1240 		if (match1 == match2)
1241 			return TIPC_LINK_SND_STATE;
1242 		return 0;
1243 	}
1244 
1245 	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
1246 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1247 	return 0;
1248 }
1249 
1250 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1251  * @l: the link that should handle the message
1252  * @skb: TIPC packet
1253  * @xmitq: queue to place packets to be sent after this call
1254  */
1255 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1256 		  struct sk_buff_head *xmitq)
1257 {
1258 	struct sk_buff_head *defq = &l->deferdq;
1259 	struct tipc_msg *hdr;
1260 	u16 seqno, rcv_nxt, win_lim;
1261 	int rc = 0;
1262 
1263 	do {
1264 		hdr = buf_msg(skb);
1265 		seqno = msg_seqno(hdr);
1266 		rcv_nxt = l->rcv_nxt;
1267 		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1268 
1269 		/* Verify and update link state */
1270 		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1271 			return tipc_link_proto_rcv(l, skb, xmitq);
1272 
1273 		if (unlikely(!link_is_up(l))) {
1274 			if (l->state == LINK_ESTABLISHING)
1275 				rc = TIPC_LINK_UP_EVT;
1276 			goto drop;
1277 		}
1278 
1279 		/* Don't send probe at next timeout expiration */
1280 		l->silent_intv_cnt = 0;
1281 
1282 		/* Drop if outside receive window */
1283 		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1284 			l->stats.duplicates++;
1285 			goto drop;
1286 		}
1287 
1288 		/* Forward queues and wake up waiting users */
1289 		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1290 			l->stale_cnt = 0;
1291 			tipc_link_advance_backlog(l, xmitq);
1292 			if (unlikely(!skb_queue_empty(&l->wakeupq)))
1293 				link_prepare_wakeup(l);
1294 		}
1295 
1296 		/* Defer delivery if sequence gap */
1297 		if (unlikely(seqno != rcv_nxt)) {
1298 			__tipc_skb_queue_sorted(defq, seqno, skb);
1299 			rc |= tipc_link_build_nack_msg(l, xmitq);
1300 			break;
1301 		}
1302 
1303 		/* Deliver packet */
1304 		l->rcv_nxt++;
1305 		l->stats.recv_pkts++;
1306 		if (!tipc_data_input(l, skb, l->inputq))
1307 			rc |= tipc_link_input(l, skb, l->inputq);
1308 		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1309 			rc |= tipc_link_build_state_msg(l, xmitq);
1310 		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1311 			break;
1312 	} while ((skb = __skb_dequeue(defq)));
1313 
1314 	return rc;
1315 drop:
1316 	kfree_skb(skb);
1317 	return rc;
1318 }
1319 
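/* tipc_link_build_proto_msg - build a RESET, ACTIVATE or STATE protocol
 * message reflecting the current link state and append it to @xmitq.
 */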
1320 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1321 				      bool probe_reply, u16 rcvgap,
1322 				      int tolerance, int priority,
1323 				      struct sk_buff_head *xmitq)
1324 {
1325 	struct tipc_link *bcl = l->bc_rcvlink;
1326 	struct sk_buff *skb;
1327 	struct tipc_msg *hdr;
1328 	struct sk_buff_head *dfq = &l->deferdq;
1329 	bool node_up = link_is_up(bcl);
1330 	struct tipc_mon_state *mstate = &l->mon_state;
1331 	int dlen = 0;
1332 	void *data;
1333 
1334 	/* Don't send protocol message during reset or link failover */
1335 	if (tipc_link_is_blocked(l))
1336 		return;
1337 
1338 	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1339 		return;
1340 
1341 	if (!skb_queue_empty(dfq))
1342 		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1343 
1344 	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1345 			      tipc_max_domain_size, l->addr,
1346 			      tipc_own_addr(l->net), 0, 0, 0);
1347 	if (!skb)
1348 		return;
1349 
1350 	hdr = buf_msg(skb);
1351 	data = msg_data(hdr);
1352 	msg_set_session(hdr, l->session);
1353 	msg_set_bearer_id(hdr, l->bearer_id);
1354 	msg_set_net_plane(hdr, l->net_plane);
1355 	msg_set_next_sent(hdr, l->snd_nxt);
1356 	msg_set_ack(hdr, l->rcv_nxt - 1);
1357 	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1358 	msg_set_bc_ack_invalid(hdr, !node_up);
1359 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1360 	msg_set_link_tolerance(hdr, tolerance);
1361 	msg_set_linkprio(hdr, priority);
1362 	msg_set_redundant_link(hdr, node_up);
1363 	msg_set_seq_gap(hdr, 0);
1364 	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1365 
1366 	if (mtyp == STATE_MSG) {
1367 		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1368 			msg_set_seqno(hdr, l->snd_nxt_state++);
1369 		msg_set_seq_gap(hdr, rcvgap);
1370 		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1371 		msg_set_probe(hdr, probe);
1372 		msg_set_is_keepalive(hdr, probe || probe_reply);
1373 		tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
1374 		msg_set_size(hdr, INT_H_SIZE + dlen);
1375 		skb_trim(skb, INT_H_SIZE + dlen);
1376 		l->stats.sent_states++;
1377 		l->rcv_unacked = 0;
1378 	} else {
1379 		/* RESET_MSG or ACTIVATE_MSG */
1380 		msg_set_max_pkt(hdr, l->advertised_mtu);
1381 		strcpy(data, l->if_name);
1382 		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1383 		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1384 	}
1385 	if (probe)
1386 		l->stats.sent_probes++;
1387 	if (rcvgap)
1388 		l->stats.sent_nacks++;
1389 	skb->priority = TC_PRIO_CONTROL;
1390 	__skb_queue_tail(xmitq, skb);
1391 }
1392 
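/* tipc_link_create_dummy_tnl_msg - build a FAILOVER tunnel message carrying
 * a single dummy packet, so the peer endpoint can complete a failover even
 * when there are no real packets left to tunnel.
 */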
1393 void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1394 				    struct sk_buff_head *xmitq)
1395 {
1396 	u32 onode = tipc_own_addr(l->net);
1397 	struct tipc_msg *hdr, *ihdr;
1398 	struct sk_buff_head tnlq;
1399 	struct sk_buff *skb;
1400 	u32 dnode = l->addr;
1401 
1402 	skb_queue_head_init(&tnlq);
1403 	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1404 			      INT_H_SIZE, BASIC_H_SIZE,
1405 			      dnode, onode, 0, 0, 0);
1406 	if (!skb) {
1407 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1408 		return;
1409 	}
1410 
1411 	hdr = buf_msg(skb);
1412 	msg_set_msgcnt(hdr, 1);
1413 	msg_set_bearer_id(hdr, l->peer_bearer_id);
1414 
1415 	ihdr = (struct tipc_msg *)msg_data(hdr);
1416 	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1417 		      BASIC_H_SIZE, dnode);
1418 	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1419 	__skb_queue_tail(&tnlq, skb);
1420 	tipc_link_xmit(l, &tnlq, xmitq);
1421 }
1422 
1423 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1424  * with contents of the link's transmit and backlog queues.
1425  */
1426 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1427 			   int mtyp, struct sk_buff_head *xmitq)
1428 {
1429 	struct sk_buff *skb, *tnlskb;
1430 	struct tipc_msg *hdr, tnlhdr;
1431 	struct sk_buff_head *queue = &l->transmq;
1432 	struct sk_buff_head tmpxq, tnlq;
1433 	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1434 
1435 	if (!tnl)
1436 		return;
1437 
1438 	skb_queue_head_init(&tnlq);
1439 	skb_queue_head_init(&tmpxq);
1440 
1441 	/* At least one packet required for safe algorithm => add dummy */
1442 	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1443 			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1444 			      0, 0, TIPC_ERR_NO_PORT);
1445 	if (!skb) {
1446 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1447 		return;
1448 	}
1449 	skb_queue_tail(&tnlq, skb);
1450 	tipc_link_xmit(l, &tnlq, &tmpxq);
1451 	__skb_queue_purge(&tmpxq);
1452 
1453 	/* Initialize reusable tunnel packet header */
1454 	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1455 		      mtyp, INT_H_SIZE, l->addr);
1456 	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
1457 	msg_set_msgcnt(&tnlhdr, pktcnt);
1458 	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1459 tnl:
1460 	/* Wrap each packet into a tunnel packet */
1461 	skb_queue_walk(queue, skb) {
1462 		hdr = buf_msg(skb);
1463 		if (queue == &l->backlogq)
1464 			msg_set_seqno(hdr, seqno++);
1465 		pktlen = msg_size(hdr);
1466 		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1467 		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
1468 		if (!tnlskb) {
1469 			pr_warn("%sunable to send packet\n", link_co_err);
1470 			return;
1471 		}
1472 		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1473 		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1474 		__skb_queue_tail(&tnlq, tnlskb);
1475 	}
1476 	if (queue != &l->backlogq) {
1477 		queue = &l->backlogq;
1478 		goto tnl;
1479 	}
1480 
1481 	tipc_link_xmit(tnl, &tnlq, xmitq);
1482 
1483 	if (mtyp == FAILOVER_MSG) {
1484 		tnl->drop_point = l->rcv_nxt;
1485 		tnl->failover_reasm_skb = l->reasm_buf;
1486 		l->reasm_buf = NULL;
1487 	}
1488 }
1489 
1490 /* tipc_link_validate_msg(): validate message against current link state
1491  * Returns true if message should be accepted, otherwise false
1492  */
1493 bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
1494 {
1495 	u16 curr_session = l->peer_session;
1496 	u16 session = msg_session(hdr);
1497 	int mtyp = msg_type(hdr);
1498 
1499 	if (msg_user(hdr) != LINK_PROTOCOL)
1500 		return true;
1501 
1502 	switch (mtyp) {
1503 	case RESET_MSG:
1504 		if (!l->in_session)
1505 			return true;
1506 		/* Accept only RESET with new session number */
1507 		return more(session, curr_session);
1508 	case ACTIVATE_MSG:
1509 		if (!l->in_session)
1510 			return true;
1511 		/* Accept only ACTIVATE with new or current session number */
1512 		return !less(session, curr_session);
1513 	case STATE_MSG:
1514 		/* Accept only STATE with current session number */
1515 		if (!l->in_session)
1516 			return false;
1517 		if (session != curr_session)
1518 			return false;
1519 		/* Extra sanity check */
1520 		if (!link_is_up(l) && msg_ack(hdr))
1521 			return false;
1522 		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
1523 			return true;
1524 		/* Accept only STATE with new sequence number */
1525 		return !less(msg_seqno(hdr), l->rcv_nxt_state);
1526 	default:
1527 		return false;
1528 	}
1529 }
1530 
1531 /* tipc_link_proto_rcv(): receive link level protocol message:
1532  * Note that network plane id propagates through the network, and may
1533  * change at any time. The node with lowest numerical id determines
1534  * network plane
1535  */
1536 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1537 			       struct sk_buff_head *xmitq)
1538 {
1539 	struct tipc_msg *hdr = buf_msg(skb);
1540 	u16 rcvgap = 0;
1541 	u16 ack = msg_ack(hdr);
1542 	u16 gap = msg_seq_gap(hdr);
1543 	u16 peers_snd_nxt =  msg_next_sent(hdr);
1544 	u16 peers_tol = msg_link_tolerance(hdr);
1545 	u16 peers_prio = msg_linkprio(hdr);
1546 	u16 rcv_nxt = l->rcv_nxt;
1547 	u16 dlen = msg_data_sz(hdr);
1548 	int mtyp = msg_type(hdr);
1549 	bool reply = msg_probe(hdr);
1550 	void *data;
1551 	char *if_name;
1552 	int rc = 0;
1553 
1554 	if (tipc_link_is_blocked(l) || !xmitq)
1555 		goto exit;
1556 
1557 	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1558 		l->net_plane = msg_net_plane(hdr);
1559 
1560 	skb_linearize(skb);
1561 	hdr = buf_msg(skb);
1562 	data = msg_data(hdr);
1563 
1564 	if (!tipc_link_validate_msg(l, hdr))
1565 		goto exit;
1566 
1567 	switch (mtyp) {
1568 	case RESET_MSG:
1569 	case ACTIVATE_MSG:
1570 		/* Complete own link name with peer's interface name */
1571 		if_name =  strrchr(l->name, ':') + 1;
1572 		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1573 			break;
1574 		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1575 			break;
1576 		strncpy(if_name, data, TIPC_MAX_IF_NAME);
1577 
1578 		/* Update own tolerance if peer indicates a non-zero value */
1579 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1580 			l->tolerance = peers_tol;
1581 
1582 		/* Update own priority if peer's priority is higher */
1583 		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1584 			l->priority = peers_prio;
1585 
1586 		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1587 		if (msg_peer_stopping(hdr))
1588 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1589 		else if ((mtyp == RESET_MSG) || !link_is_up(l))
1590 			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1591 
1592 		/* ACTIVATE_MSG takes up link if it was already locally reset */
1593 		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1594 			rc = TIPC_LINK_UP_EVT;
1595 
1596 		l->peer_session = msg_session(hdr);
1597 		l->in_session = true;
1598 		l->peer_bearer_id = msg_bearer_id(hdr);
1599 		if (l->mtu > msg_max_pkt(hdr))
1600 			l->mtu = msg_max_pkt(hdr);
1601 		break;
1602 
1603 	case STATE_MSG:
1604 		l->rcv_nxt_state = msg_seqno(hdr) + 1;
1605 
1606 		/* Update own tolerance if peer indicates a non-zero value */
1607 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1608 			l->tolerance = peers_tol;
1609 
1610 		/* Update own prio if peer indicates a different value */
1611 		if ((peers_prio != l->priority) &&
1612 		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
1613 			l->priority = peers_prio;
1614 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1615 		}
1616 
1617 		l->silent_intv_cnt = 0;
1618 		l->stats.recv_states++;
1619 		if (msg_probe(hdr))
1620 			l->stats.recv_probes++;
1621 
1622 		if (!link_is_up(l)) {
1623 			if (l->state == LINK_ESTABLISHING)
1624 				rc = TIPC_LINK_UP_EVT;
1625 			break;
1626 		}
1627 		tipc_mon_rcv(l->net, data, dlen, l->addr,
1628 			     &l->mon_state, l->bearer_id);
1629 
1630 		/* Send NACK if peer has sent pkts we haven't received yet */
1631 		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
1632 			rcvgap = peers_snd_nxt - l->rcv_nxt;
1633 		if (rcvgap || reply)
1634 			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
1635 						  rcvgap, 0, 0, xmitq);
1636 		tipc_link_release_pkts(l, ack);
1637 
1638 		/* If NACK, retransmit will now start at right position */
1639 		if (gap) {
1640 			rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq);
1641 			l->stats.recv_nacks++;
1642 		}
1643 
1644 		tipc_link_advance_backlog(l, xmitq);
1645 		if (unlikely(!skb_queue_empty(&l->wakeupq)))
1646 			link_prepare_wakeup(l);
1647 	}
1648 exit:
1649 	kfree_skb(skb);
1650 	return rc;
1651 }
1652 
1653 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1654  */
1655 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1656 					 u16 peers_snd_nxt,
1657 					 struct sk_buff_head *xmitq)
1658 {
1659 	struct sk_buff *skb;
1660 	struct tipc_msg *hdr;
1661 	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1662 	u16 ack = l->rcv_nxt - 1;
1663 	u16 gap_to = peers_snd_nxt - 1;
1664 
1665 	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
1666 			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
1667 	if (!skb)
1668 		return false;
1669 	hdr = buf_msg(skb);
1670 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1671 	msg_set_bcast_ack(hdr, ack);
1672 	msg_set_bcgap_after(hdr, ack);
1673 	if (dfrd_skb)
1674 		gap_to = buf_seqno(dfrd_skb) - 1;
1675 	msg_set_bcgap_to(hdr, gap_to);
1676 	msg_set_non_seq(hdr, bcast);
1677 	__skb_queue_tail(xmitq, skb);
1678 	return true;
1679 }
1680 
1681 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1682  *
1683  * Give a newly added peer node the sequence number where it should
1684  * start receiving and acking broadcast packets.
1685  */
1686 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1687 					struct sk_buff_head *xmitq)
1688 {
1689 	struct sk_buff_head list;
1690 
1691 	__skb_queue_head_init(&list);
1692 	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1693 		return;
1694 	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
1695 	tipc_link_xmit(l, &list, xmitq);
1696 }
1697 
1698 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
1699  */
1700 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1701 {
1702 	int mtyp = msg_type(hdr);
1703 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1704 
1705 	if (link_is_up(l))
1706 		return;
1707 
1708 	if (msg_user(hdr) == BCAST_PROTOCOL) {
1709 		l->rcv_nxt = peers_snd_nxt;
1710 		l->state = LINK_ESTABLISHED;
1711 		return;
1712 	}
1713 
1714 	if (l->peer_caps & TIPC_BCAST_SYNCH)
1715 		return;
1716 
1717 	if (msg_peer_node_is_up(hdr))
1718 		return;
1719 
1720 	/* Compatibility: accept older, less safe initial synch data */
1721 	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
1722 		l->rcv_nxt = peers_snd_nxt;
1723 }
1724 
1725 /* link_bc_retr_eval() - check if the indicated range can be retransmitted now
1726  * - Adjust permitted range if there is overlap with previous retransmission
1727  */
1728 static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
1729 {
1730 	unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);
1731 
1732 	if (less(*to, *from))
1733 		return false;
1734 
1735 	/* New retransmission request */
1736 	if ((elapsed > TIPC_BC_RETR_LIMIT) ||
1737 	    less(*to, l->prev_from) || more(*from, l->prev_to)) {
1738 		l->prev_from = *from;
1739 		l->prev_to = *to;
1740 		l->prev_retr = jiffies;
1741 		return true;
1742 	}
1743 
1744 	/* Inside range of previous retransmit */
1745 	if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
1746 		return false;
1747 
1748 	/* Fully or partially outside previous range => exclude overlap */
1749 	if (less(*from, l->prev_from)) {
1750 		*to = l->prev_from - 1;
1751 		l->prev_from = *from;
1752 	}
1753 	if (more(*to, l->prev_to)) {
1754 		*from = l->prev_to + 1;
1755 		l->prev_to = *to;
1756 	}
1757 	l->prev_retr = jiffies;
1758 	return true;
1759 }
1760 
1761 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
1762  */
1763 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
1764 			  struct sk_buff_head *xmitq)
1765 {
1766 	struct tipc_link *snd_l = l->bc_sndlink;
1767 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1768 	u16 from = msg_bcast_ack(hdr) + 1;
1769 	u16 to = from + msg_bc_gap(hdr) - 1;
1770 	int rc = 0;
1771 
1772 	if (!link_is_up(l))
1773 		return rc;
1774 
1775 	if (!msg_peer_node_is_up(hdr))
1776 		return rc;
1777 
1778 	/* Open when peer acknowledges our bcast init msg (pkt #1) */
1779 	if (msg_ack(hdr))
1780 		l->bc_peer_is_up = true;
1781 
1782 	if (!l->bc_peer_is_up)
1783 		return rc;
1784 
1785 	l->stats.recv_nacks++;
1786 
1787 	/* Ignore if peers_snd_nxt goes beyond receive window */
1788 	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
1789 		return rc;
1790 
1791 	if (link_bc_retr_eval(snd_l, &from, &to))
1792 		rc = tipc_link_retrans(snd_l, l, from, to, xmitq);
1793 
1794 	l->snd_nxt = peers_snd_nxt;
1795 	if (link_bc_rcv_gap(l))
1796 		rc |= TIPC_LINK_SND_STATE;
1797 
1798 	/* Return now if sender supports nack via STATE messages */
1799 	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
1800 		return rc;
1801 
1802 	/* Otherwise, be backwards compatible */
1803 
1804 	if (!more(peers_snd_nxt, l->rcv_nxt)) {
1805 		l->nack_state = BC_NACK_SND_CONDITIONAL;
1806 		return 0;
1807 	}
1808 
1809 	/* Don't NACK if one was recently sent or peeked */
1810 	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
1811 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1812 		return 0;
1813 	}
1814 
1815 	/* Conditionally delay NACK sending until next synch rcv */
1816 	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
1817 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1818 		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
1819 			return 0;
1820 	}
1821 
1822 	/* Send NACK now but suppress next one */
1823 	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
1824 	l->nack_state = BC_NACK_SND_SUPPRESS;
1825 	return 0;
1826 }
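
/* Illustration (editorial sketch, not part of this file): the
 * backwards-compatible NACK decisions above, taken for peers without
 * TIPC_BCAST_STATE_NACK, rewritten as a standalone state machine.
 * Enum and function names are hypothetical; MIN_WIN stands in for
 * TIPC_MIN_LINK_WIN.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum nack_state { NACK_CONDITIONAL, NACK_UNCONDITIONAL, NACK_SUPPRESS };

#define MIN_WIN 50	/* stand-in for TIPC_MIN_LINK_WIN */

/* "a follows b" in mod-2^16 sequence space, like TIPC's more() */
static bool seq_more(uint16_t a, uint16_t b)
{
	return (uint16_t)(b - a) >= 32768;
}

/* Decide whether this broadcast sync message should trigger a NACK */
static bool nack_decide(enum nack_state *st, uint16_t peers_snd_nxt,
			uint16_t rcv_nxt)
{
	/* Peer has not sent past what we have received: nothing to NACK */
	if (!seq_more(peers_snd_nxt, rcv_nxt)) {
		*st = NACK_CONDITIONAL;
		return false;
	}
	/* A NACK went out recently: skip exactly one opportunity */
	if (*st == NACK_SUPPRESS) {
		*st = NACK_UNCONDITIONAL;
		return false;
	}
	/* Small gaps may close by themselves: wait for one more sync */
	if (*st == NACK_CONDITIONAL) {
		*st = NACK_UNCONDITIONAL;
		if ((uint16_t)(peers_snd_nxt - rcv_nxt) < MIN_WIN)
			return false;
	}
	/* Send the NACK now, then suppress the next opportunity */
	*st = NACK_SUPPRESS;
	return true;
}

int main(void)
{
	enum nack_state st = NACK_CONDITIONAL;

	/* Gap of three packets: the first sync defers, the second NACKs */
	printf("%d\n", nack_decide(&st, 103, 100));	/* 0 */
	printf("%d\n", nack_decide(&st, 103, 100));	/* 1 */
	return 0;
}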
1827 
1828 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
1829 			  struct sk_buff_head *xmitq)
1830 {
1831 	struct sk_buff *skb, *tmp;
1832 	struct tipc_link *snd_l = l->bc_sndlink;
1833 
1834 	if (!link_is_up(l) || !l->bc_peer_is_up)
1835 		return;
1836 
1837 	if (!more(acked, l->acked))
1838 		return;
1839 
1840 	/* Skip over packets peer has already acked */
1841 	skb_queue_walk(&snd_l->transmq, skb) {
1842 		if (more(buf_seqno(skb), l->acked))
1843 			break;
1844 	}
1845 
1846 	/* Update/release the packets peer is acking now */
1847 	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
1848 		if (more(buf_seqno(skb), acked))
1849 			break;
1850 		if (!--TIPC_SKB_CB(skb)->ackers) {
1851 			__skb_unlink(skb, &snd_l->transmq);
1852 			kfree_skb(skb);
1853 		}
1854 	}
1855 	l->acked = acked;
1856 	tipc_link_advance_backlog(snd_l, xmitq);
1857 	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
1858 		link_prepare_wakeup(snd_l);
1859 }
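
/* Illustration (editorial sketch, not part of this file): the per-packet
 * "ackers" reference count used above, modelled with a plain singly linked
 * list. A broadcast packet stays queued until every peer has acked it; the
 * last ack unlinks and frees it. The real code additionally remembers each
 * peer's previous ack (l->acked) so a packet is decremented at most once
 * per peer; that bookkeeping and seqno wraparound are omitted here.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct bc_pkt {
	uint16_t seqno;
	int ackers;			/* peers still expected to ack this */
	struct bc_pkt *next;
};

/* One peer acks everything up to and including @acked */
static void bc_ack_rcv(struct bc_pkt **head, uint16_t acked)
{
	struct bc_pkt **p = head;

	while (*p && (*p)->seqno <= acked) {
		struct bc_pkt *pkt = *p;

		if (--pkt->ackers) {
			p = &pkt->next;		/* other peers still pending */
			continue;
		}
		*p = pkt->next;			/* last peer: unlink and free */
		free(pkt);
	}
}

static struct bc_pkt *pkt_new(uint16_t seqno, int ackers, struct bc_pkt *next)
{
	struct bc_pkt *pkt = malloc(sizeof(*pkt));

	pkt->seqno = seqno;
	pkt->ackers = ackers;
	pkt->next = next;
	return pkt;
}

int main(void)
{
	/* Two peers; packets 1..3 queued, each needing two acks */
	struct bc_pkt *q = pkt_new(1, 2, pkt_new(2, 2, pkt_new(3, 2, NULL)));

	bc_ack_rcv(&q, 2);	/* first peer acks 1..2: nothing freed yet */
	bc_ack_rcv(&q, 2);	/* second peer acks 1..2: 1 and 2 are freed */
	printf("queue head is now %u\n", (unsigned)q->seqno);	/* 3 */
	return 0;
}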
1860 
1861 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
1862  * This function is here for backwards compatibility, since
1863  * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
1864  */
1865 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
1866 			  struct sk_buff_head *xmitq)
1867 {
1868 	struct tipc_msg *hdr = buf_msg(skb);
1869 	u32 dnode = msg_destnode(hdr);
1870 	int mtyp = msg_type(hdr);
1871 	u16 acked = msg_bcast_ack(hdr);
1872 	u16 from = acked + 1;
1873 	u16 to = msg_bcgap_to(hdr);
1874 	u16 peers_snd_nxt = to + 1;
1875 	int rc = 0;
1876 
1877 	kfree_skb(skb);
1878 
1879 	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
1880 		return 0;
1881 
1882 	if (mtyp != STATE_MSG)
1883 		return 0;
1884 
1885 	if (dnode == tipc_own_addr(l->net)) {
1886 		tipc_link_bc_ack_rcv(l, acked, xmitq);
1887 		rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq);
1888 		l->stats.recv_nacks++;
1889 		return rc;
1890 	}
1891 
1892 	/* Msg for other node => suppress own NACK at next sync if applicable */
1893 	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
1894 		l->nack_state = BC_NACK_SND_SUPPRESS;
1895 
1896 	return 0;
1897 }
1898 
1899 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1900 {
1901 	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
1902 
1903 	l->window = win;
1904 	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
1905 	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
1906 	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
1907 	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
1908 	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
1909 }
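
/* Illustration (editorial sketch, not part of this file): how the backlog
 * limits above scale with the configured window. The floors 50/100/150/200
 * and the window multipliers come from the function above; max_publ, mtu and
 * item_size are plain parameters here, and the values fed in from main()
 * are hypothetical rather than the kernel's TIPC_MAX_PUBL/ITEM_SIZE.
 */
#include <stdint.h>
#include <stdio.h>

struct backlog_limits {
	uint32_t low, medium, high, critical, system;
};

static uint32_t max_u32(uint32_t a, uint32_t b)
{
	return a > b ? a : b;
}

static struct backlog_limits queue_limits(uint32_t win, uint32_t mtu,
					  uint32_t max_publ, uint32_t item_size)
{
	struct backlog_limits l = {
		.low      = max_u32(50,  win),
		.medium   = max_u32(100, win * 2),
		.high     = max_u32(150, win * 3),
		.critical = max_u32(200, win * 4),
		/* bulk name-table distribution: publication items per MTU */
		.system   = max_publ / (mtu / item_size),
	};

	return l;
}

int main(void)
{
	/* Hypothetical values: window 50, Ethernet-sized MTU, 20-byte items */
	struct backlog_limits l = queue_limits(50, 1500, 65535, 20);

	printf("low %u medium %u high %u critical %u system %u\n",
	       l.low, l.medium, l.high, l.critical, l.system);
	return 0;
}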
1910 
1911 /**
1912  * tipc_link_reset_stats - reset link statistics
1913  * @l: pointer to link
1914  */
1915 void tipc_link_reset_stats(struct tipc_link *l)
1916 {
1917 	memset(&l->stats, 0, sizeof(l->stats));
1918 }
1919 
1920 static void link_print(struct tipc_link *l, const char *str)
1921 {
1922 	struct sk_buff *hskb = skb_peek(&l->transmq);
1923 	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
1924 	u16 tail = l->snd_nxt - 1;
1925 
1926 	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
1927 	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1928 		skb_queue_len(&l->transmq), head, tail,
1929 		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
1930 }
1931 
1932 /* Parse and validate nested (link) properties valid for media, bearer and link
1933  */
1934 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1935 {
1936 	int err;
1937 
1938 	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1939 			       tipc_nl_prop_policy, NULL);
1940 	if (err)
1941 		return err;
1942 
1943 	if (props[TIPC_NLA_PROP_PRIO]) {
1944 		u32 prio;
1945 
1946 		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1947 		if (prio > TIPC_MAX_LINK_PRI)
1948 			return -EINVAL;
1949 	}
1950 
1951 	if (props[TIPC_NLA_PROP_TOL]) {
1952 		u32 tol;
1953 
1954 		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1955 		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1956 			return -EINVAL;
1957 	}
1958 
1959 	if (props[TIPC_NLA_PROP_WIN]) {
1960 		u32 win;
1961 
1962 		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1963 		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1964 			return -EINVAL;
1965 	}
1966 
1967 	return 0;
1968 }
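
/* Usage sketch (editorial, kernel context, not a standalone program): how a
 * netlink "set" handler would typically run the nested TIPC_NLA_LINK_PROP
 * attribute through the validator above and then apply the individual
 * properties. Variable names (attrs, l, xmitq) are illustrative.
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *	int err;
 *
 *	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
 *	if (err)
 *		return err;
 *
 *	if (props[TIPC_NLA_PROP_TOL])
 *		tipc_link_set_tolerance(l, nla_get_u32(props[TIPC_NLA_PROP_TOL]),
 *					&xmitq);
 *	if (props[TIPC_NLA_PROP_PRIO])
 *		tipc_link_set_prio(l, nla_get_u32(props[TIPC_NLA_PROP_PRIO]),
 *				   &xmitq);
 *	if (props[TIPC_NLA_PROP_WIN])
 *		tipc_link_set_queue_limits(l, nla_get_u32(props[TIPC_NLA_PROP_WIN]));
 */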
1969 
1970 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
1971 {
1972 	int i;
1973 	struct nlattr *stats;
1974 
1975 	struct nla_map {
1976 		u32 key;
1977 		u32 val;
1978 	};
1979 
1980 	struct nla_map map[] = {
1981 		{TIPC_NLA_STATS_RX_INFO, 0},
1982 		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1983 		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1984 		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1985 		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1986 		{TIPC_NLA_STATS_TX_INFO, 0},
1987 		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1988 		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1989 		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1990 		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1991 		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1992 			s->msg_length_counts : 1},
1993 		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1994 		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1995 		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1996 		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1997 		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1998 		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1999 		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2000 		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2001 		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2002 		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
2003 		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2004 		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2005 		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2006 		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
2007 		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2008 		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2009 		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2010 		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2011 		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2012 		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2013 		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2014 		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2015 			(s->accu_queue_sz / s->queue_sz_counts) : 0}
2016 	};
2017 
2018 	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
2019 	if (!stats)
2020 		return -EMSGSIZE;
2021 
2022 	for (i = 0; i <  ARRAY_SIZE(map); i++)
2023 		if (nla_put_u32(skb, map[i].key, map[i].val))
2024 			goto msg_full;
2025 
2026 	nla_nest_end(skb, stats);
2027 
2028 	return 0;
2029 msg_full:
2030 	nla_nest_cancel(skb, stats);
2031 
2032 	return -EMSGSIZE;
2033 }
2034 
2035 /* Caller should hold appropriate locks to protect the link */
2036 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2037 		       struct tipc_link *link, int nlflags)
2038 {
2039 	u32 self = tipc_own_addr(net);
2040 	struct nlattr *attrs;
2041 	struct nlattr *prop;
2042 	void *hdr;
2043 	int err;
2044 
2045 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2046 			  nlflags, TIPC_NL_LINK_GET);
2047 	if (!hdr)
2048 		return -EMSGSIZE;
2049 
2050 	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2051 	if (!attrs)
2052 		goto msg_full;
2053 
2054 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2055 		goto attr_msg_full;
2056 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
2057 		goto attr_msg_full;
2058 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2059 		goto attr_msg_full;
2060 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
2061 		goto attr_msg_full;
2062 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
2063 		goto attr_msg_full;
2064 
2065 	if (tipc_link_is_up(link))
2066 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2067 			goto attr_msg_full;
2068 	if (link->active)
2069 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2070 			goto attr_msg_full;
2071 
2072 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2073 	if (!prop)
2074 		goto attr_msg_full;
2075 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2076 		goto prop_msg_full;
2077 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2078 		goto prop_msg_full;
2079 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2080 			link->window))
2081 		goto prop_msg_full;
2082 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2083 		goto prop_msg_full;
2084 	nla_nest_end(msg->skb, prop);
2085 
2086 	err = __tipc_nl_add_stats(msg->skb, &link->stats);
2087 	if (err)
2088 		goto attr_msg_full;
2089 
2090 	nla_nest_end(msg->skb, attrs);
2091 	genlmsg_end(msg->skb, hdr);
2092 
2093 	return 0;
2094 
2095 prop_msg_full:
2096 	nla_nest_cancel(msg->skb, prop);
2097 attr_msg_full:
2098 	nla_nest_cancel(msg->skb, attrs);
2099 msg_full:
2100 	genlmsg_cancel(msg->skb, hdr);
2101 
2102 	return -EMSGSIZE;
2103 }
2104 
2105 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2106 				      struct tipc_stats *stats)
2107 {
2108 	int i;
2109 	struct nlattr *nest;
2110 
2111 	struct nla_map {
2112 		__u32 key;
2113 		__u32 val;
2114 	};
2115 
2116 	struct nla_map map[] = {
2117 		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2118 		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2119 		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2120 		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2121 		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2122 		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2123 		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2124 		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2125 		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2126 		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2127 		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2128 		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2129 		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2130 		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2131 		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2132 		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2133 		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2134 		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2135 		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2136 			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2137 	};
2138 
2139 	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
2140 	if (!nest)
2141 		return -EMSGSIZE;
2142 
2143 	for (i = 0; i <  ARRAY_SIZE(map); i++)
2144 		if (nla_put_u32(skb, map[i].key, map[i].val))
2145 			goto msg_full;
2146 
2147 	nla_nest_end(skb, nest);
2148 
2149 	return 0;
2150 msg_full:
2151 	nla_nest_cancel(skb, nest);
2152 
2153 	return -EMSGSIZE;
2154 }
2155 
2156 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2157 {
2158 	int err;
2159 	void *hdr;
2160 	struct nlattr *attrs;
2161 	struct nlattr *prop;
2162 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2163 	struct tipc_link *bcl = tn->bcl;
2164 
2165 	if (!bcl)
2166 		return 0;
2167 
2168 	tipc_bcast_lock(net);
2169 
2170 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2171 			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2172 	if (!hdr) {
2173 		tipc_bcast_unlock(net);
2174 		return -EMSGSIZE;
2175 	}
2176 
2177 	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2178 	if (!attrs)
2179 		goto msg_full;
2180 
2181 	/* The broadcast link is always up */
2182 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2183 		goto attr_msg_full;
2184 
2185 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2186 		goto attr_msg_full;
2187 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2188 		goto attr_msg_full;
2189 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2190 		goto attr_msg_full;
2191 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2192 		goto attr_msg_full;
2193 
2194 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2195 	if (!prop)
2196 		goto attr_msg_full;
2197 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2198 		goto prop_msg_full;
2199 	nla_nest_end(msg->skb, prop);
2200 
2201 	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2202 	if (err)
2203 		goto attr_msg_full;
2204 
2205 	tipc_bcast_unlock(net);
2206 	nla_nest_end(msg->skb, attrs);
2207 	genlmsg_end(msg->skb, hdr);
2208 
2209 	return 0;
2210 
2211 prop_msg_full:
2212 	nla_nest_cancel(msg->skb, prop);
2213 attr_msg_full:
2214 	nla_nest_cancel(msg->skb, attrs);
2215 msg_full:
2216 	tipc_bcast_unlock(net);
2217 	genlmsg_cancel(msg->skb, hdr);
2218 
2219 	return -EMSGSIZE;
2220 }
2221 
2222 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2223 			     struct sk_buff_head *xmitq)
2224 {
2225 	l->tolerance = tol;
2226 	if (link_is_up(l))
2227 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
2228 }
2229 
2230 void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2231 			struct sk_buff_head *xmitq)
2232 {
2233 	l->priority = prio;
2234 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
2235 }
2236 
2237 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2238 {
2239 	l->abort_limit = limit;
2240 }
2241