xref: /openbmc/linux/net/tipc/link.c (revision ae213c44)
1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include "core.h"
38 #include "subscr.h"
39 #include "link.h"
40 #include "bcast.h"
41 #include "socket.h"
42 #include "name_distr.h"
43 #include "discover.h"
44 #include "netlink.h"
45 #include "monitor.h"
46 #include "trace.h"
47 
48 #include <linux/pkt_sched.h>
49 
50 struct tipc_stats {
51 	u32 sent_pkts;
52 	u32 recv_pkts;
53 	u32 sent_states;
54 	u32 recv_states;
55 	u32 sent_probes;
56 	u32 recv_probes;
57 	u32 sent_nacks;
58 	u32 recv_nacks;
59 	u32 sent_acks;
60 	u32 sent_bundled;
61 	u32 sent_bundles;
62 	u32 recv_bundled;
63 	u32 recv_bundles;
64 	u32 retransmitted;
65 	u32 sent_fragmented;
66 	u32 sent_fragments;
67 	u32 recv_fragmented;
68 	u32 recv_fragments;
69 	u32 link_congs;		/* # port sends blocked by congestion */
70 	u32 deferred_recv;
71 	u32 duplicates;
72 	u32 max_queue_sz;	/* send queue size high water mark */
73 	u32 accu_queue_sz;	/* used for send queue size profiling */
74 	u32 queue_sz_counts;	/* used for send queue size profiling */
75 	u32 msg_length_counts;	/* used for message length profiling */
76 	u32 msg_lengths_total;	/* used for message length profiling */
77 	u32 msg_length_profile[7]; /* used for msg. length profiling */
78 };
79 
80 /**
81  * struct tipc_link - TIPC link data structure
82  * @addr: network address of link's peer node
83  * @name: link name character string
84  * @media_addr: media address to use when sending messages over link
85  * @timer: link timer
86  * @net: pointer to namespace struct
87  * @refcnt: reference counter for permanent references (owner node & timer)
88  * @peer_session: link session # being used by peer end of link
89  * @peer_bearer_id: bearer id used by link's peer endpoint
90  * @bearer_id: local bearer id used by link
91  * @tolerance: minimum link continuity loss needed to reset link [in ms]
92  * @abort_limit: # of unacknowledged continuity probes needed to reset link
93  * @state: current state of link FSM
94  * @peer_caps: bitmap describing capabilities of peer node
95  * @silent_intv_cnt: # of timer intervals without any reception from peer
96  * @proto_msg: template for control messages generated by link
97  * @pmsg: convenience pointer to "proto_msg" field
98  * @priority: current link priority
99  * @net_plane: current link network plane ('A' through 'H')
100  * @mon_state: cookie with information needed by link monitor
101  * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
102  * @exp_msg_count: # of tunnelled messages expected during link changeover
103  * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
104  * @mtu: current maximum packet size for this link
105  * @advertised_mtu: advertised own mtu when link is being established
106  * @transmitq: queue for sent, non-acked messages
107  * @backlogq: queue for messages waiting to be sent
108  * @snt_nxt: next sequence number to use for outbound messages
109  * @prev_from: sequence number of the most recent retransmission request
110  * @stale_cnt: counter for number of identical retransmit attempts
111  * @stale_limit: time when repeated identical retransmits must force link reset
112  * @ackers: # of peers that need to ack each packet before it can be released
113  * @acked: seqno of the last packet acked by a certain peer. Used for broadcast.
114  * @rcv_nxt: next sequence number to expect for inbound messages
115  * @deferred_queue: deferred queue of out-of-sequence broadcast messages received from node
116  * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
117  * @inputq: buffer queue for messages to be delivered upwards
118  * @namedq: buffer queue for name table messages to be delivered upwards
119  * @next_out: ptr to first unsent outbound message in queue
120  * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
121  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
122  * @reasm_buf: head of partially reassembled inbound message fragments
123  * @bc_rcvr: marks that this is a broadcast receiver link
124  * @stats: collects statistics regarding link activity
125  */
126 struct tipc_link {
127 	u32 addr;
128 	char name[TIPC_MAX_LINK_NAME];
129 	struct net *net;
130 
131 	/* Management and link supervision data */
132 	u16 peer_session;
133 	u16 session;
134 	u16 snd_nxt_state;
135 	u16 rcv_nxt_state;
136 	u32 peer_bearer_id;
137 	u32 bearer_id;
138 	u32 tolerance;
139 	u32 abort_limit;
140 	u32 state;
141 	u16 peer_caps;
142 	bool in_session;
143 	bool active;
144 	u32 silent_intv_cnt;
145 	char if_name[TIPC_MAX_IF_NAME];
146 	u32 priority;
147 	char net_plane;
148 	struct tipc_mon_state mon_state;
149 	u16 rst_cnt;
150 
151 	/* Failover/synch */
152 	u16 drop_point;
153 	struct sk_buff *failover_reasm_skb;
154 	struct sk_buff_head failover_deferdq;
155 
156 	/* Max packet negotiation */
157 	u16 mtu;
158 	u16 advertised_mtu;
159 
160 	/* Sending */
161 	struct sk_buff_head transmq;
162 	struct sk_buff_head backlogq;
163 	struct {
164 		u16 len;
165 		u16 limit;
166 	} backlog[5];
167 	u16 snd_nxt;
168 	u16 prev_from;
169 	u16 window;
170 	u16 stale_cnt;
171 	unsigned long stale_limit;
172 
173 	/* Reception */
174 	u16 rcv_nxt;
175 	u32 rcv_unacked;
176 	struct sk_buff_head deferdq;
177 	struct sk_buff_head *inputq;
178 	struct sk_buff_head *namedq;
179 
180 	/* Congestion handling */
181 	struct sk_buff_head wakeupq;
182 
183 	/* Fragmentation/reassembly */
184 	struct sk_buff *reasm_buf;
185 
186 	/* Broadcast */
187 	u16 ackers;
188 	u16 acked;
189 	struct tipc_link *bc_rcvlink;
190 	struct tipc_link *bc_sndlink;
191 	u8 nack_state;
192 	bool bc_peer_is_up;
193 
194 	/* Statistics */
195 	struct tipc_stats stats;
196 };
197 
198 /*
199  * Error message prefixes
200  */
201 static const char *link_co_err = "Link tunneling error, ";
202 static const char *link_rst_msg = "Resetting link ";
203 
204 /* Send states for broadcast NACKs
205  */
206 enum {
207 	BC_NACK_SND_CONDITIONAL,
208 	BC_NACK_SND_UNCONDITIONAL,
209 	BC_NACK_SND_SUPPRESS,
210 };
211 
212 #define TIPC_BC_RETR_LIM msecs_to_jiffies(10)   /* [ms] */
213 #define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
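/* Note: TIPC_BC_RETR_LIM is a relative interval that the call sites add to
 * jiffies themselves, whereas TIPC_UC_RETR_TIME already expands to an
 * absolute deadline ("now" + 1 ms) at the point where it is assigned.
 */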
214 
215 /*
216  * Interval between NACKs when packets arrive out of order
217  */
218 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
219 
220 /* Link FSM states:
221  */
222 enum {
223 	LINK_ESTABLISHED     = 0xe,
224 	LINK_ESTABLISHING    = 0xe  << 4,
225 	LINK_RESET           = 0x1  << 8,
226 	LINK_RESETTING       = 0x2  << 12,
227 	LINK_PEER_RESET      = 0xd  << 16,
228 	LINK_FAILINGOVER     = 0xf  << 20,
229 	LINK_SYNCHING        = 0xc  << 24
230 };
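/* Each state above occupies its own nibble of the 32-bit state word, so a
 * link's membership in a set of states can be tested with a single bitwise
 * AND, as done in link_is_up(), tipc_link_is_reset() and
 * tipc_link_is_blocked() below.
 */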
231 
232 /* Link FSM state checking routines
233  */
234 static int link_is_up(struct tipc_link *l)
235 {
236 	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
237 }
238 
239 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
240 			       struct sk_buff_head *xmitq);
241 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
242 				      bool probe_reply, u16 rcvgap,
243 				      int tolerance, int priority,
244 				      struct sk_buff_head *xmitq);
245 static void link_print(struct tipc_link *l, const char *str);
246 static int tipc_link_build_nack_msg(struct tipc_link *l,
247 				    struct sk_buff_head *xmitq);
248 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
249 					struct sk_buff_head *xmitq);
250 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
251 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data);
252 static void tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
253 				      struct tipc_gap_ack_blks *ga,
254 				      struct sk_buff_head *xmitq);
255 
256 /*
257  *  Simple non-static link routines (i.e. referenced outside this file)
258  */
259 bool tipc_link_is_up(struct tipc_link *l)
260 {
261 	return link_is_up(l);
262 }
263 
264 bool tipc_link_peer_is_down(struct tipc_link *l)
265 {
266 	return l->state == LINK_PEER_RESET;
267 }
268 
269 bool tipc_link_is_reset(struct tipc_link *l)
270 {
271 	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
272 }
273 
274 bool tipc_link_is_establishing(struct tipc_link *l)
275 {
276 	return l->state == LINK_ESTABLISHING;
277 }
278 
279 bool tipc_link_is_synching(struct tipc_link *l)
280 {
281 	return l->state == LINK_SYNCHING;
282 }
283 
284 bool tipc_link_is_failingover(struct tipc_link *l)
285 {
286 	return l->state == LINK_FAILINGOVER;
287 }
288 
289 bool tipc_link_is_blocked(struct tipc_link *l)
290 {
291 	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
292 }
293 
294 static bool link_is_bc_sndlink(struct tipc_link *l)
295 {
296 	return !l->bc_sndlink;
297 }
298 
299 static bool link_is_bc_rcvlink(struct tipc_link *l)
300 {
301 	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
302 }
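/* As the two helpers above show, the broadcast send link is recognized by
 * having no bc_sndlink back-pointer of its own, while a broadcast receive
 * link points to itself via bc_rcvlink (set in tipc_link_bc_create()) and
 * still carries a valid bc_sndlink.
 */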
303 
304 void tipc_link_set_active(struct tipc_link *l, bool active)
305 {
306 	l->active = active;
307 }
308 
309 u32 tipc_link_id(struct tipc_link *l)
310 {
311 	return l->peer_bearer_id << 16 | l->bearer_id;
312 }
313 
314 int tipc_link_window(struct tipc_link *l)
315 {
316 	return l->window;
317 }
318 
319 int tipc_link_prio(struct tipc_link *l)
320 {
321 	return l->priority;
322 }
323 
324 unsigned long tipc_link_tolerance(struct tipc_link *l)
325 {
326 	return l->tolerance;
327 }
328 
329 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
330 {
331 	return l->inputq;
332 }
333 
334 char tipc_link_plane(struct tipc_link *l)
335 {
336 	return l->net_plane;
337 }
338 
339 void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
340 {
341 	l->peer_caps = capabilities;
342 }
343 
344 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
345 			   struct tipc_link *uc_l,
346 			   struct sk_buff_head *xmitq)
347 {
348 	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
349 
350 	snd_l->ackers++;
351 	rcv_l->acked = snd_l->snd_nxt - 1;
352 	snd_l->state = LINK_ESTABLISHED;
353 	tipc_link_build_bc_init_msg(uc_l, xmitq);
354 }
355 
356 void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
357 			      struct tipc_link *rcv_l,
358 			      struct sk_buff_head *xmitq)
359 {
360 	u16 ack = snd_l->snd_nxt - 1;
361 
362 	snd_l->ackers--;
363 	rcv_l->bc_peer_is_up = true;
364 	rcv_l->state = LINK_ESTABLISHED;
365 	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
366 	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
367 	tipc_link_reset(rcv_l);
368 	rcv_l->state = LINK_RESET;
369 	if (!snd_l->ackers) {
370 		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
371 		tipc_link_reset(snd_l);
372 		snd_l->state = LINK_RESET;
373 		__skb_queue_purge(xmitq);
374 	}
375 }
376 
377 int tipc_link_bc_peers(struct tipc_link *l)
378 {
379 	return l->ackers;
380 }
381 
382 static u16 link_bc_rcv_gap(struct tipc_link *l)
383 {
384 	struct sk_buff *skb = skb_peek(&l->deferdq);
385 	u16 gap = 0;
386 
387 	if (more(l->snd_nxt, l->rcv_nxt))
388 		gap = l->snd_nxt - l->rcv_nxt;
389 	if (skb)
390 		gap = buf_seqno(skb) - l->rcv_nxt;
391 	return gap;
392 }
393 
394 void tipc_link_set_mtu(struct tipc_link *l, int mtu)
395 {
396 	l->mtu = mtu;
397 }
398 
399 int tipc_link_mtu(struct tipc_link *l)
400 {
401 	return l->mtu;
402 }
403 
404 u16 tipc_link_rcv_nxt(struct tipc_link *l)
405 {
406 	return l->rcv_nxt;
407 }
408 
409 u16 tipc_link_acked(struct tipc_link *l)
410 {
411 	return l->acked;
412 }
413 
414 char *tipc_link_name(struct tipc_link *l)
415 {
416 	return l->name;
417 }
418 
419 u32 tipc_link_state(struct tipc_link *l)
420 {
421 	return l->state;
422 }
423 
424 /**
425  * tipc_link_create - create a new link
426  * @net: pointer to the applicable network namespace
427  * @if_name: associated interface name
428  * @bearer_id: id (index) of associated bearer
429  * @tolerance: link tolerance to be used by link
430  * @net_plane: network plane (A, B, C, ...) this link belongs to
431  * @mtu: mtu to be advertised by link
432  * @priority: priority to be used by link
433  * @window: send window to be used by link
434  * @session: session to be used by link
435  * @ownnode: identity of own node
436  * @peer: node id of peer node
437  * @peer_caps: bitmap describing peer node capabilities
438  * @bc_sndlink: the namespace global link used for broadcast sending
439  * @bc_rcvlink: the peer specific link used for broadcast reception
440  * @inputq: queue to put messages ready for delivery
441  * @namedq: queue to put binding table update messages ready for delivery
442  * @link: return value, pointer to put the created link
443  *
444  * Returns true if link was created, otherwise false
445  */
446 bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
447 		      int tolerance, char net_plane, u32 mtu, int priority,
448 		      int window, u32 session, u32 self,
449 		      u32 peer, u8 *peer_id, u16 peer_caps,
450 		      struct tipc_link *bc_sndlink,
451 		      struct tipc_link *bc_rcvlink,
452 		      struct sk_buff_head *inputq,
453 		      struct sk_buff_head *namedq,
454 		      struct tipc_link **link)
455 {
456 	char peer_str[NODE_ID_STR_LEN] = {0,};
457 	char self_str[NODE_ID_STR_LEN] = {0,};
458 	struct tipc_link *l;
459 
460 	l = kzalloc(sizeof(*l), GFP_ATOMIC);
461 	if (!l)
462 		return false;
463 	*link = l;
464 	l->session = session;
465 
466 	/* Set link name for unicast links only */
467 	if (peer_id) {
468 		tipc_nodeid2string(self_str, tipc_own_id(net));
469 		if (strlen(self_str) > 16)
470 			sprintf(self_str, "%x", self);
471 		tipc_nodeid2string(peer_str, peer_id);
472 		if (strlen(peer_str) > 16)
473 			sprintf(peer_str, "%x", peer);
474 	}
475 	/* Peer i/f name will be completed by reset/activate message */
476 	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
477 		 self_str, if_name, peer_str);
478 
479 	strcpy(l->if_name, if_name);
480 	l->addr = peer;
481 	l->peer_caps = peer_caps;
482 	l->net = net;
483 	l->in_session = false;
484 	l->bearer_id = bearer_id;
485 	l->tolerance = tolerance;
486 	if (bc_rcvlink)
487 		bc_rcvlink->tolerance = tolerance;
488 	l->net_plane = net_plane;
489 	l->advertised_mtu = mtu;
490 	l->mtu = mtu;
491 	l->priority = priority;
492 	tipc_link_set_queue_limits(l, window);
493 	l->ackers = 1;
494 	l->bc_sndlink = bc_sndlink;
495 	l->bc_rcvlink = bc_rcvlink;
496 	l->inputq = inputq;
497 	l->namedq = namedq;
498 	l->state = LINK_RESETTING;
499 	__skb_queue_head_init(&l->transmq);
500 	__skb_queue_head_init(&l->backlogq);
501 	__skb_queue_head_init(&l->deferdq);
502 	__skb_queue_head_init(&l->failover_deferdq);
503 	skb_queue_head_init(&l->wakeupq);
504 	skb_queue_head_init(l->inputq);
505 	return true;
506 }
507 
508 /**
509  * tipc_link_bc_create - create new link to be used for broadcast
510  * @net: pointer to the applicable network namespace
511  * @mtu: mtu to be used initially if no peers
512  * @window: send window to be used
513  * @inputq: queue to put messages ready for delivery
514  * @namedq: queue to put binding table update messages ready for delivery
515  * @link: return value, pointer to put the created link
516  *
517  * Returns true if link was created, otherwise false
518  */
519 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
520 			 int mtu, int window, u16 peer_caps,
521 			 struct sk_buff_head *inputq,
522 			 struct sk_buff_head *namedq,
523 			 struct tipc_link *bc_sndlink,
524 			 struct tipc_link **link)
525 {
526 	struct tipc_link *l;
527 
528 	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
529 			      0, ownnode, peer, NULL, peer_caps, bc_sndlink,
530 			      NULL, inputq, namedq, link))
531 		return false;
532 
533 	l = *link;
534 	strcpy(l->name, tipc_bclink_name);
535 	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
536 	tipc_link_reset(l);
537 	l->state = LINK_RESET;
538 	l->ackers = 0;
539 	l->bc_rcvlink = l;
540 
541 	/* Broadcast send link is always up */
542 	if (link_is_bc_sndlink(l))
543 		l->state = LINK_ESTABLISHED;
544 
545 	/* Disable replicast if even a single peer doesn't support it */
546 	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
547 		tipc_bcast_disable_rcast(net);
548 
549 	return true;
550 }
551 
552 /**
553  * tipc_link_fsm_evt - link finite state machine
554  * @l: pointer to link
555  * @evt: state machine event to be processed
556  */
557 int tipc_link_fsm_evt(struct tipc_link *l, int evt)
558 {
559 	int rc = 0;
560 	int old_state = l->state;
561 
562 	switch (l->state) {
563 	case LINK_RESETTING:
564 		switch (evt) {
565 		case LINK_PEER_RESET_EVT:
566 			l->state = LINK_PEER_RESET;
567 			break;
568 		case LINK_RESET_EVT:
569 			l->state = LINK_RESET;
570 			break;
571 		case LINK_FAILURE_EVT:
572 		case LINK_FAILOVER_BEGIN_EVT:
573 		case LINK_ESTABLISH_EVT:
574 		case LINK_FAILOVER_END_EVT:
575 		case LINK_SYNCH_BEGIN_EVT:
576 		case LINK_SYNCH_END_EVT:
577 		default:
578 			goto illegal_evt;
579 		}
580 		break;
581 	case LINK_RESET:
582 		switch (evt) {
583 		case LINK_PEER_RESET_EVT:
584 			l->state = LINK_ESTABLISHING;
585 			break;
586 		case LINK_FAILOVER_BEGIN_EVT:
587 			l->state = LINK_FAILINGOVER;
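			/* fall through */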
588 		case LINK_FAILURE_EVT:
589 		case LINK_RESET_EVT:
590 		case LINK_ESTABLISH_EVT:
591 		case LINK_FAILOVER_END_EVT:
592 			break;
593 		case LINK_SYNCH_BEGIN_EVT:
594 		case LINK_SYNCH_END_EVT:
595 		default:
596 			goto illegal_evt;
597 		}
598 		break;
599 	case LINK_PEER_RESET:
600 		switch (evt) {
601 		case LINK_RESET_EVT:
602 			l->state = LINK_ESTABLISHING;
603 			break;
604 		case LINK_PEER_RESET_EVT:
605 		case LINK_ESTABLISH_EVT:
606 		case LINK_FAILURE_EVT:
607 			break;
608 		case LINK_SYNCH_BEGIN_EVT:
609 		case LINK_SYNCH_END_EVT:
610 		case LINK_FAILOVER_BEGIN_EVT:
611 		case LINK_FAILOVER_END_EVT:
612 		default:
613 			goto illegal_evt;
614 		}
615 		break;
616 	case LINK_FAILINGOVER:
617 		switch (evt) {
618 		case LINK_FAILOVER_END_EVT:
619 			l->state = LINK_RESET;
620 			break;
621 		case LINK_PEER_RESET_EVT:
622 		case LINK_RESET_EVT:
623 		case LINK_ESTABLISH_EVT:
624 		case LINK_FAILURE_EVT:
625 			break;
626 		case LINK_FAILOVER_BEGIN_EVT:
627 		case LINK_SYNCH_BEGIN_EVT:
628 		case LINK_SYNCH_END_EVT:
629 		default:
630 			goto illegal_evt;
631 		}
632 		break;
633 	case LINK_ESTABLISHING:
634 		switch (evt) {
635 		case LINK_ESTABLISH_EVT:
636 			l->state = LINK_ESTABLISHED;
637 			break;
638 		case LINK_FAILOVER_BEGIN_EVT:
639 			l->state = LINK_FAILINGOVER;
640 			break;
641 		case LINK_RESET_EVT:
642 			l->state = LINK_RESET;
643 			break;
644 		case LINK_FAILURE_EVT:
645 		case LINK_PEER_RESET_EVT:
646 		case LINK_SYNCH_BEGIN_EVT:
647 		case LINK_FAILOVER_END_EVT:
648 			break;
649 		case LINK_SYNCH_END_EVT:
650 		default:
651 			goto illegal_evt;
652 		}
653 		break;
654 	case LINK_ESTABLISHED:
655 		switch (evt) {
656 		case LINK_PEER_RESET_EVT:
657 			l->state = LINK_PEER_RESET;
658 			rc |= TIPC_LINK_DOWN_EVT;
659 			break;
660 		case LINK_FAILURE_EVT:
661 			l->state = LINK_RESETTING;
662 			rc |= TIPC_LINK_DOWN_EVT;
663 			break;
664 		case LINK_RESET_EVT:
665 			l->state = LINK_RESET;
666 			break;
667 		case LINK_ESTABLISH_EVT:
668 		case LINK_SYNCH_END_EVT:
669 			break;
670 		case LINK_SYNCH_BEGIN_EVT:
671 			l->state = LINK_SYNCHING;
672 			break;
673 		case LINK_FAILOVER_BEGIN_EVT:
674 		case LINK_FAILOVER_END_EVT:
675 		default:
676 			goto illegal_evt;
677 		}
678 		break;
679 	case LINK_SYNCHING:
680 		switch (evt) {
681 		case LINK_PEER_RESET_EVT:
682 			l->state = LINK_PEER_RESET;
683 			rc |= TIPC_LINK_DOWN_EVT;
684 			break;
685 		case LINK_FAILURE_EVT:
686 			l->state = LINK_RESETTING;
687 			rc |= TIPC_LINK_DOWN_EVT;
688 			break;
689 		case LINK_RESET_EVT:
690 			l->state = LINK_RESET;
691 			break;
692 		case LINK_ESTABLISH_EVT:
693 		case LINK_SYNCH_BEGIN_EVT:
694 			break;
695 		case LINK_SYNCH_END_EVT:
696 			l->state = LINK_ESTABLISHED;
697 			break;
698 		case LINK_FAILOVER_BEGIN_EVT:
699 		case LINK_FAILOVER_END_EVT:
700 		default:
701 			goto illegal_evt;
702 		}
703 		break;
704 	default:
705 		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
706 	}
707 	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
708 	return rc;
709 illegal_evt:
710 	pr_err("Illegal FSM event %x in state %x on link %s\n",
711 	       evt, l->state, l->name);
712 	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
713 	return rc;
714 }
715 
716 /* link_profile_stats - update statistical profiling of traffic
717  */
718 static void link_profile_stats(struct tipc_link *l)
719 {
720 	struct sk_buff *skb;
721 	struct tipc_msg *msg;
722 	int length;
723 
724 	/* Update counters used in statistical profiling of send traffic */
725 	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
726 	l->stats.queue_sz_counts++;
727 
728 	skb = skb_peek(&l->transmq);
729 	if (!skb)
730 		return;
731 	msg = buf_msg(skb);
732 	length = msg_size(msg);
733 
734 	if (msg_user(msg) == MSG_FRAGMENTER) {
735 		if (msg_type(msg) != FIRST_FRAGMENT)
736 			return;
737 		length = msg_size(msg_get_wrapped(msg));
738 	}
739 	l->stats.msg_lengths_total += length;
740 	l->stats.msg_length_counts++;
741 	if (length <= 64)
742 		l->stats.msg_length_profile[0]++;
743 	else if (length <= 256)
744 		l->stats.msg_length_profile[1]++;
745 	else if (length <= 1024)
746 		l->stats.msg_length_profile[2]++;
747 	else if (length <= 4096)
748 		l->stats.msg_length_profile[3]++;
749 	else if (length <= 16384)
750 		l->stats.msg_length_profile[4]++;
751 	else if (length <= 32768)
752 		l->stats.msg_length_profile[5]++;
753 	else
754 		l->stats.msg_length_profile[6]++;
755 }
756 
757 /**
758  * tipc_link_too_silent - check if link is "too silent"
759  * @l: tipc link to be checked
760  *
761  * Returns true if the link 'silent_intv_cnt' is about to reach the
762  * 'abort_limit' value, otherwise false
763  */
764 bool tipc_link_too_silent(struct tipc_link *l)
765 {
766 	return (l->silent_intv_cnt + 2 > l->abort_limit);
767 }
768 
769 /* tipc_link_timeout - perform periodic task as instructed from node timeout
770  */
771 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
772 {
773 	int mtyp = 0;
774 	int rc = 0;
775 	bool state = false;
776 	bool probe = false;
777 	bool setup = false;
778 	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
779 	u16 bc_acked = l->bc_rcvlink->acked;
780 	struct tipc_mon_state *mstate = &l->mon_state;
781 
782 	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
783 	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
784 	switch (l->state) {
785 	case LINK_ESTABLISHED:
786 	case LINK_SYNCHING:
787 		mtyp = STATE_MSG;
788 		link_profile_stats(l);
789 		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
790 		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
791 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
792 		state = bc_acked != bc_snt;
793 		state |= l->bc_rcvlink->rcv_unacked;
794 		state |= l->rcv_unacked;
795 		state |= !skb_queue_empty(&l->transmq);
796 		state |= !skb_queue_empty(&l->deferdq);
797 		probe = mstate->probing;
798 		probe |= l->silent_intv_cnt;
799 		if (probe || mstate->monitoring)
800 			l->silent_intv_cnt++;
801 		break;
802 	case LINK_RESET:
803 		setup = l->rst_cnt++ <= 4;
804 		setup |= !(l->rst_cnt % 16);
805 		mtyp = RESET_MSG;
806 		break;
807 	case LINK_ESTABLISHING:
808 		setup = true;
809 		mtyp = ACTIVATE_MSG;
810 		break;
811 	case LINK_PEER_RESET:
812 	case LINK_RESETTING:
813 	case LINK_FAILINGOVER:
814 		break;
815 	default:
816 		break;
817 	}
818 
819 	if (state || probe || setup)
820 		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
821 
822 	return rc;
823 }
824 
825 /**
826  * link_schedule_user - schedule a message sender for wakeup after congestion
827  * @l: congested link
828  * @hdr: header of message that is being sent
829  * Create pseudo msg to send back to user when congestion abates
830  */
831 static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
832 {
833 	u32 dnode = tipc_own_addr(l->net);
834 	u32 dport = msg_origport(hdr);
835 	struct sk_buff *skb;
836 
837 	/* Create and schedule wakeup pseudo message */
838 	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
839 			      dnode, l->addr, dport, 0, 0);
840 	if (!skb)
841 		return -ENOBUFS;
842 	msg_set_dest_droppable(buf_msg(skb), true);
843 	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
844 	skb_queue_tail(&l->wakeupq, skb);
845 	l->stats.link_congs++;
846 	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
847 	return -ELINKCONG;
848 }
849 
850 /**
851  * link_prepare_wakeup - prepare users for wakeup after congestion
852  * @l: congested link
853  * Wake up a number of waiting users, as permitted by available space
854  * in the send queue
855  */
856 static void link_prepare_wakeup(struct tipc_link *l)
857 {
858 	struct sk_buff *skb, *tmp;
859 	int imp, i = 0;
860 
861 	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
862 		imp = TIPC_SKB_CB(skb)->chain_imp;
863 		if (l->backlog[imp].len < l->backlog[imp].limit) {
864 			skb_unlink(skb, &l->wakeupq);
865 			skb_queue_tail(l->inputq, skb);
866 		} else if (i++ > 10) {
867 			break;
868 		}
869 	}
870 }
871 
872 void tipc_link_reset(struct tipc_link *l)
873 {
874 	struct sk_buff_head list;
875 
876 	__skb_queue_head_init(&list);
877 
878 	l->in_session = false;
879 	/* Force re-synch of peer session number before establishing */
880 	l->peer_session--;
881 	l->session++;
882 	l->mtu = l->advertised_mtu;
883 
884 	spin_lock_bh(&l->wakeupq.lock);
885 	skb_queue_splice_init(&l->wakeupq, &list);
886 	spin_unlock_bh(&l->wakeupq.lock);
887 
888 	spin_lock_bh(&l->inputq->lock);
889 	skb_queue_splice_init(&list, l->inputq);
890 	spin_unlock_bh(&l->inputq->lock);
891 
892 	__skb_queue_purge(&l->transmq);
893 	__skb_queue_purge(&l->deferdq);
894 	__skb_queue_purge(&l->backlogq);
895 	__skb_queue_purge(&l->failover_deferdq);
896 	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
897 	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
898 	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
899 	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
900 	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
901 	kfree_skb(l->reasm_buf);
902 	kfree_skb(l->failover_reasm_skb);
903 	l->reasm_buf = NULL;
904 	l->failover_reasm_skb = NULL;
905 	l->rcv_unacked = 0;
906 	l->snd_nxt = 1;
907 	l->rcv_nxt = 1;
908 	l->snd_nxt_state = 1;
909 	l->rcv_nxt_state = 1;
910 	l->acked = 0;
911 	l->silent_intv_cnt = 0;
912 	l->rst_cnt = 0;
913 	l->stale_cnt = 0;
914 	l->bc_peer_is_up = false;
915 	memset(&l->mon_state, 0, sizeof(l->mon_state));
916 	tipc_link_reset_stats(l);
917 }
918 
919 /**
920  * tipc_link_xmit(): enqueue buffer list according to queue situation
921  * @link: link to use
922  * @list: chain of buffers containing message
923  * @xmitq: returned list of packets to be sent by caller
924  *
925  * Consumes the buffer chain.
926  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
927  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
928  */
929 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
930 		   struct sk_buff_head *xmitq)
931 {
932 	struct tipc_msg *hdr = buf_msg(skb_peek(list));
933 	unsigned int maxwin = l->window;
934 	int imp = msg_importance(hdr);
935 	unsigned int mtu = l->mtu;
936 	u16 ack = l->rcv_nxt - 1;
937 	u16 seqno = l->snd_nxt;
938 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
939 	struct sk_buff_head *transmq = &l->transmq;
940 	struct sk_buff_head *backlogq = &l->backlogq;
941 	struct sk_buff *skb, *_skb, *bskb;
942 	int pkt_cnt = skb_queue_len(list);
943 	int rc = 0;
944 
945 	if (unlikely(msg_size(hdr) > mtu)) {
946 		skb_queue_purge(list);
947 		return -EMSGSIZE;
948 	}
949 
950 	/* Allow oversubscription of one data msg per source at congestion */
951 	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
952 		if (imp == TIPC_SYSTEM_IMPORTANCE) {
953 			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
954 			return -ENOBUFS;
955 		}
956 		rc = link_schedule_user(l, hdr);
957 	}
958 
959 	if (pkt_cnt > 1) {
960 		l->stats.sent_fragmented++;
961 		l->stats.sent_fragments += pkt_cnt;
962 	}
963 
964 	/* Prepare each packet for sending, and add to relevant queue: */
965 	while (skb_queue_len(list)) {
966 		skb = skb_peek(list);
967 		hdr = buf_msg(skb);
968 		msg_set_seqno(hdr, seqno);
969 		msg_set_ack(hdr, ack);
970 		msg_set_bcast_ack(hdr, bc_ack);
971 
972 		if (likely(skb_queue_len(transmq) < maxwin)) {
973 			_skb = skb_clone(skb, GFP_ATOMIC);
974 			if (!_skb) {
975 				skb_queue_purge(list);
976 				return -ENOBUFS;
977 			}
978 			__skb_dequeue(list);
979 			__skb_queue_tail(transmq, skb);
980 			/* next retransmit attempt */
981 			if (link_is_bc_sndlink(l))
982 				TIPC_SKB_CB(skb)->nxt_retr =
983 					jiffies + TIPC_BC_RETR_LIM;
984 			__skb_queue_tail(xmitq, _skb);
985 			TIPC_SKB_CB(skb)->ackers = l->ackers;
986 			l->rcv_unacked = 0;
987 			l->stats.sent_pkts++;
988 			seqno++;
989 			continue;
990 		}
991 		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
992 			kfree_skb(__skb_dequeue(list));
993 			l->stats.sent_bundled++;
994 			continue;
995 		}
996 		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
997 			kfree_skb(__skb_dequeue(list));
998 			__skb_queue_tail(backlogq, bskb);
999 			l->backlog[msg_importance(buf_msg(bskb))].len++;
1000 			l->stats.sent_bundled++;
1001 			l->stats.sent_bundles++;
1002 			continue;
1003 		}
1004 		l->backlog[imp].len += skb_queue_len(list);
1005 		skb_queue_splice_tail_init(list, backlogq);
1006 	}
1007 	l->snd_nxt = seqno;
1008 	return rc;
1009 }
1010 
1011 static void tipc_link_advance_backlog(struct tipc_link *l,
1012 				      struct sk_buff_head *xmitq)
1013 {
1014 	struct sk_buff *skb, *_skb;
1015 	struct tipc_msg *hdr;
1016 	u16 seqno = l->snd_nxt;
1017 	u16 ack = l->rcv_nxt - 1;
1018 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1019 
1020 	while (skb_queue_len(&l->transmq) < l->window) {
1021 		skb = skb_peek(&l->backlogq);
1022 		if (!skb)
1023 			break;
1024 		_skb = skb_clone(skb, GFP_ATOMIC);
1025 		if (!_skb)
1026 			break;
1027 		__skb_dequeue(&l->backlogq);
1028 		hdr = buf_msg(skb);
1029 		l->backlog[msg_importance(hdr)].len--;
1030 		__skb_queue_tail(&l->transmq, skb);
1031 		/* next retransmit attempt */
1032 		if (link_is_bc_sndlink(l))
1033 			TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;
1034 
1035 		__skb_queue_tail(xmitq, _skb);
1036 		TIPC_SKB_CB(skb)->ackers = l->ackers;
1037 		msg_set_seqno(hdr, seqno);
1038 		msg_set_ack(hdr, ack);
1039 		msg_set_bcast_ack(hdr, bc_ack);
1040 		l->rcv_unacked = 0;
1041 		l->stats.sent_pkts++;
1042 		seqno++;
1043 	}
1044 	l->snd_nxt = seqno;
1045 }
1046 
1047 static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
1048 {
1049 	struct tipc_msg *hdr = buf_msg(skb);
1050 
1051 	pr_warn("Retransmission failure on link <%s>\n", l->name);
1052 	link_print(l, "State of link ");
1053 	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1054 		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
1055 	pr_info("sqno %u, prev: %x, src: %x\n",
1056 		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
1057 }
1058 
1059 /* tipc_link_retrans() - retransmit one or more packets
1060  * @l: the link to transmit on
1061  * @r: the receiving link ordering the retransmit. Same as l if unicast
1062  * @from: retransmit from (inclusive) this sequence number
1063  * @to: retransmit to (inclusive) this sequence number
1064  * @xmitq: queue for accumulating the retransmitted packets
1065  */
1066 static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
1067 			     u16 from, u16 to, struct sk_buff_head *xmitq)
1068 {
1069 	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
1070 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1071 	u16 ack = l->rcv_nxt - 1;
1072 	struct tipc_msg *hdr;
1073 
1074 	if (!skb)
1075 		return 0;
1076 	if (less(to, from))
1077 		return 0;
1078 
1079 	trace_tipc_link_retrans(r, from, to, &l->transmq);
1080 	/* Detect repeated retransmit failures on same packet */
1081 	if (r->prev_from != from) {
1082 		r->prev_from = from;
1083 		r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
1084 		r->stale_cnt = 0;
1085 	} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
1086 		link_retransmit_failure(l, skb);
1087 		trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1088 		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1089 		trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1090 		if (link_is_bc_sndlink(l))
1091 			return TIPC_LINK_DOWN_EVT;
1092 		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1093 	}
1094 
1095 	skb_queue_walk(&l->transmq, skb) {
1096 		hdr = buf_msg(skb);
1097 		if (less(msg_seqno(hdr), from))
1098 			continue;
1099 		if (more(msg_seqno(hdr), to))
1100 			break;
1101 		if (link_is_bc_sndlink(l)) {
1102 			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1103 				continue;
1104 			TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;
1105 		}
1106 		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1107 		if (!_skb)
1108 			return 0;
1109 		hdr = buf_msg(_skb);
1110 		msg_set_ack(hdr, ack);
1111 		msg_set_bcast_ack(hdr, bc_ack);
1112 		_skb->priority = TC_PRIO_CONTROL;
1113 		__skb_queue_tail(xmitq, _skb);
1114 		l->stats.retransmitted++;
1115 	}
1116 	return 0;
1117 }
1118 
1119 /* tipc_data_input - deliver data and name distr msgs to upper layer
1120  *
1121  * Consumes buffer if message is of the right type
1122  * Node lock must be held
1123  */
1124 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1125 			    struct sk_buff_head *inputq)
1126 {
1127 	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
1128 	struct tipc_msg *hdr = buf_msg(skb);
1129 
1130 	switch (msg_user(hdr)) {
1131 	case TIPC_LOW_IMPORTANCE:
1132 	case TIPC_MEDIUM_IMPORTANCE:
1133 	case TIPC_HIGH_IMPORTANCE:
1134 	case TIPC_CRITICAL_IMPORTANCE:
1135 		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
1136 			skb_queue_tail(mc_inputq, skb);
1137 			return true;
1138 		}
1139 		/* fall through */
1140 	case CONN_MANAGER:
1141 		skb_queue_tail(inputq, skb);
1142 		return true;
1143 	case GROUP_PROTOCOL:
1144 		skb_queue_tail(mc_inputq, skb);
1145 		return true;
1146 	case NAME_DISTRIBUTOR:
1147 		l->bc_rcvlink->state = LINK_ESTABLISHED;
1148 		skb_queue_tail(l->namedq, skb);
1149 		return true;
1150 	case MSG_BUNDLER:
1151 	case TUNNEL_PROTOCOL:
1152 	case MSG_FRAGMENTER:
1153 	case BCAST_PROTOCOL:
1154 		return false;
1155 	default:
1156 		pr_warn("Dropping received illegal msg type\n");
1157 		kfree_skb(skb);
1158 		return true;
1159 	}
1160 }
1161 
1162 /* tipc_link_input - process packet that has passed link protocol check
1163  *
1164  * Consumes buffer
1165  */
1166 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1167 			   struct sk_buff_head *inputq,
1168 			   struct sk_buff **reasm_skb)
1169 {
1170 	struct tipc_msg *hdr = buf_msg(skb);
1171 	struct sk_buff *iskb;
1172 	struct sk_buff_head tmpq;
1173 	int usr = msg_user(hdr);
1174 	int pos = 0;
1175 
1176 	if (usr == MSG_BUNDLER) {
1177 		skb_queue_head_init(&tmpq);
1178 		l->stats.recv_bundles++;
1179 		l->stats.recv_bundled += msg_msgcnt(hdr);
1180 		while (tipc_msg_extract(skb, &iskb, &pos))
1181 			tipc_data_input(l, iskb, &tmpq);
1182 		tipc_skb_queue_splice_tail(&tmpq, inputq);
1183 		return 0;
1184 	} else if (usr == MSG_FRAGMENTER) {
1185 		l->stats.recv_fragments++;
1186 		if (tipc_buf_append(reasm_skb, &skb)) {
1187 			l->stats.recv_fragmented++;
1188 			tipc_data_input(l, skb, inputq);
1189 		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1190 			pr_warn_ratelimited("Unable to build fragment list\n");
1191 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1192 		}
1193 		return 0;
1194 	} else if (usr == BCAST_PROTOCOL) {
1195 		tipc_bcast_lock(l->net);
1196 		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1197 		tipc_bcast_unlock(l->net);
1198 	}
1199 
1200 	kfree_skb(skb);
1201 	return 0;
1202 }
1203 
1204 /* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
1205  *			 inner message along with the ones in the old link's
1206  *			 deferdq
1207  * @l: tunnel link
1208  * @skb: TUNNEL_PROTOCOL message
1209  * @inputq: queue to put messages ready for delivery
1210  */
1211 static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
1212 			     struct sk_buff_head *inputq)
1213 {
1214 	struct sk_buff **reasm_skb = &l->failover_reasm_skb;
1215 	struct sk_buff_head *fdefq = &l->failover_deferdq;
1216 	struct tipc_msg *hdr = buf_msg(skb);
1217 	struct sk_buff *iskb;
1218 	int ipos = 0;
1219 	int rc = 0;
1220 	u16 seqno;
1221 
1222 	/* SYNCH_MSG */
1223 	if (msg_type(hdr) == SYNCH_MSG)
1224 		goto drop;
1225 
1226 	/* FAILOVER_MSG */
1227 	if (!tipc_msg_extract(skb, &iskb, &ipos)) {
1228 		pr_warn_ratelimited("Cannot extract FAILOVER_MSG, defq: %d\n",
1229 				    skb_queue_len(fdefq));
1230 		return rc;
1231 	}
1232 
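	/* Each extracted packet is matched against l->drop_point: seqnos
	 * below it were already received on the failed link and are dropped,
	 * the packet at drop_point is delivered, and anything beyond it is
	 * parked in the failover deferdq until the gap is filled.
	 */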
1233 	do {
1234 		seqno = buf_seqno(iskb);
1235 
1236 		if (unlikely(less(seqno, l->drop_point))) {
1237 			kfree_skb(iskb);
1238 			continue;
1239 		}
1240 
1241 		if (unlikely(seqno != l->drop_point)) {
1242 			__tipc_skb_queue_sorted(fdefq, seqno, iskb);
1243 			continue;
1244 		}
1245 
1246 		l->drop_point++;
1247 
1248 		if (!tipc_data_input(l, iskb, inputq))
1249 			rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
1250 		if (unlikely(rc))
1251 			break;
1252 	} while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
1253 
1254 drop:
1255 	kfree_skb(skb);
1256 	return rc;
1257 }
1258 
1259 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1260 {
1261 	bool released = false;
1262 	struct sk_buff *skb, *tmp;
1263 
1264 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1265 		if (more(buf_seqno(skb), acked))
1266 			break;
1267 		__skb_unlink(skb, &l->transmq);
1268 		kfree_skb(skb);
1269 		released = true;
1270 	}
1271 	return released;
1272 }
1273 
1274 /* tipc_build_gap_ack_blks - build Gap ACK blocks
1275  * @l: tipc link on which data may have arrived with gaps in sequence
1276  * @data: data buffer to store the Gap ACK blocks after built
1277  *
1278  * Returns the actual size (in bytes) of the Gap ACK blocks built in @data
1279  */
1280 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data)
1281 {
1282 	struct sk_buff *skb = skb_peek(&l->deferdq);
1283 	struct tipc_gap_ack_blks *ga = data;
1284 	u16 len, expect, seqno = 0;
1285 	u8 n = 0;
1286 
1287 	if (!skb)
1288 		goto exit;
1289 
1290 	expect = buf_seqno(skb);
1291 	skb_queue_walk(&l->deferdq, skb) {
1292 		seqno = buf_seqno(skb);
1293 		if (unlikely(more(seqno, expect))) {
1294 			ga->gacks[n].ack = htons(expect - 1);
1295 			ga->gacks[n].gap = htons(seqno - expect);
1296 			if (++n >= MAX_GAP_ACK_BLKS) {
1297 				pr_info_ratelimited("Too few Gap ACK blocks!\n");
1298 				goto exit;
1299 			}
1300 		} else if (unlikely(less(seqno, expect))) {
1301 			pr_warn("Unexpected skb in deferdq!\n");
1302 			continue;
1303 		}
1304 		expect = seqno + 1;
1305 	}
1306 
1307 	/* last block */
1308 	ga->gacks[n].ack = htons(seqno);
1309 	ga->gacks[n].gap = 0;
1310 	n++;
1311 
1312 exit:
1313 	len = tipc_gap_ack_blks_sz(n);
1314 	ga->len = htons(len);
1315 	ga->gack_cnt = n;
1316 	return len;
1317 }
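/* Example of the encoding above: if the deferred queue holds seqnos
 * {105, 106, 110}, the walk produces one gap block {ack = 106, gap = 3}
 * followed by the closing block {ack = 110, gap = 0}, i.e. deferred packets
 * up to 106 and then 110 are held while 107-109 are still missing. The gap
 * before 105 itself is reported via the seq_gap field of the STATE message
 * (see tipc_link_build_proto_msg()) rather than here.
 */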
1318 
1319 /* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
1320  *			       acked packets, also doing retransmissions if
1321  *			       gaps found
1322  * @l: tipc link with transmq queue to be advanced
1323  * @acked: seqno of last packet acked by peer without any gaps before
1324  * @gap: # of gap packets
1325  * @ga: buffer pointer to Gap ACK blocks from peer
1326  * @xmitq: queue for accumulating the retransmitted packets if any
1327  */
1328 static void tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
1329 				      struct tipc_gap_ack_blks *ga,
1330 				      struct sk_buff_head *xmitq)
1331 {
1332 	struct sk_buff *skb, *_skb, *tmp;
1333 	struct tipc_msg *hdr;
1334 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1335 	u16 ack = l->rcv_nxt - 1;
1336 	u16 seqno;
1337 	u16 n = 0;
1338 
1339 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1340 		seqno = buf_seqno(skb);
1341 
1342 next_gap_ack:
1343 		if (less_eq(seqno, acked)) {
1344 			/* release skb */
1345 			__skb_unlink(skb, &l->transmq);
1346 			kfree_skb(skb);
1347 		} else if (less_eq(seqno, acked + gap)) {
1348 			/* retransmit skb */
1349 			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1350 				continue;
1351 			TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
1352 
1353 			_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1354 			if (!_skb)
1355 				continue;
1356 			hdr = buf_msg(_skb);
1357 			msg_set_ack(hdr, ack);
1358 			msg_set_bcast_ack(hdr, bc_ack);
1359 			_skb->priority = TC_PRIO_CONTROL;
1360 			__skb_queue_tail(xmitq, _skb);
1361 			l->stats.retransmitted++;
1362 		} else {
1363 			/* retry with Gap ACK blocks if any */
1364 			if (!ga || n >= ga->gack_cnt)
1365 				break;
1366 			acked = ntohs(ga->gacks[n].ack);
1367 			gap = ntohs(ga->gacks[n].gap);
1368 			n++;
1369 			goto next_gap_ack;
1370 		}
1371 	}
1372 }
1373 
1374 /* tipc_link_build_state_msg: prepare link state message for transmission
1375  *
1376  * Note that sending of broadcast ack is coordinated among nodes, to reduce
1377  * risk of ack storms towards the sender
1378  */
1379 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1380 {
1381 	if (!l)
1382 		return 0;
1383 
1384 	/* Broadcast ACK must be sent via a unicast link => defer to caller */
1385 	if (link_is_bc_rcvlink(l)) {
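		/* Only respond when the low nibble of rcv_nxt is the bitwise
		 * complement of this node's low address nibble, i.e. roughly
		 * one node in sixteen sends a broadcast ack for any given
		 * packet.
		 */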
1386 		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1387 			return 0;
1388 		l->rcv_unacked = 0;
1389 
1390 		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1391 		l->snd_nxt = l->rcv_nxt;
1392 		return TIPC_LINK_SND_STATE;
1393 	}
1394 
1395 	/* Unicast ACK */
1396 	l->rcv_unacked = 0;
1397 	l->stats.sent_acks++;
1398 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1399 	return 0;
1400 }
1401 
1402 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1403  */
1404 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1405 {
1406 	int mtyp = RESET_MSG;
1407 	struct sk_buff *skb;
1408 
1409 	if (l->state == LINK_ESTABLISHING)
1410 		mtyp = ACTIVATE_MSG;
1411 
1412 	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1413 
1414 	/* Inform peer that this endpoint is going down if applicable */
1415 	skb = skb_peek_tail(xmitq);
1416 	if (skb && (l->state == LINK_RESET))
1417 		msg_set_peer_stopping(buf_msg(skb), 1);
1418 }
1419 
1420 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1421  * Note that sending of broadcast NACK is coordinated among nodes, to
1422  * reduce the risk of NACK storms towards the sender
1423  */
1424 static int tipc_link_build_nack_msg(struct tipc_link *l,
1425 				    struct sk_buff_head *xmitq)
1426 {
1427 	u32 def_cnt = ++l->stats.deferred_recv;
1428 	u32 defq_len = skb_queue_len(&l->deferdq);
1429 	int match1, match2;
1430 
1431 	if (link_is_bc_rcvlink(l)) {
1432 		match1 = def_cnt & 0xf;
1433 		match2 = tipc_own_addr(l->net) & 0xf;
1434 		if (match1 == match2)
1435 			return TIPC_LINK_SND_STATE;
1436 		return 0;
1437 	}
1438 
1439 	if (defq_len >= 3 && !((defq_len - 3) % 16))
1440 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1441 	return 0;
1442 }
1443 
1444 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1445  * @l: the link that should handle the message
1446  * @skb: TIPC packet
1447  * @xmitq: queue to place packets to be sent after this call
1448  */
1449 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1450 		  struct sk_buff_head *xmitq)
1451 {
1452 	struct sk_buff_head *defq = &l->deferdq;
1453 	struct tipc_msg *hdr = buf_msg(skb);
1454 	u16 seqno, rcv_nxt, win_lim;
1455 	int rc = 0;
1456 
1457 	/* Verify and update link state */
1458 	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1459 		return tipc_link_proto_rcv(l, skb, xmitq);
1460 
1461 	/* Don't send probe at next timeout expiration */
1462 	l->silent_intv_cnt = 0;
1463 
1464 	do {
1465 		hdr = buf_msg(skb);
1466 		seqno = msg_seqno(hdr);
1467 		rcv_nxt = l->rcv_nxt;
1468 		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1469 
1470 		if (unlikely(!link_is_up(l))) {
1471 			if (l->state == LINK_ESTABLISHING)
1472 				rc = TIPC_LINK_UP_EVT;
1473 			goto drop;
1474 		}
1475 
1476 		/* Drop if outside receive window */
1477 		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1478 			l->stats.duplicates++;
1479 			goto drop;
1480 		}
1481 
1482 		/* Forward queues and wake up waiting users */
1483 		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1484 			l->stale_cnt = 0;
1485 			tipc_link_advance_backlog(l, xmitq);
1486 			if (unlikely(!skb_queue_empty(&l->wakeupq)))
1487 				link_prepare_wakeup(l);
1488 		}
1489 
1490 		/* Defer delivery if sequence gap */
1491 		if (unlikely(seqno != rcv_nxt)) {
1492 			__tipc_skb_queue_sorted(defq, seqno, skb);
1493 			rc |= tipc_link_build_nack_msg(l, xmitq);
1494 			break;
1495 		}
1496 
1497 		/* Deliver packet */
1498 		l->rcv_nxt++;
1499 		l->stats.recv_pkts++;
1500 
1501 		if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1502 			rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1503 		else if (!tipc_data_input(l, skb, l->inputq))
1504 			rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
1505 		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1506 			rc |= tipc_link_build_state_msg(l, xmitq);
1507 		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1508 			break;
1509 	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
1510 
1511 	return rc;
1512 drop:
1513 	kfree_skb(skb);
1514 	return rc;
1515 }
1516 
1517 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1518 				      bool probe_reply, u16 rcvgap,
1519 				      int tolerance, int priority,
1520 				      struct sk_buff_head *xmitq)
1521 {
1522 	struct tipc_link *bcl = l->bc_rcvlink;
1523 	struct sk_buff *skb;
1524 	struct tipc_msg *hdr;
1525 	struct sk_buff_head *dfq = &l->deferdq;
1526 	bool node_up = link_is_up(bcl);
1527 	struct tipc_mon_state *mstate = &l->mon_state;
1528 	int dlen = 0;
1529 	void *data;
1530 	u16 glen = 0;
1531 
1532 	/* Don't send protocol message during reset or link failover */
1533 	if (tipc_link_is_blocked(l))
1534 		return;
1535 
1536 	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1537 		return;
1538 
1539 	if (!skb_queue_empty(dfq))
1540 		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1541 
1542 	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1543 			      tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1544 			      l->addr, tipc_own_addr(l->net), 0, 0, 0);
1545 	if (!skb)
1546 		return;
1547 
1548 	hdr = buf_msg(skb);
1549 	data = msg_data(hdr);
1550 	msg_set_session(hdr, l->session);
1551 	msg_set_bearer_id(hdr, l->bearer_id);
1552 	msg_set_net_plane(hdr, l->net_plane);
1553 	msg_set_next_sent(hdr, l->snd_nxt);
1554 	msg_set_ack(hdr, l->rcv_nxt - 1);
1555 	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1556 	msg_set_bc_ack_invalid(hdr, !node_up);
1557 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1558 	msg_set_link_tolerance(hdr, tolerance);
1559 	msg_set_linkprio(hdr, priority);
1560 	msg_set_redundant_link(hdr, node_up);
1561 	msg_set_seq_gap(hdr, 0);
1562 	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1563 
1564 	if (mtyp == STATE_MSG) {
1565 		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1566 			msg_set_seqno(hdr, l->snd_nxt_state++);
1567 		msg_set_seq_gap(hdr, rcvgap);
1568 		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1569 		msg_set_probe(hdr, probe);
1570 		msg_set_is_keepalive(hdr, probe || probe_reply);
1571 		if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1572 			glen = tipc_build_gap_ack_blks(l, data);
1573 		tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1574 		msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1575 		skb_trim(skb, INT_H_SIZE + glen + dlen);
1576 		l->stats.sent_states++;
1577 		l->rcv_unacked = 0;
1578 	} else {
1579 		/* RESET_MSG or ACTIVATE_MSG */
1580 		if (mtyp == ACTIVATE_MSG) {
1581 			msg_set_dest_session_valid(hdr, 1);
1582 			msg_set_dest_session(hdr, l->peer_session);
1583 		}
1584 		msg_set_max_pkt(hdr, l->advertised_mtu);
1585 		strcpy(data, l->if_name);
1586 		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1587 		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1588 	}
1589 	if (probe)
1590 		l->stats.sent_probes++;
1591 	if (rcvgap)
1592 		l->stats.sent_nacks++;
1593 	skb->priority = TC_PRIO_CONTROL;
1594 	__skb_queue_tail(xmitq, skb);
1595 	trace_tipc_proto_build(skb, false, l->name);
1596 }
1597 
1598 void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1599 				    struct sk_buff_head *xmitq)
1600 {
1601 	u32 onode = tipc_own_addr(l->net);
1602 	struct tipc_msg *hdr, *ihdr;
1603 	struct sk_buff_head tnlq;
1604 	struct sk_buff *skb;
1605 	u32 dnode = l->addr;
1606 
1607 	skb_queue_head_init(&tnlq);
1608 	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1609 			      INT_H_SIZE, BASIC_H_SIZE,
1610 			      dnode, onode, 0, 0, 0);
1611 	if (!skb) {
1612 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1613 		return;
1614 	}
1615 
1616 	hdr = buf_msg(skb);
1617 	msg_set_msgcnt(hdr, 1);
1618 	msg_set_bearer_id(hdr, l->peer_bearer_id);
1619 
1620 	ihdr = (struct tipc_msg *)msg_data(hdr);
1621 	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1622 		      BASIC_H_SIZE, dnode);
1623 	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1624 	__skb_queue_tail(&tnlq, skb);
1625 	tipc_link_xmit(l, &tnlq, xmitq);
1626 }
1627 
1628 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1629  * with contents of the link's transmit and backlog queues.
1630  */
1631 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1632 			   int mtyp, struct sk_buff_head *xmitq)
1633 {
1634 	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1635 	struct sk_buff *skb, *tnlskb;
1636 	struct tipc_msg *hdr, tnlhdr;
1637 	struct sk_buff_head *queue = &l->transmq;
1638 	struct sk_buff_head tmpxq, tnlq;
1639 	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1640 
1641 	if (!tnl)
1642 		return;
1643 
1644 	skb_queue_head_init(&tnlq);
1645 	skb_queue_head_init(&tmpxq);
1646 
1647 	/* At least one packet required for safe algorithm => add dummy */
1648 	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1649 			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1650 			      0, 0, TIPC_ERR_NO_PORT);
1651 	if (!skb) {
1652 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1653 		return;
1654 	}
1655 	skb_queue_tail(&tnlq, skb);
1656 	tipc_link_xmit(l, &tnlq, &tmpxq);
1657 	__skb_queue_purge(&tmpxq);
1658 
1659 	/* Initialize reusable tunnel packet header */
1660 	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1661 		      mtyp, INT_H_SIZE, l->addr);
1662 	if (mtyp == SYNCH_MSG)
1663 		pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
1664 	else
1665 		pktcnt = skb_queue_len(&l->transmq);
1666 	pktcnt += skb_queue_len(&l->backlogq);
1667 	msg_set_msgcnt(&tnlhdr, pktcnt);
1668 	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1669 tnl:
1670 	/* Wrap each packet into a tunnel packet */
1671 	skb_queue_walk(queue, skb) {
1672 		hdr = buf_msg(skb);
1673 		if (queue == &l->backlogq)
1674 			msg_set_seqno(hdr, seqno++);
1675 		pktlen = msg_size(hdr);
1676 		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1677 		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
1678 		if (!tnlskb) {
1679 			pr_warn("%sunable to send packet\n", link_co_err);
1680 			return;
1681 		}
1682 		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1683 		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1684 		__skb_queue_tail(&tnlq, tnlskb);
1685 	}
1686 	if (queue != &l->backlogq) {
1687 		queue = &l->backlogq;
1688 		goto tnl;
1689 	}
1690 
1691 	tipc_link_xmit(tnl, &tnlq, xmitq);
1692 
1693 	if (mtyp == FAILOVER_MSG) {
1694 		tnl->drop_point = l->rcv_nxt;
1695 		tnl->failover_reasm_skb = l->reasm_buf;
1696 		l->reasm_buf = NULL;
1697 
1698 		/* Failover the link's deferdq */
1699 		if (unlikely(!skb_queue_empty(fdefq))) {
1700 			pr_warn("Link failover deferdq not empty: %d!\n",
1701 				skb_queue_len(fdefq));
1702 			__skb_queue_purge(fdefq);
1703 		}
1704 		skb_queue_splice_init(&l->deferdq, fdefq);
1705 	}
1706 }
1707 
1708 /**
1709  * tipc_link_failover_prepare() - prepare tnl for link failover
1710  *
1711  * This is a special version of its precursor, tipc_link_tnl_prepare();
1712  * see tipc_node_link_failover() for details
1713  *
1714  * @l: failover link
1715  * @tnl: tunnel link
1716  * @xmitq: queue for messages to be xmited
1717  */
1718 void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
1719 				struct sk_buff_head *xmitq)
1720 {
1721 	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1722 
1723 	tipc_link_create_dummy_tnl_msg(tnl, xmitq);
1724 
1725 	/* This failover link endpoint was never established before,
1726 	 * so it has not received anything from peer.
1727 	 * Otherwise, it must be a normal failover situation or the
1728 	 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
1729 	 * would have to start over from scratch instead.
1730 	 */
1731 	WARN_ON(l && tipc_link_is_up(l));
1732 	tnl->drop_point = 1;
1733 	tnl->failover_reasm_skb = NULL;
1734 
1735 	/* Initiate the link's failover deferdq */
1736 	if (unlikely(!skb_queue_empty(fdefq))) {
1737 		pr_warn("Link failover deferdq not empty: %d!\n",
1738 			skb_queue_len(fdefq));
1739 		__skb_queue_purge(fdefq);
1740 	}
1741 }
1742 
1743 /* tipc_link_validate_msg(): validate message against current link state
1744  * Returns true if message should be accepted, otherwise false
1745  */
1746 bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
1747 {
1748 	u16 curr_session = l->peer_session;
1749 	u16 session = msg_session(hdr);
1750 	int mtyp = msg_type(hdr);
1751 
1752 	if (msg_user(hdr) != LINK_PROTOCOL)
1753 		return true;
1754 
1755 	switch (mtyp) {
1756 	case RESET_MSG:
1757 		if (!l->in_session)
1758 			return true;
1759 		/* Accept only RESET with new session number */
1760 		return more(session, curr_session);
1761 	case ACTIVATE_MSG:
1762 		if (!l->in_session)
1763 			return true;
1764 		/* Accept only ACTIVATE with new or current session number */
1765 		return !less(session, curr_session);
1766 	case STATE_MSG:
1767 		/* Accept only STATE with current session number */
1768 		if (!l->in_session)
1769 			return false;
1770 		if (session != curr_session)
1771 			return false;
1772 		/* Extra sanity check */
1773 		if (!link_is_up(l) && msg_ack(hdr))
1774 			return false;
1775 		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
1776 			return true;
1777 		/* Accept only STATE with new sequence number */
1778 		return !less(msg_seqno(hdr), l->rcv_nxt_state);
1779 	default:
1780 		return false;
1781 	}
1782 }
1783 
1784 /* tipc_link_proto_rcv(): receive link-level protocol message.
1785  * Note that the network plane id propagates through the network, and may
1786  * change at any time. The node with the lowest numerical id determines
1787  * the network plane.
1788  */
1789 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1790 			       struct sk_buff_head *xmitq)
1791 {
1792 	struct tipc_msg *hdr = buf_msg(skb);
1793 	struct tipc_gap_ack_blks *ga = NULL;
1794 	u16 rcvgap = 0;
1795 	u16 ack = msg_ack(hdr);
1796 	u16 gap = msg_seq_gap(hdr);
1797 	u16 peers_snd_nxt =  msg_next_sent(hdr);
1798 	u16 peers_tol = msg_link_tolerance(hdr);
1799 	u16 peers_prio = msg_linkprio(hdr);
1800 	u16 rcv_nxt = l->rcv_nxt;
1801 	u16 dlen = msg_data_sz(hdr);
1802 	int mtyp = msg_type(hdr);
1803 	bool reply = msg_probe(hdr);
1804 	u16 glen = 0;
1805 	void *data;
1806 	char *if_name;
1807 	int rc = 0;
1808 
1809 	trace_tipc_proto_rcv(skb, false, l->name);
1810 	if (tipc_link_is_blocked(l) || !xmitq)
1811 		goto exit;
1812 
1813 	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1814 		l->net_plane = msg_net_plane(hdr);
1815 
1816 	skb_linearize(skb);
1817 	hdr = buf_msg(skb);
1818 	data = msg_data(hdr);
1819 
1820 	if (!tipc_link_validate_msg(l, hdr)) {
1821 		trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
1822 		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
1823 		goto exit;
1824 	}
1825 
1826 	switch (mtyp) {
1827 	case RESET_MSG:
1828 	case ACTIVATE_MSG:
1829 		/* Complete own link name with peer's interface name */
1830 		if_name =  strrchr(l->name, ':') + 1;
1831 		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1832 			break;
1833 		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1834 			break;
1835 		strncpy(if_name, data, TIPC_MAX_IF_NAME);
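		/* e.g. (illustrative names/addresses) an own link name of
		 * "1.1.1:eth0-1.1.2:" has now been completed to
		 * "1.1.1:eth0-1.1.2:eth1", with "eth1" taken from the
		 * message data.
		 */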
1836 
1837 		/* Update own tolerance if peer indicates a non-zero value */
1838 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
1839 			l->tolerance = peers_tol;
1840 			l->bc_rcvlink->tolerance = peers_tol;
1841 		}
1842 		/* Update own priority if peer's priority is higher */
1843 		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1844 			l->priority = peers_prio;
1845 
1846 		/* If peer is going down we want full re-establish cycle */
1847 		if (msg_peer_stopping(hdr)) {
1848 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1849 			break;
1850 		}
1851 
1852 		/* If this endpoint was re-created while peer was ESTABLISHING
1853 		 * it doesn't know current session number. Force re-synch.
1854 		 */
1855 		if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
1856 		    l->session != msg_dest_session(hdr)) {
1857 			if (less(l->session, msg_dest_session(hdr)))
1858 				l->session = msg_dest_session(hdr) + 1;
1859 			break;
1860 		}
1861 
1862 		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1863 		if (mtyp == RESET_MSG || !link_is_up(l))
1864 			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1865 
1866 		/* ACTIVATE_MSG takes up link if it was already locally reset */
1867 		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
1868 			rc = TIPC_LINK_UP_EVT;
1869 
1870 		l->peer_session = msg_session(hdr);
1871 		l->in_session = true;
1872 		l->peer_bearer_id = msg_bearer_id(hdr);
1873 		if (l->mtu > msg_max_pkt(hdr))
1874 			l->mtu = msg_max_pkt(hdr);
1875 		break;
1876 
1877 	case STATE_MSG:
1878 		l->rcv_nxt_state = msg_seqno(hdr) + 1;
1879 
1880 		/* Update own tolerance if peer indicates a non-zero value */
1881 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
1882 			l->tolerance = peers_tol;
1883 			l->bc_rcvlink->tolerance = peers_tol;
1884 		}
1885 		/* Update own prio if peer indicates a different value */
1886 		if ((peers_prio != l->priority) &&
1887 		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
1888 			l->priority = peers_prio;
1889 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1890 		}
1891 
1892 		l->silent_intv_cnt = 0;
1893 		l->stats.recv_states++;
1894 		if (msg_probe(hdr))
1895 			l->stats.recv_probes++;
1896 
1897 		if (!link_is_up(l)) {
1898 			if (l->state == LINK_ESTABLISHING)
1899 				rc = TIPC_LINK_UP_EVT;
1900 			break;
1901 		}
1902 
1903 		/* Receive Gap ACK blocks from peer if any */
1904 		if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
1905 			ga = (struct tipc_gap_ack_blks *)data;
1906 			glen = ntohs(ga->len);
1907 			/* sanity check: if failed, ignore Gap ACK blocks */
1908 			if (glen != tipc_gap_ack_blks_sz(ga->gack_cnt))
1909 				ga = NULL;
1910 		}
1911 
1912 		tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
1913 			     &l->mon_state, l->bearer_id);
1914 
1915 		/* Send NACK if peer has sent pkts we haven't received yet */
1916 		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
1917 			rcvgap = peers_snd_nxt - l->rcv_nxt;
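		/* Illustrative: rcv_nxt = 100 and peers_snd_nxt = 103 give
		 * rcvgap = 3; the STATE_MSG built below then asks the peer to
		 * retransmit the three packets not yet received here.
		 */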
1918 		if (rcvgap || reply)
1919 			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
1920 						  rcvgap, 0, 0, xmitq);
1921 
1922 		tipc_link_advance_transmq(l, ack, gap, ga, xmitq);
1923 
1924 		/* If NACK, retransmit will now start at right position */
1925 		if (gap)
1926 			l->stats.recv_nacks++;
1927 
1928 		tipc_link_advance_backlog(l, xmitq);
1929 		if (unlikely(!skb_queue_empty(&l->wakeupq)))
1930 			link_prepare_wakeup(l);
1931 	}
1932 exit:
1933 	kfree_skb(skb);
1934 	return rc;
1935 }
1936 
1937 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1938  */
1939 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1940 					 u16 peers_snd_nxt,
1941 					 struct sk_buff_head *xmitq)
1942 {
1943 	struct sk_buff *skb;
1944 	struct tipc_msg *hdr;
1945 	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1946 	u16 ack = l->rcv_nxt - 1;
1947 	u16 gap_to = peers_snd_nxt - 1;
1948 
1949 	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
1950 			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
1951 	if (!skb)
1952 		return false;
1953 	hdr = buf_msg(skb);
1954 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1955 	msg_set_bcast_ack(hdr, ack);
1956 	msg_set_bcgap_after(hdr, ack);
1957 	if (dfrd_skb)
1958 		gap_to = buf_seqno(dfrd_skb) - 1;
1959 	msg_set_bcgap_to(hdr, gap_to);
1960 	msg_set_non_seq(hdr, bcast);
1961 	__skb_queue_tail(xmitq, skb);
1962 	return true;
1963 }
1964 
1965 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1966  *
1967  * Give a newly added peer node the sequence number where it should
1968  * start receiving and acking broadcast packets.
1969  */
1970 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1971 					struct sk_buff_head *xmitq)
1972 {
1973 	struct sk_buff_head list;
1974 
1975 	__skb_queue_head_init(&list);
1976 	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1977 		return;
1978 	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
1979 	tipc_link_xmit(l, &list, xmitq);
1980 }
1981 
1982 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
1983  */
1984 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1985 {
1986 	int mtyp = msg_type(hdr);
1987 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1988 
1989 	if (link_is_up(l))
1990 		return;
1991 
1992 	if (msg_user(hdr) == BCAST_PROTOCOL) {
1993 		l->rcv_nxt = peers_snd_nxt;
1994 		l->state = LINK_ESTABLISHED;
1995 		return;
1996 	}
1997 
1998 	if (l->peer_caps & TIPC_BCAST_SYNCH)
1999 		return;
2000 
2001 	if (msg_peer_node_is_up(hdr))
2002 		return;
2003 
2004 	/* Compatibility: accept older, less safe initial synch data */
2005 	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2006 		l->rcv_nxt = peers_snd_nxt;
2007 }
2008 
2009 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2010  */
2011 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2012 			  struct sk_buff_head *xmitq)
2013 {
2014 	struct tipc_link *snd_l = l->bc_sndlink;
2015 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2016 	u16 from = msg_bcast_ack(hdr) + 1;
2017 	u16 to = from + msg_bc_gap(hdr) - 1;
2018 	int rc = 0;
2019 
2020 	if (!link_is_up(l))
2021 		return rc;
2022 
2023 	if (!msg_peer_node_is_up(hdr))
2024 		return rc;
2025 
2026 	/* Open when peer acknowledges our bcast init msg (pkt #1) */
2027 	if (msg_ack(hdr))
2028 		l->bc_peer_is_up = true;
2029 
2030 	if (!l->bc_peer_is_up)
2031 		return rc;
2032 
2033 	l->stats.recv_nacks++;
2034 
2035 	/* Ignore if peers_snd_nxt goes beyond receive window */
2036 	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
2037 		return rc;
2038 
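	/* Illustrative: msg_bcast_ack(hdr) = 42 and msg_bc_gap(hdr) = 3 give
	 * from = 43 and to = 45, i.e. packets 43..45 are retransmitted from
	 * the broadcast send link below.
	 */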
2039 	rc = tipc_link_retrans(snd_l, l, from, to, xmitq);
2040 
2041 	l->snd_nxt = peers_snd_nxt;
2042 	if (link_bc_rcv_gap(l))
2043 		rc |= TIPC_LINK_SND_STATE;
2044 
2045 	/* Return now if sender supports nack via STATE messages */
2046 	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2047 		return rc;
2048 
2049 	/* Otherwise, be backwards compatible */
2050 
2051 	if (!more(peers_snd_nxt, l->rcv_nxt)) {
2052 		l->nack_state = BC_NACK_SND_CONDITIONAL;
2053 		return 0;
2054 	}
2055 
2056 	/* Don't NACK if one was recently sent or peeked */
2057 	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2058 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2059 		return 0;
2060 	}
2061 
2062 	/* Conditionally delay NACK sending until next synch rcv */
2063 	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2064 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2065 		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
2066 			return 0;
2067 	}
2068 
2069 	/* Send NACK now but suppress next one */
2070 	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2071 	l->nack_state = BC_NACK_SND_SUPPRESS;
2072 	return 0;
2073 }
2074 
2075 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
2076 			  struct sk_buff_head *xmitq)
2077 {
2078 	struct sk_buff *skb, *tmp;
2079 	struct tipc_link *snd_l = l->bc_sndlink;
2080 
2081 	if (!link_is_up(l) || !l->bc_peer_is_up)
2082 		return;
2083 
2084 	if (!more(acked, l->acked))
2085 		return;
2086 
2087 	trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq);
2088 	/* Skip over packets peer has already acked */
2089 	skb_queue_walk(&snd_l->transmq, skb) {
2090 		if (more(buf_seqno(skb), l->acked))
2091 			break;
2092 	}
2093 
2094 	/* Update/release the packets peer is acking now */
2095 	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
2096 		if (more(buf_seqno(skb), acked))
2097 			break;
2098 		if (!--TIPC_SKB_CB(skb)->ackers) {
2099 			__skb_unlink(skb, &snd_l->transmq);
2100 			kfree_skb(skb);
2101 		}
2102 	}
2103 	l->acked = acked;
2104 	tipc_link_advance_backlog(snd_l, xmitq);
2105 	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
2106 		link_prepare_wakeup(snd_l);
2107 }
2108 
2109 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
2110  * This function is kept for backwards compatibility, since
2111  * BCAST_PROTOCOL/STATE messages no longer occur from TIPC v2.5 onwards.
2112  */
2113 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2114 			  struct sk_buff_head *xmitq)
2115 {
2116 	struct tipc_msg *hdr = buf_msg(skb);
2117 	u32 dnode = msg_destnode(hdr);
2118 	int mtyp = msg_type(hdr);
2119 	u16 acked = msg_bcast_ack(hdr);
2120 	u16 from = acked + 1;
2121 	u16 to = msg_bcgap_to(hdr);
2122 	u16 peers_snd_nxt = to + 1;
2123 	int rc = 0;
2124 
2125 	kfree_skb(skb);
2126 
2127 	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2128 		return 0;
2129 
2130 	if (mtyp != STATE_MSG)
2131 		return 0;
2132 
2133 	if (dnode == tipc_own_addr(l->net)) {
2134 		tipc_link_bc_ack_rcv(l, acked, xmitq);
2135 		rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq);
2136 		l->stats.recv_nacks++;
2137 		return rc;
2138 	}
2139 
2140 	/* Msg for other node => suppress own NACK at next sync if applicable */
2141 	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2142 		l->nack_state = BC_NACK_SND_SUPPRESS;
2143 
2144 	return 0;
2145 }
2146 
2147 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
2148 {
2149 	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
2150 
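	/* Illustrative: win = 50 gives backlog limits 50/100/150/200 for the
	 * LOW..CRITICAL importance levels and win = 300 gives 300/600/900/1200,
	 * while the SYSTEM level limit depends only on the link MTU.
	 */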
2151 	l->window = win;
2152 	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
2153 	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
2154 	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
2155 	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
2156 	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
2157 }
2158 
2159 /**
2160  * tipc_link_reset_stats - reset link statistics
2161  * @l: pointer to link
2162  */
2163 void tipc_link_reset_stats(struct tipc_link *l)
2164 {
2165 	memset(&l->stats, 0, sizeof(l->stats));
2166 }
2167 
2168 static void link_print(struct tipc_link *l, const char *str)
2169 {
2170 	struct sk_buff *hskb = skb_peek(&l->transmq);
2171 	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
2172 	u16 tail = l->snd_nxt - 1;
2173 
2174 	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
2175 	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2176 		skb_queue_len(&l->transmq), head, tail,
2177 		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
2178 }
2179 
2180 /* Parse and validate nested (link) properties valid for media, bearer and link
2181  */
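/* Illustrative (the example values are assumed to lie inside the allowed
 * ranges): a properties nest carrying TIPC_NLA_PROP_TOL = 1500 and
 * TIPC_NLA_PROP_WIN = 50 parses successfully, whereas a TIPC_NLA_PROP_PRIO
 * value above TIPC_MAX_LINK_PRI is rejected with -EINVAL.
 */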
2182 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2183 {
2184 	int err;
2185 
2186 	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2187 					  tipc_nl_prop_policy, NULL);
2188 	if (err)
2189 		return err;
2190 
2191 	if (props[TIPC_NLA_PROP_PRIO]) {
2192 		u32 prio;
2193 
2194 		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2195 		if (prio > TIPC_MAX_LINK_PRI)
2196 			return -EINVAL;
2197 	}
2198 
2199 	if (props[TIPC_NLA_PROP_TOL]) {
2200 		u32 tol;
2201 
2202 		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2203 		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2204 			return -EINVAL;
2205 	}
2206 
2207 	if (props[TIPC_NLA_PROP_WIN]) {
2208 		u32 win;
2209 
2210 		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2211 		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
2212 			return -EINVAL;
2213 	}
2214 
2215 	return 0;
2216 }
2217 
2218 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2219 {
2220 	int i;
2221 	struct nlattr *stats;
2222 
2223 	struct nla_map {
2224 		u32 key;
2225 		u32 val;
2226 	};
2227 
2228 	struct nla_map map[] = {
2229 		{TIPC_NLA_STATS_RX_INFO, 0},
2230 		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2231 		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2232 		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2233 		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
2234 		{TIPC_NLA_STATS_TX_INFO, 0},
2235 		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2236 		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2237 		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2238 		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2239 		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2240 			s->msg_length_counts : 1},
2241 		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2242 		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2243 		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2244 		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2245 		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2246 		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2247 		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2248 		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2249 		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2250 		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
2251 		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2252 		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2253 		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2254 		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
2255 		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2256 		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2257 		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2258 		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2259 		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2260 		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2261 		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2262 		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2263 			(s->accu_queue_sz / s->queue_sz_counts) : 0}
2264 	};
2265 
2266 	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2267 	if (!stats)
2268 		return -EMSGSIZE;
2269 
2270 	for (i = 0; i <  ARRAY_SIZE(map); i++)
2271 		if (nla_put_u32(skb, map[i].key, map[i].val))
2272 			goto msg_full;
2273 
2274 	nla_nest_end(skb, stats);
2275 
2276 	return 0;
2277 msg_full:
2278 	nla_nest_cancel(skb, stats);
2279 
2280 	return -EMSGSIZE;
2281 }
2282 
2283 /* Caller should hold appropriate locks to protect the link */
2284 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2285 		       struct tipc_link *link, int nlflags)
2286 {
2287 	u32 self = tipc_own_addr(net);
2288 	struct nlattr *attrs;
2289 	struct nlattr *prop;
2290 	void *hdr;
2291 	int err;
2292 
2293 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2294 			  nlflags, TIPC_NL_LINK_GET);
2295 	if (!hdr)
2296 		return -EMSGSIZE;
2297 
2298 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2299 	if (!attrs)
2300 		goto msg_full;
2301 
2302 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2303 		goto attr_msg_full;
2304 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
2305 		goto attr_msg_full;
2306 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2307 		goto attr_msg_full;
2308 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
2309 		goto attr_msg_full;
2310 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
2311 		goto attr_msg_full;
2312 
2313 	if (tipc_link_is_up(link))
2314 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2315 			goto attr_msg_full;
2316 	if (link->active)
2317 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2318 			goto attr_msg_full;
2319 
2320 	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2321 	if (!prop)
2322 		goto attr_msg_full;
2323 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2324 		goto prop_msg_full;
2325 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2326 		goto prop_msg_full;
2327 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2328 			link->window))
2329 		goto prop_msg_full;
2330 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2331 		goto prop_msg_full;
2332 	nla_nest_end(msg->skb, prop);
2333 
2334 	err = __tipc_nl_add_stats(msg->skb, &link->stats);
2335 	if (err)
2336 		goto attr_msg_full;
2337 
2338 	nla_nest_end(msg->skb, attrs);
2339 	genlmsg_end(msg->skb, hdr);
2340 
2341 	return 0;
2342 
2343 prop_msg_full:
2344 	nla_nest_cancel(msg->skb, prop);
2345 attr_msg_full:
2346 	nla_nest_cancel(msg->skb, attrs);
2347 msg_full:
2348 	genlmsg_cancel(msg->skb, hdr);
2349 
2350 	return -EMSGSIZE;
2351 }
2352 
2353 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2354 				      struct tipc_stats *stats)
2355 {
2356 	int i;
2357 	struct nlattr *nest;
2358 
2359 	struct nla_map {
2360 		__u32 key;
2361 		__u32 val;
2362 	};
2363 
2364 	struct nla_map map[] = {
2365 		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2366 		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2367 		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2368 		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2369 		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2370 		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2371 		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2372 		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2373 		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2374 		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2375 		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2376 		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2377 		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2378 		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2379 		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2380 		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2381 		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2382 		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2383 		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2384 			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2385 	};
2386 
2387 	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2388 	if (!nest)
2389 		return -EMSGSIZE;
2390 
2391 	for (i = 0; i <  ARRAY_SIZE(map); i++)
2392 		if (nla_put_u32(skb, map[i].key, map[i].val))
2393 			goto msg_full;
2394 
2395 	nla_nest_end(skb, nest);
2396 
2397 	return 0;
2398 msg_full:
2399 	nla_nest_cancel(skb, nest);
2400 
2401 	return -EMSGSIZE;
2402 }
2403 
2404 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2405 {
2406 	int err;
2407 	void *hdr;
2408 	struct nlattr *attrs;
2409 	struct nlattr *prop;
2410 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2411 	u32 bc_mode = tipc_bcast_get_broadcast_mode(net);
2412 	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
2413 	struct tipc_link *bcl = tn->bcl;
2414 
2415 	if (!bcl)
2416 		return 0;
2417 
2418 	tipc_bcast_lock(net);
2419 
2420 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2421 			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2422 	if (!hdr) {
2423 		tipc_bcast_unlock(net);
2424 		return -EMSGSIZE;
2425 	}
2426 
2427 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2428 	if (!attrs)
2429 		goto msg_full;
2430 
2431 	/* The broadcast link is always up */
2432 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2433 		goto attr_msg_full;
2434 
2435 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2436 		goto attr_msg_full;
2437 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2438 		goto attr_msg_full;
2439 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2440 		goto attr_msg_full;
2441 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2442 		goto attr_msg_full;
2443 
2444 	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2445 	if (!prop)
2446 		goto attr_msg_full;
2447 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2448 		goto prop_msg_full;
2449 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
2450 		goto prop_msg_full;
2451 	if (bc_mode & BCLINK_MODE_SEL)
2452 		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
2453 				bc_ratio))
2454 			goto prop_msg_full;
2455 	nla_nest_end(msg->skb, prop);
2456 
2457 	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2458 	if (err)
2459 		goto attr_msg_full;
2460 
2461 	tipc_bcast_unlock(net);
2462 	nla_nest_end(msg->skb, attrs);
2463 	genlmsg_end(msg->skb, hdr);
2464 
2465 	return 0;
2466 
2467 prop_msg_full:
2468 	nla_nest_cancel(msg->skb, prop);
2469 attr_msg_full:
2470 	nla_nest_cancel(msg->skb, attrs);
2471 msg_full:
2472 	tipc_bcast_unlock(net);
2473 	genlmsg_cancel(msg->skb, hdr);
2474 
2475 	return -EMSGSIZE;
2476 }
2477 
2478 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2479 			     struct sk_buff_head *xmitq)
2480 {
2481 	l->tolerance = tol;
2482 	if (l->bc_rcvlink)
2483 		l->bc_rcvlink->tolerance = tol;
2484 	if (link_is_up(l))
2485 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
2486 }
2487 
2488 void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2489 			struct sk_buff_head *xmitq)
2490 {
2491 	l->priority = prio;
2492 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
2493 }
2494 
2495 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2496 {
2497 	l->abort_limit = limit;
2498 }
2499 
2500 char *tipc_link_name_ext(struct tipc_link *l, char *buf)
2501 {
2502 	if (!l)
2503 		scnprintf(buf, TIPC_MAX_LINK_NAME, "null");
2504 	else if (link_is_bc_sndlink(l))
2505 		scnprintf(buf, TIPC_MAX_LINK_NAME, "broadcast-sender");
2506 	else if (link_is_bc_rcvlink(l))
2507 		scnprintf(buf, TIPC_MAX_LINK_NAME,
2508 			  "broadcast-receiver, peer %x", l->addr);
2509 	else
2510 		memcpy(buf, l->name, TIPC_MAX_LINK_NAME);
2511 
2512 	return buf;
2513 }
2514 
2515 /**
2516  * tipc_link_dump - dump TIPC link data
2517  * @l: tipc link to be dumped
2518  * @dqueues: bitmap selecting which link queues to dump:
2519  *           - TIPC_DUMP_NONE: don't dump link queues
2520  *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
2521  *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
2522  *           - TIPC_DUMP_DEFERDQ: dump link deferred queue
2523  *           - TIPC_DUMP_INPUTQ: dump link input queue
2524  *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
2525  *           - TIPC_DUMP_ALL: dump all the link queues above
2526  * @buf: buffer to which the formatted dump data is written
2527  */
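/* Typical (illustrative) usage from a debug path:
 *
 *	char buf[LINK_LMAX];
 *
 *	tipc_link_dump(l, TIPC_DUMP_TRANSMQ | TIPC_DUMP_DEFERDQ, buf);
 *	pr_info("%s", buf);
 */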
2528 int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2529 {
2530 	int i = 0;
2531 	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
2532 	struct sk_buff_head *list;
2533 	struct sk_buff *hskb, *tskb;
2534 	u32 len;
2535 
2536 	if (!l) {
2537 		i += scnprintf(buf, sz, "link data: (null)\n");
2538 		return i;
2539 	}
2540 
2541 	i += scnprintf(buf, sz, "link data: %x", l->addr);
2542 	i += scnprintf(buf + i, sz - i, " %x", l->state);
2543 	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
2544 	i += scnprintf(buf + i, sz - i, " %u", l->session);
2545 	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
2546 	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
2547 	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
2548 	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
2549 	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
2550 	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2551 	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2552 	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2553 	i += scnprintf(buf + i, sz - i, " %u", l->prev_from);
2554 	i += scnprintf(buf + i, sz - i, " %u", l->stale_cnt);
2555 	i += scnprintf(buf + i, sz - i, " %u", l->acked);
2556 
2557 	list = &l->transmq;
2558 	len = skb_queue_len(list);
2559 	hskb = skb_peek(list);
2560 	tskb = skb_peek_tail(list);
2561 	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2562 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2563 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2564 
2565 	list = &l->deferdq;
2566 	len = skb_queue_len(list);
2567 	hskb = skb_peek(list);
2568 	tskb = skb_peek_tail(list);
2569 	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2570 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2571 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2572 
2573 	list = &l->backlogq;
2574 	len = skb_queue_len(list);
2575 	hskb = skb_peek(list);
2576 	tskb = skb_peek_tail(list);
2577 	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2578 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2579 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2580 
2581 	list = l->inputq;
2582 	len = skb_queue_len(list);
2583 	hskb = skb_peek(list);
2584 	tskb = skb_peek_tail(list);
2585 	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
2586 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2587 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2588 
2589 	if (dqueues & TIPC_DUMP_TRANSMQ) {
2590 		i += scnprintf(buf + i, sz - i, "transmq: ");
2591 		i += tipc_list_dump(&l->transmq, false, buf + i);
2592 	}
2593 	if (dqueues & TIPC_DUMP_BACKLOGQ) {
2594 		i += scnprintf(buf + i, sz - i,
2595 			       "backlogq: <%u %u %u %u %u>, ",
2596 			       l->backlog[TIPC_LOW_IMPORTANCE].len,
2597 			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
2598 			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
2599 			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
2600 			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
2601 		i += tipc_list_dump(&l->backlogq, false, buf + i);
2602 	}
2603 	if (dqueues & TIPC_DUMP_DEFERDQ) {
2604 		i += scnprintf(buf + i, sz - i, "deferdq: ");
2605 		i += tipc_list_dump(&l->deferdq, false, buf + i);
2606 	}
2607 	if (dqueues & TIPC_DUMP_INPUTQ) {
2608 		i += scnprintf(buf + i, sz - i, "inputq: ");
2609 		i += tipc_list_dump(l->inputq, false, buf + i);
2610 	}
2611 	if (dqueues & TIPC_DUMP_WAKEUP) {
2612 		i += scnprintf(buf + i, sz - i, "wakeup: ");
2613 		i += tipc_list_dump(&l->wakeupq, false, buf + i);
2614 	}
2615 
2616 	return i;
2617 }
2618