xref: /openbmc/linux/net/tipc/link.c (revision a1b2f04ea527397fcacacd09e0d690927feef429)
1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include "core.h"
38 #include "subscr.h"
39 #include "link.h"
40 #include "bcast.h"
41 #include "socket.h"
42 #include "name_distr.h"
43 #include "discover.h"
44 #include "netlink.h"
45 #include "monitor.h"
46 #include "trace.h"
47 
48 #include <linux/pkt_sched.h>
49 
50 struct tipc_stats {
51 	u32 sent_pkts;
52 	u32 recv_pkts;
53 	u32 sent_states;
54 	u32 recv_states;
55 	u32 sent_probes;
56 	u32 recv_probes;
57 	u32 sent_nacks;
58 	u32 recv_nacks;
59 	u32 sent_acks;
60 	u32 sent_bundled;
61 	u32 sent_bundles;
62 	u32 recv_bundled;
63 	u32 recv_bundles;
64 	u32 retransmitted;
65 	u32 sent_fragmented;
66 	u32 sent_fragments;
67 	u32 recv_fragmented;
68 	u32 recv_fragments;
69 	u32 link_congs;		/* # port sends blocked by congestion */
70 	u32 deferred_recv;
71 	u32 duplicates;
72 	u32 max_queue_sz;	/* send queue size high water mark */
73 	u32 accu_queue_sz;	/* used for send queue size profiling */
74 	u32 queue_sz_counts;	/* used for send queue size profiling */
75 	u32 msg_length_counts;	/* used for message length profiling */
76 	u32 msg_lengths_total;	/* used for message length profiling */
77 	u32 msg_length_profile[7]; /* used for msg. length profiling */
78 };
79 
80 /**
81  * struct tipc_link - TIPC link data structure
82  * @addr: network address of link's peer node
83  * @name: link name character string
84  * @media_addr: media address to use when sending messages over link
85  * @timer: link timer
86  * @net: pointer to namespace struct
87  * @refcnt: reference counter for permanent references (owner node & timer)
88  * @peer_session: link session # being used by peer end of link
89  * @peer_bearer_id: bearer id used by link's peer endpoint
90  * @bearer_id: local bearer id used by link
91  * @tolerance: minimum link continuity loss needed to reset link [in ms]
92  * @abort_limit: # of unacknowledged continuity probes needed to reset link
93  * @state: current state of link FSM
94  * @peer_caps: bitmap describing capabilities of peer node
95  * @silent_intv_cnt: # of timer intervals without any reception from peer
96  * @proto_msg: template for control messages generated by link
97  * @pmsg: convenience pointer to "proto_msg" field
98  * @priority: current link priority
99  * @net_plane: current link network plane ('A' through 'H')
100  * @mon_state: cookie with information needed by link monitor
101  * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
102  * @exp_msg_count: # of tunnelled messages expected during link changeover
103  * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
104  * @mtu: current maximum packet size for this link
105  * @advertised_mtu: advertised own mtu when link is being established
106  * @transmq: queue for sent, non-acked messages
107  * @backlogq: queue for messages waiting to be sent
108  * @snd_nxt: next sequence number to use for outbound messages
109  * @prev_from: sequence number of the most recent retransmission request
110  * @stale_limit: time when repeated identical retransmits must force link reset
111  * @ackers: # of peers that need to ack each packet before it can be released
112  * @acked: seqno of last packet acked by a certain peer. Used for broadcast.
113  * @rcv_nxt: next sequence number to expect for inbound messages
114  * @deferdq: deferred queue of out-of-sequence (OOS) messages received from peer
115  * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
116  * @inputq: buffer queue for messages to be delivered upwards
117  * @namedq: buffer queue for name table messages to be delivered upwards
118  * @next_out: ptr to first unsent outbound message in queue
119  * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
120  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
121  * @reasm_buf: head of partially reassembled inbound message fragments
122  * @bc_rcvr: marks that this is a broadcast receiver link
123  * @stats: collects statistics regarding link activity
124  */
125 struct tipc_link {
126 	u32 addr;
127 	char name[TIPC_MAX_LINK_NAME];
128 	struct net *net;
129 
130 	/* Management and link supervision data */
131 	u16 peer_session;
132 	u16 session;
133 	u16 snd_nxt_state;
134 	u16 rcv_nxt_state;
135 	u32 peer_bearer_id;
136 	u32 bearer_id;
137 	u32 tolerance;
138 	u32 abort_limit;
139 	u32 state;
140 	u16 peer_caps;
141 	bool in_session;
142 	bool active;
143 	u32 silent_intv_cnt;
144 	char if_name[TIPC_MAX_IF_NAME];
145 	u32 priority;
146 	char net_plane;
147 	struct tipc_mon_state mon_state;
148 	u16 rst_cnt;
149 
150 	/* Failover/synch */
151 	u16 drop_point;
152 	struct sk_buff *failover_reasm_skb;
153 	struct sk_buff_head failover_deferdq;
154 
155 	/* Max packet negotiation */
156 	u16 mtu;
157 	u16 advertised_mtu;
158 
159 	/* Sending */
160 	struct sk_buff_head transmq;
161 	struct sk_buff_head backlogq;
162 	struct {
163 		u16 len;
164 		u16 limit;
165 	} backlog[5];
166 	u16 snd_nxt;
167 	u16 prev_from;
168 	u16 window;
169 	unsigned long stale_limit;
170 
171 	/* Reception */
172 	u16 rcv_nxt;
173 	u32 rcv_unacked;
174 	struct sk_buff_head deferdq;
175 	struct sk_buff_head *inputq;
176 	struct sk_buff_head *namedq;
177 
178 	/* Congestion handling */
179 	struct sk_buff_head wakeupq;
180 
181 	/* Fragmentation/reassembly */
182 	struct sk_buff *reasm_buf;
183 	struct sk_buff *reasm_tnlmsg;
184 
185 	/* Broadcast */
186 	u16 ackers;
187 	u16 acked;
188 	struct tipc_link *bc_rcvlink;
189 	struct tipc_link *bc_sndlink;
190 	u8 nack_state;
191 	bool bc_peer_is_up;
192 
193 	/* Statistics */
194 	struct tipc_stats stats;
195 };
196 
197 /*
198  * Error message prefixes
199  */
200 static const char *link_co_err = "Link tunneling error, ";
201 static const char *link_rst_msg = "Resetting link ";
202 
203 /* Send states for broadcast NACKs
204  */
205 enum {
206 	BC_NACK_SND_CONDITIONAL,
207 	BC_NACK_SND_UNCONDITIONAL,
208 	BC_NACK_SND_SUPPRESS,
209 };
210 
211 #define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
212 #define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
213 
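/* Note: the two macros above expand at each use, re-reading 'jiffies';
 * e.g. "TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM" stamps the packet
 * with "now + 10 ms" at the moment of (re)transmission, not a constant.
 */
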
214 /*
215  * Interval between NACKs when packets arrive out of order
216  */
217 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
218 
219 /* Link FSM states:
220  */
221 enum {
222 	LINK_ESTABLISHED     = 0xe,
223 	LINK_ESTABLISHING    = 0xe  << 4,
224 	LINK_RESET           = 0x1  << 8,
225 	LINK_RESETTING       = 0x2  << 12,
226 	LINK_PEER_RESET      = 0xd  << 16,
227 	LINK_FAILINGOVER     = 0xf  << 20,
228 	LINK_SYNCHING        = 0xc  << 24
229 };
230 
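/* Each state above occupies its own hex digit (nibble), so any subset of
 * states can be tested with a single mask operation, e.g.:
 *
 *	LINK_ESTABLISHED = 0x0000000e
 *	LINK_SYNCHING    = 0x0c000000
 *	l->state & (LINK_ESTABLISHED | LINK_SYNCHING)	// nonzero in either
 */
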
231 /* Link FSM state checking routines
232  */
233 static int link_is_up(struct tipc_link *l)
234 {
235 	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
236 }
237 
238 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
239 			       struct sk_buff_head *xmitq);
240 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
241 				      bool probe_reply, u16 rcvgap,
242 				      int tolerance, int priority,
243 				      struct sk_buff_head *xmitq);
244 static void link_print(struct tipc_link *l, const char *str);
245 static int tipc_link_build_nack_msg(struct tipc_link *l,
246 				    struct sk_buff_head *xmitq);
247 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
248 					struct sk_buff_head *xmitq);
249 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
250 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data);
251 static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
252 				     struct tipc_gap_ack_blks *ga,
253 				     struct sk_buff_head *xmitq);
254 
255 /*
256  *  Simple non-static link routines (i.e. referenced outside this file)
257  */
258 bool tipc_link_is_up(struct tipc_link *l)
259 {
260 	return link_is_up(l);
261 }
262 
263 bool tipc_link_peer_is_down(struct tipc_link *l)
264 {
265 	return l->state == LINK_PEER_RESET;
266 }
267 
268 bool tipc_link_is_reset(struct tipc_link *l)
269 {
270 	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
271 }
272 
273 bool tipc_link_is_establishing(struct tipc_link *l)
274 {
275 	return l->state == LINK_ESTABLISHING;
276 }
277 
278 bool tipc_link_is_synching(struct tipc_link *l)
279 {
280 	return l->state == LINK_SYNCHING;
281 }
282 
283 bool tipc_link_is_failingover(struct tipc_link *l)
284 {
285 	return l->state == LINK_FAILINGOVER;
286 }
287 
288 bool tipc_link_is_blocked(struct tipc_link *l)
289 {
290 	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
291 }
292 
293 static bool link_is_bc_sndlink(struct tipc_link *l)
294 {
295 	return !l->bc_sndlink;
296 }
297 
298 static bool link_is_bc_rcvlink(struct tipc_link *l)
299 {
300 	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
301 }
302 
303 void tipc_link_set_active(struct tipc_link *l, bool active)
304 {
305 	l->active = active;
306 }
307 
308 u32 tipc_link_id(struct tipc_link *l)
309 {
310 	return l->peer_bearer_id << 16 | l->bearer_id;
311 }
312 
313 int tipc_link_window(struct tipc_link *l)
314 {
315 	return l->window;
316 }
317 
318 int tipc_link_prio(struct tipc_link *l)
319 {
320 	return l->priority;
321 }
322 
323 unsigned long tipc_link_tolerance(struct tipc_link *l)
324 {
325 	return l->tolerance;
326 }
327 
328 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
329 {
330 	return l->inputq;
331 }
332 
333 char tipc_link_plane(struct tipc_link *l)
334 {
335 	return l->net_plane;
336 }
337 
338 void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
339 {
340 	l->peer_caps = capabilities;
341 }
342 
343 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
344 			   struct tipc_link *uc_l,
345 			   struct sk_buff_head *xmitq)
346 {
347 	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
348 
349 	snd_l->ackers++;
350 	rcv_l->acked = snd_l->snd_nxt - 1;
351 	snd_l->state = LINK_ESTABLISHED;
352 	tipc_link_build_bc_init_msg(uc_l, xmitq);
353 }
354 
355 void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
356 			      struct tipc_link *rcv_l,
357 			      struct sk_buff_head *xmitq)
358 {
359 	u16 ack = snd_l->snd_nxt - 1;
360 
361 	snd_l->ackers--;
362 	rcv_l->bc_peer_is_up = true;
363 	rcv_l->state = LINK_ESTABLISHED;
364 	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
365 	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
366 	tipc_link_reset(rcv_l);
367 	rcv_l->state = LINK_RESET;
368 	if (!snd_l->ackers) {
369 		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
370 		tipc_link_reset(snd_l);
371 		snd_l->state = LINK_RESET;
372 		__skb_queue_purge(xmitq);
373 	}
374 }
375 
376 int tipc_link_bc_peers(struct tipc_link *l)
377 {
378 	return l->ackers;
379 }
380 
381 static u16 link_bc_rcv_gap(struct tipc_link *l)
382 {
383 	struct sk_buff *skb = skb_peek(&l->deferdq);
384 	u16 gap = 0;
385 
386 	if (more(l->snd_nxt, l->rcv_nxt))
387 		gap = l->snd_nxt - l->rcv_nxt;
388 	if (skb)
389 		gap = buf_seqno(skb) - l->rcv_nxt;
390 	return gap;
391 }
392 
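/* Note: on a broadcast receive link, snd_nxt mirrors the peer's snd_nxt
 * (see tipc_link_build_state_msg()), so the gap computed above is "what
 * the peer has sent minus what we have received", narrowed to the first
 * deferred packet when one is queued.
 */
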
393 void tipc_link_set_mtu(struct tipc_link *l, int mtu)
394 {
395 	l->mtu = mtu;
396 }
397 
398 int tipc_link_mtu(struct tipc_link *l)
399 {
400 	return l->mtu;
401 }
402 
403 u16 tipc_link_rcv_nxt(struct tipc_link *l)
404 {
405 	return l->rcv_nxt;
406 }
407 
408 u16 tipc_link_acked(struct tipc_link *l)
409 {
410 	return l->acked;
411 }
412 
413 char *tipc_link_name(struct tipc_link *l)
414 {
415 	return l->name;
416 }
417 
418 u32 tipc_link_state(struct tipc_link *l)
419 {
420 	return l->state;
421 }
422 
423 /**
424  * tipc_link_create - create a new link
425  * @n: pointer to associated node
426  * @if_name: associated interface name
427  * @bearer_id: id (index) of associated bearer
428  * @tolerance: link tolerance to be used by link
429  * @net_plane: network plane (A, B, C, ...) this link belongs to
430  * @mtu: mtu to be advertised by link
431  * @priority: priority to be used by link
432  * @window: send window to be used by link
433  * @session: session to be used by link
434  * @ownnode: identity of own node
435  * @peer: node id of peer node
436  * @peer_caps: bitmap describing peer node capabilities
437  * @bc_sndlink: the namespace global link used for broadcast sending
438  * @bc_rcvlink: the peer specific link used for broadcast reception
439  * @inputq: queue to put messages ready for delivery
440  * @namedq: queue to put binding table update messages ready for delivery
441  * @link: return value, pointer to put the created link
442  *
443  * Returns true if link was created, otherwise false
444  */
445 bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
446 		      int tolerance, char net_plane, u32 mtu, int priority,
447 		      int window, u32 session, u32 self,
448 		      u32 peer, u8 *peer_id, u16 peer_caps,
449 		      struct tipc_link *bc_sndlink,
450 		      struct tipc_link *bc_rcvlink,
451 		      struct sk_buff_head *inputq,
452 		      struct sk_buff_head *namedq,
453 		      struct tipc_link **link)
454 {
455 	char peer_str[NODE_ID_STR_LEN] = {0,};
456 	char self_str[NODE_ID_STR_LEN] = {0,};
457 	struct tipc_link *l;
458 
459 	l = kzalloc(sizeof(*l), GFP_ATOMIC);
460 	if (!l)
461 		return false;
462 	*link = l;
463 	l->session = session;
464 
465 	/* Set link name for unicast links only */
466 	if (peer_id) {
467 		tipc_nodeid2string(self_str, tipc_own_id(net));
468 		if (strlen(self_str) > 16)
469 			sprintf(self_str, "%x", self);
470 		tipc_nodeid2string(peer_str, peer_id);
471 		if (strlen(peer_str) > 16)
472 			sprintf(peer_str, "%x", peer);
473 	}
474 	/* Peer i/f name will be completed by reset/activate message */
475 	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
476 		 self_str, if_name, peer_str);
477 
478 	strcpy(l->if_name, if_name);
479 	l->addr = peer;
480 	l->peer_caps = peer_caps;
481 	l->net = net;
482 	l->in_session = false;
483 	l->bearer_id = bearer_id;
484 	l->tolerance = tolerance;
485 	if (bc_rcvlink)
486 		bc_rcvlink->tolerance = tolerance;
487 	l->net_plane = net_plane;
488 	l->advertised_mtu = mtu;
489 	l->mtu = mtu;
490 	l->priority = priority;
491 	tipc_link_set_queue_limits(l, window);
492 	l->ackers = 1;
493 	l->bc_sndlink = bc_sndlink;
494 	l->bc_rcvlink = bc_rcvlink;
495 	l->inputq = inputq;
496 	l->namedq = namedq;
497 	l->state = LINK_RESETTING;
498 	__skb_queue_head_init(&l->transmq);
499 	__skb_queue_head_init(&l->backlogq);
500 	__skb_queue_head_init(&l->deferdq);
501 	__skb_queue_head_init(&l->failover_deferdq);
502 	skb_queue_head_init(&l->wakeupq);
503 	skb_queue_head_init(l->inputq);
504 	return true;
505 }
506 
507 /**
508  * tipc_link_bc_create - create new link to be used for broadcast
509  * @n: pointer to associated node
510  * @mtu: mtu to be used initially if no peers
511  * @window: send window to be used
512  * @inputq: queue to put messages ready for delivery
513  * @namedq: queue to put binding table update messages ready for delivery
514  * @link: return value, pointer to put the created link
515  *
516  * Returns true if link was created, otherwise false
517  */
518 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
519 			 int mtu, int window, u16 peer_caps,
520 			 struct sk_buff_head *inputq,
521 			 struct sk_buff_head *namedq,
522 			 struct tipc_link *bc_sndlink,
523 			 struct tipc_link **link)
524 {
525 	struct tipc_link *l;
526 
527 	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
528 			      0, ownnode, peer, NULL, peer_caps, bc_sndlink,
529 			      NULL, inputq, namedq, link))
530 		return false;
531 
532 	l = *link;
533 	strcpy(l->name, tipc_bclink_name);
534 	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
535 	tipc_link_reset(l);
536 	l->state = LINK_RESET;
537 	l->ackers = 0;
538 	l->bc_rcvlink = l;
539 
540 	/* Broadcast send link is always up */
541 	if (link_is_bc_sndlink(l))
542 		l->state = LINK_ESTABLISHED;
543 
544 	/* Disable replicast if even a single peer doesn't support it */
545 	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
546 		tipc_bcast_disable_rcast(net);
547 
548 	return true;
549 }
550 
551 /**
552  * tipc_link_fsm_evt - link finite state machine
553  * @l: pointer to link
554  * @evt: state machine event to be processed
555  */
556 int tipc_link_fsm_evt(struct tipc_link *l, int evt)
557 {
558 	int rc = 0;
559 	int old_state = l->state;
560 
561 	switch (l->state) {
562 	case LINK_RESETTING:
563 		switch (evt) {
564 		case LINK_PEER_RESET_EVT:
565 			l->state = LINK_PEER_RESET;
566 			break;
567 		case LINK_RESET_EVT:
568 			l->state = LINK_RESET;
569 			break;
570 		case LINK_FAILURE_EVT:
571 		case LINK_FAILOVER_BEGIN_EVT:
572 		case LINK_ESTABLISH_EVT:
573 		case LINK_FAILOVER_END_EVT:
574 		case LINK_SYNCH_BEGIN_EVT:
575 		case LINK_SYNCH_END_EVT:
576 		default:
577 			goto illegal_evt;
578 		}
579 		break;
580 	case LINK_RESET:
581 		switch (evt) {
582 		case LINK_PEER_RESET_EVT:
583 			l->state = LINK_ESTABLISHING;
584 			break;
585 		case LINK_FAILOVER_BEGIN_EVT:
586 			l->state = LINK_FAILINGOVER;
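			/* fall through */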
587 		case LINK_FAILURE_EVT:
588 		case LINK_RESET_EVT:
589 		case LINK_ESTABLISH_EVT:
590 		case LINK_FAILOVER_END_EVT:
591 			break;
592 		case LINK_SYNCH_BEGIN_EVT:
593 		case LINK_SYNCH_END_EVT:
594 		default:
595 			goto illegal_evt;
596 		}
597 		break;
598 	case LINK_PEER_RESET:
599 		switch (evt) {
600 		case LINK_RESET_EVT:
601 			l->state = LINK_ESTABLISHING;
602 			break;
603 		case LINK_PEER_RESET_EVT:
604 		case LINK_ESTABLISH_EVT:
605 		case LINK_FAILURE_EVT:
606 			break;
607 		case LINK_SYNCH_BEGIN_EVT:
608 		case LINK_SYNCH_END_EVT:
609 		case LINK_FAILOVER_BEGIN_EVT:
610 		case LINK_FAILOVER_END_EVT:
611 		default:
612 			goto illegal_evt;
613 		}
614 		break;
615 	case LINK_FAILINGOVER:
616 		switch (evt) {
617 		case LINK_FAILOVER_END_EVT:
618 			l->state = LINK_RESET;
619 			break;
620 		case LINK_PEER_RESET_EVT:
621 		case LINK_RESET_EVT:
622 		case LINK_ESTABLISH_EVT:
623 		case LINK_FAILURE_EVT:
624 			break;
625 		case LINK_FAILOVER_BEGIN_EVT:
626 		case LINK_SYNCH_BEGIN_EVT:
627 		case LINK_SYNCH_END_EVT:
628 		default:
629 			goto illegal_evt;
630 		}
631 		break;
632 	case LINK_ESTABLISHING:
633 		switch (evt) {
634 		case LINK_ESTABLISH_EVT:
635 			l->state = LINK_ESTABLISHED;
636 			break;
637 		case LINK_FAILOVER_BEGIN_EVT:
638 			l->state = LINK_FAILINGOVER;
639 			break;
640 		case LINK_RESET_EVT:
641 			l->state = LINK_RESET;
642 			break;
643 		case LINK_FAILURE_EVT:
644 		case LINK_PEER_RESET_EVT:
645 		case LINK_SYNCH_BEGIN_EVT:
646 		case LINK_FAILOVER_END_EVT:
647 			break;
648 		case LINK_SYNCH_END_EVT:
649 		default:
650 			goto illegal_evt;
651 		}
652 		break;
653 	case LINK_ESTABLISHED:
654 		switch (evt) {
655 		case LINK_PEER_RESET_EVT:
656 			l->state = LINK_PEER_RESET;
657 			rc |= TIPC_LINK_DOWN_EVT;
658 			break;
659 		case LINK_FAILURE_EVT:
660 			l->state = LINK_RESETTING;
661 			rc |= TIPC_LINK_DOWN_EVT;
662 			break;
663 		case LINK_RESET_EVT:
664 			l->state = LINK_RESET;
665 			break;
666 		case LINK_ESTABLISH_EVT:
667 		case LINK_SYNCH_END_EVT:
668 			break;
669 		case LINK_SYNCH_BEGIN_EVT:
670 			l->state = LINK_SYNCHING;
671 			break;
672 		case LINK_FAILOVER_BEGIN_EVT:
673 		case LINK_FAILOVER_END_EVT:
674 		default:
675 			goto illegal_evt;
676 		}
677 		break;
678 	case LINK_SYNCHING:
679 		switch (evt) {
680 		case LINK_PEER_RESET_EVT:
681 			l->state = LINK_PEER_RESET;
682 			rc |= TIPC_LINK_DOWN_EVT;
683 			break;
684 		case LINK_FAILURE_EVT:
685 			l->state = LINK_RESETTING;
686 			rc |= TIPC_LINK_DOWN_EVT;
687 			break;
688 		case LINK_RESET_EVT:
689 			l->state = LINK_RESET;
690 			break;
691 		case LINK_ESTABLISH_EVT:
692 		case LINK_SYNCH_BEGIN_EVT:
693 			break;
694 		case LINK_SYNCH_END_EVT:
695 			l->state = LINK_ESTABLISHED;
696 			break;
697 		case LINK_FAILOVER_BEGIN_EVT:
698 		case LINK_FAILOVER_END_EVT:
699 		default:
700 			goto illegal_evt;
701 		}
702 		break;
703 	default:
704 		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
705 	}
706 	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
707 	return rc;
708 illegal_evt:
709 	pr_err("Illegal FSM event %x in state %x on link %s\n",
710 	       evt, l->state, l->name);
711 	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
712 	return rc;
713 }
714 
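/* Example traversal of the FSM above: a link in LINK_RESET that sees
 * LINK_PEER_RESET_EVT moves to LINK_ESTABLISHING; a subsequent
 * LINK_ESTABLISH_EVT moves it to LINK_ESTABLISHED, at which point
 * link_is_up() starts returning nonzero.
 */
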
715 /* link_profile_stats - update statistical profiling of traffic
716  */
717 static void link_profile_stats(struct tipc_link *l)
718 {
719 	struct sk_buff *skb;
720 	struct tipc_msg *msg;
721 	int length;
722 
723 	/* Update counters used in statistical profiling of send traffic */
724 	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
725 	l->stats.queue_sz_counts++;
726 
727 	skb = skb_peek(&l->transmq);
728 	if (!skb)
729 		return;
730 	msg = buf_msg(skb);
731 	length = msg_size(msg);
732 
733 	if (msg_user(msg) == MSG_FRAGMENTER) {
734 		if (msg_type(msg) != FIRST_FRAGMENT)
735 			return;
736 		length = msg_size(msg_inner_hdr(msg));
737 	}
738 	l->stats.msg_lengths_total += length;
739 	l->stats.msg_length_counts++;
740 	if (length <= 64)
741 		l->stats.msg_length_profile[0]++;
742 	else if (length <= 256)
743 		l->stats.msg_length_profile[1]++;
744 	else if (length <= 1024)
745 		l->stats.msg_length_profile[2]++;
746 	else if (length <= 4096)
747 		l->stats.msg_length_profile[3]++;
748 	else if (length <= 16384)
749 		l->stats.msg_length_profile[4]++;
750 	else if (length <= 32768)
751 		l->stats.msg_length_profile[5]++;
752 	else
753 		l->stats.msg_length_profile[6]++;
754 }
755 
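/* The msg_length_profile[] buckets filled above correspond to:
 * [0] <= 64, [1] <= 256, [2] <= 1024, [3] <= 4096, [4] <= 16384,
 * [5] <= 32768, [6] > 32768 bytes
 */
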
756 /**
757  * tipc_link_too_silent - check if link is "too silent"
758  * @l: tipc link to be checked
759  *
760  * Returns true if the link 'silent_intv_cnt' is about to reach the
761  * 'abort_limit' value, otherwise false
762  */
763 bool tipc_link_too_silent(struct tipc_link *l)
764 {
765 	return (l->silent_intv_cnt + 2 > l->abort_limit);
766 }
767 
768 /* tipc_link_timeout - perform periodic task as instructed from node timeout
769  */
770 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
771 {
772 	int mtyp = 0;
773 	int rc = 0;
774 	bool state = false;
775 	bool probe = false;
776 	bool setup = false;
777 	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
778 	u16 bc_acked = l->bc_rcvlink->acked;
779 	struct tipc_mon_state *mstate = &l->mon_state;
780 
781 	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
782 	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
783 	switch (l->state) {
784 	case LINK_ESTABLISHED:
785 	case LINK_SYNCHING:
786 		mtyp = STATE_MSG;
787 		link_profile_stats(l);
788 		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
789 		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
790 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
791 		state = bc_acked != bc_snt;
792 		state |= l->bc_rcvlink->rcv_unacked;
793 		state |= l->rcv_unacked;
794 		state |= !skb_queue_empty(&l->transmq);
795 		state |= !skb_queue_empty(&l->deferdq);
796 		probe = mstate->probing;
797 		probe |= l->silent_intv_cnt;
798 		if (probe || mstate->monitoring)
799 			l->silent_intv_cnt++;
800 		break;
801 	case LINK_RESET:
802 		setup = l->rst_cnt++ <= 4;
803 		setup |= !(l->rst_cnt % 16);
804 		mtyp = RESET_MSG;
805 		break;
806 	case LINK_ESTABLISHING:
807 		setup = true;
808 		mtyp = ACTIVATE_MSG;
809 		break;
810 	case LINK_PEER_RESET:
811 	case LINK_RESETTING:
812 	case LINK_FAILINGOVER:
813 		break;
814 	default:
815 		break;
816 	}
817 
818 	if (state || probe || setup)
819 		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
820 
821 	return rc;
822 }
823 
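/* Illustrative caller sketch (an assumption for clarity - the real caller
 * is the per-node timer; the bearer send step is elided):
 *
 *	struct sk_buff_head xmitq;
 *	int rc;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_timeout(l, &xmitq);
 *	// hand 'xmitq' to the bearer layer for transmission
 *	if (rc & TIPC_LINK_DOWN_EVT)
 *		;	// caller takes the link down
 */
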
824 /**
825  * link_schedule_user - schedule a message sender for wakeup after congestion
826  * @l: congested link
827  * @hdr: header of message that is being sent
828  * Create pseudo msg to send back to user when congestion abates
829  */
830 static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
831 {
832 	u32 dnode = tipc_own_addr(l->net);
833 	u32 dport = msg_origport(hdr);
834 	struct sk_buff *skb;
835 
836 	/* Create and schedule wakeup pseudo message */
837 	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
838 			      dnode, l->addr, dport, 0, 0);
839 	if (!skb)
840 		return -ENOBUFS;
841 	msg_set_dest_droppable(buf_msg(skb), true);
842 	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
843 	skb_queue_tail(&l->wakeupq, skb);
844 	l->stats.link_congs++;
845 	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
846 	return -ELINKCONG;
847 }
848 
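/* The -ELINKCONG return above propagates to the sending socket, which
 * blocks the sender; the SOCK_WAKEUP pseudo message parked on l->wakeupq
 * is later moved to l->inputq by link_prepare_wakeup() once backlog
 * space frees up, waking that sender again.
 */
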
849 /**
850  * link_prepare_wakeup - prepare users for wakeup after congestion
851  * @l: congested link
852  * Wake up a number of waiting users, as permitted by available space
853  * in the send queue
854  */
855 static void link_prepare_wakeup(struct tipc_link *l)
856 {
857 	struct sk_buff_head *wakeupq = &l->wakeupq;
858 	struct sk_buff_head *inputq = l->inputq;
859 	struct sk_buff *skb, *tmp;
860 	struct sk_buff_head tmpq;
861 	int avail[5] = {0,};
862 	int imp = 0;
863 
864 	__skb_queue_head_init(&tmpq);
865 
866 	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
867 		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;
868 
869 	skb_queue_walk_safe(wakeupq, skb, tmp) {
870 		imp = TIPC_SKB_CB(skb)->chain_imp;
871 		if (avail[imp] <= 0)
872 			continue;
873 		avail[imp]--;
874 		__skb_unlink(skb, wakeupq);
875 		__skb_queue_tail(&tmpq, skb);
876 	}
877 
878 	spin_lock_bh(&inputq->lock);
879 	skb_queue_splice_tail(&tmpq, inputq);
880 	spin_unlock_bh(&inputq->lock);
881 
882 }
883 
884 void tipc_link_reset(struct tipc_link *l)
885 {
886 	struct sk_buff_head list;
887 
888 	__skb_queue_head_init(&list);
889 
890 	l->in_session = false;
891 	/* Force re-synch of peer session number before establishing */
892 	l->peer_session--;
893 	l->session++;
894 	l->mtu = l->advertised_mtu;
895 
896 	spin_lock_bh(&l->wakeupq.lock);
897 	skb_queue_splice_init(&l->wakeupq, &list);
898 	spin_unlock_bh(&l->wakeupq.lock);
899 
900 	spin_lock_bh(&l->inputq->lock);
901 	skb_queue_splice_init(&list, l->inputq);
902 	spin_unlock_bh(&l->inputq->lock);
903 
904 	__skb_queue_purge(&l->transmq);
905 	__skb_queue_purge(&l->deferdq);
906 	__skb_queue_purge(&l->backlogq);
907 	__skb_queue_purge(&l->failover_deferdq);
908 	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
909 	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
910 	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
911 	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
912 	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
913 	kfree_skb(l->reasm_buf);
914 	kfree_skb(l->reasm_tnlmsg);
915 	kfree_skb(l->failover_reasm_skb);
916 	l->reasm_buf = NULL;
917 	l->reasm_tnlmsg = NULL;
918 	l->failover_reasm_skb = NULL;
919 	l->rcv_unacked = 0;
920 	l->snd_nxt = 1;
921 	l->rcv_nxt = 1;
922 	l->snd_nxt_state = 1;
923 	l->rcv_nxt_state = 1;
924 	l->acked = 0;
925 	l->silent_intv_cnt = 0;
926 	l->rst_cnt = 0;
927 	l->bc_peer_is_up = false;
928 	memset(&l->mon_state, 0, sizeof(l->mon_state));
929 	tipc_link_reset_stats(l);
930 }
931 
932 /**
933  * tipc_link_xmit(): enqueue buffer list according to queue situation
934  * @link: link to use
935  * @list: chain of buffers containing message
936  * @xmitq: returned list of packets to be sent by caller
937  *
938  * Consumes the buffer chain.
939  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
940  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
941  */
942 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
943 		   struct sk_buff_head *xmitq)
944 {
945 	struct tipc_msg *hdr = buf_msg(skb_peek(list));
946 	unsigned int maxwin = l->window;
947 	int imp = msg_importance(hdr);
948 	unsigned int mtu = l->mtu;
949 	u16 ack = l->rcv_nxt - 1;
950 	u16 seqno = l->snd_nxt;
951 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
952 	struct sk_buff_head *transmq = &l->transmq;
953 	struct sk_buff_head *backlogq = &l->backlogq;
954 	struct sk_buff *skb, *_skb, *bskb;
955 	int pkt_cnt = skb_queue_len(list);
956 	int rc = 0;
957 
958 	if (unlikely(msg_size(hdr) > mtu)) {
959 		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
960 			skb_queue_len(list), msg_user(hdr),
961 			msg_type(hdr), msg_size(hdr), mtu);
962 		skb_queue_purge(list);
963 		return -EMSGSIZE;
964 	}
965 
966 	/* Allow oversubscription of one data msg per source at congestion */
967 	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
968 		if (imp == TIPC_SYSTEM_IMPORTANCE) {
969 			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
970 			return -ENOBUFS;
971 		}
972 		rc = link_schedule_user(l, hdr);
973 	}
974 
975 	if (pkt_cnt > 1) {
976 		l->stats.sent_fragmented++;
977 		l->stats.sent_fragments += pkt_cnt;
978 	}
979 
980 	/* Prepare each packet for sending, and add to relevant queue: */
981 	while (skb_queue_len(list)) {
982 		skb = skb_peek(list);
983 		hdr = buf_msg(skb);
984 		msg_set_seqno(hdr, seqno);
985 		msg_set_ack(hdr, ack);
986 		msg_set_bcast_ack(hdr, bc_ack);
987 
988 		if (likely(skb_queue_len(transmq) < maxwin)) {
989 			_skb = skb_clone(skb, GFP_ATOMIC);
990 			if (!_skb) {
991 				skb_queue_purge(list);
992 				return -ENOBUFS;
993 			}
994 			__skb_dequeue(list);
995 			__skb_queue_tail(transmq, skb);
996 			/* next retransmit attempt */
997 			if (link_is_bc_sndlink(l))
998 				TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
999 			__skb_queue_tail(xmitq, _skb);
1000 			TIPC_SKB_CB(skb)->ackers = l->ackers;
1001 			l->rcv_unacked = 0;
1002 			l->stats.sent_pkts++;
1003 			seqno++;
1004 			continue;
1005 		}
1006 		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
1007 			kfree_skb(__skb_dequeue(list));
1008 			l->stats.sent_bundled++;
1009 			continue;
1010 		}
1011 		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
1012 			kfree_skb(__skb_dequeue(list));
1013 			__skb_queue_tail(backlogq, bskb);
1014 			l->backlog[msg_importance(buf_msg(bskb))].len++;
1015 			l->stats.sent_bundled++;
1016 			l->stats.sent_bundles++;
1017 			continue;
1018 		}
1019 		l->backlog[imp].len += skb_queue_len(list);
1020 		skb_queue_splice_tail_init(list, backlogq);
1021 	}
1022 	l->snd_nxt = seqno;
1023 	return rc;
1024 }
1025 
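/* Minimal usage sketch (assumption: 'list' was filled elsewhere with a
 * complete message chain; locking and the bearer send are elided):
 *
 *	struct sk_buff_head list, xmitq;
 *	int rc;
 *
 *	__skb_queue_head_init(&list);
 *	__skb_queue_head_init(&xmitq);
 *	// ... build message chain into 'list' ...
 *	rc = tipc_link_xmit(l, &list, &xmitq);	// consumes 'list' (see doc)
 *	// 'xmitq' now holds the packets to hand to the bearer; note that
 *	// rc may be -ELINKCONG even though the message was accepted
 */
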
1026 static void tipc_link_advance_backlog(struct tipc_link *l,
1027 				      struct sk_buff_head *xmitq)
1028 {
1029 	struct sk_buff *skb, *_skb;
1030 	struct tipc_msg *hdr;
1031 	u16 seqno = l->snd_nxt;
1032 	u16 ack = l->rcv_nxt - 1;
1033 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1034 
1035 	while (skb_queue_len(&l->transmq) < l->window) {
1036 		skb = skb_peek(&l->backlogq);
1037 		if (!skb)
1038 			break;
1039 		_skb = skb_clone(skb, GFP_ATOMIC);
1040 		if (!_skb)
1041 			break;
1042 		__skb_dequeue(&l->backlogq);
1043 		hdr = buf_msg(skb);
1044 		l->backlog[msg_importance(hdr)].len--;
1045 		__skb_queue_tail(&l->transmq, skb);
1046 		/* next retransmit attempt */
1047 		if (link_is_bc_sndlink(l))
1048 			TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1049 
1050 		__skb_queue_tail(xmitq, _skb);
1051 		TIPC_SKB_CB(skb)->ackers = l->ackers;
1052 		msg_set_seqno(hdr, seqno);
1053 		msg_set_ack(hdr, ack);
1054 		msg_set_bcast_ack(hdr, bc_ack);
1055 		l->rcv_unacked = 0;
1056 		l->stats.sent_pkts++;
1057 		seqno++;
1058 	}
1059 	l->snd_nxt = seqno;
1060 }
1061 
1062 /**
1063  * link_retransmit_failure() - Detect repeated retransmit failures
1064  * @l: tipc link sender
1065  * @r: tipc link receiver (= l in case of unicast)
1066  * @from: seqno of the 1st packet in retransmit request
1067  * @rc: returned code
1068  *
1069  * Return: true if the repeated retransmit failures happens, otherwise
1070  * false
1071  */
1072 static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
1073 				    u16 from, int *rc)
1074 {
1075 	struct sk_buff *skb = skb_peek(&l->transmq);
1076 	struct tipc_msg *hdr;
1077 
1078 	if (!skb)
1079 		return false;
1080 	hdr = buf_msg(skb);
1081 
1082 	/* Detect repeated retransmit failures on same packet */
1083 	if (r->prev_from != from) {
1084 		r->prev_from = from;
1085 		r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
1086 	} else if (time_after(jiffies, r->stale_limit)) {
1087 		pr_warn("Retransmission failure on link <%s>\n", l->name);
1088 		link_print(l, "State of link ");
1089 		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1090 			msg_user(hdr), msg_type(hdr), msg_size(hdr),
1091 			msg_errcode(hdr));
1092 		pr_info("sqno %u, prev: %x, src: %x\n",
1093 			msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
1094 
1095 		trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1096 		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1097 		trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1098 
1099 		if (link_is_bc_sndlink(l))
1100 			*rc = TIPC_LINK_DOWN_EVT;
1101 
1102 		*rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1103 		return true;
1104 	}
1105 
1106 	return false;
1107 }
1108 
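/* Example: with a link tolerance of 1500 ms, the first request to
 * retransmit from a given seqno arms r->stale_limit = now + 1.5 s; only
 * if the same seqno is still being requested after that deadline does
 * the function above declare failure and take the link down.
 */
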
1109 /* tipc_link_bc_retrans() - retransmit zero or more packets
1110  * @l: the link to transmit on
1111  * @r: the receiving link ordering the retransmit. Same as l if unicast
1112  * @from: retransmit from (inclusive) this sequence number
1113  * @to: retransmit to (inclusive) this sequence number
1114  * @xmitq: queue for accumulating the retransmitted packets
1115  */
1116 static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1117 				u16 from, u16 to, struct sk_buff_head *xmitq)
1118 {
1119 	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
1120 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1121 	u16 ack = l->rcv_nxt - 1;
1122 	struct tipc_msg *hdr;
1123 	int rc = 0;
1124 
1125 	if (!skb)
1126 		return 0;
1127 	if (less(to, from))
1128 		return 0;
1129 
1130 	trace_tipc_link_retrans(r, from, to, &l->transmq);
1131 
1132 	if (link_retransmit_failure(l, r, from, &rc))
1133 		return rc;
1134 
1135 	skb_queue_walk(&l->transmq, skb) {
1136 		hdr = buf_msg(skb);
1137 		if (less(msg_seqno(hdr), from))
1138 			continue;
1139 		if (more(msg_seqno(hdr), to))
1140 			break;
1141 		if (link_is_bc_sndlink(l)) {
1142 			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1143 				continue;
1144 			TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1145 		}
1146 		_skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
1147 		if (!_skb)
1148 			return 0;
1149 		hdr = buf_msg(_skb);
1150 		msg_set_ack(hdr, ack);
1151 		msg_set_bcast_ack(hdr, bc_ack);
1152 		_skb->priority = TC_PRIO_CONTROL;
1153 		__skb_queue_tail(xmitq, _skb);
1154 		l->stats.retransmitted++;
1155 	}
1156 	return 0;
1157 }
1158 
1159 /* tipc_data_input - deliver data and name distr msgs to upper layer
1160  *
1161  * Consumes buffer if message is of right type
1162  * Node lock must be held
1163  */
1164 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1165 			    struct sk_buff_head *inputq)
1166 {
1167 	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
1168 	struct tipc_msg *hdr = buf_msg(skb);
1169 
1170 	switch (msg_user(hdr)) {
1171 	case TIPC_LOW_IMPORTANCE:
1172 	case TIPC_MEDIUM_IMPORTANCE:
1173 	case TIPC_HIGH_IMPORTANCE:
1174 	case TIPC_CRITICAL_IMPORTANCE:
1175 		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
1176 			skb_queue_tail(mc_inputq, skb);
1177 			return true;
1178 		}
1179 		/* fall through */
1180 	case CONN_MANAGER:
1181 		skb_queue_tail(inputq, skb);
1182 		return true;
1183 	case GROUP_PROTOCOL:
1184 		skb_queue_tail(mc_inputq, skb);
1185 		return true;
1186 	case NAME_DISTRIBUTOR:
1187 		l->bc_rcvlink->state = LINK_ESTABLISHED;
1188 		skb_queue_tail(l->namedq, skb);
1189 		return true;
1190 	case MSG_BUNDLER:
1191 	case TUNNEL_PROTOCOL:
1192 	case MSG_FRAGMENTER:
1193 	case BCAST_PROTOCOL:
1194 		return false;
1195 	default:
1196 		pr_warn("Dropping received illegal msg type\n");
1197 		kfree_skb(skb);
1198 		return true;
1199 	}
1200 }
1201 
1202 /* tipc_link_input - process packet that has passed link protocol check
1203  *
1204  * Consumes buffer
1205  */
1206 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1207 			   struct sk_buff_head *inputq,
1208 			   struct sk_buff **reasm_skb)
1209 {
1210 	struct tipc_msg *hdr = buf_msg(skb);
1211 	struct sk_buff *iskb;
1212 	struct sk_buff_head tmpq;
1213 	int usr = msg_user(hdr);
1214 	int pos = 0;
1215 
1216 	if (usr == MSG_BUNDLER) {
1217 		skb_queue_head_init(&tmpq);
1218 		l->stats.recv_bundles++;
1219 		l->stats.recv_bundled += msg_msgcnt(hdr);
1220 		while (tipc_msg_extract(skb, &iskb, &pos))
1221 			tipc_data_input(l, iskb, &tmpq);
1222 		tipc_skb_queue_splice_tail(&tmpq, inputq);
1223 		return 0;
1224 	} else if (usr == MSG_FRAGMENTER) {
1225 		l->stats.recv_fragments++;
1226 		if (tipc_buf_append(reasm_skb, &skb)) {
1227 			l->stats.recv_fragmented++;
1228 			tipc_data_input(l, skb, inputq);
1229 		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1230 			pr_warn_ratelimited("Unable to build fragment list\n");
1231 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1232 		}
1233 		return 0;
1234 	} else if (usr == BCAST_PROTOCOL) {
1235 		tipc_bcast_lock(l->net);
1236 		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1237 		tipc_bcast_unlock(l->net);
1238 	}
1239 
1240 	kfree_skb(skb);
1241 	return 0;
1242 }
1243 
1244 /* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
1245  *			 inner message along with the ones in the old link's
1246  *			 deferdq
1247  * @l: tunnel link
1248  * @skb: TUNNEL_PROTOCOL message
1249  * @inputq: queue to put messages ready for delivery
1250  */
1251 static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
1252 			     struct sk_buff_head *inputq)
1253 {
1254 	struct sk_buff **reasm_skb = &l->failover_reasm_skb;
1255 	struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
1256 	struct sk_buff_head *fdefq = &l->failover_deferdq;
1257 	struct tipc_msg *hdr = buf_msg(skb);
1258 	struct sk_buff *iskb;
1259 	int ipos = 0;
1260 	int rc = 0;
1261 	u16 seqno;
1262 
1263 	if (msg_type(hdr) == SYNCH_MSG) {
1264 		kfree_skb(skb);
1265 		return 0;
1266 	}
1267 
1268 	/* Not a fragment? */
1269 	if (likely(!msg_nof_fragms(hdr))) {
1270 		if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
1271 			pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
1272 					    skb_queue_len(fdefq));
1273 			return 0;
1274 		}
1275 		kfree_skb(skb);
1276 	} else {
1277 		/* Set fragment type for buf_append */
1278 		if (msg_fragm_no(hdr) == 1)
1279 			msg_set_type(hdr, FIRST_FRAGMENT);
1280 		else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
1281 			msg_set_type(hdr, FRAGMENT);
1282 		else
1283 			msg_set_type(hdr, LAST_FRAGMENT);
1284 
1285 		if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
1286 			/* Successful but non-complete reassembly? */
1287 			if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
1288 				return 0;
1289 			pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
1290 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1291 		}
1292 		iskb = skb;
1293 	}
1294 
1295 	do {
1296 		seqno = buf_seqno(iskb);
1297 		if (unlikely(less(seqno, l->drop_point))) {
1298 			kfree_skb(iskb);
1299 			continue;
1300 		}
1301 		if (unlikely(seqno != l->drop_point)) {
1302 			__tipc_skb_queue_sorted(fdefq, seqno, iskb);
1303 			continue;
1304 		}
1305 
1306 		l->drop_point++;
1307 		if (!tipc_data_input(l, iskb, inputq))
1308 			rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
1309 		if (unlikely(rc))
1310 			break;
1311 	} while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
1312 
1313 	return rc;
1314 }
1315 
1316 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1317 {
1318 	bool released = false;
1319 	struct sk_buff *skb, *tmp;
1320 
1321 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1322 		if (more(buf_seqno(skb), acked))
1323 			break;
1324 		__skb_unlink(skb, &l->transmq);
1325 		kfree_skb(skb);
1326 		released = true;
1327 	}
1328 	return released;
1329 }
1330 
1331 /* tipc_build_gap_ack_blks - build Gap ACK blocks
1332  * @l: tipc link on which data may have arrived with gaps in sequence
1333  * @data: buffer in which to store the Gap ACK blocks once built
1334  *
1335  * returns the actual length (in bytes) of the blocks built
1336  */
1337 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data)
1338 {
1339 	struct sk_buff *skb = skb_peek(&l->deferdq);
1340 	struct tipc_gap_ack_blks *ga = data;
1341 	u16 len, expect, seqno = 0;
1342 	u8 n = 0;
1343 
1344 	if (!skb)
1345 		goto exit;
1346 
1347 	expect = buf_seqno(skb);
1348 	skb_queue_walk(&l->deferdq, skb) {
1349 		seqno = buf_seqno(skb);
1350 		if (unlikely(more(seqno, expect))) {
1351 			ga->gacks[n].ack = htons(expect - 1);
1352 			ga->gacks[n].gap = htons(seqno - expect);
1353 			if (++n >= MAX_GAP_ACK_BLKS) {
1354 				pr_info_ratelimited("Too few Gap ACK blocks!\n");
1355 				goto exit;
1356 			}
1357 		} else if (unlikely(less(seqno, expect))) {
1358 			pr_warn("Unexpected skb in deferdq!\n");
1359 			continue;
1360 		}
1361 		expect = seqno + 1;
1362 	}
1363 
1364 	/* last block */
1365 	ga->gacks[n].ack = htons(seqno);
1366 	ga->gacks[n].gap = 0;
1367 	n++;
1368 
1369 exit:
1370 	len = tipc_gap_ack_blks_sz(n);
1371 	ga->len = htons(len);
1372 	ga->gack_cnt = n;
1373 	return len;
1374 }
1375 
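/* Worked example for tipc_build_gap_ack_blks(): with l->rcv_nxt = 10 and
 * deferdq holding seqnos {12, 13, 16}, the walk yields two blocks:
 *
 *	gacks[0] = { .ack = 13, .gap = 2 }	// 14 and 15 missing
 *	gacks[1] = { .ack = 16, .gap = 0 }	// closing block
 *
 * The leading gap (10, 11) is not encoded here; it travels in the STATE
 * message's seq_gap field (see tipc_link_build_proto_msg()).
 */
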
1376 /* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
1377  *			       acked packets, also doing retransmissions if
1378  *			       gaps found
1379  * @l: tipc link with transmq queue to be advanced
1380  * @acked: seqno of last packet acked by peer without any gaps before
1381  * @gap: # of gap packets
1382  * @ga: buffer pointer to Gap ACK blocks from peer
1383  * @xmitq: queue for accumulating the retransmitted packets if any
1384  *
1385  * In case of repeated retransmit failures, the call will return early
1386  * with a returned code (e.g. TIPC_LINK_DOWN_EVT)
1387  */
1388 static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
1389 				     struct tipc_gap_ack_blks *ga,
1390 				     struct sk_buff_head *xmitq)
1391 {
1392 	struct sk_buff *skb, *_skb, *tmp;
1393 	struct tipc_msg *hdr;
1394 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1395 	u16 ack = l->rcv_nxt - 1;
1396 	u16 seqno, n = 0;
1397 	int rc = 0;
1398 
1399 	if (gap && link_retransmit_failure(l, l, acked + 1, &rc))
1400 		return rc;
1401 
1402 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1403 		seqno = buf_seqno(skb);
1404 
1405 next_gap_ack:
1406 		if (less_eq(seqno, acked)) {
1407 			/* release skb */
1408 			__skb_unlink(skb, &l->transmq);
1409 			kfree_skb(skb);
1410 		} else if (less_eq(seqno, acked + gap)) {
1411 			/* retransmit skb */
1412 			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1413 				continue;
1414 			TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
1415 
1416 			_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1417 			if (!_skb)
1418 				continue;
1419 			hdr = buf_msg(_skb);
1420 			msg_set_ack(hdr, ack);
1421 			msg_set_bcast_ack(hdr, bc_ack);
1422 			_skb->priority = TC_PRIO_CONTROL;
1423 			__skb_queue_tail(xmitq, _skb);
1424 			l->stats.retransmitted++;
1425 		} else {
1426 			/* retry with Gap ACK blocks if any */
1427 			if (!ga || n >= ga->gack_cnt)
1428 				break;
1429 			acked = ntohs(ga->gacks[n].ack);
1430 			gap = ntohs(ga->gacks[n].gap);
1431 			n++;
1432 			goto next_gap_ack;
1433 		}
1434 	}
1435 
1436 	return 0;
1437 }
1438 
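/* Continuing the example above: a STATE message carrying acked = 9,
 * gap = 2 and those two Gap ACK blocks makes the walk release packets
 * up to 9, retransmit 10-11, then release 12-13, retransmit 14-15, and
 * finally release 16.
 */
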
1439 /* tipc_link_build_state_msg: prepare link state message for transmission
1440  *
1441  * Note that sending of broadcast ack is coordinated among nodes, to reduce
1442  * risk of ack storms towards the sender
1443  */
1444 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1445 {
1446 	if (!l)
1447 		return 0;
1448 
1449 	/* Broadcast ACK must be sent via a unicast link => defer to caller */
1450 	if (link_is_bc_rcvlink(l)) {
1451 		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1452 			return 0;
1453 		l->rcv_unacked = 0;
1454 
1455 		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1456 		l->snd_nxt = l->rcv_nxt;
1457 		return TIPC_LINK_SND_STATE;
1458 	}
1459 
1460 	/* Unicast ACK */
1461 	l->rcv_unacked = 0;
1462 	l->stats.sent_acks++;
1463 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1464 	return 0;
1465 }
1466 
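/* The "(rcv_nxt ^ own_addr) & 0xf" test above staggers broadcast ACKs
 * across nodes: a node acks only when the low nibble of rcv_nxt is the
 * bitwise complement of the low nibble of its own address, so roughly
 * one node in sixteen acks at any given point in the sequence space.
 */
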
1467 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1468  */
1469 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1470 {
1471 	int mtyp = RESET_MSG;
1472 	struct sk_buff *skb;
1473 
1474 	if (l->state == LINK_ESTABLISHING)
1475 		mtyp = ACTIVATE_MSG;
1476 
1477 	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1478 
1479 	/* Inform peer that this endpoint is going down if applicable */
1480 	skb = skb_peek_tail(xmitq);
1481 	if (skb && (l->state == LINK_RESET))
1482 		msg_set_peer_stopping(buf_msg(skb), 1);
1483 }
1484 
1485 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1486  * Note that sending of broadcast NACK is coordinated among nodes, to
1487  * reduce the risk of NACK storms towards the sender
1488  */
1489 static int tipc_link_build_nack_msg(struct tipc_link *l,
1490 				    struct sk_buff_head *xmitq)
1491 {
1492 	u32 def_cnt = ++l->stats.deferred_recv;
1493 	u32 defq_len = skb_queue_len(&l->deferdq);
1494 	int match1, match2;
1495 
1496 	if (link_is_bc_rcvlink(l)) {
1497 		match1 = def_cnt & 0xf;
1498 		match2 = tipc_own_addr(l->net) & 0xf;
1499 		if (match1 == match2)
1500 			return TIPC_LINK_SND_STATE;
1501 		return 0;
1502 	}
1503 
1504 	if (defq_len >= 3 && !((defq_len - 3) % 16))
1505 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1506 	return 0;
1507 }
1508 
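/* For unicast, the "defq_len >= 3 && !((defq_len - 3) % 16)" test above
 * rate-limits NACKs: one is sent when the deferred queue first reaches
 * 3 packets and then at lengths 19, 35, ..., rather than on every
 * out-of-order arrival.
 */
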
1509 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1510  * @l: the link that should handle the message
1511  * @skb: TIPC packet
1512  * @xmitq: queue to place packets to be sent after this call
1513  */
1514 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1515 		  struct sk_buff_head *xmitq)
1516 {
1517 	struct sk_buff_head *defq = &l->deferdq;
1518 	struct tipc_msg *hdr = buf_msg(skb);
1519 	u16 seqno, rcv_nxt, win_lim;
1520 	int rc = 0;
1521 
1522 	/* Verify and update link state */
1523 	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1524 		return tipc_link_proto_rcv(l, skb, xmitq);
1525 
1526 	/* Don't send probe at next timeout expiration */
1527 	l->silent_intv_cnt = 0;
1528 
1529 	do {
1530 		hdr = buf_msg(skb);
1531 		seqno = msg_seqno(hdr);
1532 		rcv_nxt = l->rcv_nxt;
1533 		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1534 
1535 		if (unlikely(!link_is_up(l))) {
1536 			if (l->state == LINK_ESTABLISHING)
1537 				rc = TIPC_LINK_UP_EVT;
1538 			goto drop;
1539 		}
1540 
1541 		/* Drop if outside receive window */
1542 		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1543 			l->stats.duplicates++;
1544 			goto drop;
1545 		}
1546 
1547 		/* Forward queues and wake up waiting users */
1548 		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1549 			tipc_link_advance_backlog(l, xmitq);
1550 			if (unlikely(!skb_queue_empty(&l->wakeupq)))
1551 				link_prepare_wakeup(l);
1552 		}
1553 
1554 		/* Defer delivery if sequence gap */
1555 		if (unlikely(seqno != rcv_nxt)) {
1556 			__tipc_skb_queue_sorted(defq, seqno, skb);
1557 			rc |= tipc_link_build_nack_msg(l, xmitq);
1558 			break;
1559 		}
1560 
1561 		/* Deliver packet */
1562 		l->rcv_nxt++;
1563 		l->stats.recv_pkts++;
1564 
1565 		if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1566 			rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1567 		else if (!tipc_data_input(l, skb, l->inputq))
1568 			rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
1569 		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1570 			rc |= tipc_link_build_state_msg(l, xmitq);
1571 		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1572 			break;
1573 	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
1574 
1575 	return rc;
1576 drop:
1577 	kfree_skb(skb);
1578 	return rc;
1579 }
1580 
1581 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1582 				      bool probe_reply, u16 rcvgap,
1583 				      int tolerance, int priority,
1584 				      struct sk_buff_head *xmitq)
1585 {
1586 	struct tipc_link *bcl = l->bc_rcvlink;
1587 	struct sk_buff *skb;
1588 	struct tipc_msg *hdr;
1589 	struct sk_buff_head *dfq = &l->deferdq;
1590 	bool node_up = link_is_up(bcl);
1591 	struct tipc_mon_state *mstate = &l->mon_state;
1592 	int dlen = 0;
1593 	void *data;
1594 	u16 glen = 0;
1595 
1596 	/* Don't send protocol message during reset or link failover */
1597 	if (tipc_link_is_blocked(l))
1598 		return;
1599 
1600 	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1601 		return;
1602 
1603 	if (!skb_queue_empty(dfq))
1604 		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1605 
1606 	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1607 			      tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1608 			      l->addr, tipc_own_addr(l->net), 0, 0, 0);
1609 	if (!skb)
1610 		return;
1611 
1612 	hdr = buf_msg(skb);
1613 	data = msg_data(hdr);
1614 	msg_set_session(hdr, l->session);
1615 	msg_set_bearer_id(hdr, l->bearer_id);
1616 	msg_set_net_plane(hdr, l->net_plane);
1617 	msg_set_next_sent(hdr, l->snd_nxt);
1618 	msg_set_ack(hdr, l->rcv_nxt - 1);
1619 	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1620 	msg_set_bc_ack_invalid(hdr, !node_up);
1621 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1622 	msg_set_link_tolerance(hdr, tolerance);
1623 	msg_set_linkprio(hdr, priority);
1624 	msg_set_redundant_link(hdr, node_up);
1625 	msg_set_seq_gap(hdr, 0);
1626 	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1627 
1628 	if (mtyp == STATE_MSG) {
1629 		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1630 			msg_set_seqno(hdr, l->snd_nxt_state++);
1631 		msg_set_seq_gap(hdr, rcvgap);
1632 		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1633 		msg_set_probe(hdr, probe);
1634 		msg_set_is_keepalive(hdr, probe || probe_reply);
1635 		if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1636 			glen = tipc_build_gap_ack_blks(l, data);
1637 		tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1638 		msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1639 		skb_trim(skb, INT_H_SIZE + glen + dlen);
1640 		l->stats.sent_states++;
1641 		l->rcv_unacked = 0;
1642 	} else {
1643 		/* RESET_MSG or ACTIVATE_MSG */
1644 		if (mtyp == ACTIVATE_MSG) {
1645 			msg_set_dest_session_valid(hdr, 1);
1646 			msg_set_dest_session(hdr, l->peer_session);
1647 		}
1648 		msg_set_max_pkt(hdr, l->advertised_mtu);
1649 		strcpy(data, l->if_name);
1650 		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1651 		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1652 	}
1653 	if (probe)
1654 		l->stats.sent_probes++;
1655 	if (rcvgap)
1656 		l->stats.sent_nacks++;
1657 	skb->priority = TC_PRIO_CONTROL;
1658 	__skb_queue_tail(xmitq, skb);
1659 	trace_tipc_proto_build(skb, false, l->name);
1660 }
1661 
1662 void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1663 				    struct sk_buff_head *xmitq)
1664 {
1665 	u32 onode = tipc_own_addr(l->net);
1666 	struct tipc_msg *hdr, *ihdr;
1667 	struct sk_buff_head tnlq;
1668 	struct sk_buff *skb;
1669 	u32 dnode = l->addr;
1670 
1671 	skb_queue_head_init(&tnlq);
1672 	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1673 			      INT_H_SIZE, BASIC_H_SIZE,
1674 			      dnode, onode, 0, 0, 0);
1675 	if (!skb) {
1676 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1677 		return;
1678 	}
1679 
1680 	hdr = buf_msg(skb);
1681 	msg_set_msgcnt(hdr, 1);
1682 	msg_set_bearer_id(hdr, l->peer_bearer_id);
1683 
1684 	ihdr = (struct tipc_msg *)msg_data(hdr);
1685 	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1686 		      BASIC_H_SIZE, dnode);
1687 	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1688 	__skb_queue_tail(&tnlq, skb);
1689 	tipc_link_xmit(l, &tnlq, xmitq);
1690 }
1691 
1692 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1693  * with contents of the link's transmit and backlog queues.
1694  */
1695 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1696 			   int mtyp, struct sk_buff_head *xmitq)
1697 {
1698 	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1699 	struct sk_buff *skb, *tnlskb;
1700 	struct tipc_msg *hdr, tnlhdr;
1701 	struct sk_buff_head *queue = &l->transmq;
1702 	struct sk_buff_head tmpxq, tnlq, frags;
1703 	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1704 	bool pktcnt_need_update = false;
1705 	u16 syncpt;
1706 	int rc;
1707 
1708 	if (!tnl)
1709 		return;
1710 
1711 	skb_queue_head_init(&tnlq);
1712 	skb_queue_head_init(&tmpxq);
1713 	skb_queue_head_init(&frags);
1714 
1715 	/* At least one packet required for safe algorithm => add dummy */
1716 	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1717 			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1718 			      0, 0, TIPC_ERR_NO_PORT);
1719 	if (!skb) {
1720 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1721 		return;
1722 	}
1723 	skb_queue_tail(&tnlq, skb);
1724 	tipc_link_xmit(l, &tnlq, &tmpxq);
1725 	__skb_queue_purge(&tmpxq);
1726 
1727 	/* Link Synching:
1728 	 * From now on, send only one single ("dummy") SYNCH message
1729 	 * to peer. The SYNCH message does not contain any data, just
1730 	 * a header conveying the synch point to the peer.
1731 	 */
1732 	if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1733 		tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
1734 					 INT_H_SIZE, 0, l->addr,
1735 					 tipc_own_addr(l->net),
1736 					 0, 0, 0);
1737 		if (!tnlskb) {
1738 			pr_warn("%sunable to create dummy SYNCH_MSG\n",
1739 				link_co_err);
1740 			return;
1741 		}
1742 
1743 		hdr = buf_msg(tnlskb);
1744 		syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
1745 		msg_set_syncpt(hdr, syncpt);
1746 		msg_set_bearer_id(hdr, l->peer_bearer_id);
1747 		__skb_queue_tail(&tnlq, tnlskb);
1748 		tipc_link_xmit(tnl, &tnlq, xmitq);
1749 		return;
1750 	}
1751 
1752 	/* Initialize reusable tunnel packet header */
1753 	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1754 		      mtyp, INT_H_SIZE, l->addr);
1755 	if (mtyp == SYNCH_MSG)
1756 		pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
1757 	else
1758 		pktcnt = skb_queue_len(&l->transmq);
1759 	pktcnt += skb_queue_len(&l->backlogq);
1760 	msg_set_msgcnt(&tnlhdr, pktcnt);
1761 	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1762 tnl:
1763 	/* Wrap each packet into a tunnel packet */
1764 	skb_queue_walk(queue, skb) {
1765 		hdr = buf_msg(skb);
1766 		if (queue == &l->backlogq)
1767 			msg_set_seqno(hdr, seqno++);
1768 		pktlen = msg_size(hdr);
1769 
1770 		/* Tunnel link MTU is not large enough? This could be
1771 		 * due to:
1772 		 * 1) Link MTU has just changed or set differently;
1773 		 * 2) Or FAILOVER on the top of a SYNCH message
1774 		 *
1775 		 * The 2nd case should not happen if peer supports
1776 		 * TIPC_TUNNEL_ENHANCED
1777 		 */
1778 		if (pktlen > tnl->mtu - INT_H_SIZE) {
1779 			if (mtyp == FAILOVER_MSG &&
1780 			    (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1781 				rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
1782 						       &frags);
1783 				if (rc) {
1784 					pr_warn("%sunable to frag msg: rc %d\n",
1785 						link_co_err, rc);
1786 					return;
1787 				}
1788 				pktcnt += skb_queue_len(&frags) - 1;
1789 				pktcnt_need_update = true;
1790 				skb_queue_splice_tail_init(&frags, &tnlq);
1791 				continue;
1792 			}
1793 			/* Unfortunately, the peer doesn't support
1794 			 * TIPC_TUNNEL_ENHANCED, so just warn and return!
1795 			 */
1796 			pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
1797 					    link_co_err, msg_user(hdr),
1798 					    msg_type(hdr), msg_size(hdr));
1799 			return;
1800 		}
1801 
1802 		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1803 		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
1804 		if (!tnlskb) {
1805 			pr_warn("%sunable to send packet\n", link_co_err);
1806 			return;
1807 		}
1808 		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1809 		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1810 		__skb_queue_tail(&tnlq, tnlskb);
1811 	}
1812 	if (queue != &l->backlogq) {
1813 		queue = &l->backlogq;
1814 		goto tnl;
1815 	}
1816 
1817 	if (pktcnt_need_update)
1818 		skb_queue_walk(&tnlq, skb) {
1819 			hdr = buf_msg(skb);
1820 			msg_set_msgcnt(hdr, pktcnt);
1821 		}
1822 
1823 	tipc_link_xmit(tnl, &tnlq, xmitq);
1824 
1825 	if (mtyp == FAILOVER_MSG) {
1826 		tnl->drop_point = l->rcv_nxt;
1827 		tnl->failover_reasm_skb = l->reasm_buf;
1828 		l->reasm_buf = NULL;
1829 
1830 		/* Failover the link's deferdq */
1831 		if (unlikely(!skb_queue_empty(fdefq))) {
1832 			pr_warn("Link failover deferdq not empty: %d!\n",
1833 				skb_queue_len(fdefq));
1834 			__skb_queue_purge(fdefq);
1835 		}
1836 		skb_queue_splice_init(&l->deferdq, fdefq);
1837 	}
1838 }
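
/* Editor's note: an illustrative sketch, not part of the original file, of
 * how a caller might drive tipc_link_tnl_prepare() at failover time. The
 * locking context and the final bearer transmit step are assumptions here;
 * in mainline this is orchestrated from node.c:
 *
 *	struct sk_buff_head xmitq;
 *
 *	__skb_queue_head_init(&xmitq);
 *	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, &xmitq);
 *	// xmitq now holds the wrapped packets, ready to be sent
 *	// on the tunnel link's bearer
 */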
1839 
1840 /**
1841  * tipc_link_failover_prepare() - prepare tnl for link failover
1842  *
1843  * This is a special version of its precursor, tipc_link_tnl_prepare();
1844  * see tipc_node_link_failover() for details.
1845  *
1846  * @l: failover link
1847  * @tnl: tunnel link
1848  * @xmitq: queue for messages to be transmitted
1849  */
1850 void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
1851 				struct sk_buff_head *xmitq)
1852 {
1853 	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1854 
1855 	tipc_link_create_dummy_tnl_msg(tnl, xmitq);
1856 
1857 	/* This failover link endpoint was never established before,
1858 	 * so it has not received anything from peer.
1859 	 * Otherwise, it must be a normal failover situation or the
1860 	 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
1861 	 * would have to start over from scratch instead.
1862 	 */
1863 	tnl->drop_point = 1;
1864 	tnl->failover_reasm_skb = NULL;
1865 
1866 	/* Initialize the link's failover deferdq */
1867 	if (unlikely(!skb_queue_empty(fdefq))) {
1868 		pr_warn("Link failover deferdq not empty: %d!\n",
1869 			skb_queue_len(fdefq));
1870 		__skb_queue_purge(fdefq);
1871 	}
1872 }
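
/* Editor's note: illustrative only. This variant covers the case where the
 * failed endpoint was never established, so there is no transmq/backlogq to
 * tunnel; the single dummy tunnel message created above is enough to trigger
 * failover processing at the peer. A hypothetical caller, under node lock:
 *
 *	tipc_link_failover_prepare(l, tnl, &xmitq);
 *	tipc_link_fsm_evt(tnl, LINK_FAILOVER_BEGIN_EVT);
 *
 * The FSM event shown is an assumption based on the link FSM events used
 * elsewhere in this file.
 */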
1873 
1874 /* tipc_link_validate_msg(): validate message against current link state
1875  * Returns true if message should be accepted, otherwise false
1876  */
1877 bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
1878 {
1879 	u16 curr_session = l->peer_session;
1880 	u16 session = msg_session(hdr);
1881 	int mtyp = msg_type(hdr);
1882 
1883 	if (msg_user(hdr) != LINK_PROTOCOL)
1884 		return true;
1885 
1886 	switch (mtyp) {
1887 	case RESET_MSG:
1888 		if (!l->in_session)
1889 			return true;
1890 		/* Accept only RESET with new session number */
1891 		return more(session, curr_session);
1892 	case ACTIVATE_MSG:
1893 		if (!l->in_session)
1894 			return true;
1895 		/* Accept only ACTIVATE with new or current session number */
1896 		return !less(session, curr_session);
1897 	case STATE_MSG:
1898 		/* Accept only STATE with current session number */
1899 		if (!l->in_session)
1900 			return false;
1901 		if (session != curr_session)
1902 			return false;
1903 		/* Extra sanity check */
1904 		if (!link_is_up(l) && msg_ack(hdr))
1905 			return false;
1906 		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
1907 			return true;
1908 		/* Accept only STATE with new sequence number */
1909 		return !less(msg_seqno(hdr), l->rcv_nxt_state);
1910 	default:
1911 		return false;
1912 	}
1913 }
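
/* Editor's note: the session and seqno checks above rely on wrap-around-safe
 * u16 comparison. A minimal sketch of the assumed semantics of less()/more()
 * (the real helpers live in msg.h):
 *
 *	static inline int sketch_less(u16 left, u16 right)
 *	{
 *		// 'left' is older than 'right', modulo 2^16
 *		return left != right && (u16)(right - left) < 32768;
 *	}
 *
 * more(a, b) is then sketch_less(b, a), so e.g. a RESET carrying session 0
 * is still accepted after the peer's session number wraps past 65535.
 */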
1914 
1915 /* tipc_link_proto_rcv(): receive link level protocol message.
1916  * Note that the network plane id propagates through the network, and may
1917  * change at any time. The node with the lowest numerical id determines
1918  * the network plane.
1919  */
1920 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1921 			       struct sk_buff_head *xmitq)
1922 {
1923 	struct tipc_msg *hdr = buf_msg(skb);
1924 	struct tipc_gap_ack_blks *ga = NULL;
1925 	u16 rcvgap = 0;
1926 	u16 ack = msg_ack(hdr);
1927 	u16 gap = msg_seq_gap(hdr);
1928 	u16 peers_snd_nxt = msg_next_sent(hdr);
1929 	u16 peers_tol = msg_link_tolerance(hdr);
1930 	u16 peers_prio = msg_linkprio(hdr);
1931 	u16 rcv_nxt = l->rcv_nxt;
1932 	u16 dlen = msg_data_sz(hdr);
1933 	int mtyp = msg_type(hdr);
1934 	bool reply = msg_probe(hdr);
1935 	u16 glen = 0;
1936 	void *data;
1937 	char *if_name;
1938 	int rc = 0;
1939 
1940 	trace_tipc_proto_rcv(skb, false, l->name);
1941 	if (tipc_link_is_blocked(l) || !xmitq)
1942 		goto exit;
1943 
1944 	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1945 		l->net_plane = msg_net_plane(hdr);
1946 
1947 	skb_linearize(skb);
1948 	hdr = buf_msg(skb);
1949 	data = msg_data(hdr);
1950 
1951 	if (!tipc_link_validate_msg(l, hdr)) {
1952 		trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
1953 		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
1954 		goto exit;
1955 	}
1956 
1957 	switch (mtyp) {
1958 	case RESET_MSG:
1959 	case ACTIVATE_MSG:
1960 		/* Complete own link name with peer's interface name */
1961 		if_name = strrchr(l->name, ':') + 1;
1962 		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1963 			break;
1964 		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1965 			break;
1966 		strncpy(if_name, data, TIPC_MAX_IF_NAME);
1967 
1968 		/* Update own tolerance if peer indicates a non-zero value */
1969 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
1970 			l->tolerance = peers_tol;
1971 			l->bc_rcvlink->tolerance = peers_tol;
1972 		}
1973 		/* Update own priority if peer's priority is higher */
1974 		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1975 			l->priority = peers_prio;
1976 
1977 		/* If peer is going down we want full re-establish cycle */
1978 		if (msg_peer_stopping(hdr)) {
1979 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1980 			break;
1981 		}
1982 
1983 		/* If this endpoint was re-created while peer was ESTABLISHING
1984 		 * it doesn't know current session number. Force re-synch.
1985 		 */
1986 		if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
1987 		    l->session != msg_dest_session(hdr)) {
1988 			if (less(l->session, msg_dest_session(hdr)))
1989 				l->session = msg_dest_session(hdr) + 1;
1990 			break;
1991 		}
1992 
1993 		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1994 		if (mtyp == RESET_MSG || !link_is_up(l))
1995 			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1996 
1997 		/* ACTIVATE_MSG takes up link if it was already locally reset */
1998 		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
1999 			rc = TIPC_LINK_UP_EVT;
2000 
2001 		l->peer_session = msg_session(hdr);
2002 		l->in_session = true;
2003 		l->peer_bearer_id = msg_bearer_id(hdr);
2004 		if (l->mtu > msg_max_pkt(hdr))
2005 			l->mtu = msg_max_pkt(hdr);
2006 		break;
2007 
2008 	case STATE_MSG:
2009 		l->rcv_nxt_state = msg_seqno(hdr) + 1;
2010 
2011 		/* Update own tolerance if peer indicates a non-zero value */
2012 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2013 			l->tolerance = peers_tol;
2014 			l->bc_rcvlink->tolerance = peers_tol;
2015 		}
2016 		/* Update own prio if peer indicates a different value */
2017 		if ((peers_prio != l->priority) &&
2018 		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
2019 			l->priority = peers_prio;
2020 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2021 		}
2022 
2023 		l->silent_intv_cnt = 0;
2024 		l->stats.recv_states++;
2025 		if (msg_probe(hdr))
2026 			l->stats.recv_probes++;
2027 
2028 		if (!link_is_up(l)) {
2029 			if (l->state == LINK_ESTABLISHING)
2030 				rc = TIPC_LINK_UP_EVT;
2031 			break;
2032 		}
2033 
2034 		/* Receive Gap ACK blocks from peer if any */
2035 		if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
2036 			ga = (struct tipc_gap_ack_blks *)data;
2037 			glen = ntohs(ga->len);
2038 			/* sanity check: if failed, ignore Gap ACK blocks */
2039 			/* sanity check: if it fails, ignore the Gap ACK blocks */
2040 				ga = NULL;
2041 		}
2042 
2043 		tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
2044 			     &l->mon_state, l->bearer_id);
2045 
2046 		/* Send NACK if peer has sent pkts we haven't received yet */
2047 		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
2048 			rcvgap = peers_snd_nxt - l->rcv_nxt;
2049 		if (rcvgap || reply)
2050 			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
2051 						  rcvgap, 0, 0, xmitq);
2052 
2053 		rc |= tipc_link_advance_transmq(l, ack, gap, ga, xmitq);
2054 
2055 		/* If NACK, retransmit will now start at right position */
2056 		if (gap)
2057 			l->stats.recv_nacks++;
2058 
2059 		tipc_link_advance_backlog(l, xmitq);
2060 		if (unlikely(!skb_queue_empty(&l->wakeupq)))
2061 			link_prepare_wakeup(l);
2062 	}
2063 exit:
2064 	kfree_skb(skb);
2065 	return rc;
2066 }
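
/* Editor's note: an illustrative sketch, under the assumption that a Gap ACK
 * block set is a small header followed by gack_cnt fixed-size entries, of
 * the sanity check applied in the STATE_MSG branch above:
 *
 *	glen = ntohs(ga->len);
 *	if (glen != sizeof(*ga) + ga->gack_cnt * sizeof(struct tipc_gap_ack))
 *		ga = NULL;	// malformed: ignore the blocks, keep the rest
 *
 * The exact layout behind tipc_gap_ack_blks_sz() is defined in msg.h.
 */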
2067 
2068 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
2069  */
2070 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
2071 					 u16 peers_snd_nxt,
2072 					 struct sk_buff_head *xmitq)
2073 {
2074 	struct sk_buff *skb;
2075 	struct tipc_msg *hdr;
2076 	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
2077 	u16 ack = l->rcv_nxt - 1;
2078 	u16 gap_to = peers_snd_nxt - 1;
2079 
2080 	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
2081 			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
2082 	if (!skb)
2083 		return false;
2084 	hdr = buf_msg(skb);
2085 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
2086 	msg_set_bcast_ack(hdr, ack);
2087 	msg_set_bcgap_after(hdr, ack);
2088 	if (dfrd_skb)
2089 		gap_to = buf_seqno(dfrd_skb) - 1;
2090 	msg_set_bcgap_to(hdr, gap_to);
2091 	msg_set_non_seq(hdr, bcast);
2092 	__skb_queue_tail(xmitq, skb);
2093 	return true;
2094 }
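
/* Editor's note (added comment): in the message built above, the reported
 * gap is (bcgap_after, bcgap_to]. By default it extends to peers_snd_nxt - 1;
 * if the deferdq is non-empty it stops just before the first deferred packet,
 * since everything from that seqno onwards has already arrived out of order.
 */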
2095 
2096 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
2097  *
2098  * Give a newly added peer node the sequence number where it should
2099  * start receiving and acking broadcast packets.
2100  */
2101 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
2102 					struct sk_buff_head *xmitq)
2103 {
2104 	struct sk_buff_head list;
2105 
2106 	__skb_queue_head_init(&list);
2107 	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
2108 		return;
2109 	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
2110 	tipc_link_xmit(l, &list, xmitq);
2111 }
2112 
2113 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
2114  */
2115 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
2116 {
2117 	int mtyp = msg_type(hdr);
2118 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2119 
2120 	if (link_is_up(l))
2121 		return;
2122 
2123 	if (msg_user(hdr) == BCAST_PROTOCOL) {
2124 		l->rcv_nxt = peers_snd_nxt;
2125 		l->state = LINK_ESTABLISHED;
2126 		return;
2127 	}
2128 
2129 	if (l->peer_caps & TIPC_BCAST_SYNCH)
2130 		return;
2131 
2132 	if (msg_peer_node_is_up(hdr))
2133 		return;
2134 
2135 	/* Compatibility: accept older, less safe initial synch data */
2136 	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2137 		l->rcv_nxt = peers_snd_nxt;
2138 }
2139 
2140 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2141  */
2142 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2143 			  struct sk_buff_head *xmitq)
2144 {
2145 	struct tipc_link *snd_l = l->bc_sndlink;
2146 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2147 	u16 from = msg_bcast_ack(hdr) + 1;
2148 	u16 to = from + msg_bc_gap(hdr) - 1;
2149 	int rc = 0;
2150 
2151 	if (!link_is_up(l))
2152 		return rc;
2153 
2154 	if (!msg_peer_node_is_up(hdr))
2155 		return rc;
2156 
2157 	/* Open when peer acknowledges our bcast init msg (pkt #1) */
2158 	if (msg_ack(hdr))
2159 		l->bc_peer_is_up = true;
2160 
2161 	if (!l->bc_peer_is_up)
2162 		return rc;
2163 
2164 	l->stats.recv_nacks++;
2165 
2166 	/* Ignore if peers_snd_nxt goes beyond receive window */
2167 	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
2168 		return rc;
2169 
2170 	rc = tipc_link_bc_retrans(snd_l, l, from, to, xmitq);
2171 
2172 	l->snd_nxt = peers_snd_nxt;
2173 	if (link_bc_rcv_gap(l))
2174 		rc |= TIPC_LINK_SND_STATE;
2175 
2176 	/* Return now if sender supports nack via STATE messages */
2177 	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2178 		return rc;
2179 
2180 	/* Otherwise, be backwards compatible */
2181 
2182 	if (!more(peers_snd_nxt, l->rcv_nxt)) {
2183 		l->nack_state = BC_NACK_SND_CONDITIONAL;
2184 		return 0;
2185 	}
2186 
2187 	/* Don't NACK if one was recently sent or peeked */
2188 	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2189 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2190 		return 0;
2191 	}
2192 
2193 	/* Conditionally delay NACK sending until next synch rcv */
2194 	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2195 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2196 		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
2197 			return 0;
2198 	}
2199 
2200 	/* Send NACK now but suppress next one */
2201 	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2202 	l->nack_state = BC_NACK_SND_SUPPRESS;
2203 	return 0;
2204 }
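
/* Editor's note: a compact summary (added for clarity) of the backwards-
 * compatible NACK handling above:
 *
 *	BC_NACK_SND_CONDITIONAL   : a gap was just detected; only NACK now
 *	                            if the gap is >= TIPC_MIN_LINK_WIN,
 *	                            otherwise wait for the next synch rcv
 *	BC_NACK_SND_UNCONDITIONAL : NACK as soon as a gap is seen, then
 *	                            move to BC_NACK_SND_SUPPRESS
 *	BC_NACK_SND_SUPPRESS      : a NACK was just sent (or another node's
 *	                            NACK covers our gap); skip one cycle
 */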
2205 
2206 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
2207 			  struct sk_buff_head *xmitq)
2208 {
2209 	struct sk_buff *skb, *tmp;
2210 	struct tipc_link *snd_l = l->bc_sndlink;
2211 
2212 	if (!link_is_up(l) || !l->bc_peer_is_up)
2213 		return;
2214 
2215 	if (!more(acked, l->acked))
2216 		return;
2217 
2218 	trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq);
2219 	/* Skip over packets peer has already acked */
2220 	skb_queue_walk(&snd_l->transmq, skb) {
2221 		if (more(buf_seqno(skb), l->acked))
2222 			break;
2223 	}
2224 
2225 	/* Update/release the packets peer is acking now */
2226 	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
2227 		if (more(buf_seqno(skb), acked))
2228 			break;
2229 		if (!--TIPC_SKB_CB(skb)->ackers) {
2230 			__skb_unlink(skb, &snd_l->transmq);
2231 			kfree_skb(skb);
2232 		}
2233 	}
2234 	l->acked = acked;
2235 	tipc_link_advance_backlog(snd_l, xmitq);
2236 	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
2237 		link_prepare_wakeup(snd_l);
2238 }
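
/* Editor's note (added comment): TIPC_SKB_CB(skb)->ackers, decremented above,
 * counts the peers that still have to acknowledge a broadcast packet. Each
 * bc receive link decrements it for the packets newly covered by 'acked',
 * and the buffer is unlinked and freed only when the last peer has acked;
 * e.g. with three peers a packet stays in snd_l->transmq until all three
 * acks have arrived.
 */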
2239 
2240 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
2241  * This function is here for backwards compatibility, since
2242  * no BCAST_PROTOCOL/STATE messages are sent from TIPC v2.5 onwards.
2243  */
2244 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2245 			  struct sk_buff_head *xmitq)
2246 {
2247 	struct tipc_msg *hdr = buf_msg(skb);
2248 	u32 dnode = msg_destnode(hdr);
2249 	int mtyp = msg_type(hdr);
2250 	u16 acked = msg_bcast_ack(hdr);
2251 	u16 from = acked + 1;
2252 	u16 to = msg_bcgap_to(hdr);
2253 	u16 peers_snd_nxt = to + 1;
2254 	int rc = 0;
2255 
2256 	kfree_skb(skb);
2257 
2258 	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2259 		return 0;
2260 
2261 	if (mtyp != STATE_MSG)
2262 		return 0;
2263 
2264 	if (dnode == tipc_own_addr(l->net)) {
2265 		tipc_link_bc_ack_rcv(l, acked, xmitq);
2266 		rc = tipc_link_bc_retrans(l->bc_sndlink, l, from, to, xmitq);
2267 		l->stats.recv_nacks++;
2268 		return rc;
2269 	}
2270 
2271 	/* Msg for other node => suppress own NACK at next sync if applicable */
2272 	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2273 		l->nack_state = BC_NACK_SND_SUPPRESS;
2274 
2275 	return 0;
2276 }
2277 
2278 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
2279 {
2280 	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
2281 
2282 	l->window = win;
2283 	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
2284 	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
2285 	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
2286 	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
2287 	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
2288 }
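
/* Editor's note: a worked example of the limits above, assuming win = 50:
 *
 *	LOW      : max(50,  1 * 50) = 50 packets
 *	MEDIUM   : max(100, 2 * 50) = 100 packets
 *	HIGH     : max(150, 3 * 50) = 150 packets
 *	CRITICAL : max(200, 4 * 50) = 200 packets
 *	SYSTEM   : TIPC_MAX_PUBL / (mtu / ITEM_SIZE), i.e. sized so that a
 *	           full bulk of name table publications can always be queued
 */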
2289 
2290 /**
2291  * tipc_link_reset_stats - reset link statistics
2292  * @l: pointer to link
2293  */
2294 void tipc_link_reset_stats(struct tipc_link *l)
2295 {
2296 	memset(&l->stats, 0, sizeof(l->stats));
2297 }
2298 
2299 static void link_print(struct tipc_link *l, const char *str)
2300 {
2301 	struct sk_buff *hskb = skb_peek(&l->transmq);
2302 	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
2303 	u16 tail = l->snd_nxt - 1;
2304 
2305 	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
2306 	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2307 		skb_queue_len(&l->transmq), head, tail,
2308 		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
2309 }
2310 
2311 /* Parse and validate nested (link) properties valid for media, bearer and link
2312  */
2313 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2314 {
2315 	int err;
2316 
2317 	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2318 					  tipc_nl_prop_policy, NULL);
2319 	if (err)
2320 		return err;
2321 
2322 	if (props[TIPC_NLA_PROP_PRIO]) {
2323 		u32 prio;
2324 
2325 		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2326 		if (prio > TIPC_MAX_LINK_PRI)
2327 			return -EINVAL;
2328 	}
2329 
2330 	if (props[TIPC_NLA_PROP_TOL]) {
2331 		u32 tol;
2332 
2333 		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2334 		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2335 			return -EINVAL;
2336 	}
2337 
2338 	if (props[TIPC_NLA_PROP_WIN]) {
2339 		u32 win;
2340 
2341 		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2342 		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
2343 			return -EINVAL;
2344 	}
2345 
2346 	return 0;
2347 }
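
/* Editor's note: a hypothetical caller of tipc_nl_parse_link_prop(), shown
 * only to illustrate the contract; on success each present property has
 * already been range-checked, but any slot in props[] may still be NULL:
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *
 *	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
 *	if (err)
 *		return err;
 *	if (props[TIPC_NLA_PROP_TOL])
 *		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
 */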
2348 
2349 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2350 {
2351 	int i;
2352 	struct nlattr *stats;
2353 
2354 	struct nla_map {
2355 		u32 key;
2356 		u32 val;
2357 	};
2358 
2359 	struct nla_map map[] = {
2360 		{TIPC_NLA_STATS_RX_INFO, 0},
2361 		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2362 		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2363 		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2364 		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
2365 		{TIPC_NLA_STATS_TX_INFO, 0},
2366 		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2367 		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2368 		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2369 		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2370 		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2371 			s->msg_length_counts : 1},
2372 		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2373 		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2374 		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2375 		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2376 		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2377 		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2378 		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2379 		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2380 		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2381 		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
2382 		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2383 		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2384 		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2385 		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
2386 		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2387 		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2388 		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2389 		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2390 		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2391 		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2392 		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2393 		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2394 			(s->accu_queue_sz / s->queue_sz_counts) : 0}
2395 	};
2396 
2397 	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2398 	if (!stats)
2399 		return -EMSGSIZE;
2400 
2401 	for (i = 0; i < ARRAY_SIZE(map); i++)
2402 		if (nla_put_u32(skb, map[i].key, map[i].val))
2403 			goto msg_full;
2404 
2405 	nla_nest_end(skb, stats);
2406 
2407 	return 0;
2408 msg_full:
2409 	nla_nest_cancel(skb, stats);
2410 
2411 	return -EMSGSIZE;
2412 }
2413 
2414 /* Caller should hold appropriate locks to protect the link */
2415 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2416 		       struct tipc_link *link, int nlflags)
2417 {
2418 	u32 self = tipc_own_addr(net);
2419 	struct nlattr *attrs;
2420 	struct nlattr *prop;
2421 	void *hdr;
2422 	int err;
2423 
2424 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2425 			  nlflags, TIPC_NL_LINK_GET);
2426 	if (!hdr)
2427 		return -EMSGSIZE;
2428 
2429 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2430 	if (!attrs)
2431 		goto msg_full;
2432 
2433 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2434 		goto attr_msg_full;
2435 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
2436 		goto attr_msg_full;
2437 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2438 		goto attr_msg_full;
2439 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
2440 		goto attr_msg_full;
2441 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
2442 		goto attr_msg_full;
2443 
2444 	if (tipc_link_is_up(link))
2445 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2446 			goto attr_msg_full;
2447 	if (link->active)
2448 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2449 			goto attr_msg_full;
2450 
2451 	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2452 	if (!prop)
2453 		goto attr_msg_full;
2454 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2455 		goto prop_msg_full;
2456 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2457 		goto prop_msg_full;
2458 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2459 			link->window))
2460 		goto prop_msg_full;
2463 	nla_nest_end(msg->skb, prop);
2464 
2465 	err = __tipc_nl_add_stats(msg->skb, &link->stats);
2466 	if (err)
2467 		goto attr_msg_full;
2468 
2469 	nla_nest_end(msg->skb, attrs);
2470 	genlmsg_end(msg->skb, hdr);
2471 
2472 	return 0;
2473 
2474 prop_msg_full:
2475 	nla_nest_cancel(msg->skb, prop);
2476 attr_msg_full:
2477 	nla_nest_cancel(msg->skb, attrs);
2478 msg_full:
2479 	genlmsg_cancel(msg->skb, hdr);
2480 
2481 	return -EMSGSIZE;
2482 }
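
/* Editor's note (added comment): the error unwind above follows the usual
 * netlink pattern: each nla_nest_start_noflag() is paired with nla_nest_end()
 * on success and nla_nest_cancel() on failure, and the labels are ordered
 * innermost-first (prop -> attrs -> genl header) so that a failure at any
 * depth rolls back every level opened so far before returning -EMSGSIZE.
 */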
2483 
2484 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2485 				      struct tipc_stats *stats)
2486 {
2487 	int i;
2488 	struct nlattr *nest;
2489 
2490 	struct nla_map {
2491 		__u32 key;
2492 		__u32 val;
2493 	};
2494 
2495 	struct nla_map map[] = {
2496 		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2497 		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2498 		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2499 		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2500 		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2501 		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2502 		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2503 		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2504 		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2505 		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2506 		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2507 		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2508 		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2509 		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2510 		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2511 		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2512 		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2513 		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2514 		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2515 			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2516 	};
2517 
2518 	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2519 	if (!nest)
2520 		return -EMSGSIZE;
2521 
2522 	for (i = 0; i < ARRAY_SIZE(map); i++)
2523 		if (nla_put_u32(skb, map[i].key, map[i].val))
2524 			goto msg_full;
2525 
2526 	nla_nest_end(skb, nest);
2527 
2528 	return 0;
2529 msg_full:
2530 	nla_nest_cancel(skb, nest);
2531 
2532 	return -EMSGSIZE;
2533 }
2534 
2535 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2536 {
2537 	int err;
2538 	void *hdr;
2539 	struct nlattr *attrs;
2540 	struct nlattr *prop;
2541 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2542 	u32 bc_mode = tipc_bcast_get_broadcast_mode(net);
2543 	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
2544 	struct tipc_link *bcl = tn->bcl;
2545 
2546 	if (!bcl)
2547 		return 0;
2548 
2549 	tipc_bcast_lock(net);
2550 
2551 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2552 			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2553 	if (!hdr) {
2554 		tipc_bcast_unlock(net);
2555 		return -EMSGSIZE;
2556 	}
2557 
2558 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2559 	if (!attrs)
2560 		goto msg_full;
2561 
2562 	/* The broadcast link is always up */
2563 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2564 		goto attr_msg_full;
2565 
2566 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2567 		goto attr_msg_full;
2568 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2569 		goto attr_msg_full;
2570 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2571 		goto attr_msg_full;
2572 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2573 		goto attr_msg_full;
2574 
2575 	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2576 	if (!prop)
2577 		goto attr_msg_full;
2578 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2579 		goto prop_msg_full;
2580 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
2581 		goto prop_msg_full;
2582 	if (bc_mode & BCLINK_MODE_SEL)
2583 		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
2584 				bc_ratio))
2585 			goto prop_msg_full;
2586 	nla_nest_end(msg->skb, prop);
2587 
2588 	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2589 	if (err)
2590 		goto attr_msg_full;
2591 
2592 	tipc_bcast_unlock(net);
2593 	nla_nest_end(msg->skb, attrs);
2594 	genlmsg_end(msg->skb, hdr);
2595 
2596 	return 0;
2597 
2598 prop_msg_full:
2599 	nla_nest_cancel(msg->skb, prop);
2600 attr_msg_full:
2601 	nla_nest_cancel(msg->skb, attrs);
2602 msg_full:
2603 	tipc_bcast_unlock(net);
2604 	genlmsg_cancel(msg->skb, hdr);
2605 
2606 	return -EMSGSIZE;
2607 }
2608 
2609 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2610 			     struct sk_buff_head *xmitq)
2611 {
2612 	l->tolerance = tol;
2613 	if (l->bc_rcvlink)
2614 		l->bc_rcvlink->tolerance = tol;
2615 	if (link_is_up(l))
2616 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
2617 }
2618 
2619 void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2620 			struct sk_buff_head *xmitq)
2621 {
2622 	l->priority = prio;
2623 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
2624 }
2625 
2626 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2627 {
2628 	l->abort_limit = limit;
2629 }
2630 
2631 char *tipc_link_name_ext(struct tipc_link *l, char *buf)
2632 {
2633 	if (!l)
2634 		scnprintf(buf, TIPC_MAX_LINK_NAME, "null");
2635 	else if (link_is_bc_sndlink(l))
2636 		scnprintf(buf, TIPC_MAX_LINK_NAME, "broadcast-sender");
2637 	else if (link_is_bc_rcvlink(l))
2638 		scnprintf(buf, TIPC_MAX_LINK_NAME,
2639 			  "broadcast-receiver, peer %x", l->addr);
2640 	else
2641 		memcpy(buf, l->name, TIPC_MAX_LINK_NAME);
2642 
2643 	return buf;
2644 }
2645 
2646 /**
2647  * tipc_link_dump - dump TIPC link data
2648  * @l: tipc link to be dumped
2649  * @dqueues: bitmask selecting which link queues to dump:
2650  *           - TIPC_DUMP_NONE: don't dump link queues
2651  *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
2652  *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
2653  *           - TIPC_DUMP_DEFERDQ: dump link deferred queue
2654  *           - TIPC_DUMP_INPUTQ: dump link input queue
2655  *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
2656  *           - TIPC_DUMP_ALL: dump all the link queues above
2657  * @buf: buffer to which the formatted dump data is written
2658  */
2659 int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2660 {
2661 	int i = 0;
2662 	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
2663 	struct sk_buff_head *list;
2664 	struct sk_buff *hskb, *tskb;
2665 	u32 len;
2666 
2667 	if (!l) {
2668 		i += scnprintf(buf, sz, "link data: (null)\n");
2669 		return i;
2670 	}
2671 
2672 	i += scnprintf(buf, sz, "link data: %x", l->addr);
2673 	i += scnprintf(buf + i, sz - i, " %x", l->state);
2674 	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
2675 	i += scnprintf(buf + i, sz - i, " %u", l->session);
2676 	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
2677 	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
2678 	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
2679 	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
2680 	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
2681 	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2682 	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2683 	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2684 	i += scnprintf(buf + i, sz - i, " %u", l->prev_from);
2685 	i += scnprintf(buf + i, sz - i, " %u", 0);
2686 	i += scnprintf(buf + i, sz - i, " %u", l->acked);
2687 
2688 	list = &l->transmq;
2689 	len = skb_queue_len(list);
2690 	hskb = skb_peek(list);
2691 	tskb = skb_peek_tail(list);
2692 	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2693 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2694 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2695 
2696 	list = &l->deferdq;
2697 	len = skb_queue_len(list);
2698 	hskb = skb_peek(list);
2699 	tskb = skb_peek_tail(list);
2700 	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2701 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2702 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2703 
2704 	list = &l->backlogq;
2705 	len = skb_queue_len(list);
2706 	hskb = skb_peek(list);
2707 	tskb = skb_peek_tail(list);
2708 	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2709 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2710 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2711 
2712 	list = l->inputq;
2713 	len = skb_queue_len(list);
2714 	hskb = skb_peek(list);
2715 	tskb = skb_peek_tail(list);
2716 	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
2717 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2718 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2719 
2720 	if (dqueues & TIPC_DUMP_TRANSMQ) {
2721 		i += scnprintf(buf + i, sz - i, "transmq: ");
2722 		i += tipc_list_dump(&l->transmq, false, buf + i);
2723 	}
2724 	if (dqueues & TIPC_DUMP_BACKLOGQ) {
2725 		i += scnprintf(buf + i, sz - i,
2726 			       "backlogq: <%u %u %u %u %u>, ",
2727 			       l->backlog[TIPC_LOW_IMPORTANCE].len,
2728 			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
2729 			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
2730 			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
2731 			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
2732 		i += tipc_list_dump(&l->backlogq, false, buf + i);
2733 	}
2734 	if (dqueues & TIPC_DUMP_DEFERDQ) {
2735 		i += scnprintf(buf + i, sz - i, "deferdq: ");
2736 		i += tipc_list_dump(&l->deferdq, false, buf + i);
2737 	}
2738 	if (dqueues & TIPC_DUMP_INPUTQ) {
2739 		i += scnprintf(buf + i, sz - i, "inputq: ");
2740 		i += tipc_list_dump(l->inputq, false, buf + i);
2741 	}
2742 	if (dqueues & TIPC_DUMP_WAKEUP) {
2743 		i += scnprintf(buf + i, sz - i, "wakeup: ");
2744 		i += tipc_list_dump(&l->wakeupq, false, buf + i);
2745 	}
2746 
2747 	return i;
2748 }
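
/* Editor's note: a minimal usage sketch for tipc_link_dump(); LINK_LMAX is
 * the buffer size assumed above for a dump that includes queues, and the
 * pr_info() call is just one possible way to emit the result (real kernel
 * code would avoid placing a buffer this large on the stack):
 *
 *	char buf[LINK_LMAX];
 *
 *	tipc_link_dump(l, TIPC_DUMP_ALL, buf);
 *	pr_info("%s", buf);
 */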
2749