xref: /openbmc/linux/net/tipc/link.c (revision c1d3fb8a)
1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include "core.h"
38 #include "subscr.h"
39 #include "link.h"
40 #include "bcast.h"
41 #include "socket.h"
42 #include "name_distr.h"
43 #include "discover.h"
44 #include "netlink.h"
45 #include "monitor.h"
46 #include "trace.h"
47 #include "crypto.h"
48 
49 #include <linux/pkt_sched.h>
50 
51 struct tipc_stats {
52 	u32 sent_pkts;
53 	u32 recv_pkts;
54 	u32 sent_states;
55 	u32 recv_states;
56 	u32 sent_probes;
57 	u32 recv_probes;
58 	u32 sent_nacks;
59 	u32 recv_nacks;
60 	u32 sent_acks;
61 	u32 sent_bundled;
62 	u32 sent_bundles;
63 	u32 recv_bundled;
64 	u32 recv_bundles;
65 	u32 retransmitted;
66 	u32 sent_fragmented;
67 	u32 sent_fragments;
68 	u32 recv_fragmented;
69 	u32 recv_fragments;
70 	u32 link_congs;		/* # port sends blocked by congestion */
71 	u32 deferred_recv;
72 	u32 duplicates;
73 	u32 max_queue_sz;	/* send queue size high water mark */
74 	u32 accu_queue_sz;	/* used for send queue size profiling */
75 	u32 queue_sz_counts;	/* used for send queue size profiling */
76 	u32 msg_length_counts;	/* used for message length profiling */
77 	u32 msg_lengths_total;	/* used for message length profiling */
78 	u32 msg_length_profile[7]; /* used for msg. length profiling */
79 };
80 
81 /**
82  * struct tipc_link - TIPC link data structure
83  * @addr: network address of link's peer node
84  * @name: link name character string
85  * @media_addr: media address to use when sending messages over link
86  * @timer: link timer
87  * @net: pointer to namespace struct
88  * @refcnt: reference counter for permanent references (owner node & timer)
89  * @peer_session: link session # being used by peer end of link
90  * @peer_bearer_id: bearer id used by link's peer endpoint
91  * @bearer_id: local bearer id used by link
92  * @tolerance: minimum link continuity loss needed to reset link [in ms]
93  * @abort_limit: # of unacknowledged continuity probes needed to reset link
94  * @state: current state of link FSM
95  * @peer_caps: bitmap describing capabilities of peer node
96  * @silent_intv_cnt: # of timer intervals without any reception from peer
97  * @proto_msg: template for control messages generated by link
98  * @pmsg: convenience pointer to "proto_msg" field
99  * @priority: current link priority
100  * @net_plane: current link network plane ('A' through 'H')
101  * @mon_state: cookie with information needed by link monitor
102  * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
103  * @exp_msg_count: # of tunnelled messages expected during link changeover
104  * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
105  * @mtu: current maximum packet size for this link
106  * @advertised_mtu: advertised own mtu when link is being established
107  * @transmq: queue for sent, non-acked messages
108  * @backlogq: queue for messages waiting to be sent
109  * @snd_nxt: next sequence number to use for outbound messages
110  * @ackers: # of peers that need to ack each packet before it can be released
111  * @acked: seqno of last packet acked by a certain peer. Used for broadcast.
112  * @rcv_nxt: next sequence number to expect for inbound messages
113  * @deferdq: deferred queue of out-of-sequence messages received from this node
114  * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
115  * @inputq: buffer queue for messages to be delivered upwards
116  * @namedq: buffer queue for name table messages to be delivered upwards
117  * @next_out: ptr to first unsent outbound message in queue
118  * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
119  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
120  * @reasm_buf: head of partially reassembled inbound message fragments
121  * @bc_rcvr: marks that this is a broadcast receiver link
122  * @stats: collects statistics regarding link activity
123  */
124 struct tipc_link {
125 	u32 addr;
126 	char name[TIPC_MAX_LINK_NAME];
127 	struct net *net;
128 
129 	/* Management and link supervision data */
130 	u16 peer_session;
131 	u16 session;
132 	u16 snd_nxt_state;
133 	u16 rcv_nxt_state;
134 	u32 peer_bearer_id;
135 	u32 bearer_id;
136 	u32 tolerance;
137 	u32 abort_limit;
138 	u32 state;
139 	u16 peer_caps;
140 	bool in_session;
141 	bool active;
142 	u32 silent_intv_cnt;
143 	char if_name[TIPC_MAX_IF_NAME];
144 	u32 priority;
145 	char net_plane;
146 	struct tipc_mon_state mon_state;
147 	u16 rst_cnt;
148 
149 	/* Failover/synch */
150 	u16 drop_point;
151 	struct sk_buff *failover_reasm_skb;
152 	struct sk_buff_head failover_deferdq;
153 
154 	/* Max packet negotiation */
155 	u16 mtu;
156 	u16 advertised_mtu;
157 
158 	/* Sending */
159 	struct sk_buff_head transmq;
160 	struct sk_buff_head backlogq;
161 	struct {
162 		u16 len;
163 		u16 limit;
164 		struct sk_buff *target_bskb;
165 	} backlog[5];
166 	u16 snd_nxt;
167 	u16 window;
168 
169 	/* Reception */
170 	u16 rcv_nxt;
171 	u32 rcv_unacked;
172 	struct sk_buff_head deferdq;
173 	struct sk_buff_head *inputq;
174 	struct sk_buff_head *namedq;
175 
176 	/* Congestion handling */
177 	struct sk_buff_head wakeupq;
178 
179 	/* Fragmentation/reassembly */
180 	struct sk_buff *reasm_buf;
181 	struct sk_buff *reasm_tnlmsg;
182 
183 	/* Broadcast */
184 	u16 ackers;
185 	u16 acked;
186 	struct tipc_link *bc_rcvlink;
187 	struct tipc_link *bc_sndlink;
188 	u8 nack_state;
189 	bool bc_peer_is_up;
190 
191 	/* Statistics */
192 	struct tipc_stats stats;
193 };
194 
195 /*
196  * Error message prefixes
197  */
198 static const char *link_co_err = "Link tunneling error, ";
199 static const char *link_rst_msg = "Resetting link ";
200 
201 /* Send states for broadcast NACKs
202  */
203 enum {
204 	BC_NACK_SND_CONDITIONAL,
205 	BC_NACK_SND_UNCONDITIONAL,
206 	BC_NACK_SND_SUPPRESS,
207 };
208 
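/* Earliest time (in jiffies) at which a just (re)transmitted packet may be
 * retransmitted again: 10 ms for broadcast, 1 ms for unicast
 */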
209 #define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
210 #define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
211 
212 /*
213  * Interval between NACKs when packets arrive out of order
214  */
215 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
216 
217 /* Link FSM states:
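 * The state values are disjoint bit patterns, so that groups of states can be
 * tested with a single bitwise AND (see link_is_up() and tipc_link_is_blocked())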
218  */
219 enum {
220 	LINK_ESTABLISHED     = 0xe,
221 	LINK_ESTABLISHING    = 0xe  << 4,
222 	LINK_RESET           = 0x1  << 8,
223 	LINK_RESETTING       = 0x2  << 12,
224 	LINK_PEER_RESET      = 0xd  << 16,
225 	LINK_FAILINGOVER     = 0xf  << 20,
226 	LINK_SYNCHING        = 0xc  << 24
227 };
228 
229 /* Link FSM state checking routines
230  */
231 static int link_is_up(struct tipc_link *l)
232 {
233 	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
234 }
235 
236 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
237 			       struct sk_buff_head *xmitq);
238 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
239 				      bool probe_reply, u16 rcvgap,
240 				      int tolerance, int priority,
241 				      struct sk_buff_head *xmitq);
242 static void link_print(struct tipc_link *l, const char *str);
243 static int tipc_link_build_nack_msg(struct tipc_link *l,
244 				    struct sk_buff_head *xmitq);
245 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
246 					struct sk_buff_head *xmitq);
247 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
248 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data);
249 static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
250 				     struct tipc_gap_ack_blks *ga,
251 				     struct sk_buff_head *xmitq);
252 
253 /*
254  *  Simple non-static link routines (i.e. referenced outside this file)
255  */
256 bool tipc_link_is_up(struct tipc_link *l)
257 {
258 	return link_is_up(l);
259 }
260 
261 bool tipc_link_peer_is_down(struct tipc_link *l)
262 {
263 	return l->state == LINK_PEER_RESET;
264 }
265 
266 bool tipc_link_is_reset(struct tipc_link *l)
267 {
268 	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
269 }
270 
271 bool tipc_link_is_establishing(struct tipc_link *l)
272 {
273 	return l->state == LINK_ESTABLISHING;
274 }
275 
276 bool tipc_link_is_synching(struct tipc_link *l)
277 {
278 	return l->state == LINK_SYNCHING;
279 }
280 
281 bool tipc_link_is_failingover(struct tipc_link *l)
282 {
283 	return l->state == LINK_FAILINGOVER;
284 }
285 
286 bool tipc_link_is_blocked(struct tipc_link *l)
287 {
288 	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
289 }
290 
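/* By convention, the broadcast send link is the only link created without a
 * bc_sndlink back-pointer, so a NULL pointer identifies it
 */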
291 static bool link_is_bc_sndlink(struct tipc_link *l)
292 {
293 	return !l->bc_sndlink;
294 }
295 
296 static bool link_is_bc_rcvlink(struct tipc_link *l)
297 {
298 	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
299 }
300 
301 void tipc_link_set_active(struct tipc_link *l, bool active)
302 {
303 	l->active = active;
304 }
305 
306 u32 tipc_link_id(struct tipc_link *l)
307 {
308 	return l->peer_bearer_id << 16 | l->bearer_id;
309 }
310 
311 int tipc_link_window(struct tipc_link *l)
312 {
313 	return l->window;
314 }
315 
316 int tipc_link_prio(struct tipc_link *l)
317 {
318 	return l->priority;
319 }
320 
321 unsigned long tipc_link_tolerance(struct tipc_link *l)
322 {
323 	return l->tolerance;
324 }
325 
326 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
327 {
328 	return l->inputq;
329 }
330 
331 char tipc_link_plane(struct tipc_link *l)
332 {
333 	return l->net_plane;
334 }
335 
336 void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
337 {
338 	l->peer_caps = capabilities;
339 }
340 
341 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
342 			   struct tipc_link *uc_l,
343 			   struct sk_buff_head *xmitq)
344 {
345 	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
346 
347 	snd_l->ackers++;
348 	rcv_l->acked = snd_l->snd_nxt - 1;
349 	snd_l->state = LINK_ESTABLISHED;
350 	tipc_link_build_bc_init_msg(uc_l, xmitq);
351 }
352 
353 void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
354 			      struct tipc_link *rcv_l,
355 			      struct sk_buff_head *xmitq)
356 {
357 	u16 ack = snd_l->snd_nxt - 1;
358 
359 	snd_l->ackers--;
360 	rcv_l->bc_peer_is_up = true;
361 	rcv_l->state = LINK_ESTABLISHED;
362 	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
363 	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
364 	tipc_link_reset(rcv_l);
365 	rcv_l->state = LINK_RESET;
366 	if (!snd_l->ackers) {
367 		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
368 		tipc_link_reset(snd_l);
369 		snd_l->state = LINK_RESET;
370 		__skb_queue_purge(xmitq);
371 	}
372 }
373 
374 int tipc_link_bc_peers(struct tipc_link *l)
375 {
376 	return l->ackers;
377 }
378 
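/* link_bc_rcv_gap - gap between the next expected broadcast packet and what
 * the peer is known to have sent (on a broadcast receive link, snd_nxt mirrors
 * the peer's snd_nxt, see tipc_link_build_state_msg())
 */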
379 static u16 link_bc_rcv_gap(struct tipc_link *l)
380 {
381 	struct sk_buff *skb = skb_peek(&l->deferdq);
382 	u16 gap = 0;
383 
384 	if (more(l->snd_nxt, l->rcv_nxt))
385 		gap = l->snd_nxt - l->rcv_nxt;
386 	if (skb)
387 		gap = buf_seqno(skb) - l->rcv_nxt;
388 	return gap;
389 }
390 
391 void tipc_link_set_mtu(struct tipc_link *l, int mtu)
392 {
393 	l->mtu = mtu;
394 }
395 
396 int tipc_link_mtu(struct tipc_link *l)
397 {
398 	return l->mtu;
399 }
400 
401 int tipc_link_mss(struct tipc_link *l)
402 {
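	/* Maximum user payload per packet: link MTU minus the internal header,
	 * and minus the encryption overhead when TIPC crypto is built in
	 */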
403 #ifdef CONFIG_TIPC_CRYPTO
404 	return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
405 #else
406 	return l->mtu - INT_H_SIZE;
407 #endif
408 }
409 
410 u16 tipc_link_rcv_nxt(struct tipc_link *l)
411 {
412 	return l->rcv_nxt;
413 }
414 
415 u16 tipc_link_acked(struct tipc_link *l)
416 {
417 	return l->acked;
418 }
419 
420 char *tipc_link_name(struct tipc_link *l)
421 {
422 	return l->name;
423 }
424 
425 u32 tipc_link_state(struct tipc_link *l)
426 {
427 	return l->state;
428 }
429 
430 /**
431  * tipc_link_create - create a new link
432  * @net: pointer to associated network namespace
433  * @if_name: associated interface name
434  * @bearer_id: id (index) of associated bearer
435  * @tolerance: link tolerance to be used by link
436  * @net_plane: network plane (A, B, C, ...) this link belongs to
437  * @mtu: mtu to be advertised by link
438  * @priority: priority to be used by link
439  * @window: send window to be used by link
440  * @session: session to be used by link
441  * @self: network address of own node
442  * @peer: node id of peer node
443  * @peer_caps: bitmap describing peer node capabilities
444  * @bc_sndlink: the namespace global link used for broadcast sending
445  * @bc_rcvlink: the peer specific link used for broadcast reception
446  * @inputq: queue to put messages ready for delivery
447  * @namedq: queue to put binding table update messages ready for delivery
448  * @link: return value, pointer to put the created link
449  *
450  * Returns true if link was created, otherwise false
451  */
452 bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
453 		      int tolerance, char net_plane, u32 mtu, int priority,
454 		      int window, u32 session, u32 self,
455 		      u32 peer, u8 *peer_id, u16 peer_caps,
456 		      struct tipc_link *bc_sndlink,
457 		      struct tipc_link *bc_rcvlink,
458 		      struct sk_buff_head *inputq,
459 		      struct sk_buff_head *namedq,
460 		      struct tipc_link **link)
461 {
462 	char peer_str[NODE_ID_STR_LEN] = {0,};
463 	char self_str[NODE_ID_STR_LEN] = {0,};
464 	struct tipc_link *l;
465 
466 	l = kzalloc(sizeof(*l), GFP_ATOMIC);
467 	if (!l)
468 		return false;
469 	*link = l;
470 	l->session = session;
471 
472 	/* Set link name for unicast links only */
473 	if (peer_id) {
474 		tipc_nodeid2string(self_str, tipc_own_id(net));
475 		if (strlen(self_str) > 16)
476 			sprintf(self_str, "%x", self);
477 		tipc_nodeid2string(peer_str, peer_id);
478 		if (strlen(peer_str) > 16)
479 			sprintf(peer_str, "%x", peer);
480 	}
481 	/* Peer i/f name will be completed by reset/activate message */
482 	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
483 		 self_str, if_name, peer_str);
484 
485 	strcpy(l->if_name, if_name);
486 	l->addr = peer;
487 	l->peer_caps = peer_caps;
488 	l->net = net;
489 	l->in_session = false;
490 	l->bearer_id = bearer_id;
491 	l->tolerance = tolerance;
492 	if (bc_rcvlink)
493 		bc_rcvlink->tolerance = tolerance;
494 	l->net_plane = net_plane;
495 	l->advertised_mtu = mtu;
496 	l->mtu = mtu;
497 	l->priority = priority;
498 	tipc_link_set_queue_limits(l, window);
499 	l->ackers = 1;
500 	l->bc_sndlink = bc_sndlink;
501 	l->bc_rcvlink = bc_rcvlink;
502 	l->inputq = inputq;
503 	l->namedq = namedq;
504 	l->state = LINK_RESETTING;
505 	__skb_queue_head_init(&l->transmq);
506 	__skb_queue_head_init(&l->backlogq);
507 	__skb_queue_head_init(&l->deferdq);
508 	__skb_queue_head_init(&l->failover_deferdq);
509 	skb_queue_head_init(&l->wakeupq);
510 	skb_queue_head_init(l->inputq);
511 	return true;
512 }
513 
514 /**
515  * tipc_link_bc_create - create new link to be used for broadcast
516  * @net: pointer to associated network namespace
517  * @mtu: mtu to be used initially if no peers
518  * @window: send window to be used
519  * @inputq: queue to put messages ready for delivery
520  * @namedq: queue to put binding table update messages ready for delivery
521  * @link: return value, pointer to put the created link
522  *
523  * Returns true if link was created, otherwise false
524  */
525 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
526 			 int mtu, int window, u16 peer_caps,
527 			 struct sk_buff_head *inputq,
528 			 struct sk_buff_head *namedq,
529 			 struct tipc_link *bc_sndlink,
530 			 struct tipc_link **link)
531 {
532 	struct tipc_link *l;
533 
534 	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
535 			      0, ownnode, peer, NULL, peer_caps, bc_sndlink,
536 			      NULL, inputq, namedq, link))
537 		return false;
538 
539 	l = *link;
540 	strcpy(l->name, tipc_bclink_name);
541 	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
542 	tipc_link_reset(l);
543 	l->state = LINK_RESET;
544 	l->ackers = 0;
545 	l->bc_rcvlink = l;
546 
547 	/* Broadcast send link is always up */
548 	if (link_is_bc_sndlink(l))
549 		l->state = LINK_ESTABLISHED;
550 
551 	/* Disable replicast if even a single peer doesn't support it */
552 	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
553 		tipc_bcast_toggle_rcast(net, false);
554 
555 	return true;
556 }
557 
558 /**
559  * tipc_link_fsm_evt - link finite state machine
560  * @l: pointer to link
561  * @evt: state machine event to be processed
562  */
563 int tipc_link_fsm_evt(struct tipc_link *l, int evt)
564 {
565 	int rc = 0;
566 	int old_state = l->state;
567 
568 	switch (l->state) {
569 	case LINK_RESETTING:
570 		switch (evt) {
571 		case LINK_PEER_RESET_EVT:
572 			l->state = LINK_PEER_RESET;
573 			break;
574 		case LINK_RESET_EVT:
575 			l->state = LINK_RESET;
576 			break;
577 		case LINK_FAILURE_EVT:
578 		case LINK_FAILOVER_BEGIN_EVT:
579 		case LINK_ESTABLISH_EVT:
580 		case LINK_FAILOVER_END_EVT:
581 		case LINK_SYNCH_BEGIN_EVT:
582 		case LINK_SYNCH_END_EVT:
583 		default:
584 			goto illegal_evt;
585 		}
586 		break;
587 	case LINK_RESET:
588 		switch (evt) {
589 		case LINK_PEER_RESET_EVT:
590 			l->state = LINK_ESTABLISHING;
591 			break;
592 		case LINK_FAILOVER_BEGIN_EVT:
593 			l->state = LINK_FAILINGOVER;
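			/* fall through */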
594 		case LINK_FAILURE_EVT:
595 		case LINK_RESET_EVT:
596 		case LINK_ESTABLISH_EVT:
597 		case LINK_FAILOVER_END_EVT:
598 			break;
599 		case LINK_SYNCH_BEGIN_EVT:
600 		case LINK_SYNCH_END_EVT:
601 		default:
602 			goto illegal_evt;
603 		}
604 		break;
605 	case LINK_PEER_RESET:
606 		switch (evt) {
607 		case LINK_RESET_EVT:
608 			l->state = LINK_ESTABLISHING;
609 			break;
610 		case LINK_PEER_RESET_EVT:
611 		case LINK_ESTABLISH_EVT:
612 		case LINK_FAILURE_EVT:
613 			break;
614 		case LINK_SYNCH_BEGIN_EVT:
615 		case LINK_SYNCH_END_EVT:
616 		case LINK_FAILOVER_BEGIN_EVT:
617 		case LINK_FAILOVER_END_EVT:
618 		default:
619 			goto illegal_evt;
620 		}
621 		break;
622 	case LINK_FAILINGOVER:
623 		switch (evt) {
624 		case LINK_FAILOVER_END_EVT:
625 			l->state = LINK_RESET;
626 			break;
627 		case LINK_PEER_RESET_EVT:
628 		case LINK_RESET_EVT:
629 		case LINK_ESTABLISH_EVT:
630 		case LINK_FAILURE_EVT:
631 			break;
632 		case LINK_FAILOVER_BEGIN_EVT:
633 		case LINK_SYNCH_BEGIN_EVT:
634 		case LINK_SYNCH_END_EVT:
635 		default:
636 			goto illegal_evt;
637 		}
638 		break;
639 	case LINK_ESTABLISHING:
640 		switch (evt) {
641 		case LINK_ESTABLISH_EVT:
642 			l->state = LINK_ESTABLISHED;
643 			break;
644 		case LINK_FAILOVER_BEGIN_EVT:
645 			l->state = LINK_FAILINGOVER;
646 			break;
647 		case LINK_RESET_EVT:
648 			l->state = LINK_RESET;
649 			break;
650 		case LINK_FAILURE_EVT:
651 		case LINK_PEER_RESET_EVT:
652 		case LINK_SYNCH_BEGIN_EVT:
653 		case LINK_FAILOVER_END_EVT:
654 			break;
655 		case LINK_SYNCH_END_EVT:
656 		default:
657 			goto illegal_evt;
658 		}
659 		break;
660 	case LINK_ESTABLISHED:
661 		switch (evt) {
662 		case LINK_PEER_RESET_EVT:
663 			l->state = LINK_PEER_RESET;
664 			rc |= TIPC_LINK_DOWN_EVT;
665 			break;
666 		case LINK_FAILURE_EVT:
667 			l->state = LINK_RESETTING;
668 			rc |= TIPC_LINK_DOWN_EVT;
669 			break;
670 		case LINK_RESET_EVT:
671 			l->state = LINK_RESET;
672 			break;
673 		case LINK_ESTABLISH_EVT:
674 		case LINK_SYNCH_END_EVT:
675 			break;
676 		case LINK_SYNCH_BEGIN_EVT:
677 			l->state = LINK_SYNCHING;
678 			break;
679 		case LINK_FAILOVER_BEGIN_EVT:
680 		case LINK_FAILOVER_END_EVT:
681 		default:
682 			goto illegal_evt;
683 		}
684 		break;
685 	case LINK_SYNCHING:
686 		switch (evt) {
687 		case LINK_PEER_RESET_EVT:
688 			l->state = LINK_PEER_RESET;
689 			rc |= TIPC_LINK_DOWN_EVT;
690 			break;
691 		case LINK_FAILURE_EVT:
692 			l->state = LINK_RESETTING;
693 			rc |= TIPC_LINK_DOWN_EVT;
694 			break;
695 		case LINK_RESET_EVT:
696 			l->state = LINK_RESET;
697 			break;
698 		case LINK_ESTABLISH_EVT:
699 		case LINK_SYNCH_BEGIN_EVT:
700 			break;
701 		case LINK_SYNCH_END_EVT:
702 			l->state = LINK_ESTABLISHED;
703 			break;
704 		case LINK_FAILOVER_BEGIN_EVT:
705 		case LINK_FAILOVER_END_EVT:
706 		default:
707 			goto illegal_evt;
708 		}
709 		break;
710 	default:
711 		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
712 	}
713 	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
714 	return rc;
715 illegal_evt:
716 	pr_err("Illegal FSM event %x in state %x on link %s\n",
717 	       evt, l->state, l->name);
718 	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
719 	return rc;
720 }
721 
722 /* link_profile_stats - update statistical profiling of traffic
723  */
724 static void link_profile_stats(struct tipc_link *l)
725 {
726 	struct sk_buff *skb;
727 	struct tipc_msg *msg;
728 	int length;
729 
730 	/* Update counters used in statistical profiling of send traffic */
731 	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
732 	l->stats.queue_sz_counts++;
733 
734 	skb = skb_peek(&l->transmq);
735 	if (!skb)
736 		return;
737 	msg = buf_msg(skb);
738 	length = msg_size(msg);
739 
740 	if (msg_user(msg) == MSG_FRAGMENTER) {
741 		if (msg_type(msg) != FIRST_FRAGMENT)
742 			return;
743 		length = msg_size(msg_inner_hdr(msg));
744 	}
745 	l->stats.msg_lengths_total += length;
746 	l->stats.msg_length_counts++;
747 	if (length <= 64)
748 		l->stats.msg_length_profile[0]++;
749 	else if (length <= 256)
750 		l->stats.msg_length_profile[1]++;
751 	else if (length <= 1024)
752 		l->stats.msg_length_profile[2]++;
753 	else if (length <= 4096)
754 		l->stats.msg_length_profile[3]++;
755 	else if (length <= 16384)
756 		l->stats.msg_length_profile[4]++;
757 	else if (length <= 32768)
758 		l->stats.msg_length_profile[5]++;
759 	else
760 		l->stats.msg_length_profile[6]++;
761 }
762 
763 /**
764  * tipc_link_too_silent - check if link is "too silent"
765  * @l: tipc link to be checked
766  *
767  * Returns true if the link 'silent_intv_cnt' is about to reach the
768  * 'abort_limit' value, otherwise false
769  */
770 bool tipc_link_too_silent(struct tipc_link *l)
771 {
772 	return (l->silent_intv_cnt + 2 > l->abort_limit);
773 }
774 
775 /* tipc_link_timeout - perform periodic task as instructed from node timeout
776  */
777 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
778 {
779 	int mtyp = 0;
780 	int rc = 0;
781 	bool state = false;
782 	bool probe = false;
783 	bool setup = false;
784 	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
785 	u16 bc_acked = l->bc_rcvlink->acked;
786 	struct tipc_mon_state *mstate = &l->mon_state;
787 
788 	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
789 	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
790 	switch (l->state) {
791 	case LINK_ESTABLISHED:
792 	case LINK_SYNCHING:
793 		mtyp = STATE_MSG;
794 		link_profile_stats(l);
795 		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
796 		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
797 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
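		/* A STATE message is needed whenever there is unacked or
		 * deferred traffic in either direction
		 */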
798 		state = bc_acked != bc_snt;
799 		state |= l->bc_rcvlink->rcv_unacked;
800 		state |= l->rcv_unacked;
801 		state |= !skb_queue_empty(&l->transmq);
802 		state |= !skb_queue_empty(&l->deferdq);
803 		probe = mstate->probing;
804 		probe |= l->silent_intv_cnt;
805 		if (probe || mstate->monitoring)
806 			l->silent_intv_cnt++;
807 		break;
808 	case LINK_RESET:
809 		setup = l->rst_cnt++ <= 4;
810 		setup |= !(l->rst_cnt % 16);
811 		mtyp = RESET_MSG;
812 		break;
813 	case LINK_ESTABLISHING:
814 		setup = true;
815 		mtyp = ACTIVATE_MSG;
816 		break;
817 	case LINK_PEER_RESET:
818 	case LINK_RESETTING:
819 	case LINK_FAILINGOVER:
820 		break;
821 	default:
822 		break;
823 	}
824 
825 	if (state || probe || setup)
826 		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
827 
828 	return rc;
829 }
830 
831 /**
832  * link_schedule_user - schedule a message sender for wakeup after congestion
833  * @l: congested link
834  * @hdr: header of message that is being sent
835  * Create pseudo msg to send back to user when congestion abates
836  */
837 static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
838 {
839 	u32 dnode = tipc_own_addr(l->net);
840 	u32 dport = msg_origport(hdr);
841 	struct sk_buff *skb;
842 
843 	/* Create and schedule wakeup pseudo message */
844 	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
845 			      dnode, l->addr, dport, 0, 0);
846 	if (!skb)
847 		return -ENOBUFS;
848 	msg_set_dest_droppable(buf_msg(skb), true);
849 	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
850 	skb_queue_tail(&l->wakeupq, skb);
851 	l->stats.link_congs++;
852 	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
853 	return -ELINKCONG;
854 }
855 
856 /**
857  * link_prepare_wakeup - prepare users for wakeup after congestion
858  * @l: congested link
859  * Wake up a number of waiting users, as permitted by available space
860  * in the send queue
861  */
862 static void link_prepare_wakeup(struct tipc_link *l)
863 {
864 	struct sk_buff_head *wakeupq = &l->wakeupq;
865 	struct sk_buff_head *inputq = l->inputq;
866 	struct sk_buff *skb, *tmp;
867 	struct sk_buff_head tmpq;
868 	int avail[5] = {0,};
869 	int imp = 0;
870 
871 	__skb_queue_head_init(&tmpq);
872 
873 	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
874 		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;
875 
876 	skb_queue_walk_safe(wakeupq, skb, tmp) {
877 		imp = TIPC_SKB_CB(skb)->chain_imp;
878 		if (avail[imp] <= 0)
879 			continue;
880 		avail[imp]--;
881 		__skb_unlink(skb, wakeupq);
882 		__skb_queue_tail(&tmpq, skb);
883 	}
884 
885 	spin_lock_bh(&inputq->lock);
886 	skb_queue_splice_tail(&tmpq, inputq);
887 	spin_unlock_bh(&inputq->lock);
888 
889 }
890 
891 void tipc_link_reset(struct tipc_link *l)
892 {
893 	struct sk_buff_head list;
894 	u32 imp;
895 
896 	__skb_queue_head_init(&list);
897 
898 	l->in_session = false;
899 	/* Force re-synch of peer session number before establishing */
900 	l->peer_session--;
901 	l->session++;
902 	l->mtu = l->advertised_mtu;
903 
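	/* Hand any pending wakeup messages over to the input queue, so that
	 * congestion-blocked senders are still woken up after the reset
	 */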
904 	spin_lock_bh(&l->wakeupq.lock);
905 	skb_queue_splice_init(&l->wakeupq, &list);
906 	spin_unlock_bh(&l->wakeupq.lock);
907 
908 	spin_lock_bh(&l->inputq->lock);
909 	skb_queue_splice_init(&list, l->inputq);
910 	spin_unlock_bh(&l->inputq->lock);
911 
912 	__skb_queue_purge(&l->transmq);
913 	__skb_queue_purge(&l->deferdq);
914 	__skb_queue_purge(&l->backlogq);
915 	__skb_queue_purge(&l->failover_deferdq);
916 	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
917 		l->backlog[imp].len = 0;
918 		l->backlog[imp].target_bskb = NULL;
919 	}
920 	kfree_skb(l->reasm_buf);
921 	kfree_skb(l->reasm_tnlmsg);
922 	kfree_skb(l->failover_reasm_skb);
923 	l->reasm_buf = NULL;
924 	l->reasm_tnlmsg = NULL;
925 	l->failover_reasm_skb = NULL;
926 	l->rcv_unacked = 0;
927 	l->snd_nxt = 1;
928 	l->rcv_nxt = 1;
929 	l->snd_nxt_state = 1;
930 	l->rcv_nxt_state = 1;
931 	l->acked = 0;
932 	l->silent_intv_cnt = 0;
933 	l->rst_cnt = 0;
934 	l->bc_peer_is_up = false;
935 	memset(&l->mon_state, 0, sizeof(l->mon_state));
936 	tipc_link_reset_stats(l);
937 }
938 
939 /**
940  * tipc_link_xmit(): enqueue buffer list according to queue situation
941  * @link: link to use
942  * @list: chain of buffers containing message
943  * @xmitq: returned list of packets to be sent by caller
944  *
945  * Consumes the buffer chain.
946  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
947  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
948  */
949 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
950 		   struct sk_buff_head *xmitq)
951 {
952 	struct tipc_msg *hdr = buf_msg(skb_peek(list));
953 	struct sk_buff_head *backlogq = &l->backlogq;
954 	struct sk_buff_head *transmq = &l->transmq;
955 	struct sk_buff *skb, *_skb;
956 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
957 	u16 ack = l->rcv_nxt - 1;
958 	u16 seqno = l->snd_nxt;
959 	int pkt_cnt = skb_queue_len(list);
960 	int imp = msg_importance(hdr);
961 	unsigned int mss = tipc_link_mss(l);
962 	unsigned int maxwin = l->window;
963 	unsigned int mtu = l->mtu;
964 	bool new_bundle;
965 	int rc = 0;
966 
967 	if (unlikely(msg_size(hdr) > mtu)) {
968 		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
969 			skb_queue_len(list), msg_user(hdr),
970 			msg_type(hdr), msg_size(hdr), mtu);
971 		__skb_queue_purge(list);
972 		return -EMSGSIZE;
973 	}
974 
975 	/* Allow oversubscription of one data msg per source at congestion */
976 	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
977 		if (imp == TIPC_SYSTEM_IMPORTANCE) {
978 			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
979 			return -ENOBUFS;
980 		}
981 		rc = link_schedule_user(l, hdr);
982 	}
983 
984 	if (pkt_cnt > 1) {
985 		l->stats.sent_fragmented++;
986 		l->stats.sent_fragments += pkt_cnt;
987 	}
988 
989 	/* Prepare each packet for sending, and add to relevant queue: */
990 	while ((skb = __skb_dequeue(list))) {
991 		if (likely(skb_queue_len(transmq) < maxwin)) {
992 			hdr = buf_msg(skb);
993 			msg_set_seqno(hdr, seqno);
994 			msg_set_ack(hdr, ack);
995 			msg_set_bcast_ack(hdr, bc_ack);
996 			_skb = skb_clone(skb, GFP_ATOMIC);
997 			if (!_skb) {
998 				kfree_skb(skb);
999 				__skb_queue_purge(list);
1000 				return -ENOBUFS;
1001 			}
1002 			__skb_queue_tail(transmq, skb);
1003 			/* next retransmit attempt */
1004 			if (link_is_bc_sndlink(l))
1005 				TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1006 			__skb_queue_tail(xmitq, _skb);
1007 			TIPC_SKB_CB(skb)->ackers = l->ackers;
1008 			l->rcv_unacked = 0;
1009 			l->stats.sent_pkts++;
1010 			seqno++;
1011 			continue;
1012 		}
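		/* Transmit window is full: try to bundle the message into the
		 * current backlog bundle, otherwise append it to the backlog
		 */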
1013 		if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
1014 					mss, l->addr, &new_bundle)) {
1015 			if (skb) {
1016 				/* Keep a ref. to the skb for next try */
1017 				l->backlog[imp].target_bskb = skb;
1018 				l->backlog[imp].len++;
1019 				__skb_queue_tail(backlogq, skb);
1020 			} else {
1021 				if (new_bundle) {
1022 					l->stats.sent_bundles++;
1023 					l->stats.sent_bundled++;
1024 				}
1025 				l->stats.sent_bundled++;
1026 			}
1027 			continue;
1028 		}
1029 		l->backlog[imp].target_bskb = NULL;
1030 		l->backlog[imp].len += (1 + skb_queue_len(list));
1031 		__skb_queue_tail(backlogq, skb);
1032 		skb_queue_splice_tail_init(list, backlogq);
1033 	}
1034 	l->snd_nxt = seqno;
1035 	return rc;
1036 }
1037 
1038 static void tipc_link_advance_backlog(struct tipc_link *l,
1039 				      struct sk_buff_head *xmitq)
1040 {
1041 	struct sk_buff *skb, *_skb;
1042 	struct tipc_msg *hdr;
1043 	u16 seqno = l->snd_nxt;
1044 	u16 ack = l->rcv_nxt - 1;
1045 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1046 	u32 imp;
1047 
1048 	while (skb_queue_len(&l->transmq) < l->window) {
1049 		skb = skb_peek(&l->backlogq);
1050 		if (!skb)
1051 			break;
1052 		_skb = skb_clone(skb, GFP_ATOMIC);
1053 		if (!_skb)
1054 			break;
1055 		__skb_dequeue(&l->backlogq);
1056 		hdr = buf_msg(skb);
1057 		imp = msg_importance(hdr);
1058 		l->backlog[imp].len--;
1059 		if (unlikely(skb == l->backlog[imp].target_bskb))
1060 			l->backlog[imp].target_bskb = NULL;
1061 		__skb_queue_tail(&l->transmq, skb);
1062 		/* next retransmit attempt */
1063 		if (link_is_bc_sndlink(l))
1064 			TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1065 
1066 		__skb_queue_tail(xmitq, _skb);
1067 		TIPC_SKB_CB(skb)->ackers = l->ackers;
1068 		msg_set_seqno(hdr, seqno);
1069 		msg_set_ack(hdr, ack);
1070 		msg_set_bcast_ack(hdr, bc_ack);
1071 		l->rcv_unacked = 0;
1072 		l->stats.sent_pkts++;
1073 		seqno++;
1074 	}
1075 	l->snd_nxt = seqno;
1076 }
1077 
1078 /**
1079  * link_retransmit_failure() - Detect repeated retransmit failures
1080  * @l: tipc link sender
1081  * @r: tipc link receiver (= l in case of unicast)
1082  * @rc: returned code
1083  *
1084  * Return: true if repeated retransmit failures have occurred, otherwise
1085  * false
1086  */
1087 static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
1088 				    int *rc)
1089 {
1090 	struct sk_buff *skb = skb_peek(&l->transmq);
1091 	struct tipc_msg *hdr;
1092 
1093 	if (!skb)
1094 		return false;
1095 
1096 	if (!TIPC_SKB_CB(skb)->retr_cnt)
1097 		return false;
1098 
1099 	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
1100 			msecs_to_jiffies(r->tolerance * 10)))
1101 		return false;
1102 
1103 	hdr = buf_msg(skb);
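	/* On the broadcast send link, don't blame a receiver that has already
	 * acked the oldest un-acked packet
	 */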
1104 	if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
1105 		return false;
1106 
1107 	pr_warn("Retransmission failure on link <%s>\n", l->name);
1108 	link_print(l, "State of link ");
1109 	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1110 		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
1111 	pr_info("sqno %u, prev: %x, dest: %x\n",
1112 		msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
1113 	pr_info("retr_stamp %d, retr_cnt %d\n",
1114 		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
1115 		TIPC_SKB_CB(skb)->retr_cnt);
1116 
1117 	trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1118 	trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1119 	trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1120 
1121 	if (link_is_bc_sndlink(l)) {
1122 		r->state = LINK_RESET;
1123 		*rc = TIPC_LINK_DOWN_EVT;
1124 	} else {
1125 		*rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1126 	}
1127 
1128 	return true;
1129 }
1130 
1131 /* tipc_link_bc_retrans() - retransmit zero or more packets
1132  * @l: the link to transmit on
1133  * @r: the receiving link ordering the retransmit. Same as l if unicast
1134  * @from: retransmit from (inclusive) this sequence number
1135  * @to: retransmit to (inclusive) this sequence number
1136  * @xmitq: queue for accumulating the retransmitted packets
1137  */
1138 static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1139 				u16 from, u16 to, struct sk_buff_head *xmitq)
1140 {
1141 	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
1142 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1143 	u16 ack = l->rcv_nxt - 1;
1144 	struct tipc_msg *hdr;
1145 	int rc = 0;
1146 
1147 	if (!skb)
1148 		return 0;
1149 	if (less(to, from))
1150 		return 0;
1151 
1152 	trace_tipc_link_retrans(r, from, to, &l->transmq);
1153 
1154 	if (link_retransmit_failure(l, r, &rc))
1155 		return rc;
1156 
1157 	skb_queue_walk(&l->transmq, skb) {
1158 		hdr = buf_msg(skb);
1159 		if (less(msg_seqno(hdr), from))
1160 			continue;
1161 		if (more(msg_seqno(hdr), to))
1162 			break;
1163 
1164 		if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1165 			continue;
1166 		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1167 		_skb = pskb_copy(skb, GFP_ATOMIC);
1168 		if (!_skb)
1169 			return 0;
1170 		hdr = buf_msg(_skb);
1171 		msg_set_ack(hdr, ack);
1172 		msg_set_bcast_ack(hdr, bc_ack);
1173 		_skb->priority = TC_PRIO_CONTROL;
1174 		__skb_queue_tail(xmitq, _skb);
1175 		l->stats.retransmitted++;
1176 
1177 		/* Increase actual retrans counter & mark first time */
1178 		if (!TIPC_SKB_CB(skb)->retr_cnt++)
1179 			TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1180 	}
1181 	return 0;
1182 }
1183 
1184 /* tipc_data_input - deliver data and name distr msgs to upper layer
1185  *
1186  * Consumes buffer if message is of right type
1187  * Node lock must be held
1188  */
1189 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1190 			    struct sk_buff_head *inputq)
1191 {
1192 	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
1193 	struct tipc_msg *hdr = buf_msg(skb);
1194 
1195 	switch (msg_user(hdr)) {
1196 	case TIPC_LOW_IMPORTANCE:
1197 	case TIPC_MEDIUM_IMPORTANCE:
1198 	case TIPC_HIGH_IMPORTANCE:
1199 	case TIPC_CRITICAL_IMPORTANCE:
1200 		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
1201 			skb_queue_tail(mc_inputq, skb);
1202 			return true;
1203 		}
1204 		/* fall through */
1205 	case CONN_MANAGER:
1206 		skb_queue_tail(inputq, skb);
1207 		return true;
1208 	case GROUP_PROTOCOL:
1209 		skb_queue_tail(mc_inputq, skb);
1210 		return true;
1211 	case NAME_DISTRIBUTOR:
1212 		l->bc_rcvlink->state = LINK_ESTABLISHED;
1213 		skb_queue_tail(l->namedq, skb);
1214 		return true;
1215 	case MSG_BUNDLER:
1216 	case TUNNEL_PROTOCOL:
1217 	case MSG_FRAGMENTER:
1218 	case BCAST_PROTOCOL:
1219 		return false;
1220 	default:
1221 		pr_warn("Dropping received illegal msg type\n");
1222 		kfree_skb(skb);
1223 		return true;
1224 	}
1225 }
1226 
1227 /* tipc_link_input - process packet that has passed link protocol check
1228  *
1229  * Consumes buffer
1230  */
1231 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1232 			   struct sk_buff_head *inputq,
1233 			   struct sk_buff **reasm_skb)
1234 {
1235 	struct tipc_msg *hdr = buf_msg(skb);
1236 	struct sk_buff *iskb;
1237 	struct sk_buff_head tmpq;
1238 	int usr = msg_user(hdr);
1239 	int pos = 0;
1240 
1241 	if (usr == MSG_BUNDLER) {
1242 		skb_queue_head_init(&tmpq);
1243 		l->stats.recv_bundles++;
1244 		l->stats.recv_bundled += msg_msgcnt(hdr);
1245 		while (tipc_msg_extract(skb, &iskb, &pos))
1246 			tipc_data_input(l, iskb, &tmpq);
1247 		tipc_skb_queue_splice_tail(&tmpq, inputq);
1248 		return 0;
1249 	} else if (usr == MSG_FRAGMENTER) {
1250 		l->stats.recv_fragments++;
1251 		if (tipc_buf_append(reasm_skb, &skb)) {
1252 			l->stats.recv_fragmented++;
1253 			tipc_data_input(l, skb, inputq);
1254 		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1255 			pr_warn_ratelimited("Unable to build fragment list\n");
1256 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1257 		}
1258 		return 0;
1259 	} else if (usr == BCAST_PROTOCOL) {
1260 		tipc_bcast_lock(l->net);
1261 		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1262 		tipc_bcast_unlock(l->net);
1263 	}
1264 
1265 	kfree_skb(skb);
1266 	return 0;
1267 }
1268 
1269 /* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
1270  *			 inner message along with the ones in the old link's
1271  *			 deferdq
1272  * @l: tunnel link
1273  * @skb: TUNNEL_PROTOCOL message
1274  * @inputq: queue to put messages ready for delivery
1275  */
1276 static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
1277 			     struct sk_buff_head *inputq)
1278 {
1279 	struct sk_buff **reasm_skb = &l->failover_reasm_skb;
1280 	struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
1281 	struct sk_buff_head *fdefq = &l->failover_deferdq;
1282 	struct tipc_msg *hdr = buf_msg(skb);
1283 	struct sk_buff *iskb;
1284 	int ipos = 0;
1285 	int rc = 0;
1286 	u16 seqno;
1287 
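	/* A dummy SYNCH message carries no tunnelled data (see
	 * tipc_link_tnl_prepare()), so it can simply be dropped here
	 */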
1288 	if (msg_type(hdr) == SYNCH_MSG) {
1289 		kfree_skb(skb);
1290 		return 0;
1291 	}
1292 
1293 	/* Not a fragment? */
1294 	if (likely(!msg_nof_fragms(hdr))) {
1295 		if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
1296 			pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
1297 					    skb_queue_len(fdefq));
1298 			return 0;
1299 		}
1300 		kfree_skb(skb);
1301 	} else {
1302 		/* Set fragment type for buf_append */
1303 		if (msg_fragm_no(hdr) == 1)
1304 			msg_set_type(hdr, FIRST_FRAGMENT);
1305 		else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
1306 			msg_set_type(hdr, FRAGMENT);
1307 		else
1308 			msg_set_type(hdr, LAST_FRAGMENT);
1309 
1310 		if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
1311 			/* Successful but non-complete reassembly? */
1312 			if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
1313 				return 0;
1314 			pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
1315 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1316 		}
1317 		iskb = skb;
1318 	}
1319 
1320 	do {
1321 		seqno = buf_seqno(iskb);
1322 		if (unlikely(less(seqno, l->drop_point))) {
1323 			kfree_skb(iskb);
1324 			continue;
1325 		}
1326 		if (unlikely(seqno != l->drop_point)) {
1327 			__tipc_skb_queue_sorted(fdefq, seqno, iskb);
1328 			continue;
1329 		}
1330 
1331 		l->drop_point++;
1332 		if (!tipc_data_input(l, iskb, inputq))
1333 			rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
1334 		if (unlikely(rc))
1335 			break;
1336 	} while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
1337 
1338 	return rc;
1339 }
1340 
1341 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1342 {
1343 	bool released = false;
1344 	struct sk_buff *skb, *tmp;
1345 
1346 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1347 		if (more(buf_seqno(skb), acked))
1348 			break;
1349 		__skb_unlink(skb, &l->transmq);
1350 		kfree_skb(skb);
1351 		released = true;
1352 	}
1353 	return released;
1354 }
1355 
1356 /* tipc_build_gap_ack_blks - build Gap ACK blocks
1357  * @l: tipc link that data have come with gaps in sequence if any
1358  * @data: data buffer to store the Gap ACK blocks after built
1359  *
1360  * Returns the total length (in bytes) of the Gap ACK blocks built
1361  */
1362 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data)
1363 {
1364 	struct sk_buff *skb = skb_peek(&l->deferdq);
1365 	struct tipc_gap_ack_blks *ga = data;
1366 	u16 len, expect, seqno = 0;
1367 	u8 n = 0;
1368 
1369 	if (!skb)
1370 		goto exit;
1371 
1372 	expect = buf_seqno(skb);
1373 	skb_queue_walk(&l->deferdq, skb) {
1374 		seqno = buf_seqno(skb);
1375 		if (unlikely(more(seqno, expect))) {
1376 			ga->gacks[n].ack = htons(expect - 1);
1377 			ga->gacks[n].gap = htons(seqno - expect);
1378 			if (++n >= MAX_GAP_ACK_BLKS) {
1379 				pr_info_ratelimited("Too few Gap ACK blocks!\n");
1380 				goto exit;
1381 			}
1382 		} else if (unlikely(less(seqno, expect))) {
1383 			pr_warn("Unexpected skb in deferdq!\n");
1384 			continue;
1385 		}
1386 		expect = seqno + 1;
1387 	}
1388 
1389 	/* last block */
1390 	ga->gacks[n].ack = htons(seqno);
1391 	ga->gacks[n].gap = 0;
1392 	n++;
1393 
1394 exit:
1395 	len = tipc_gap_ack_blks_sz(n);
1396 	ga->len = htons(len);
1397 	ga->gack_cnt = n;
1398 	return len;
1399 }
1400 
1401 /* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
1402  *			       acked packets, also doing retransmissions if
1403  *			       gaps found
1404  * @l: tipc link with transmq queue to be advanced
1405  * @acked: seqno of last packet acked by peer without any gaps before
1406  * @gap: # of gap packets
1407  * @ga: buffer pointer to Gap ACK blocks from peer
1408  * @xmitq: queue for accumulating the retransmitted packets if any
1409  *
1410  * In case of repeated retransmit failures, the call will return shortly
1411  * with a returned code (e.g. TIPC_LINK_DOWN_EVT)
1412  */
1413 static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
1414 				     struct tipc_gap_ack_blks *ga,
1415 				     struct sk_buff_head *xmitq)
1416 {
1417 	struct sk_buff *skb, *_skb, *tmp;
1418 	struct tipc_msg *hdr;
1419 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1420 	u16 ack = l->rcv_nxt - 1;
1421 	bool passed = false;
1422 	u16 seqno, n = 0;
1423 	int rc = 0;
1424 
1425 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1426 		seqno = buf_seqno(skb);
1427 
1428 next_gap_ack:
1429 		if (less_eq(seqno, acked)) {
1430 			/* release skb */
1431 			__skb_unlink(skb, &l->transmq);
1432 			kfree_skb(skb);
1433 		} else if (less_eq(seqno, acked + gap)) {
1434 			/* First, check whether repeated retransmit failures have occurred */
1435 			if (!passed && link_retransmit_failure(l, l, &rc))
1436 				return rc;
1437 			passed = true;
1438 
1439 			/* retransmit skb if unrestricted */
1440 			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1441 				continue;
1442 			TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
1443 			_skb = pskb_copy(skb, GFP_ATOMIC);
1444 			if (!_skb)
1445 				continue;
1446 			hdr = buf_msg(_skb);
1447 			msg_set_ack(hdr, ack);
1448 			msg_set_bcast_ack(hdr, bc_ack);
1449 			_skb->priority = TC_PRIO_CONTROL;
1450 			__skb_queue_tail(xmitq, _skb);
1451 			l->stats.retransmitted++;
1452 
1453 			/* Increase actual retrans counter & mark first time */
1454 			if (!TIPC_SKB_CB(skb)->retr_cnt++)
1455 				TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1456 		} else {
1457 			/* retry with Gap ACK blocks if any */
1458 			if (!ga || n >= ga->gack_cnt)
1459 				break;
1460 			acked = ntohs(ga->gacks[n].ack);
1461 			gap = ntohs(ga->gacks[n].gap);
1462 			n++;
1463 			goto next_gap_ack;
1464 		}
1465 	}
1466 
1467 	return 0;
1468 }
1469 
1470 /* tipc_link_build_state_msg: prepare link state message for transmission
1471  *
1472  * Note that sending of broadcast ack is coordinated among nodes, to reduce
1473  * risk of ack storms towards the sender
1474  */
1475 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1476 {
1477 	if (!l)
1478 		return 0;
1479 
1480 	/* Broadcast ACK must be sent via a unicast link => defer to caller */
1481 	if (link_is_bc_rcvlink(l)) {
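		/* Spread broadcast acks over the nodes: a node acks only when
		 * the low bits of rcv_nxt match the inverted low bits of its
		 * own address, i.e. roughly every 16th packet
		 */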
1482 		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1483 			return 0;
1484 		l->rcv_unacked = 0;
1485 
1486 		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1487 		l->snd_nxt = l->rcv_nxt;
1488 		return TIPC_LINK_SND_STATE;
1489 	}
1490 
1491 	/* Unicast ACK */
1492 	l->rcv_unacked = 0;
1493 	l->stats.sent_acks++;
1494 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1495 	return 0;
1496 }
1497 
1498 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1499  */
1500 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1501 {
1502 	int mtyp = RESET_MSG;
1503 	struct sk_buff *skb;
1504 
1505 	if (l->state == LINK_ESTABLISHING)
1506 		mtyp = ACTIVATE_MSG;
1507 
1508 	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1509 
1510 	/* Inform peer that this endpoint is going down if applicable */
1511 	skb = skb_peek_tail(xmitq);
1512 	if (skb && (l->state == LINK_RESET))
1513 		msg_set_peer_stopping(buf_msg(skb), 1);
1514 }
1515 
1516 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1517  * Note that sending of broadcast NACK is coordinated among nodes, to
1518  * reduce the risk of NACK storms towards the sender
1519  */
1520 static int tipc_link_build_nack_msg(struct tipc_link *l,
1521 				    struct sk_buff_head *xmitq)
1522 {
1523 	u32 def_cnt = ++l->stats.deferred_recv;
1524 	u32 defq_len = skb_queue_len(&l->deferdq);
1525 	int match1, match2;
1526 
1527 	if (link_is_bc_rcvlink(l)) {
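		/* Likewise stagger broadcast NACKs: only the node whose low
		 * address bits match the running deferral count asks for a
		 * STATE message to be sent
		 */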
1528 		match1 = def_cnt & 0xf;
1529 		match2 = tipc_own_addr(l->net) & 0xf;
1530 		if (match1 == match2)
1531 			return TIPC_LINK_SND_STATE;
1532 		return 0;
1533 	}
1534 
1535 	if (defq_len >= 3 && !((defq_len - 3) % 16))
1536 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1537 	return 0;
1538 }
1539 
1540 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1541  * @l: the link that should handle the message
1542  * @skb: TIPC packet
1543  * @xmitq: queue to place packets to be sent after this call
1544  */
1545 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1546 		  struct sk_buff_head *xmitq)
1547 {
1548 	struct sk_buff_head *defq = &l->deferdq;
1549 	struct tipc_msg *hdr = buf_msg(skb);
1550 	u16 seqno, rcv_nxt, win_lim;
1551 	int rc = 0;
1552 
1553 	/* Verify and update link state */
1554 	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1555 		return tipc_link_proto_rcv(l, skb, xmitq);
1556 
1557 	/* Don't send probe at next timeout expiration */
1558 	l->silent_intv_cnt = 0;
1559 
1560 	do {
1561 		hdr = buf_msg(skb);
1562 		seqno = msg_seqno(hdr);
1563 		rcv_nxt = l->rcv_nxt;
1564 		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1565 
1566 		if (unlikely(!link_is_up(l))) {
1567 			if (l->state == LINK_ESTABLISHING)
1568 				rc = TIPC_LINK_UP_EVT;
1569 			goto drop;
1570 		}
1571 
1572 		/* Drop if outside receive window */
1573 		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1574 			l->stats.duplicates++;
1575 			goto drop;
1576 		}
1577 
1578 		/* Forward queues and wake up waiting users */
1579 		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1580 			tipc_link_advance_backlog(l, xmitq);
1581 			if (unlikely(!skb_queue_empty(&l->wakeupq)))
1582 				link_prepare_wakeup(l);
1583 		}
1584 
1585 		/* Defer delivery if sequence gap */
1586 		if (unlikely(seqno != rcv_nxt)) {
1587 			__tipc_skb_queue_sorted(defq, seqno, skb);
1588 			rc |= tipc_link_build_nack_msg(l, xmitq);
1589 			break;
1590 		}
1591 
1592 		/* Deliver packet */
1593 		l->rcv_nxt++;
1594 		l->stats.recv_pkts++;
1595 
1596 		if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1597 			rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1598 		else if (!tipc_data_input(l, skb, l->inputq))
1599 			rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
1600 		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1601 			rc |= tipc_link_build_state_msg(l, xmitq);
1602 		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1603 			break;
1604 	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
1605 
1606 	return rc;
1607 drop:
1608 	kfree_skb(skb);
1609 	return rc;
1610 }
1611 
1612 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1613 				      bool probe_reply, u16 rcvgap,
1614 				      int tolerance, int priority,
1615 				      struct sk_buff_head *xmitq)
1616 {
1617 	struct tipc_link *bcl = l->bc_rcvlink;
1618 	struct sk_buff *skb;
1619 	struct tipc_msg *hdr;
1620 	struct sk_buff_head *dfq = &l->deferdq;
1621 	bool node_up = link_is_up(bcl);
1622 	struct tipc_mon_state *mstate = &l->mon_state;
1623 	int dlen = 0;
1624 	void *data;
1625 	u16 glen = 0;
1626 
1627 	/* Don't send protocol message during reset or link failover */
1628 	if (tipc_link_is_blocked(l))
1629 		return;
1630 
1631 	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1632 		return;
1633 
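	/* If packets are already deferred, advertise the actual gap up to the
	 * first deferred packet instead of the passed-in value
	 */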
1634 	if (!skb_queue_empty(dfq))
1635 		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1636 
1637 	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1638 			      tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1639 			      l->addr, tipc_own_addr(l->net), 0, 0, 0);
1640 	if (!skb)
1641 		return;
1642 
1643 	hdr = buf_msg(skb);
1644 	data = msg_data(hdr);
1645 	msg_set_session(hdr, l->session);
1646 	msg_set_bearer_id(hdr, l->bearer_id);
1647 	msg_set_net_plane(hdr, l->net_plane);
1648 	msg_set_next_sent(hdr, l->snd_nxt);
1649 	msg_set_ack(hdr, l->rcv_nxt - 1);
1650 	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1651 	msg_set_bc_ack_invalid(hdr, !node_up);
1652 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1653 	msg_set_link_tolerance(hdr, tolerance);
1654 	msg_set_linkprio(hdr, priority);
1655 	msg_set_redundant_link(hdr, node_up);
1656 	msg_set_seq_gap(hdr, 0);
1657 	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1658 
1659 	if (mtyp == STATE_MSG) {
1660 		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1661 			msg_set_seqno(hdr, l->snd_nxt_state++);
1662 		msg_set_seq_gap(hdr, rcvgap);
1663 		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1664 		msg_set_probe(hdr, probe);
1665 		msg_set_is_keepalive(hdr, probe || probe_reply);
1666 		if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1667 			glen = tipc_build_gap_ack_blks(l, data);
1668 		tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1669 		msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1670 		skb_trim(skb, INT_H_SIZE + glen + dlen);
1671 		l->stats.sent_states++;
1672 		l->rcv_unacked = 0;
1673 	} else {
1674 		/* RESET_MSG or ACTIVATE_MSG */
1675 		if (mtyp == ACTIVATE_MSG) {
1676 			msg_set_dest_session_valid(hdr, 1);
1677 			msg_set_dest_session(hdr, l->peer_session);
1678 		}
1679 		msg_set_max_pkt(hdr, l->advertised_mtu);
1680 		strcpy(data, l->if_name);
1681 		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1682 		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1683 	}
1684 	if (probe)
1685 		l->stats.sent_probes++;
1686 	if (rcvgap)
1687 		l->stats.sent_nacks++;
1688 	skb->priority = TC_PRIO_CONTROL;
1689 	__skb_queue_tail(xmitq, skb);
1690 	trace_tipc_proto_build(skb, false, l->name);
1691 }
1692 
1693 void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1694 				    struct sk_buff_head *xmitq)
1695 {
1696 	u32 onode = tipc_own_addr(l->net);
1697 	struct tipc_msg *hdr, *ihdr;
1698 	struct sk_buff_head tnlq;
1699 	struct sk_buff *skb;
1700 	u32 dnode = l->addr;
1701 
1702 	__skb_queue_head_init(&tnlq);
1703 	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1704 			      INT_H_SIZE, BASIC_H_SIZE,
1705 			      dnode, onode, 0, 0, 0);
1706 	if (!skb) {
1707 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1708 		return;
1709 	}
1710 
1711 	hdr = buf_msg(skb);
1712 	msg_set_msgcnt(hdr, 1);
1713 	msg_set_bearer_id(hdr, l->peer_bearer_id);
1714 
1715 	ihdr = (struct tipc_msg *)msg_data(hdr);
1716 	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1717 		      BASIC_H_SIZE, dnode);
1718 	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1719 	__skb_queue_tail(&tnlq, skb);
1720 	tipc_link_xmit(l, &tnlq, xmitq);
1721 }
1722 
1723 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1724  * with contents of the link's transmit and backlog queues.
1725  */
1726 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1727 			   int mtyp, struct sk_buff_head *xmitq)
1728 {
1729 	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1730 	struct sk_buff *skb, *tnlskb;
1731 	struct tipc_msg *hdr, tnlhdr;
1732 	struct sk_buff_head *queue = &l->transmq;
1733 	struct sk_buff_head tmpxq, tnlq, frags;
1734 	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1735 	bool pktcnt_need_update = false;
1736 	u16 syncpt;
1737 	int rc;
1738 
1739 	if (!tnl)
1740 		return;
1741 
1742 	__skb_queue_head_init(&tnlq);
1743 	/* Link Synching:
1744 	 * From now on, send only a single ("dummy") SYNCH message
1745 	 * to peer. The SYNCH message does not contain any data, just
1746 	 * a header conveying the synch point to the peer.
1747 	 */
1748 	if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1749 		tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
1750 					 INT_H_SIZE, 0, l->addr,
1751 					 tipc_own_addr(l->net),
1752 					 0, 0, 0);
1753 		if (!tnlskb) {
1754 			pr_warn("%sunable to create dummy SYNCH_MSG\n",
1755 				link_co_err);
1756 			return;
1757 		}
1758 
1759 		hdr = buf_msg(tnlskb);
1760 		syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
1761 		msg_set_syncpt(hdr, syncpt);
1762 		msg_set_bearer_id(hdr, l->peer_bearer_id);
1763 		__skb_queue_tail(&tnlq, tnlskb);
1764 		tipc_link_xmit(tnl, &tnlq, xmitq);
1765 		return;
1766 	}
1767 
1768 	__skb_queue_head_init(&tmpxq);
1769 	__skb_queue_head_init(&frags);
1770 	/* At least one packet required for safe algorithm => add dummy */
1771 	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1772 			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1773 			      0, 0, TIPC_ERR_NO_PORT);
1774 	if (!skb) {
1775 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1776 		return;
1777 	}
1778 	__skb_queue_tail(&tnlq, skb);
1779 	tipc_link_xmit(l, &tnlq, &tmpxq);
1780 	__skb_queue_purge(&tmpxq);
1781 
1782 	/* Initialize reusable tunnel packet header */
1783 	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1784 		      mtyp, INT_H_SIZE, l->addr);
1785 	if (mtyp == SYNCH_MSG)
1786 		pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
1787 	else
1788 		pktcnt = skb_queue_len(&l->transmq);
1789 	pktcnt += skb_queue_len(&l->backlogq);
1790 	msg_set_msgcnt(&tnlhdr, pktcnt);
1791 	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
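	/* The walk below runs twice: first over the transmit queue, then
	 * (via the tnl label) over the backlog queue, whose packets are
	 * assigned their sequence numbers on the fly.
	 */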
1792 tnl:
1793 	/* Wrap each packet into a tunnel packet */
1794 	skb_queue_walk(queue, skb) {
1795 		hdr = buf_msg(skb);
1796 		if (queue == &l->backlogq)
1797 			msg_set_seqno(hdr, seqno++);
1798 		pktlen = msg_size(hdr);
1799 
1800 		/* Tunnel link MTU is not large enough? This could be
1801 		 * due to:
1802 		 * 1) The link MTU has just changed or was set differently;
1803 		 * 2) A FAILOVER on top of a SYNCH message.
1804 		 *
1805 		 * The 2nd case should not happen if the peer supports
1806 		 * TIPC_TUNNEL_ENHANCED.
1807 		 */
1808 		if (pktlen > tnl->mtu - INT_H_SIZE) {
1809 			if (mtyp == FAILOVER_MSG &&
1810 			    (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1811 				rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
1812 						       &frags);
1813 				if (rc) {
1814 					pr_warn("%sunable to frag msg: rc %d\n",
1815 						link_co_err, rc);
1816 					return;
1817 				}
1818 				pktcnt += skb_queue_len(&frags) - 1;
1819 				pktcnt_need_update = true;
1820 				skb_queue_splice_tail_init(&frags, &tnlq);
1821 				continue;
1822 			}
1823 			/* Unfortunately, the peer does not support TIPC_TUNNEL_ENHANCED,
1824 			 * so just warn and return!
1825 			 */
1826 			pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
1827 					    link_co_err, msg_user(hdr),
1828 					    msg_type(hdr), msg_size(hdr));
1829 			return;
1830 		}
1831 
1832 		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1833 		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
1834 		if (!tnlskb) {
1835 			pr_warn("%sunable to send packet\n", link_co_err);
1836 			return;
1837 		}
1838 		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1839 		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1840 		__skb_queue_tail(&tnlq, tnlskb);
1841 	}
1842 	if (queue != &l->backlogq) {
1843 		queue = &l->backlogq;
1844 		goto tnl;
1845 	}
1846 
1847 	if (pktcnt_need_update)
1848 		skb_queue_walk(&tnlq, skb) {
1849 			hdr = buf_msg(skb);
1850 			msg_set_msgcnt(hdr, pktcnt);
1851 		}
1852 
1853 	tipc_link_xmit(tnl, &tnlq, xmitq);
1854 
1855 	if (mtyp == FAILOVER_MSG) {
1856 		tnl->drop_point = l->rcv_nxt;
1857 		tnl->failover_reasm_skb = l->reasm_buf;
1858 		l->reasm_buf = NULL;
1859 
1860 		/* Failover the link's deferdq */
1861 		if (unlikely(!skb_queue_empty(fdefq))) {
1862 			pr_warn("Link failover deferdq not empty: %d!\n",
1863 				skb_queue_len(fdefq));
1864 			__skb_queue_purge(fdefq);
1865 		}
1866 		skb_queue_splice_init(&l->deferdq, fdefq);
1867 	}
1868 }
1869 
1870 /**
1871  * tipc_link_failover_prepare() - prepare tnl for link failover
1872  *
1873  * This is a special version of its precursor, tipc_link_tnl_prepare();
1874  * see tipc_node_link_failover() for details.
1875  *
1876  * @l: failover link
1877  * @tnl: tunnel link
1878  * @xmitq: queue for messages to be transmitted
1879  */
1880 void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
1881 				struct sk_buff_head *xmitq)
1882 {
1883 	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1884 
1885 	tipc_link_create_dummy_tnl_msg(tnl, xmitq);
1886 
1887 	/* This failover link endpoint was never established before,
1888 	 * so it has not received anything from peer.
1889 	 * Otherwise, it must be a normal failover situation or the
1890 	 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
1891 	 * would have to start over from scratch instead.
1892 	 */
1893 	tnl->drop_point = 1;
1894 	tnl->failover_reasm_skb = NULL;
1895 
1896 	/* Initialize the link's failover deferdq */
1897 	if (unlikely(!skb_queue_empty(fdefq))) {
1898 		pr_warn("Link failover deferdq not empty: %d!\n",
1899 			skb_queue_len(fdefq));
1900 		__skb_queue_purge(fdefq);
1901 	}
1902 }
1903 
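/* Note: more()/less() are TIPC's modulo-2^16 sequence number comparisons,
 * defined elsewhere in TIPC; e.g. more(1, 0xfffe) is true, since 1 follows
 * 0xfffe after wrap-around.
 */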
1904 /* tipc_link_validate_msg(): validate message against current link state
1905  * Returns true if message should be accepted, otherwise false
1906  */
1907 bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
1908 {
1909 	u16 curr_session = l->peer_session;
1910 	u16 session = msg_session(hdr);
1911 	int mtyp = msg_type(hdr);
1912 
1913 	if (msg_user(hdr) != LINK_PROTOCOL)
1914 		return true;
1915 
1916 	switch (mtyp) {
1917 	case RESET_MSG:
1918 		if (!l->in_session)
1919 			return true;
1920 		/* Accept only RESET with new session number */
1921 		return more(session, curr_session);
1922 	case ACTIVATE_MSG:
1923 		if (!l->in_session)
1924 			return true;
1925 		/* Accept only ACTIVATE with new or current session number */
1926 		return !less(session, curr_session);
1927 	case STATE_MSG:
1928 		/* Accept only STATE with current session number */
1929 		if (!l->in_session)
1930 			return false;
1931 		if (session != curr_session)
1932 			return false;
1933 		/* Extra sanity check */
1934 		if (!link_is_up(l) && msg_ack(hdr))
1935 			return false;
1936 		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
1937 			return true;
1938 		/* Accept only STATE with new sequence number */
1939 		return !less(msg_seqno(hdr), l->rcv_nxt_state);
1940 	default:
1941 		return false;
1942 	}
1943 }
1944 
1945 /* tipc_link_proto_rcv(): receive link level protocol message.
1946  * Note that the network plane id propagates through the network, and may
1947  * change at any time. The node with the lowest numerical id determines
1948  * the network plane.
1949  */
1950 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1951 			       struct sk_buff_head *xmitq)
1952 {
1953 	struct tipc_msg *hdr = buf_msg(skb);
1954 	struct tipc_gap_ack_blks *ga = NULL;
1955 	u16 rcvgap = 0;
1956 	u16 ack = msg_ack(hdr);
1957 	u16 gap = msg_seq_gap(hdr);
1958 	u16 peers_snd_nxt =  msg_next_sent(hdr);
1959 	u16 peers_tol = msg_link_tolerance(hdr);
1960 	u16 peers_prio = msg_linkprio(hdr);
1961 	u16 rcv_nxt = l->rcv_nxt;
1962 	u16 dlen = msg_data_sz(hdr);
1963 	int mtyp = msg_type(hdr);
1964 	bool reply = msg_probe(hdr);
1965 	u16 glen = 0;
1966 	void *data;
1967 	char *if_name;
1968 	int rc = 0;
1969 
1970 	trace_tipc_proto_rcv(skb, false, l->name);
1971 	if (tipc_link_is_blocked(l) || !xmitq)
1972 		goto exit;
1973 
1974 	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1975 		l->net_plane = msg_net_plane(hdr);
1976 
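	/* Linearizing may rearrange the skb data area, so the header and
	 * data pointers are re-read below.
	 */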
1977 	skb_linearize(skb);
1978 	hdr = buf_msg(skb);
1979 	data = msg_data(hdr);
1980 
1981 	if (!tipc_link_validate_msg(l, hdr)) {
1982 		trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
1983 		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
1984 		goto exit;
1985 	}
1986 
1987 	switch (mtyp) {
1988 	case RESET_MSG:
1989 	case ACTIVATE_MSG:
1990 		/* Complete own link name with peer's interface name */
1991 		if_name =  strrchr(l->name, ':') + 1;
1992 		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1993 			break;
1994 		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1995 			break;
1996 		strncpy(if_name, data, TIPC_MAX_IF_NAME);
1997 
1998 		/* Update own tolerance if peer indicates a non-zero value */
1999 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2000 			l->tolerance = peers_tol;
2001 			l->bc_rcvlink->tolerance = peers_tol;
2002 		}
2003 		/* Update own priority if peer's priority is higher */
2004 		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
2005 			l->priority = peers_prio;
2006 
2007 		/* If peer is going down we want full re-establish cycle */
2008 		if (msg_peer_stopping(hdr)) {
2009 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2010 			break;
2011 		}
2012 
2013 		/* If this endpoint was re-created while peer was ESTABLISHING
2014 		 * it doesn't know the current session number. Force re-synch.
2015 		 */
2016 		if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
2017 		    l->session != msg_dest_session(hdr)) {
2018 			if (less(l->session, msg_dest_session(hdr)))
2019 				l->session = msg_dest_session(hdr) + 1;
2020 			break;
2021 		}
2022 
2023 		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
2024 		if (mtyp == RESET_MSG || !link_is_up(l))
2025 			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
2026 
2027 		/* ACTIVATE_MSG takes up link if it was already locally reset */
2028 		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
2029 			rc = TIPC_LINK_UP_EVT;
2030 
2031 		l->peer_session = msg_session(hdr);
2032 		l->in_session = true;
2033 		l->peer_bearer_id = msg_bearer_id(hdr);
2034 		if (l->mtu > msg_max_pkt(hdr))
2035 			l->mtu = msg_max_pkt(hdr);
2036 		break;
2037 
2038 	case STATE_MSG:
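		/* Remember the next expected protocol msg sequence number;
		 * tipc_link_validate_msg() uses it to reject out-of-date
		 * STATE messages when the peer supports TIPC_LINK_PROTO_SEQNO.
		 */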
2039 		l->rcv_nxt_state = msg_seqno(hdr) + 1;
2040 
2041 		/* Update own tolerance if peer indicates a non-zero value */
2042 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2043 			l->tolerance = peers_tol;
2044 			l->bc_rcvlink->tolerance = peers_tol;
2045 		}
2046 		/* Update own prio if peer indicates a different value */
2047 		if ((peers_prio != l->priority) &&
2048 		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
2049 			l->priority = peers_prio;
2050 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2051 		}
2052 
2053 		l->silent_intv_cnt = 0;
2054 		l->stats.recv_states++;
2055 		if (msg_probe(hdr))
2056 			l->stats.recv_probes++;
2057 
2058 		if (!link_is_up(l)) {
2059 			if (l->state == LINK_ESTABLISHING)
2060 				rc = TIPC_LINK_UP_EVT;
2061 			break;
2062 		}
2063 
2064 		/* Receive Gap ACK blocks from peer if any */
2065 		if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
2066 			ga = (struct tipc_gap_ack_blks *)data;
2067 			glen = ntohs(ga->len);
2068 			/* sanity check: if failed, ignore Gap ACK blocks */
2069 			if (glen != tipc_gap_ack_blks_sz(ga->gack_cnt))
2070 				ga = NULL;
2071 		}
2072 
2073 		tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
2074 			     &l->mon_state, l->bearer_id);
2075 
2076 		/* Send NACK if peer has sent pkts we haven't received yet */
2077 		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
2078 			rcvgap = peers_snd_nxt - l->rcv_nxt;
2079 		if (rcvgap || reply)
2080 			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
2081 						  rcvgap, 0, 0, xmitq);
2082 
2083 		rc |= tipc_link_advance_transmq(l, ack, gap, ga, xmitq);
2084 
2085 		/* If NACK, retransmit will now start at right position */
2086 		if (gap)
2087 			l->stats.recv_nacks++;
2088 
2089 		tipc_link_advance_backlog(l, xmitq);
2090 		if (unlikely(!skb_queue_empty(&l->wakeupq)))
2091 			link_prepare_wakeup(l);
2092 	}
2093 exit:
2094 	kfree_skb(skb);
2095 	return rc;
2096 }
2097 
2098 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
2099  */
2100 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
2101 					 u16 peers_snd_nxt,
2102 					 struct sk_buff_head *xmitq)
2103 {
2104 	struct sk_buff *skb;
2105 	struct tipc_msg *hdr;
2106 	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
2107 	u16 ack = l->rcv_nxt - 1;
2108 	u16 gap_to = peers_snd_nxt - 1;
2109 
2110 	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
2111 			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
2112 	if (!skb)
2113 		return false;
2114 	hdr = buf_msg(skb);
2115 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
2116 	msg_set_bcast_ack(hdr, ack);
2117 	msg_set_bcgap_after(hdr, ack);
2118 	if (dfrd_skb)
2119 		gap_to = buf_seqno(dfrd_skb) - 1;
2120 	msg_set_bcgap_to(hdr, gap_to);
2121 	msg_set_non_seq(hdr, bcast);
2122 	__skb_queue_tail(xmitq, skb);
2123 	return true;
2124 }
2125 
2126 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
2127  *
2128  * Give a newly added peer node the sequence number where it should
2129  * start receiving and acking broadcast packets.
2130  */
2131 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
2132 					struct sk_buff_head *xmitq)
2133 {
2134 	struct sk_buff_head list;
2135 
2136 	__skb_queue_head_init(&list);
2137 	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
2138 		return;
2139 	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
2140 	tipc_link_xmit(l, &list, xmitq);
2141 }
2142 
2143 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
2144  */
2145 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
2146 {
2147 	int mtyp = msg_type(hdr);
2148 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2149 
2150 	if (link_is_up(l))
2151 		return;
2152 
2153 	if (msg_user(hdr) == BCAST_PROTOCOL) {
2154 		l->rcv_nxt = peers_snd_nxt;
2155 		l->state = LINK_ESTABLISHED;
2156 		return;
2157 	}
2158 
2159 	if (l->peer_caps & TIPC_BCAST_SYNCH)
2160 		return;
2161 
2162 	if (msg_peer_node_is_up(hdr))
2163 		return;
2164 
2165 	/* Compatibility: accept older, less safe initial synch data */
2166 	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2167 		l->rcv_nxt = peers_snd_nxt;
2168 }
2169 
2170 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2171  */
2172 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2173 			  struct sk_buff_head *xmitq)
2174 {
2175 	struct tipc_link *snd_l = l->bc_sndlink;
2176 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2177 	u16 from = msg_bcast_ack(hdr) + 1;
2178 	u16 to = from + msg_bc_gap(hdr) - 1;
2179 	int rc = 0;
2180 
2181 	if (!link_is_up(l))
2182 		return rc;
2183 
2184 	if (!msg_peer_node_is_up(hdr))
2185 		return rc;
2186 
2187 	/* Open when peer acknowledges our bcast init msg (pkt #1) */
2188 	if (msg_ack(hdr))
2189 		l->bc_peer_is_up = true;
2190 
2191 	if (!l->bc_peer_is_up)
2192 		return rc;
2193 
2194 	l->stats.recv_nacks++;
2195 
2196 	/* Ignore if peers_snd_nxt goes beyond receive window */
2197 	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
2198 		return rc;
2199 
2200 	rc = tipc_link_bc_retrans(snd_l, l, from, to, xmitq);
2201 
2202 	l->snd_nxt = peers_snd_nxt;
2203 	if (link_bc_rcv_gap(l))
2204 		rc |= TIPC_LINK_SND_STATE;
2205 
2206 	/* Return now if sender supports nack via STATE messages */
2207 	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2208 		return rc;
2209 
2210 	/* Otherwise, be backwards compatible */
2211 
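	/* The nack_state machine below throttles NACK sending: at most one
	 * NACK is sent, then further NACKs are suppressed or conditionally
	 * delayed, presumably to avoid redundant NACKs when several
	 * receivers detect the same gap.
	 */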
2212 	if (!more(peers_snd_nxt, l->rcv_nxt)) {
2213 		l->nack_state = BC_NACK_SND_CONDITIONAL;
2214 		return 0;
2215 	}
2216 
2217 	/* Don't NACK if one was recently sent or peeked */
2218 	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2219 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2220 		return 0;
2221 	}
2222 
2223 	/* Conditionally delay NACK sending until next synch rcv */
2224 	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2225 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2226 		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
2227 			return 0;
2228 	}
2229 
2230 	/* Send NACK now but suppress next one */
2231 	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2232 	l->nack_state = BC_NACK_SND_SUPPRESS;
2233 	return 0;
2234 }
2235 
2236 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
2237 			  struct sk_buff_head *xmitq)
2238 {
2239 	struct sk_buff *skb, *tmp;
2240 	struct tipc_link *snd_l = l->bc_sndlink;
2241 
2242 	if (!link_is_up(l) || !l->bc_peer_is_up)
2243 		return;
2244 
2245 	if (!more(acked, l->acked))
2246 		return;
2247 
2248 	trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq);
2249 	/* Skip over packets peer has already acked */
2250 	skb_queue_walk(&snd_l->transmq, skb) {
2251 		if (more(buf_seqno(skb), l->acked))
2252 			break;
2253 	}
2254 
2255 	/* Update/release the packets peer is acking now */
2256 	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
2257 		if (more(buf_seqno(skb), acked))
2258 			break;
2259 		if (!--TIPC_SKB_CB(skb)->ackers) {
2260 			__skb_unlink(skb, &snd_l->transmq);
2261 			kfree_skb(skb);
2262 		}
2263 	}
2264 	l->acked = acked;
2265 	tipc_link_advance_backlog(snd_l, xmitq);
2266 	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
2267 		link_prepare_wakeup(snd_l);
2268 }
2269 
2270 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
2271  * This function is here for backwards compatibility, since
2272  * This function is here for backwards compatibility, since
2273  * no BCAST_PROTOCOL/STATE messages are sent as of TIPC v2.5.
2274 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2275 			  struct sk_buff_head *xmitq)
2276 {
2277 	struct tipc_msg *hdr = buf_msg(skb);
2278 	u32 dnode = msg_destnode(hdr);
2279 	int mtyp = msg_type(hdr);
2280 	u16 acked = msg_bcast_ack(hdr);
2281 	u16 from = acked + 1;
2282 	u16 to = msg_bcgap_to(hdr);
2283 	u16 peers_snd_nxt = to + 1;
2284 	int rc = 0;
2285 
2286 	kfree_skb(skb);
2287 
2288 	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2289 		return 0;
2290 
2291 	if (mtyp != STATE_MSG)
2292 		return 0;
2293 
2294 	if (dnode == tipc_own_addr(l->net)) {
2295 		tipc_link_bc_ack_rcv(l, acked, xmitq);
2296 		rc = tipc_link_bc_retrans(l->bc_sndlink, l, from, to, xmitq);
2297 		l->stats.recv_nacks++;
2298 		return rc;
2299 	}
2300 
2301 	/* Msg for other node => suppress own NACK at next sync if applicable */
2302 	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2303 		l->nack_state = BC_NACK_SND_SUPPRESS;
2304 
2305 	return 0;
2306 }
2307 
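/* Backlog limits scale with the link window; e.g. win = 50 gives
 * 50/100/150/200 packets for LOW..CRITICAL importance, while SYSTEM
 * importance is bounded by TIPC_MAX_PUBL / (mtu / ITEM_SIZE), i.e. roughly
 * the number of packets needed for a full bulk name-table distribution.
 */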
2308 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
2309 {
2310 	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
2311 
2312 	l->window = win;
2313 	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
2314 	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
2315 	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
2316 	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
2317 	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
2318 }
2319 
2320 /**
2321  * tipc_link_reset_stats - reset link statistics
2322  * @l: pointer to link
2323  */
2324 void tipc_link_reset_stats(struct tipc_link *l)
2325 {
2326 	memset(&l->stats, 0, sizeof(l->stats));
2327 }
2328 
2329 static void link_print(struct tipc_link *l, const char *str)
2330 {
2331 	struct sk_buff *hskb = skb_peek(&l->transmq);
2332 	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
2333 	u16 tail = l->snd_nxt - 1;
2334 
2335 	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
2336 	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2337 		skb_queue_len(&l->transmq), head, tail,
2338 		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
2339 }
2340 
2341 /* Parse and validate nested (link) properties valid for media, bearer and link
2342  */
2343 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2344 {
2345 	int err;
2346 
2347 	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2348 					  tipc_nl_prop_policy, NULL);
2349 	if (err)
2350 		return err;
2351 
2352 	if (props[TIPC_NLA_PROP_PRIO]) {
2353 		u32 prio;
2354 
2355 		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2356 		if (prio > TIPC_MAX_LINK_PRI)
2357 			return -EINVAL;
2358 	}
2359 
2360 	if (props[TIPC_NLA_PROP_TOL]) {
2361 		u32 tol;
2362 
2363 		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2364 		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2365 			return -EINVAL;
2366 	}
2367 
2368 	if (props[TIPC_NLA_PROP_WIN]) {
2369 		u32 win;
2370 
2371 		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2372 		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
2373 			return -EINVAL;
2374 	}
2375 
2376 	return 0;
2377 }
2378 
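/* Serialize the per-link counters as a nested TIPC_NLA_LINK_STATS
 * attribute set, one u32 attribute per counter.
 */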
2379 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2380 {
2381 	int i;
2382 	struct nlattr *stats;
2383 
2384 	struct nla_map {
2385 		u32 key;
2386 		u32 val;
2387 	};
2388 
2389 	struct nla_map map[] = {
2390 		{TIPC_NLA_STATS_RX_INFO, 0},
2391 		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2392 		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2393 		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2394 		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
2395 		{TIPC_NLA_STATS_TX_INFO, 0},
2396 		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2397 		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2398 		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2399 		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2400 		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2401 			s->msg_length_counts : 1},
2402 		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2403 		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2404 		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2405 		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2406 		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2407 		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2408 		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2409 		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2410 		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2411 		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
2412 		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2413 		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2414 		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2415 		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
2416 		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2417 		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2418 		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2419 		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2420 		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2421 		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2422 		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2423 		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2424 			(s->accu_queue_sz / s->queue_sz_counts) : 0}
2425 	};
2426 
2427 	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2428 	if (!stats)
2429 		return -EMSGSIZE;
2430 
2431 	for (i = 0; i <  ARRAY_SIZE(map); i++)
2432 		if (nla_put_u32(skb, map[i].key, map[i].val))
2433 			goto msg_full;
2434 
2435 	nla_nest_end(skb, stats);
2436 
2437 	return 0;
2438 msg_full:
2439 	nla_nest_cancel(skb, stats);
2440 
2441 	return -EMSGSIZE;
2442 }
2443 
2444 /* Caller should hold appropriate locks to protect the link */
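/* @nlflags is passed straight to genlmsg_put(); callers performing a
 * multi-part dump presumably pass NLM_F_MULTI, while a single get
 * request passes 0.
 */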
2445 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2446 		       struct tipc_link *link, int nlflags)
2447 {
2448 	u32 self = tipc_own_addr(net);
2449 	struct nlattr *attrs;
2450 	struct nlattr *prop;
2451 	void *hdr;
2452 	int err;
2453 
2454 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2455 			  nlflags, TIPC_NL_LINK_GET);
2456 	if (!hdr)
2457 		return -EMSGSIZE;
2458 
2459 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2460 	if (!attrs)
2461 		goto msg_full;
2462 
2463 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2464 		goto attr_msg_full;
2465 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
2466 		goto attr_msg_full;
2467 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2468 		goto attr_msg_full;
2469 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
2470 		goto attr_msg_full;
2471 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
2472 		goto attr_msg_full;
2473 
2474 	if (tipc_link_is_up(link))
2475 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2476 			goto attr_msg_full;
2477 	if (link->active)
2478 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2479 			goto attr_msg_full;
2480 
2481 	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2482 	if (!prop)
2483 		goto attr_msg_full;
2484 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2485 		goto prop_msg_full;
2486 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2487 		goto prop_msg_full;
2488 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2489 			link->window))
2490 		goto prop_msg_full;
2491 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2492 		goto prop_msg_full;
2493 	nla_nest_end(msg->skb, prop);
2494 
2495 	err = __tipc_nl_add_stats(msg->skb, &link->stats);
2496 	if (err)
2497 		goto attr_msg_full;
2498 
2499 	nla_nest_end(msg->skb, attrs);
2500 	genlmsg_end(msg->skb, hdr);
2501 
2502 	return 0;
2503 
2504 prop_msg_full:
2505 	nla_nest_cancel(msg->skb, prop);
2506 attr_msg_full:
2507 	nla_nest_cancel(msg->skb, attrs);
2508 msg_full:
2509 	genlmsg_cancel(msg->skb, hdr);
2510 
2511 	return -EMSGSIZE;
2512 }
2513 
2514 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2515 				      struct tipc_stats *stats)
2516 {
2517 	int i;
2518 	struct nlattr *nest;
2519 
2520 	struct nla_map {
2521 		__u32 key;
2522 		__u32 val;
2523 	};
2524 
2525 	struct nla_map map[] = {
2526 		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2527 		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2528 		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2529 		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2530 		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2531 		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2532 		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2533 		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2534 		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2535 		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2536 		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2537 		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2538 		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2539 		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2540 		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2541 		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2542 		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2543 		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2544 		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2545 			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2546 	};
2547 
2548 	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2549 	if (!nest)
2550 		return -EMSGSIZE;
2551 
2552 	for (i = 0; i <  ARRAY_SIZE(map); i++)
2553 		if (nla_put_u32(skb, map[i].key, map[i].val))
2554 			goto msg_full;
2555 
2556 	nla_nest_end(skb, nest);
2557 
2558 	return 0;
2559 msg_full:
2560 	nla_nest_cancel(skb, nest);
2561 
2562 	return -EMSGSIZE;
2563 }
2564 
2565 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2566 {
2567 	int err;
2568 	void *hdr;
2569 	struct nlattr *attrs;
2570 	struct nlattr *prop;
2571 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2572 	u32 bc_mode = tipc_bcast_get_broadcast_mode(net);
2573 	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
2574 	struct tipc_link *bcl = tn->bcl;
2575 
2576 	if (!bcl)
2577 		return 0;
2578 
2579 	tipc_bcast_lock(net);
2580 
2581 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2582 			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2583 	if (!hdr) {
2584 		tipc_bcast_unlock(net);
2585 		return -EMSGSIZE;
2586 	}
2587 
2588 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2589 	if (!attrs)
2590 		goto msg_full;
2591 
2592 	/* The broadcast link is always up */
2593 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2594 		goto attr_msg_full;
2595 
2596 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2597 		goto attr_msg_full;
2598 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2599 		goto attr_msg_full;
2600 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2601 		goto attr_msg_full;
2602 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2603 		goto attr_msg_full;
2604 
2605 	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2606 	if (!prop)
2607 		goto attr_msg_full;
2608 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2609 		goto prop_msg_full;
2610 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
2611 		goto prop_msg_full;
2612 	if (bc_mode & BCLINK_MODE_SEL)
2613 		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
2614 				bc_ratio))
2615 			goto prop_msg_full;
2616 	nla_nest_end(msg->skb, prop);
2617 
2618 	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2619 	if (err)
2620 		goto attr_msg_full;
2621 
2622 	tipc_bcast_unlock(net);
2623 	nla_nest_end(msg->skb, attrs);
2624 	genlmsg_end(msg->skb, hdr);
2625 
2626 	return 0;
2627 
2628 prop_msg_full:
2629 	nla_nest_cancel(msg->skb, prop);
2630 attr_msg_full:
2631 	nla_nest_cancel(msg->skb, attrs);
2632 msg_full:
2633 	tipc_bcast_unlock(net);
2634 	genlmsg_cancel(msg->skb, hdr);
2635 
2636 	return -EMSGSIZE;
2637 }
2638 
2639 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2640 			     struct sk_buff_head *xmitq)
2641 {
2642 	l->tolerance = tol;
2643 	if (l->bc_rcvlink)
2644 		l->bc_rcvlink->tolerance = tol;
2645 	if (link_is_up(l))
2646 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
2647 }
2648 
2649 void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2650 			struct sk_buff_head *xmitq)
2651 {
2652 	l->priority = prio;
2653 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
2654 }
2655 
2656 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2657 {
2658 	l->abort_limit = limit;
2659 }
2660 
2661 char *tipc_link_name_ext(struct tipc_link *l, char *buf)
2662 {
2663 	if (!l)
2664 		scnprintf(buf, TIPC_MAX_LINK_NAME, "null");
2665 	else if (link_is_bc_sndlink(l))
2666 		scnprintf(buf, TIPC_MAX_LINK_NAME, "broadcast-sender");
2667 	else if (link_is_bc_rcvlink(l))
2668 		scnprintf(buf, TIPC_MAX_LINK_NAME,
2669 			  "broadcast-receiver, peer %x", l->addr);
2670 	else
2671 		memcpy(buf, l->name, TIPC_MAX_LINK_NAME);
2672 
2673 	return buf;
2674 }
2675 
2676 /**
2677  * tipc_link_dump - dump TIPC link data
2678  * @l: tipc link to be dumped
2679  * @dqueues: bitmask selecting which link queues to dump:
2680  *           - TIPC_DUMP_NONE: don't dump link queues
2681  *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
2682  *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
2683  *           - TIPC_DUMP_DEFERDQ: dump link deferred queue
2684  *           - TIPC_DUMP_INPUTQ: dump link input queue
2685  *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
2686  *           - TIPC_DUMP_ALL: dump all the link queues above
2687  * @buf: buffer where the formatted dump data is returned
2688  */
2689 int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2690 {
2691 	int i = 0;
2692 	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
2693 	struct sk_buff_head *list;
2694 	struct sk_buff *hskb, *tskb;
2695 	u32 len;
2696 
2697 	if (!l) {
2698 		i += scnprintf(buf, sz, "link data: (null)\n");
2699 		return i;
2700 	}
2701 
2702 	i += scnprintf(buf, sz, "link data: %x", l->addr);
2703 	i += scnprintf(buf + i, sz - i, " %x", l->state);
2704 	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
2705 	i += scnprintf(buf + i, sz - i, " %u", l->session);
2706 	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
2707 	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
2708 	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
2709 	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
2710 	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
2711 	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2712 	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2713 	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2714 	i += scnprintf(buf + i, sz - i, " %u", 0);
2715 	i += scnprintf(buf + i, sz - i, " %u", 0);
2716 	i += scnprintf(buf + i, sz - i, " %u", l->acked);
2717 
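	/* Each queue summary below is printed as a "| len head-seqno
	 * tail-seqno" triplet, for the transmit, deferred, backlog and
	 * input queues in that order.
	 */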
2718 	list = &l->transmq;
2719 	len = skb_queue_len(list);
2720 	hskb = skb_peek(list);
2721 	tskb = skb_peek_tail(list);
2722 	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2723 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2724 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2725 
2726 	list = &l->deferdq;
2727 	len = skb_queue_len(list);
2728 	hskb = skb_peek(list);
2729 	tskb = skb_peek_tail(list);
2730 	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2731 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2732 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2733 
2734 	list = &l->backlogq;
2735 	len = skb_queue_len(list);
2736 	hskb = skb_peek(list);
2737 	tskb = skb_peek_tail(list);
2738 	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2739 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2740 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2741 
2742 	list = l->inputq;
2743 	len = skb_queue_len(list);
2744 	hskb = skb_peek(list);
2745 	tskb = skb_peek_tail(list);
2746 	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
2747 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2748 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2749 
2750 	if (dqueues & TIPC_DUMP_TRANSMQ) {
2751 		i += scnprintf(buf + i, sz - i, "transmq: ");
2752 		i += tipc_list_dump(&l->transmq, false, buf + i);
2753 	}
2754 	if (dqueues & TIPC_DUMP_BACKLOGQ) {
2755 		i += scnprintf(buf + i, sz - i,
2756 			       "backlogq: <%u %u %u %u %u>, ",
2757 			       l->backlog[TIPC_LOW_IMPORTANCE].len,
2758 			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
2759 			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
2760 			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
2761 			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
2762 		i += tipc_list_dump(&l->backlogq, false, buf + i);
2763 	}
2764 	if (dqueues & TIPC_DUMP_DEFERDQ) {
2765 		i += scnprintf(buf + i, sz - i, "deferdq: ");
2766 		i += tipc_list_dump(&l->deferdq, false, buf + i);
2767 	}
2768 	if (dqueues & TIPC_DUMP_INPUTQ) {
2769 		i += scnprintf(buf + i, sz - i, "inputq: ");
2770 		i += tipc_list_dump(l->inputq, false, buf + i);
2771 	}
2772 	if (dqueues & TIPC_DUMP_WAKEUP) {
2773 		i += scnprintf(buf + i, sz - i, "wakeup: ");
2774 		i += tipc_list_dump(&l->wakeupq, false, buf + i);
2775 	}
2776 
2777 	return i;
2778 }
2779