xref: /openbmc/linux/net/tipc/link.c (revision 3fc7c707)
/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"
#include "trace.h"
#include "crypto.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_pkts;
	u32 recv_pkts;
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: seqno of last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue of saved out-of-sequence b'cast messages
 *                  received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 * @session: session to be used by link
 * @snd_nxt_state: next send seq number
 * @rcv_nxt_state: next rcv seq number
 * @in_session: have received ACTIVATE_MSG from peer
 * @active: link is active
 * @if_name: associated interface name
 * @rst_cnt: link reset counter
 * @drop_point: seq number for failover handling (FIXME)
 * @failover_reasm_skb: saved failover msg ptr (FIXME)
 * @failover_deferdq: deferred message queue for failover processing (FIXME)
 * @transmq: the link's transmit queue
 * @backlog: link's backlog by priority (importance)
 * @snd_nxt: next sequence number to be used
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @deferdq: deferred receive queue
 * @window: sliding window size for congestion handling
 * @min_win: minimal send window to be used by link
 * @ssthresh: slow start threshold for congestion handling
 * @max_win: maximal send window to be used by link
 * @cong_acks: congestion acks for congestion avoidance (FIXME)
 * @checkpoint: seq number for congestion window size handling
 * @reasm_tnlmsg: fragmentation/reassembly area for tunnel protocol message
 * @last_gap: last gap ack blocks for bcast (FIXME)
 * @last_ga: ptr to gap ack blocks
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @nack_state: bcast nack state
 * @bc_peer_is_up: peer has acked the bcast init msg
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u16 peer_session;
	u16 session;
	u16 snd_nxt_state;
	u16 rcv_nxt_state;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool in_session;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;
	struct sk_buff_head failover_deferdq;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
		struct sk_buff *target_bskb;
	} backlog[5];
	u16 snd_nxt;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;
	u16 window;
	u16 min_win;
	u16 ssthresh;
	u16 max_win;
	u16 cong_acks;
	u16 checkpoint;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;
	struct sk_buff *reasm_tnlmsg;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	u16 last_gap;
	struct tipc_gap_ack_blks *last_ga;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	u8 nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};
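
/* Note on the queues above: outbound messages flow from backlogq (bounded
 * per importance level by backlog[].limit) into transmq, where they stay
 * until acked by the peer; out-of-sequence inbound packets wait in deferdq,
 * and blocked senders park wakeup messages on wakeupq until congestion
 * abates. See tipc_link_xmit() and tipc_link_rcv() below.
 */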

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
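
/* Per-packet retransmission pacing: once (re)transmitted, a packet is not
 * retransmitted again before the interval above has elapsed - 10 ms on the
 * broadcast send link, 1 ms on unicast links. See
 * tipc_link_set_skb_retransmit_time() below.
 */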

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED     = 0xe,
	LINK_ESTABLISHING    = 0xe  << 4,
	LINK_RESET           = 0x1  << 8,
	LINK_RESETTING       = 0x2  << 12,
	LINK_PEER_RESET      = 0xd  << 16,
	LINK_FAILINGOVER     = 0xf  << 20,
	LINK_SYNCHING        = 0xc  << 24
};
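
/* Each state above occupies its own bit positions, so a link's state can be
 * matched against several states at once with a single mask operation, e.g.:
 *
 *	l->state & (LINK_ESTABLISHED | LINK_SYNCHING)
 *
 * is non-zero iff the link is in either of the two "up" states - exactly
 * what link_is_up() below does.
 */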

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
				    struct tipc_link *l, u8 start_index);
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
				     u16 acked, u16 gap,
				     struct tipc_gap_ack_blks *ga,
				     struct sk_buff_head *xmitq,
				     bool *retransmitted, int *rc);
static void tipc_link_update_cwin(struct tipc_link *l, int released,
				  bool retransmitted);
/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_min_win(struct tipc_link *l)
{
	return l->min_win;
}

int tipc_link_max_win(struct tipc_link *l)
{
	return l->max_win;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

struct net *tipc_link_net(struct tipc_link *l)
{
	return l->net;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
	l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

int tipc_link_mss(struct tipc_link *l)
{
#ifdef CONFIG_TIPC_CRYPTO
	return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
#else
	return l->mtu - INT_H_SIZE;
#endif
}
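
/* Worked example (assuming INT_H_SIZE is 40 bytes, as defined in msg.h):
 * on a bearer with a 1500 byte link MTU and crypto disabled, the maximum
 * segment size handed to the fragmenter is 1500 - 40 = 1460 bytes; with
 * CONFIG_TIPC_CRYPTO the per-packet encryption overhead (EMSG_OVERHEAD)
 * is subtracted as well.
 */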

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
	return l->state;
}

/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,C..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @session: session to be used by link
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 * @self: local unicast link id
 * @peer_id: 128-bit ID of peer
 *
 * Return: true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      u32 min_win, u32 max_win, u32 session, u32 self,
		      u32 peer, u8 *peer_id, u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	char peer_str[NODE_ID_STR_LEN] = {0,};
	char self_str[NODE_ID_STR_LEN] = {0,};
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Set link name for unicast links only */
	if (peer_id) {
		tipc_nodeid2string(self_str, tipc_own_id(net));
		if (strlen(self_str) > 16)
			sprintf(self_str, "%x", self);
		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
	}
	/* Peer i/f name will be completed by reset/activate message */
	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
		 self_str, if_name, peer_str);

	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->in_session = false;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	if (bc_rcvlink)
		bc_rcvlink->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, min_win, max_win);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	__skb_queue_head_init(&l->failover_deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}
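
/* Illustrative call (schematic, with made-up arguments - the real callers
 * live in node.c and pass values taken from the bearer and the peer node):
 *
 *	struct tipc_link *l;
 *
 *	if (!tipc_link_create(net, "eth0", bearer_id, TIPC_DEF_LINK_TOL,
 *			      'A', mtu, TIPC_DEF_LINK_PRI, TIPC_DEF_LINK_WIN,
 *			      TIPC_MAX_LINK_WIN, session, self, peer, peer_id,
 *			      peer_caps, bc_sndlink, bc_rcvlink, inputq,
 *			      namedq, &l))
 *		return false;
 */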

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @mtu: mtu to be used initially if no peers
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_id: 128-bit ID of peer
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 *
 * Return: true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
			 int mtu, u32 min_win, u32 max_win, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
			      max_win, 0, ownnode, peer, NULL, peer_caps,
			      bc_sndlink, NULL, inputq, namedq, link))
		return false;

	l = *link;
	if (peer_id) {
		char peer_str[NODE_ID_STR_LEN] = {0,};

		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
		/* Broadcast receiver link name: "broadcast-link:<peer>" */
		snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
			 peer_str);
	} else {
		strcpy(l->name, tipc_bclink_name);
	}
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_toggle_rcast(net, false);

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;
	int old_state = l->state;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
}
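
/* Example walk through the FSM above: a link in LINK_RESET that sees a
 * LINK_PEER_RESET_EVT moves to LINK_ESTABLISHING, and a subsequent
 * LINK_ESTABLISH_EVT takes it to LINK_ESTABLISHED. Only transitions out of
 * LINK_ESTABLISHED/LINK_SYNCHING set TIPC_LINK_DOWN_EVT in the return
 * code, telling the caller to take the link down.
 */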

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_inner_hdr(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/**
 * tipc_link_too_silent - check if link is "too silent"
 * @l: tipc link to be checked
 *
 * Return: true if the link 'silent_intv_cnt' is about to reach the
 * 'abort_limit' value, otherwise false
 */
bool tipc_link_too_silent(struct tipc_link *l)
{
	return (l->silent_intv_cnt + 2 > l->abort_limit);
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		probe |= !skb_queue_empty(&l->deferdq);
		if (l->snd_nxt == l->checkpoint) {
			tipc_link_update_cwin(l, 0, 0);
			probe = true;
		}
		l->checkpoint = l->snd_nxt;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

	return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
	return -ELINKCONG;
}
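
/* The -ELINKCONG returned above is the sender's cue to block: the wakeup
 * pseudo message parked on l->wakeupq carries the blocked message's
 * importance in TIPC_SKB_CB()->chain_imp, and link_prepare_wakeup() below
 * moves it to l->inputq once the matching backlog level has drained again,
 * waking the socket that originated the blocked send.
 */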

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff_head *wakeupq = &l->wakeupq;
	struct sk_buff_head *inputq = l->inputq;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head tmpq;
	int avail[5] = {0,};
	int imp = 0;

	__skb_queue_head_init(&tmpq);

	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;

	skb_queue_walk_safe(wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (avail[imp] <= 0)
			continue;
		avail[imp]--;
		__skb_unlink(skb, wakeupq);
		__skb_queue_tail(&tmpq, skb);
	}

	spin_lock_bh(&inputq->lock);
	skb_queue_splice_tail(&tmpq, inputq);
	spin_unlock_bh(&inputq->lock);
}

/**
 * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
 *                                     the given skb should be next attempted
 * @skb: skb to set a future retransmission time for
 * @l: link the skb will be transmitted on
 */
static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
					      struct tipc_link *l)
{
	if (link_is_bc_sndlink(l))
		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
	else
		TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
}

void tipc_link_reset(struct tipc_link *l)
{
	struct sk_buff_head list;
	u32 imp;

	__skb_queue_head_init(&list);

	l->in_session = false;
	/* Force re-synch of peer session number before establishing */
	l->peer_session--;
	l->session++;
	l->mtu = l->advertised_mtu;

	spin_lock_bh(&l->wakeupq.lock);
	skb_queue_splice_init(&l->wakeupq, &list);
	spin_unlock_bh(&l->wakeupq.lock);

	spin_lock_bh(&l->inputq->lock);
	skb_queue_splice_init(&list, l->inputq);
	spin_unlock_bh(&l->inputq->lock);

	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	__skb_queue_purge(&l->backlogq);
	__skb_queue_purge(&l->failover_deferdq);
	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
		l->backlog[imp].len = 0;
		l->backlog[imp].target_bskb = NULL;
	}
	kfree_skb(l->reasm_buf);
	kfree_skb(l->reasm_tnlmsg);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->reasm_tnlmsg = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->snd_nxt_state = 1;
	l->rcv_nxt_state = 1;
	l->acked = 0;
	l->last_gap = 0;
	kfree(l->last_ga);
	l->last_ga = NULL;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	int pkt_cnt = skb_queue_len(list);
	unsigned int mss = tipc_link_mss(l);
	unsigned int cwin = l->window;
	unsigned int mtu = l->mtu;
	struct tipc_msg *hdr;
	bool new_bundle;
	int rc = 0;
	int imp;

	if (pkt_cnt <= 0)
		return 0;

	hdr = buf_msg(skb_peek(list));
	if (unlikely(msg_size(hdr) > mtu)) {
		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
			skb_queue_len(list), msg_user(hdr),
			msg_type(hdr), msg_size(hdr), mtu);
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}

	imp = msg_importance(hdr);
	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while ((skb = __skb_dequeue(list))) {
		if (likely(skb_queue_len(transmq) < cwin)) {
			hdr = buf_msg(skb);
			msg_set_seqno(hdr, seqno);
			msg_set_ack(hdr, ack);
			msg_set_bcast_ack(hdr, bc_ack);
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				kfree_skb(skb);
				__skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_queue_tail(transmq, skb);
			tipc_link_set_skb_retransmit_time(skb, l);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
					mss, l->addr, &new_bundle)) {
			if (skb) {
				/* Keep a ref. to the skb for next try */
				l->backlog[imp].target_bskb = skb;
				l->backlog[imp].len++;
				__skb_queue_tail(backlogq, skb);
			} else {
				if (new_bundle) {
					l->stats.sent_bundles++;
					l->stats.sent_bundled++;
				}
				l->stats.sent_bundled++;
			}
			continue;
		}
		l->backlog[imp].target_bskb = NULL;
		l->backlog[imp].len += (1 + skb_queue_len(list));
		__skb_queue_tail(backlogq, skb);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return rc;
}
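
/* Typical usage (schematic): the caller owns both queues and afterwards
 * drains xmitq to the bearer layer (see tipc_bearer_xmit() in bearer.c):
 *
 *	struct sk_buff_head xmitq;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *
 * The input list is consumed by the call, as noted in the kernel-doc above.
 */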

static void tipc_link_update_cwin(struct tipc_link *l, int released,
				  bool retransmitted)
{
	int bklog_len = skb_queue_len(&l->backlogq);
	struct sk_buff_head *txq = &l->transmq;
	int txq_len = skb_queue_len(txq);
	u16 cwin = l->window;

	/* Enter fast recovery */
	if (unlikely(retransmitted)) {
		l->ssthresh = max_t(u16, l->window / 2, 300);
		l->window = min_t(u16, l->ssthresh, l->window);
		return;
	}
	/* Enter slow start */
	if (unlikely(!released)) {
		l->ssthresh = max_t(u16, l->window / 2, 300);
		l->window = l->min_win;
		return;
	}
	/* Don't increase window if no pressure on the transmit queue */
	if (txq_len + bklog_len < cwin)
		return;

	/* Don't increase window if there are holes in the transmit queue */
	if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
		return;

	l->cong_acks += released;

	/* Slow start */
	if (cwin <= l->ssthresh) {
		l->window = min_t(u16, cwin + released, l->max_win);
		return;
	}
	/* Congestion avoidance */
	if (l->cong_acks < cwin)
		return;
	l->window = min_t(u16, ++cwin, l->max_win);
	l->cong_acks = 0;
}
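
/* The logic above mirrors TCP-style congestion control: on retransmission
 * the window collapses towards ssthresh, on an ack that releases nothing it
 * falls back to min_win (slow start restart), below ssthresh it grows by
 * the number of packets each ack releases, and above ssthresh it grows by
 * a single packet once a full window's worth of packets has been released
 * (congestion avoidance), always capped at l->max_win.
 */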

static void tipc_link_advance_backlog(struct tipc_link *l,
				      struct sk_buff_head *xmitq)
{
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *txq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	struct tipc_msg *hdr;
	u16 cwin = l->window;
	u32 imp;

	while (skb_queue_len(txq) < cwin) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		imp = msg_importance(hdr);
		l->backlog[imp].len--;
		if (unlikely(skb == l->backlog[imp].target_bskb))
			l->backlog[imp].target_bskb = NULL;
		__skb_queue_tail(&l->transmq, skb);
		tipc_link_set_skb_retransmit_time(skb, l);

		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}

/**
 * link_retransmit_failure() - Detect repeated retransmit failures
 * @l: tipc link sender
 * @r: tipc link receiver (= l in case of unicast)
 * @rc: returned code
 *
 * Return: true if repeated retransmit failures have happened, otherwise
 * false
 */
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
				    int *rc)
{
	struct sk_buff *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return false;

	if (!TIPC_SKB_CB(skb)->retr_cnt)
		return false;

	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
			msecs_to_jiffies(r->tolerance * 10)))
		return false;

	hdr = buf_msg(skb);
	if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
		return false;

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, dest: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
	pr_info("retr_stamp %d, retr_cnt %d\n",
		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
		TIPC_SKB_CB(skb)->retr_cnt);

	trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
	trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
	trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");

	if (link_is_bc_sndlink(l)) {
		r->state = LINK_RESET;
		*rc |= TIPC_LINK_DOWN_EVT;
	} else {
		*rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	return true;
}
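
/* In other words: the link is declared failed once the packet at the head
 * of transmq has been retransmitted at least once (retr_cnt set) and has
 * remained unacked for more than ten times the link tolerance since its
 * first retransmission - for broadcast, only if some peer still has not
 * acked it.
 */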

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
	struct tipc_msg *hdr = buf_msg(skb);

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
			skb_queue_tail(mc_inputq, skb);
			return true;
		}
		fallthrough;
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case GROUP_PROTOCOL:
		skb_queue_tail(mc_inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
#ifdef CONFIG_TIPC_CRYPTO
	case MSG_CRYPTO:
		if (sysctl_tipc_key_exchange_enabled &&
		    TIPC_SKB_CB(skb)->decrypted) {
			tipc_crypto_msg_rcv(l->net, skb);
			return true;
		}
		fallthrough;
#endif
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return true;
	}
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq,
			   struct sk_buff **reasm_skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int pos = 0;

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}

	kfree_skb(skb);
	return 0;
}

/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
 *			 inner message along with the ones in the old link's
 *			 deferdq
 * @l: tunnel link
 * @skb: TUNNEL_PROTOCOL message
 * @inputq: queue to put messages ready for delivery
 */
static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
			     struct sk_buff_head *inputq)
{
	struct sk_buff **reasm_skb = &l->failover_reasm_skb;
	struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
	struct sk_buff_head *fdefq = &l->failover_deferdq;
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff *iskb;
	int ipos = 0;
	int rc = 0;
	u16 seqno;

	if (msg_type(hdr) == SYNCH_MSG) {
		kfree_skb(skb);
		return 0;
	}

	/* Not a fragment? */
	if (likely(!msg_nof_fragms(hdr))) {
		if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
			pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
					    skb_queue_len(fdefq));
			return 0;
		}
		kfree_skb(skb);
	} else {
		/* Set fragment type for buf_append */
		if (msg_fragm_no(hdr) == 1)
			msg_set_type(hdr, FIRST_FRAGMENT);
		else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
			msg_set_type(hdr, FRAGMENT);
		else
			msg_set_type(hdr, LAST_FRAGMENT);

		if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
			/* Successful but non-complete reassembly? */
			if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
				return 0;
			pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		iskb = skb;
	}

	do {
		seqno = buf_seqno(iskb);
		if (unlikely(less(seqno, l->drop_point))) {
			kfree_skb(iskb);
			continue;
		}
		if (unlikely(seqno != l->drop_point)) {
			__tipc_skb_queue_sorted(fdefq, seqno, iskb);
			continue;
		}

		l->drop_point++;
		if (!tipc_data_input(l, iskb, inputq))
			rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
		if (unlikely(rc))
			break;
	} while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));

	return rc;
}

/**
 * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
 * @ga: returned pointer to the Gap ACK blocks if any
 * @l: the tipc link
 * @hdr: the PROTOCOL/STATE_MSG header
 * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
 *
 * Return: the total Gap ACK blocks size
 */
u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
			  struct tipc_msg *hdr, bool uc)
{
	struct tipc_gap_ack_blks *p;
	u16 sz = 0;

	/* Does peer support the Gap ACK blocks feature? */
	if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
		p = (struct tipc_gap_ack_blks *)msg_data(hdr);
		sz = ntohs(p->len);
		/* Sanity check */
		if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
			/* Good, check if the desired type exists */
			if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
				goto ok;
		/* Backward compatible: peer might not support bc, but uc? */
		} else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
			if (p->ugack_cnt) {
				p->bgack_cnt = 0;
				goto ok;
			}
		}
	}
	/* Other cases: ignore! */
	p = NULL;

ok:
	*ga = p;
	return sz;
}

static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
				    struct tipc_link *l, u8 start_index)
{
	struct tipc_gap_ack *gacks = &ga->gacks[start_index];
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 expect, seqno = 0;
	u8 n = 0;

	if (!skb)
		return 0;

	expect = buf_seqno(skb);
	skb_queue_walk(&l->deferdq, skb) {
		seqno = buf_seqno(skb);
		if (unlikely(more(seqno, expect))) {
			gacks[n].ack = htons(expect - 1);
			gacks[n].gap = htons(seqno - expect);
			if (++n >= MAX_GAP_ACK_BLKS / 2) {
				pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
						    l->name, n,
						    skb_queue_len(&l->deferdq));
				return n;
			}
		} else if (unlikely(less(seqno, expect))) {
			pr_warn("Unexpected skb in deferdq!\n");
			continue;
		}
		expect = seqno + 1;
	}

	/* last block */
	gacks[n].ack = htons(seqno);
	gacks[n].gap = 0;
	n++;
	return n;
}
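
/* Worked example: with deferdq holding seqnos {5, 6, 9}, the walk above
 * emits gacks[0] = {ack = 6, gap = 2} for the hole at 7-8, then closes
 * with gacks[1] = {ack = 9, gap = 0}. Any gap between rcv_nxt and the
 * queue head is conveyed separately (see link_bc_rcv_gap()/msg_seq_gap).
 */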

/* tipc_build_gap_ack_blks - build Gap ACK blocks
 * @l: tipc unicast link
 * @hdr: the tipc message buffer to store the Gap ACK blocks after built
 *
 * The function builds Gap ACK blocks for both the unicast and broadcast
 * receiver links of a certain peer. Once built, the buffer has the network
 * data format described at the struct tipc_gap_ack_blks definition.
 *
 * Return: the actual allocated memory size
 */
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
{
	struct tipc_link *bcl = l->bc_rcvlink;
	struct tipc_gap_ack_blks *ga;
	u16 len;

	ga = (struct tipc_gap_ack_blks *)msg_data(hdr);

	/* Start with broadcast link first */
	tipc_bcast_lock(bcl->net);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
	ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
	tipc_bcast_unlock(bcl->net);

	/* Now for unicast link, but an explicit NACK only (???) */
	ga->ugack_cnt = (msg_seq_gap(hdr)) ?
			__tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;

	/* Total len */
	len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
	ga->len = htons(len);
	return len;
}

/* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
 *			       acked packets, also doing retransmissions if
 *			       gaps found
 * @l: tipc link with transmq queue to be advanced
 * @r: tipc link "receiver" in case of broadcast (= "l" if unicast)
 * @acked: seqno of last packet acked by peer without any gaps before
 * @gap: # of gap packets
 * @ga: buffer pointer to Gap ACK blocks from peer
 * @xmitq: queue for accumulating the retransmitted packets if any
 * @retransmitted: returned boolean value if a retransmission is really issued
 * @rc: returned code e.g. TIPC_LINK_DOWN_EVT if repeated retransmit failures
 *      happen (unlikely case)
 *
 * Return: the number of packets released from the link transmq
 */
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
				     u16 acked, u16 gap,
				     struct tipc_gap_ack_blks *ga,
				     struct sk_buff_head *xmitq,
				     bool *retransmitted, int *rc)
{
	struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
	struct tipc_gap_ack *gacks = NULL;
	struct sk_buff *skb, *_skb, *tmp;
	struct tipc_msg *hdr;
	u32 qlen = skb_queue_len(&l->transmq);
	u16 nacked = acked, ngap = gap, gack_cnt = 0;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno, n = 0;
	u16 end = r->acked, start = end, offset = r->last_gap;
	u16 si = (last_ga) ? last_ga->start_index : 0;
	bool is_uc = !link_is_bc_sndlink(l);
	bool bc_has_acked = false;

	trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);

	/* Determine Gap ACK blocks if any for the particular link */
	if (ga && is_uc) {
		/* Get the Gap ACKs, uc part */
		gack_cnt = ga->ugack_cnt;
		gacks = &ga->gacks[ga->bgack_cnt];
	} else if (ga) {
		/* Copy the Gap ACKs, bc part, for later renewal if needed */
		this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
				  GFP_ATOMIC);
		if (likely(this_ga)) {
			this_ga->start_index = 0;
			/* Start with the bc Gap ACKs */
			gack_cnt = this_ga->bgack_cnt;
			gacks = &this_ga->gacks[0];
		} else {
			/* Hmm, we can get in trouble..., simply ignore it */
			pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
		}
	}

	/* Advance the link transmq */
	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		seqno = buf_seqno(skb);

next_gap_ack:
		if (less_eq(seqno, nacked)) {
			if (is_uc)
				goto release;
			/* Skip packets peer has already acked */
			if (!more(seqno, r->acked))
				continue;
			/* Get the next of last Gap ACK blocks */
			while (more(seqno, end)) {
				if (!last_ga || si >= last_ga->bgack_cnt)
					break;
				start = end + offset + 1;
				end = ntohs(last_ga->gacks[si].ack);
				offset = ntohs(last_ga->gacks[si].gap);
				si++;
				WARN_ONCE(more(start, end) ||
					  (!offset &&
					   si < last_ga->bgack_cnt) ||
					  si > MAX_GAP_ACK_BLKS,
					  "Corrupted Gap ACK: %d %d %d %d %d\n",
					  start, end, offset, si,
					  last_ga->bgack_cnt);
			}
			/* Check against the last Gap ACK block */
			if (tipc_in_range(seqno, start, end))
				continue;
			/* Update/release the packet peer is acking */
			bc_has_acked = true;
			if (--TIPC_SKB_CB(skb)->ackers)
				continue;
release:
			/* release skb */
			__skb_unlink(skb, &l->transmq);
			kfree_skb(skb);
		} else if (less_eq(seqno, nacked + ngap)) {
			/* First gap: check if repeated retrans failures? */
			if (unlikely(seqno == acked + 1 &&
				     link_retransmit_failure(l, r, rc))) {
				/* Ignore this bc Gap ACKs if any */
				kfree(this_ga);
				this_ga = NULL;
				break;
			}
			/* retransmit skb if unrestricted */
			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
				continue;
			tipc_link_set_skb_retransmit_time(skb, l);
			_skb = pskb_copy(skb, GFP_ATOMIC);
			if (!_skb)
				continue;
			hdr = buf_msg(_skb);
			msg_set_ack(hdr, ack);
			msg_set_bcast_ack(hdr, bc_ack);
			_skb->priority = TC_PRIO_CONTROL;
			__skb_queue_tail(xmitq, _skb);
			l->stats.retransmitted++;
			if (!is_uc)
				r->stats.retransmitted++;
			*retransmitted = true;
			/* Increase actual retrans counter & mark first time */
			if (!TIPC_SKB_CB(skb)->retr_cnt++)
				TIPC_SKB_CB(skb)->retr_stamp = jiffies;
		} else {
			/* retry with Gap ACK blocks if any */
			if (n >= gack_cnt)
				break;
			nacked = ntohs(gacks[n].ack);
			ngap = ntohs(gacks[n].gap);
			n++;
			goto next_gap_ack;
		}
	}

	/* Renew last Gap ACK blocks for bc if needed */
	if (bc_has_acked) {
		if (this_ga) {
			kfree(last_ga);
			r->last_ga = this_ga;
			r->last_gap = gap;
		} else if (last_ga) {
			if (less(acked, start)) {
				si--;
				offset = start - acked - 1;
			} else if (less(acked, end)) {
				acked = end;
			}
			if (si < last_ga->bgack_cnt) {
				last_ga->start_index = si;
				r->last_gap = offset;
			} else {
				kfree(last_ga);
				r->last_ga = NULL;
				r->last_gap = 0;
			}
		} else {
			r->last_gap = 0;
		}
		r->acked = acked;
	} else {
		kfree(this_ga);
	}

	return qlen - skb_queue_len(&l->transmq);
}
1706  
1707  /* tipc_link_build_state_msg: prepare link state message for transmission
1708   *
1709   * Note that sending of broadcast ack is coordinated among nodes, to reduce
1710   * risk of ack storms towards the sender
1711   */
1712  int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1713  {
1714  	if (!l)
1715  		return 0;
1716  
1717  	/* Broadcast ACK must be sent via a unicast link => defer to caller */
1718  	if (link_is_bc_rcvlink(l)) {
1719  		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
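		/* Stagger the ACKs among nodes: send only when the low
		 * nibble of rcv_nxt is the bitwise complement of this
		 * node's low address nibble, i.e. roughly once per 16
		 * received packets
		 */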
1720  			return 0;
1721  		l->rcv_unacked = 0;
1722  
1723  		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1724  		l->snd_nxt = l->rcv_nxt;
1725  		return TIPC_LINK_SND_STATE;
1726  	}
1727  	/* Unicast ACK */
1728  	l->rcv_unacked = 0;
1729  	l->stats.sent_acks++;
1730  	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1731  	return 0;
1732  }
1733  
1734  /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1735   */
1736  void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1737  {
1738  	int mtyp = RESET_MSG;
1739  	struct sk_buff *skb;
1740  
1741  	if (l->state == LINK_ESTABLISHING)
1742  		mtyp = ACTIVATE_MSG;
1743  
1744  	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1745  
1746  	/* Inform peer that this endpoint is going down if applicable */
1747  	skb = skb_peek_tail(xmitq);
1748  	if (skb && (l->state == LINK_RESET))
1749  		msg_set_peer_stopping(buf_msg(skb), 1);
1750  }
1751  
1752  /* tipc_link_build_nack_msg: prepare link nack message for transmission
1753   * Note that sending of broadcast NACK is coordinated among nodes, to
1754   * reduce the risk of NACK storms towards the sender
1755   */
1756  static int tipc_link_build_nack_msg(struct tipc_link *l,
1757  				    struct sk_buff_head *xmitq)
1758  {
1759  	u32 def_cnt = ++l->stats.deferred_recv;
1760  	struct sk_buff_head *dfq = &l->deferdq;
1761  	u32 defq_len = skb_queue_len(dfq);
1762  	int match1, match2;
1763  
1764  	if (link_is_bc_rcvlink(l)) {
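		/* Stagger the NACKs among nodes: send a state message only
		 * when the deferred packet count matches this node's low
		 * address nibble, i.e. roughly once per 16 deferrals
		 */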
1765  		match1 = def_cnt & 0xf;
1766  		match2 = tipc_own_addr(l->net) & 0xf;
1767  		if (match1 == match2)
1768  			return TIPC_LINK_SND_STATE;
1769  		return 0;
1770  	}
1771  
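	/* Unicast: send a NACK once 3 packets sit in the deferred queue,
	 * then again for every 16th additional deferred packet
	 */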
1772  	if (defq_len >= 3 && !((defq_len - 3) % 16)) {
1773  		u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1774  
1775  		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
1776  					  rcvgap, 0, 0, xmitq);
1777  	}
1778  	return 0;
1779  }
1780  
1781  /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1782   * @l: the link that should handle the message
1783   * @skb: TIPC packet
1784   * @xmitq: queue to place packets to be sent after this call
1785   */
1786  int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1787  		  struct sk_buff_head *xmitq)
1788  {
1789  	struct sk_buff_head *defq = &l->deferdq;
1790  	struct tipc_msg *hdr = buf_msg(skb);
1791  	u16 seqno, rcv_nxt, win_lim;
1792  	int released = 0;
1793  	int rc = 0;
1794  
1795  	/* Verify and update link state */
1796  	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1797  		return tipc_link_proto_rcv(l, skb, xmitq);
1798  
1799  	/* Don't send probe at next timeout expiration */
1800  	l->silent_intv_cnt = 0;
1801  
1802  	do {
1803  		hdr = buf_msg(skb);
1804  		seqno = msg_seqno(hdr);
1805  		rcv_nxt = l->rcv_nxt;
1806  		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1807  
1808  		if (unlikely(!link_is_up(l))) {
1809  			if (l->state == LINK_ESTABLISHING)
1810  				rc = TIPC_LINK_UP_EVT;
1811  			kfree_skb(skb);
1812  			break;
1813  		}
1814  
1815  		/* Drop if outside receive window */
1816  		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1817  			l->stats.duplicates++;
1818  			kfree_skb(skb);
1819  			break;
1820  		}
1821  		released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
1822  						      NULL, NULL, NULL, NULL);
1823  
1824  		/* Defer delivery if sequence gap */
1825  		if (unlikely(seqno != rcv_nxt)) {
1826  			if (!__tipc_skb_queue_sorted(defq, seqno, skb))
1827  				l->stats.duplicates++;
1828  			rc |= tipc_link_build_nack_msg(l, xmitq);
1829  			break;
1830  		}
1831  
1832  		/* Deliver packet */
1833  		l->rcv_nxt++;
1834  		l->stats.recv_pkts++;
1835  
1836  		if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1837  			rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1838  		else if (!tipc_data_input(l, skb, l->inputq))
1839  			rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
1840  		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1841  			rc |= tipc_link_build_state_msg(l, xmitq);
1842  		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1843  			break;
1844  	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
1845  
1846  	/* Forward queues and wake up waiting users */
1847  	if (released) {
1848  		tipc_link_update_cwin(l, released, 0);
1849  		tipc_link_advance_backlog(l, xmitq);
1850  		if (unlikely(!skb_queue_empty(&l->wakeupq)))
1851  			link_prepare_wakeup(l);
1852  	}
1853  	return rc;
1854  }
1855  
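/* tipc_link_build_proto_msg(): build a RESET, ACTIVATE or STATE protocol
 * message and append it to @xmitq. Nothing is built while the link is
 * blocked (reset/failover), or for a STATE message on a link that is
 * not yet up.
 */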
1856  static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1857  				      bool probe_reply, u16 rcvgap,
1858  				      int tolerance, int priority,
1859  				      struct sk_buff_head *xmitq)
1860  {
1861  	struct tipc_mon_state *mstate = &l->mon_state;
1862  	struct sk_buff_head *dfq = &l->deferdq;
1863  	struct tipc_link *bcl = l->bc_rcvlink;
1864  	struct tipc_msg *hdr;
1865  	struct sk_buff *skb;
1866  	bool node_up = link_is_up(bcl);
1867  	u16 glen = 0, bc_rcvgap = 0;
1868  	int dlen = 0;
1869  	void *data;
1870  
1871  	/* Don't send protocol message during reset or link failover */
1872  	if (tipc_link_is_blocked(l))
1873  		return;
1874  
1875  	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1876  		return;
1877  
1878  	if ((probe || probe_reply) && !skb_queue_empty(dfq))
1879  		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1880  
1881  	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1882  			      tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1883  			      l->addr, tipc_own_addr(l->net), 0, 0, 0);
1884  	if (!skb)
1885  		return;
1886  
1887  	hdr = buf_msg(skb);
1888  	data = msg_data(hdr);
1889  	msg_set_session(hdr, l->session);
1890  	msg_set_bearer_id(hdr, l->bearer_id);
1891  	msg_set_net_plane(hdr, l->net_plane);
1892  	msg_set_next_sent(hdr, l->snd_nxt);
1893  	msg_set_ack(hdr, l->rcv_nxt - 1);
1894  	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1895  	msg_set_bc_ack_invalid(hdr, !node_up);
1896  	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1897  	msg_set_link_tolerance(hdr, tolerance);
1898  	msg_set_linkprio(hdr, priority);
1899  	msg_set_redundant_link(hdr, node_up);
1900  	msg_set_seq_gap(hdr, 0);
1901  	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1902  
1903  	if (mtyp == STATE_MSG) {
1904  		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1905  			msg_set_seqno(hdr, l->snd_nxt_state++);
1906  		msg_set_seq_gap(hdr, rcvgap);
1907  		bc_rcvgap = link_bc_rcv_gap(bcl);
1908  		msg_set_bc_gap(hdr, bc_rcvgap);
1909  		msg_set_probe(hdr, probe);
1910  		msg_set_is_keepalive(hdr, probe || probe_reply);
1911  		if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1912  			glen = tipc_build_gap_ack_blks(l, hdr);
1913  		tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1914  		msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1915  		skb_trim(skb, INT_H_SIZE + glen + dlen);
1916  		l->stats.sent_states++;
1917  		l->rcv_unacked = 0;
1918  	} else {
1919  		/* RESET_MSG or ACTIVATE_MSG */
1920  		if (mtyp == ACTIVATE_MSG) {
1921  			msg_set_dest_session_valid(hdr, 1);
1922  			msg_set_dest_session(hdr, l->peer_session);
1923  		}
1924  		msg_set_max_pkt(hdr, l->advertised_mtu);
1925  		strcpy(data, l->if_name);
1926  		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1927  		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1928  	}
1929  	if (probe)
1930  		l->stats.sent_probes++;
1931  	if (rcvgap)
1932  		l->stats.sent_nacks++;
1933  	if (bc_rcvgap)
1934  		bcl->stats.sent_nacks++;
1935  	skb->priority = TC_PRIO_CONTROL;
1936  	__skb_queue_tail(xmitq, skb);
1937  	trace_tipc_proto_build(skb, false, l->name);
1938  }
1939  
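/* tipc_link_create_dummy_tnl_msg(): create a FAILOVER tunnel message
 * carrying a single dummy inner message, and send it via link @l
 */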
1940  void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1941  				    struct sk_buff_head *xmitq)
1942  {
1943  	u32 onode = tipc_own_addr(l->net);
1944  	struct tipc_msg *hdr, *ihdr;
1945  	struct sk_buff_head tnlq;
1946  	struct sk_buff *skb;
1947  	u32 dnode = l->addr;
1948  
1949  	__skb_queue_head_init(&tnlq);
1950  	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1951  			      INT_H_SIZE, BASIC_H_SIZE,
1952  			      dnode, onode, 0, 0, 0);
1953  	if (!skb) {
1954  		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1955  		return;
1956  	}
1957  
1958  	hdr = buf_msg(skb);
1959  	msg_set_msgcnt(hdr, 1);
1960  	msg_set_bearer_id(hdr, l->peer_bearer_id);
1961  
1962  	ihdr = (struct tipc_msg *)msg_data(hdr);
1963  	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1964  		      BASIC_H_SIZE, dnode);
1965  	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1966  	__skb_queue_tail(&tnlq, skb);
1967  	tipc_link_xmit(l, &tnlq, xmitq);
1968  }
1969  
1970  /* tipc_link_tnl_prepare(): wrap the contents of the link's transmit and
1971   * backlog queues into tunnel packets and send them on the tunnel link.
1972   */
1973  void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1974  			   int mtyp, struct sk_buff_head *xmitq)
1975  {
1976  	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1977  	struct sk_buff *skb, *tnlskb;
1978  	struct tipc_msg *hdr, tnlhdr;
1979  	struct sk_buff_head *queue = &l->transmq;
1980  	struct sk_buff_head tmpxq, tnlq, frags;
1981  	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1982  	bool pktcnt_need_update = false;
1983  	u16 syncpt;
1984  	int rc;
1985  
1986  	if (!tnl)
1987  		return;
1988  
1989  	__skb_queue_head_init(&tnlq);
1990  	/* Link Synching:
1991  	 * From now on, send only a single ("dummy") SYNCH message
1992  	 * to the peer. The SYNCH message does not contain any data, just
1993  	 * a header conveying the synch point to the peer.
1994  	 */
1995  	if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1996  		tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
1997  					 INT_H_SIZE, 0, l->addr,
1998  					 tipc_own_addr(l->net),
1999  					 0, 0, 0);
2000  		if (!tnlskb) {
2001  			pr_warn("%sunable to create dummy SYNCH_MSG\n",
2002  				link_co_err);
2003  			return;
2004  		}
2005  
2006  		hdr = buf_msg(tnlskb);
2007  		syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
2008  		msg_set_syncpt(hdr, syncpt);
2009  		msg_set_bearer_id(hdr, l->peer_bearer_id);
2010  		__skb_queue_tail(&tnlq, tnlskb);
2011  		tipc_link_xmit(tnl, &tnlq, xmitq);
2012  		return;
2013  	}
2014  
2015  	__skb_queue_head_init(&tmpxq);
2016  	__skb_queue_head_init(&frags);
2017  	/* The algorithm requires at least one packet in the queues => add dummy */
2018  	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
2019  			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
2020  			      0, 0, TIPC_ERR_NO_PORT);
2021  	if (!skb) {
2022  		pr_warn("%sunable to create tunnel packet\n", link_co_err);
2023  		return;
2024  	}
2025  	__skb_queue_tail(&tnlq, skb);
2026  	tipc_link_xmit(l, &tnlq, &tmpxq);
2027  	__skb_queue_purge(&tmpxq);
2028  
2029  	/* Initialize reusable tunnel packet header */
2030  	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
2031  		      mtyp, INT_H_SIZE, l->addr);
2032  	if (mtyp == SYNCH_MSG)
2033  		pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
2034  	else
2035  		pktcnt = skb_queue_len(&l->transmq);
2036  	pktcnt += skb_queue_len(&l->backlogq);
2037  	msg_set_msgcnt(&tnlhdr, pktcnt);
2038  	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
2039  tnl:
2040  	/* Wrap each packet into a tunnel packet */
2041  	skb_queue_walk(queue, skb) {
2042  		hdr = buf_msg(skb);
2043  		if (queue == &l->backlogq)
2044  			msg_set_seqno(hdr, seqno++);
2045  		pktlen = msg_size(hdr);
2046  
2047  		/* Tunnel link MTU is not large enough. This can happen
2048  		 * when:
2049  		 * 1) the link MTU has just changed or was set differently;
2050  		 * 2) a FAILOVER arrives on top of a SYNCH message.
2051  		 *
2052  		 * The second case should not happen if the peer supports
2053  		 * TIPC_TUNNEL_ENHANCED.
2054  		 */
2055  		if (pktlen > tnl->mtu - INT_H_SIZE) {
2056  			if (mtyp == FAILOVER_MSG &&
2057  			    (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
2058  				rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
2059  						       &frags);
2060  				if (rc) {
2061  					pr_warn("%sunable to frag msg: rc %d\n",
2062  						link_co_err, rc);
2063  					return;
2064  				}
2065  				pktcnt += skb_queue_len(&frags) - 1;
2066  				pktcnt_need_update = true;
2067  				skb_queue_splice_tail_init(&frags, &tnlq);
2068  				continue;
2069  			}
2070  			/* The peer does not support TIPC_TUNNEL_ENHANCED,
2071  			 * so just warn and return.
2072  			 */
2073  			pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
2074  					    link_co_err, msg_user(hdr),
2075  					    msg_type(hdr), msg_size(hdr));
2076  			return;
2077  		}
2078  
2079  		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
2080  		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
2081  		if (!tnlskb) {
2082  			pr_warn("%sunable to send packet\n", link_co_err);
2083  			return;
2084  		}
2085  		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
2086  		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
2087  		__skb_queue_tail(&tnlq, tnlskb);
2088  	}
2089  	if (queue != &l->backlogq) {
2090  		queue = &l->backlogq;
2091  		goto tnl;
2092  	}
2093  
2094  	if (pktcnt_need_update)
2095  		skb_queue_walk(&tnlq, skb) {
2096  			hdr = buf_msg(skb);
2097  			msg_set_msgcnt(hdr, pktcnt);
2098  		}
2099  
2100  	tipc_link_xmit(tnl, &tnlq, xmitq);
2101  
2102  	if (mtyp == FAILOVER_MSG) {
2103  		tnl->drop_point = l->rcv_nxt;
2104  		tnl->failover_reasm_skb = l->reasm_buf;
2105  		l->reasm_buf = NULL;
2106  
2107  		/* Failover the link's deferdq */
2108  		if (unlikely(!skb_queue_empty(fdefq))) {
2109  			pr_warn("Link failover deferdq not empty: %d!\n",
2110  				skb_queue_len(fdefq));
2111  			__skb_queue_purge(fdefq);
2112  		}
2113  		skb_queue_splice_init(&l->deferdq, fdefq);
2114  	}
2115  }
2116  
2117  /**
2118   * tipc_link_failover_prepare() - prepare tnl for link failover
2119   *
2120   * This is a special version of its precursor, tipc_link_tnl_prepare();
2121   * see tipc_node_link_failover() for details.
2122   *
2123   * @l: failover link
2124   * @tnl: tunnel link
2125   * @xmitq: queue for messages to be transmitted
2126   */
2127  void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
2128  				struct sk_buff_head *xmitq)
2129  {
2130  	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
2131  
2132  	tipc_link_create_dummy_tnl_msg(tnl, xmitq);
2133  
2134  	/* This failover link endpoint was never established before,
2135  	 * so it has not received anything from the peer.
2136  	 * Otherwise, this must be a normal failover situation, or the
2137  	 * node has entered SELF_DOWN_PEER_LEAVING, in which case both
2138  	 * peers would have to start over from scratch anyway.
2139  	 */
2140  	tnl->drop_point = 1;
2141  	tnl->failover_reasm_skb = NULL;
2142  
2143  	/* Initialize the tunnel link's failover deferdq */
2144  	if (unlikely(!skb_queue_empty(fdefq))) {
2145  		pr_warn("Link failover deferdq not empty: %d!\n",
2146  			skb_queue_len(fdefq));
2147  		__skb_queue_purge(fdefq);
2148  	}
2149  }
2150  
2151  /* tipc_link_validate_msg(): validate message against current link state
2152   * Returns true if message should be accepted, otherwise false
2153   */
2154  bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
2155  {
2156  	u16 curr_session = l->peer_session;
2157  	u16 session = msg_session(hdr);
2158  	int mtyp = msg_type(hdr);
2159  
2160  	if (msg_user(hdr) != LINK_PROTOCOL)
2161  		return true;
2162  
2163  	switch (mtyp) {
2164  	case RESET_MSG:
2165  		if (!l->in_session)
2166  			return true;
2167  		/* Accept only RESET with new session number */
2168  		return more(session, curr_session);
2169  	case ACTIVATE_MSG:
2170  		if (!l->in_session)
2171  			return true;
2172  		/* Accept only ACTIVATE with new or current session number */
2173  		return !less(session, curr_session);
2174  	case STATE_MSG:
2175  		/* Accept only STATE with current session number */
2176  		if (!l->in_session)
2177  			return false;
2178  		if (session != curr_session)
2179  			return false;
2180  		/* Extra sanity check */
2181  		if (!link_is_up(l) && msg_ack(hdr))
2182  			return false;
2183  		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
2184  			return true;
2185  		/* Accept only STATE with new sequence number */
2186  		return !less(msg_seqno(hdr), l->rcv_nxt_state);
2187  	default:
2188  		return false;
2189  	}
2190  }
2191  
2192  /* tipc_link_proto_rcv(): receive link level protocol message.
2193   * Note that the network plane id propagates through the network, and may
2194   * change at any time. The node with the lowest numerical id determines
2195   * the network plane.
2196   */
2197  static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
2198  			       struct sk_buff_head *xmitq)
2199  {
2200  	struct tipc_msg *hdr = buf_msg(skb);
2201  	struct tipc_gap_ack_blks *ga = NULL;
2202  	bool reply = msg_probe(hdr), retransmitted = false;
2203  	u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
2204  	u16 peers_snd_nxt =  msg_next_sent(hdr);
2205  	u16 peers_tol = msg_link_tolerance(hdr);
2206  	u16 peers_prio = msg_linkprio(hdr);
2207  	u16 gap = msg_seq_gap(hdr);
2208  	u16 ack = msg_ack(hdr);
2209  	u16 rcv_nxt = l->rcv_nxt;
2210  	u16 rcvgap = 0;
2211  	int mtyp = msg_type(hdr);
2212  	int rc = 0, released;
2213  	char *if_name;
2214  	void *data;
2215  
2216  	trace_tipc_proto_rcv(skb, false, l->name);
2217  
2218  	if (dlen > U16_MAX)
2219  		goto exit;
2220  
2221  	if (tipc_link_is_blocked(l) || !xmitq)
2222  		goto exit;
2223  
2224  	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
2225  		l->net_plane = msg_net_plane(hdr);
2226  
2227  	if (skb_linearize(skb))
2228  		goto exit;
2229  
2230  	hdr = buf_msg(skb);
2231  	data = msg_data(hdr);
2232  
2233  	if (!tipc_link_validate_msg(l, hdr)) {
2234  		trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
2235  		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
2236  		goto exit;
2237  	}
2238  
2239  	switch (mtyp) {
2240  	case RESET_MSG:
2241  	case ACTIVATE_MSG:
2242  		msg_max = msg_max_pkt(hdr);
2243  		if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
2244  			break;
2245  		/* Complete own link name with peer's interface name */
2246  		if_name =  strrchr(l->name, ':') + 1;
2247  		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
2248  			break;
2249  		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
2250  			break;
2251  		strncpy(if_name, data, TIPC_MAX_IF_NAME);
2252  
2253  		/* Update own tolerance if peer indicates a non-zero value */
2254  		if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2255  			l->tolerance = peers_tol;
2256  			l->bc_rcvlink->tolerance = peers_tol;
2257  		}
2258  		/* Update own priority if peer's priority is higher */
2259  		if (tipc_in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
2260  			l->priority = peers_prio;
2261  
2262  		/* If peer is going down we want a full re-establish cycle */
2263  		if (msg_peer_stopping(hdr)) {
2264  			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2265  			break;
2266  		}
2267  
2268  		/* If this endpoint was re-created while peer was ESTABLISHING
2269  		 * it does not know the current session number. Force a re-synch.
2270  		 */
2271  		if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
2272  		    l->session != msg_dest_session(hdr)) {
2273  			if (less(l->session, msg_dest_session(hdr)))
2274  				l->session = msg_dest_session(hdr) + 1;
2275  			break;
2276  		}
2277  
2278  		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
2279  		if (mtyp == RESET_MSG || !link_is_up(l))
2280  			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
2281  
2282  		/* ACTIVATE_MSG takes up link if it was already locally reset */
2283  		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
2284  			rc = TIPC_LINK_UP_EVT;
2285  
2286  		l->peer_session = msg_session(hdr);
2287  		l->in_session = true;
2288  		l->peer_bearer_id = msg_bearer_id(hdr);
2289  		if (l->mtu > msg_max)
2290  			l->mtu = msg_max;
2291  		break;
2292  
2293  	case STATE_MSG:
2294  		/* Validate Gap ACK blocks, drop if invalid */
2295  		glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
2296  		if (glen > dlen)
2297  			break;
2298  
2299  		l->rcv_nxt_state = msg_seqno(hdr) + 1;
2300  
2301  		/* Update own tolerance if peer indicates a non-zero value */
2302  		if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2303  			l->tolerance = peers_tol;
2304  			l->bc_rcvlink->tolerance = peers_tol;
2305  		}
2306  		/* Update own prio if peer indicates a different value */
2307  		if ((peers_prio != l->priority) &&
2308  		    tipc_in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
2309  			l->priority = peers_prio;
2310  			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2311  		}
2312  
2313  		l->silent_intv_cnt = 0;
2314  		l->stats.recv_states++;
2315  		if (msg_probe(hdr))
2316  			l->stats.recv_probes++;
2317  
2318  		if (!link_is_up(l)) {
2319  			if (l->state == LINK_ESTABLISHING)
2320  				rc = TIPC_LINK_UP_EVT;
2321  			break;
2322  		}
2323  
2324  		tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
2325  			     &l->mon_state, l->bearer_id);
2326  
2327  		/* Send NACK if peer has sent pkts we haven't received yet */
2328  		if ((reply || msg_is_keepalive(hdr)) &&
2329  		    more(peers_snd_nxt, rcv_nxt) &&
2330  		    !tipc_link_is_synching(l) &&
2331  		    skb_queue_empty(&l->deferdq))
2332  			rcvgap = peers_snd_nxt - l->rcv_nxt;
2333  		if (rcvgap || reply)
2334  			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
2335  						  rcvgap, 0, 0, xmitq);
2336  
2337  		released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
2338  						     &retransmitted, &rc);
2339  		if (gap)
2340  			l->stats.recv_nacks++;
2341  		if (released || retransmitted)
2342  			tipc_link_update_cwin(l, released, retransmitted);
2343  		if (released)
2344  			tipc_link_advance_backlog(l, xmitq);
2345  		if (unlikely(!skb_queue_empty(&l->wakeupq)))
2346  			link_prepare_wakeup(l);
2347  	}
2348  exit:
2349  	kfree_skb(skb);
2350  	return rc;
2351  }
2352  
2353  /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
2354   */
2355  static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
2356  					 u16 peers_snd_nxt,
2357  					 struct sk_buff_head *xmitq)
2358  {
2359  	struct sk_buff *skb;
2360  	struct tipc_msg *hdr;
2361  	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
2362  	u16 ack = l->rcv_nxt - 1;
2363  	u16 gap_to = peers_snd_nxt - 1;
2364  
2365  	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
2366  			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
2367  	if (!skb)
2368  		return false;
2369  	hdr = buf_msg(skb);
2370  	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
2371  	msg_set_bcast_ack(hdr, ack);
2372  	msg_set_bcgap_after(hdr, ack);
2373  	if (dfrd_skb)
2374  		gap_to = buf_seqno(dfrd_skb) - 1;
2375  	msg_set_bcgap_to(hdr, gap_to);
2376  	msg_set_non_seq(hdr, bcast);
2377  	__skb_queue_tail(xmitq, skb);
2378  	return true;
2379  }
2380  
2381  /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
2382   *
2383   * Give a newly added peer node the sequence number where it should
2384   * start receiving and acking broadcast packets.
2385   */
2386  static void tipc_link_build_bc_init_msg(struct tipc_link *l,
2387  					struct sk_buff_head *xmitq)
2388  {
2389  	struct sk_buff_head list;
2390  
2391  	__skb_queue_head_init(&list);
2392  	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
2393  		return;
2394  	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
2395  	tipc_link_xmit(l, &list, xmitq);
2396  }
2397  
2398  /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
2399   */
2400  void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
2401  {
2402  	int mtyp = msg_type(hdr);
2403  	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2404  
2405  	if (link_is_up(l))
2406  		return;
2407  
2408  	if (msg_user(hdr) == BCAST_PROTOCOL) {
2409  		l->rcv_nxt = peers_snd_nxt;
2410  		l->state = LINK_ESTABLISHED;
2411  		return;
2412  	}
2413  
2414  	if (l->peer_caps & TIPC_BCAST_SYNCH)
2415  		return;
2416  
2417  	if (msg_peer_node_is_up(hdr))
2418  		return;
2419  
2420  	/* Compatibility: accept older, less safe initial synch data */
2421  	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2422  		l->rcv_nxt = peers_snd_nxt;
2423  }
2424  
2425  /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2426   */
2427  int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2428  			  struct sk_buff_head *xmitq)
2429  {
2430  	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2431  	int rc = 0;
2432  
2433  	if (!link_is_up(l))
2434  		return rc;
2435  
2436  	if (!msg_peer_node_is_up(hdr))
2437  		return rc;
2438  
2439  	/* Open when peer acknowledges our bcast init msg (pkt #1) */
2440  	if (msg_ack(hdr))
2441  		l->bc_peer_is_up = true;
2442  
2443  	if (!l->bc_peer_is_up)
2444  		return rc;
2445  
2446  	/* Ignore if peers_snd_nxt goes beyond receive window */
2447  	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
2448  		return rc;
2449  
2450  	l->snd_nxt = peers_snd_nxt;
2451  	if (link_bc_rcv_gap(l))
2452  		rc |= TIPC_LINK_SND_STATE;
2453  
2454  	/* Return now if sender supports NACK via STATE messages */
2455  	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2456  		return rc;
2457  
2458  	/* Otherwise, be backwards compatible */
2459  
2460  	if (!more(peers_snd_nxt, l->rcv_nxt)) {
2461  		l->nack_state = BC_NACK_SND_CONDITIONAL;
2462  		return 0;
2463  	}
2464  
2465  	/* Don't NACK if one was recently sent by us or seen from another node */
2466  	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2467  		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2468  		return 0;
2469  	}
2470  
2471  	/* Conditionally delay NACK sending until next synch rcv */
2472  	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2473  		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2474  		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
2475  			return 0;
2476  	}
2477  
2478  	/* Send NACK now but suppress next one */
2479  	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2480  	l->nack_state = BC_NACK_SND_SUPPRESS;
2481  	return 0;
2482  }
2483  
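/* tipc_link_bc_ack_rcv(): process an ACK, possibly with Gap ACK blocks,
 * received on the broadcast link
 * @r: broadcast receive link tracking the acking peer
 * @acked: last packet seqno the peer has acked
 * @gap: gap, if any, following the acked seqno
 * @ga: Gap ACK blocks, if any, attached by the peer
 * @xmitq: queue for released backlog packets to be sent
 * @retrq: queue for packets to be retransmitted
 */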
2484  int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
2485  			 struct tipc_gap_ack_blks *ga,
2486  			 struct sk_buff_head *xmitq,
2487  			 struct sk_buff_head *retrq)
2488  {
2489  	struct tipc_link *l = r->bc_sndlink;
2490  	bool unused = false;
2491  	int rc = 0;
2492  
2493  	if (!link_is_up(r) || !r->bc_peer_is_up)
2494  		return 0;
2495  
2496  	if (gap) {
2497  		l->stats.recv_nacks++;
2498  		r->stats.recv_nacks++;
2499  	}
2500  
2501  	if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
2502  		return 0;
2503  
2504  	trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
2505  	tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);
2506  
2507  	tipc_link_advance_backlog(l, xmitq);
2508  	if (unlikely(!skb_queue_empty(&l->wakeupq)))
2509  		link_prepare_wakeup(l);
2510  
2511  	return rc;
2512  }
2513  
2514  /* tipc_link_bc_nack_rcv(): receive broadcast NACK message
2515   * This function exists for backwards compatibility, since
2516   * BCAST_PROTOCOL/STATE messages are no longer sent as of TIPC v2.5.
2517   */
2518  int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2519  			  struct sk_buff_head *xmitq)
2520  {
2521  	struct tipc_msg *hdr = buf_msg(skb);
2522  	u32 dnode = msg_destnode(hdr);
2523  	int mtyp = msg_type(hdr);
2524  	u16 acked = msg_bcast_ack(hdr);
2525  	u16 from = acked + 1;
2526  	u16 to = msg_bcgap_to(hdr);
2527  	u16 peers_snd_nxt = to + 1;
2528  	int rc = 0;
2529  
2530  	kfree_skb(skb);
2531  
2532  	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2533  		return 0;
2534  
2535  	if (mtyp != STATE_MSG)
2536  		return 0;
2537  
2538  	if (dnode == tipc_own_addr(l->net)) {
2539  		rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
2540  					  xmitq);
2541  		l->stats.recv_nacks++;
2542  		return rc;
2543  	}
2544  
2545  	/* Msg for other node => suppress own NACK at next sync if applicable */
2546  	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2547  		l->nack_state = BC_NACK_SND_SUPPRESS;
2548  
2549  	return 0;
2550  }
2551  
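/* tipc_link_set_queue_limits(): set the link's send window bounds and the
 * per-importance backlog queue limits. The limits scale linearly with
 * min_win (x2/x4/x6/x8), e.g. min_win = 50 gives 100/200/300/400, while
 * SYSTEM importance is capped by the bulk publication limit
 */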
2552  void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
2553  {
2554  	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
2555  
2556  	l->min_win = min_win;
2557  	l->ssthresh = max_win;
2558  	l->max_win = max_win;
2559  	l->window = min_win;
2560  	l->backlog[TIPC_LOW_IMPORTANCE].limit      = min_win * 2;
2561  	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = min_win * 4;
2562  	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = min_win * 6;
2563  	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
2564  	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
2565  }
2566  
2567  /**
2568   * tipc_link_reset_stats - reset link statistics
2569   * @l: pointer to link
2570   */
2571  void tipc_link_reset_stats(struct tipc_link *l)
2572  {
2573  	memset(&l->stats, 0, sizeof(l->stats));
2574  }
2575  
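/* link_print(): print a brief debug summary of the link state and the
 * transmit/backlog queue boundaries
 */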
2576  static void link_print(struct tipc_link *l, const char *str)
2577  {
2578  	struct sk_buff *hskb = skb_peek(&l->transmq);
2579  	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
2580  	u16 tail = l->snd_nxt - 1;
2581  
2582  	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
2583  	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2584  		skb_queue_len(&l->transmq), head, tail,
2585  		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
2586  }
2587  
2588  /* Parse and validate nested (link) properties valid for media, bearer and link
2589   */
2590  int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2591  {
2592  	int err;
2593  
2594  	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2595  					  tipc_nl_prop_policy, NULL);
2596  	if (err)
2597  		return err;
2598  
2599  	if (props[TIPC_NLA_PROP_PRIO]) {
2600  		u32 prio;
2601  
2602  		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2603  		if (prio > TIPC_MAX_LINK_PRI)
2604  			return -EINVAL;
2605  	}
2606  
2607  	if (props[TIPC_NLA_PROP_TOL]) {
2608  		u32 tol;
2609  
2610  		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2611  		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2612  			return -EINVAL;
2613  	}
2614  
2615  	if (props[TIPC_NLA_PROP_WIN]) {
2616  		u32 max_win;
2617  
2618  		max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2619  		if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
2620  			return -EINVAL;
2621  	}
2622  
2623  	return 0;
2624  }
2625  
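/* __tipc_nl_add_stats(): fill a nested TIPC_NLA_LINK_STATS attribute from
 * a stats snapshot, using a simple attribute-to-counter mapping table
 */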
2626  static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2627  {
2628  	int i;
2629  	struct nlattr *stats;
2630  
2631  	struct nla_map {
2632  		u32 key;
2633  		u32 val;
2634  	};
2635  
2636  	struct nla_map map[] = {
2637  		{TIPC_NLA_STATS_RX_INFO, 0},
2638  		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2639  		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2640  		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2641  		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
2642  		{TIPC_NLA_STATS_TX_INFO, 0},
2643  		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2644  		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2645  		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2646  		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2647  		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2648  			s->msg_length_counts : 1},
2649  		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2650  		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2651  		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2652  		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2653  		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2654  		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2655  		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2656  		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2657  		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2658  		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
2659  		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2660  		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2661  		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2662  		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
2663  		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2664  		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2665  		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2666  		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2667  		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2668  		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2669  		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2670  		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2671  			(s->accu_queue_sz / s->queue_sz_counts) : 0}
2672  	};
2673  
2674  	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2675  	if (!stats)
2676  		return -EMSGSIZE;
2677  
2678  	for (i = 0; i <  ARRAY_SIZE(map); i++)
2679  		if (nla_put_u32(skb, map[i].key, map[i].val))
2680  			goto msg_full;
2681  
2682  	nla_nest_end(skb, stats);
2683  
2684  	return 0;
2685  msg_full:
2686  	nla_nest_cancel(skb, stats);
2687  
2688  	return -EMSGSIZE;
2689  }
2690  
2691  /* Caller should hold appropriate locks to protect the link */
2692  int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2693  		       struct tipc_link *link, int nlflags)
2694  {
2695  	u32 self = tipc_own_addr(net);
2696  	struct nlattr *attrs;
2697  	struct nlattr *prop;
2698  	void *hdr;
2699  	int err;
2700  
2701  	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2702  			  nlflags, TIPC_NL_LINK_GET);
2703  	if (!hdr)
2704  		return -EMSGSIZE;
2705  
2706  	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2707  	if (!attrs)
2708  		goto msg_full;
2709  
2710  	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2711  		goto attr_msg_full;
2712  	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
2713  		goto attr_msg_full;
2714  	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2715  		goto attr_msg_full;
2716  	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
2717  		goto attr_msg_full;
2718  	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
2719  		goto attr_msg_full;
2720  
2721  	if (tipc_link_is_up(link))
2722  		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2723  			goto attr_msg_full;
2724  	if (link->active)
2725  		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2726  			goto attr_msg_full;
2727  
2728  	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2729  	if (!prop)
2730  		goto attr_msg_full;
2731  	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2732  		goto prop_msg_full;
2733  	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2734  		goto prop_msg_full;
2735  	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2736  			link->window))
2737  		goto prop_msg_full;
2740  	nla_nest_end(msg->skb, prop);
2741  
2742  	err = __tipc_nl_add_stats(msg->skb, &link->stats);
2743  	if (err)
2744  		goto attr_msg_full;
2745  
2746  	nla_nest_end(msg->skb, attrs);
2747  	genlmsg_end(msg->skb, hdr);
2748  
2749  	return 0;
2750  
2751  prop_msg_full:
2752  	nla_nest_cancel(msg->skb, prop);
2753  attr_msg_full:
2754  	nla_nest_cancel(msg->skb, attrs);
2755  msg_full:
2756  	genlmsg_cancel(msg->skb, hdr);
2757  
2758  	return -EMSGSIZE;
2759  }
2760  
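/* __tipc_nl_add_bc_link_stat(): broadcast link variant of the stats dump;
 * unlike the unicast variant, it also reports RX/TX packet counts
 */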
2761  static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2762  				      struct tipc_stats *stats)
2763  {
2764  	int i;
2765  	struct nlattr *nest;
2766  
2767  	struct nla_map {
2768  		__u32 key;
2769  		__u32 val;
2770  	};
2771  
2772  	struct nla_map map[] = {
2773  		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2774  		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2775  		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2776  		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2777  		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2778  		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2779  		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2780  		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2781  		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2782  		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2783  		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2784  		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2785  		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2786  		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2787  		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2788  		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2789  		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2790  		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2791  		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2792  			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2793  	};
2794  
2795  	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2796  	if (!nest)
2797  		return -EMSGSIZE;
2798  
2799  	for (i = 0; i <  ARRAY_SIZE(map); i++)
2800  		if (nla_put_u32(skb, map[i].key, map[i].val))
2801  			goto msg_full;
2802  
2803  	nla_nest_end(skb, nest);
2804  
2805  	return 0;
2806  msg_full:
2807  	nla_nest_cancel(skb, nest);
2808  
2809  	return -EMSGSIZE;
2810  }
2811  
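/* tipc_nl_add_bc_link(): append a netlink description of the broadcast
 * link, including its properties and statistics, under the bcast lock
 */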
2812  int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
2813  			struct tipc_link *bcl)
2814  {
2815  	int err;
2816  	void *hdr;
2817  	struct nlattr *attrs;
2818  	struct nlattr *prop;
2819  	u32 bc_mode = tipc_bcast_get_mode(net);
2820  	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
2821  
2822  	if (!bcl)
2823  		return 0;
2824  
2825  	tipc_bcast_lock(net);
2826  
2827  	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2828  			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2829  	if (!hdr) {
2830  		tipc_bcast_unlock(net);
2831  		return -EMSGSIZE;
2832  	}
2833  
2834  	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2835  	if (!attrs)
2836  		goto msg_full;
2837  
2838  	/* The broadcast link is always up */
2839  	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2840  		goto attr_msg_full;
2841  
2842  	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2843  		goto attr_msg_full;
2844  	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2845  		goto attr_msg_full;
2846  	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2847  		goto attr_msg_full;
2848  	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2849  		goto attr_msg_full;
2850  
2851  	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2852  	if (!prop)
2853  		goto attr_msg_full;
2854  	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
2855  		goto prop_msg_full;
2856  	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
2857  		goto prop_msg_full;
2858  	if (bc_mode & BCLINK_MODE_SEL)
2859  		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
2860  				bc_ratio))
2861  			goto prop_msg_full;
2862  	nla_nest_end(msg->skb, prop);
2863  
2864  	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2865  	if (err)
2866  		goto attr_msg_full;
2867  
2868  	tipc_bcast_unlock(net);
2869  	nla_nest_end(msg->skb, attrs);
2870  	genlmsg_end(msg->skb, hdr);
2871  
2872  	return 0;
2873  
2874  prop_msg_full:
2875  	nla_nest_cancel(msg->skb, prop);
2876  attr_msg_full:
2877  	nla_nest_cancel(msg->skb, attrs);
2878  msg_full:
2879  	tipc_bcast_unlock(net);
2880  	genlmsg_cancel(msg->skb, hdr);
2881  
2882  	return -EMSGSIZE;
2883  }
2884  
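/* tipc_link_set_tolerance(): update the link tolerance, mirror it to the
 * broadcast receive link, and advertise the new value to the peer in a
 * STATE message if the link is up
 */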
2885  void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2886  			     struct sk_buff_head *xmitq)
2887  {
2888  	l->tolerance = tol;
2889  	if (l->bc_rcvlink)
2890  		l->bc_rcvlink->tolerance = tol;
2891  	if (link_is_up(l))
2892  		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
2893  }
2894  
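/* tipc_link_set_prio(): update the link priority and advertise it to the
 * peer in a STATE message
 */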
2895  void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2896  			struct sk_buff_head *xmitq)
2897  {
2898  	l->priority = prio;
2899  	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
2900  }
2901  
2902  void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2903  {
2904  	l->abort_limit = limit;
2905  }
2906  
2907  /**
2908   * tipc_link_dump - dump TIPC link data
2909   * @l: tipc link to be dumped
2910   * @dqueues: bitmask deciding which link queues to dump:
2911   *           - TIPC_DUMP_NONE: don't dump link queues
2912   *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
2913   *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
2914   *           - TIPC_DUMP_DEFERDQ: dump link deferred queue
2915   *           - TIPC_DUMP_INPUTQ: dump link input queue
2916   *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
2917   *           - TIPC_DUMP_ALL: dump all the link queues above
2918   * @buf: buffer where the formatted dump data is returned
2919   */
2920  int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2921  {
2922  	int i = 0;
2923  	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
2924  	struct sk_buff_head *list;
2925  	struct sk_buff *hskb, *tskb;
2926  	u32 len;
2927  
2928  	if (!l) {
2929  		i += scnprintf(buf, sz, "link data: (null)\n");
2930  		return i;
2931  	}
2932  
2933  	i += scnprintf(buf, sz, "link data: %x", l->addr);
2934  	i += scnprintf(buf + i, sz - i, " %x", l->state);
2935  	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
2936  	i += scnprintf(buf + i, sz - i, " %u", l->session);
2937  	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
2938  	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
2939  	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
2940  	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
2941  	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
2942  	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2943  	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2944  	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2945  	i += scnprintf(buf + i, sz - i, " %u", 0);
2946  	i += scnprintf(buf + i, sz - i, " %u", 0);
2947  	i += scnprintf(buf + i, sz - i, " %u", l->acked);
2948  
2949  	list = &l->transmq;
2950  	len = skb_queue_len(list);
2951  	hskb = skb_peek(list);
2952  	tskb = skb_peek_tail(list);
2953  	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2954  		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2955  		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2956  
2957  	list = &l->deferdq;
2958  	len = skb_queue_len(list);
2959  	hskb = skb_peek(list);
2960  	tskb = skb_peek_tail(list);
2961  	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2962  		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2963  		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2964  
2965  	list = &l->backlogq;
2966  	len = skb_queue_len(list);
2967  	hskb = skb_peek(list);
2968  	tskb = skb_peek_tail(list);
2969  	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2970  		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2971  		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2972  
2973  	list = l->inputq;
2974  	len = skb_queue_len(list);
2975  	hskb = skb_peek(list);
2976  	tskb = skb_peek_tail(list);
2977  	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
2978  		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2979  		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2980  
2981  	if (dqueues & TIPC_DUMP_TRANSMQ) {
2982  		i += scnprintf(buf + i, sz - i, "transmq: ");
2983  		i += tipc_list_dump(&l->transmq, false, buf + i);
2984  	}
2985  	if (dqueues & TIPC_DUMP_BACKLOGQ) {
2986  		i += scnprintf(buf + i, sz - i,
2987  			       "backlogq: <%u %u %u %u %u>, ",
2988  			       l->backlog[TIPC_LOW_IMPORTANCE].len,
2989  			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
2990  			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
2991  			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
2992  			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
2993  		i += tipc_list_dump(&l->backlogq, false, buf + i);
2994  	}
2995  	if (dqueues & TIPC_DUMP_DEFERDQ) {
2996  		i += scnprintf(buf + i, sz - i, "deferdq: ");
2997  		i += tipc_list_dump(&l->deferdq, false, buf + i);
2998  	}
2999  	if (dqueues & TIPC_DUMP_INPUTQ) {
3000  		i += scnprintf(buf + i, sz - i, "inputq: ");
3001  		i += tipc_list_dump(l->inputq, false, buf + i);
3002  	}
3003  	if (dqueues & TIPC_DUMP_WAKEUP) {
3004  		i += scnprintf(buf + i, sz - i, "wakeup: ");
3005  		i += tipc_list_dump(&l->wakeupq, false, buf + i);
3006  	}
3007  
3008  	return i;
3009  }
3010