xref: /openbmc/linux/net/tipc/socket.c (revision 8b030a57)
1 /*
2  * net/tipc/socket.c: TIPC socket API
3  *
4  * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
5  * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include <linux/rhashtable.h>
38 #include <linux/sched/signal.h>
39 
40 #include "core.h"
41 #include "name_table.h"
42 #include "node.h"
43 #include "link.h"
44 #include "name_distr.h"
45 #include "socket.h"
46 #include "bcast.h"
47 #include "netlink.h"
48 #include "group.h"
49 #include "trace.h"
50 
51 #define CONN_TIMEOUT_DEFAULT    8000    /* default connect timeout = 8s */
52 #define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
53 #define TIPC_FWD_MSG		1
54 #define TIPC_MAX_PORT		0xffffffff
55 #define TIPC_MIN_PORT		1
56 #define TIPC_ACK_RATE		4       /* ACK at 1/4 of rcv window size */
57 
58 enum {
59 	TIPC_LISTEN = TCP_LISTEN,
60 	TIPC_ESTABLISHED = TCP_ESTABLISHED,
61 	TIPC_OPEN = TCP_CLOSE,
62 	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
63 	TIPC_CONNECTING = TCP_SYN_SENT,
64 };
65 
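
/* Return address pair for received group messages, filled in by
 * tipc_sk_set_orig_addr(): the sending socket's port id plus, when
 * applicable, the sending group member's name.
 */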
66 struct sockaddr_pair {
67 	struct sockaddr_tipc sock;
68 	struct sockaddr_tipc member;
69 };
70 
71 /**
72  * struct tipc_sock - TIPC socket structure
73  * @sk: socket - interacts with 'port' and with user via the socket API
74  * @conn_type: TIPC type used when connection was established
75  * @conn_instance: TIPC instance used when connection was established
76  * @published: non-zero if port has one or more associated names
77  * @max_pkt: maximum packet size "hint" used when building messages sent by port
78  * @portid: unique port identity in TIPC socket hash table
79  * @phdr: preformatted message header used when sending messages
80  * @cong_links: list of congested links
81  * @publications: list of publications for port
82  * @probe_unacked: probe has been sent to peer, but no ack received yet
83  * @pub_count: total # of publications port has made during its lifetime
84  * @conn_timeout: the time we can wait for an unresponded setup request
85  * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
86  * @cong_link_cnt: number of congested links
87  * @snt_unacked: # messages sent by socket, and not yet acked by peer
88  * @rcv_unacked: # messages read by user, but not yet acked back to peer
89  * @peer: 'connected' peer for dgram/rdm
90  * @node: hash table node
91  * @mc_method: cookie for use between socket and broadcast layer
92  * @rcu: rcu struct for tipc_sock
93  */
94 struct tipc_sock {
95 	struct sock sk;
96 	u32 conn_type;
97 	u32 conn_instance;
98 	int published;
99 	u32 max_pkt;
100 	u32 portid;
101 	struct tipc_msg phdr;
102 	struct list_head cong_links;
103 	struct list_head publications;
104 	u32 pub_count;
105 	atomic_t dupl_rcvcnt;
106 	u16 conn_timeout;
107 	bool probe_unacked;
108 	u16 cong_link_cnt;
109 	u16 snt_unacked;
110 	u16 snd_win;
111 	u16 peer_caps;
112 	u16 rcv_unacked;
113 	u16 rcv_win;
114 	struct sockaddr_tipc peer;
115 	struct rhash_head node;
116 	struct tipc_mc_method mc_method;
117 	struct rcu_head rcu;
118 	struct tipc_group *group;
119 	bool group_is_open;
120 };
121 
122 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
123 static void tipc_data_ready(struct sock *sk);
124 static void tipc_write_space(struct sock *sk);
125 static void tipc_sock_destruct(struct sock *sk);
126 static int tipc_release(struct socket *sock);
127 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
128 		       bool kern);
129 static void tipc_sk_timeout(struct timer_list *t);
130 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
131 			   struct tipc_name_seq const *seq);
132 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
133 			    struct tipc_name_seq const *seq);
134 static int tipc_sk_leave(struct tipc_sock *tsk);
135 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
136 static int tipc_sk_insert(struct tipc_sock *tsk);
137 static void tipc_sk_remove(struct tipc_sock *tsk);
138 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
139 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
140 
141 static const struct proto_ops packet_ops;
142 static const struct proto_ops stream_ops;
143 static const struct proto_ops msg_ops;
144 static struct proto tipc_proto;
145 static const struct rhashtable_params tsk_rht_params;
146 
147 static u32 tsk_own_node(struct tipc_sock *tsk)
148 {
149 	return msg_prevnode(&tsk->phdr);
150 }
151 
152 static u32 tsk_peer_node(struct tipc_sock *tsk)
153 {
154 	return msg_destnode(&tsk->phdr);
155 }
156 
157 static u32 tsk_peer_port(struct tipc_sock *tsk)
158 {
159 	return msg_destport(&tsk->phdr);
160 }
161 
162 static  bool tsk_unreliable(struct tipc_sock *tsk)
163 {
164 	return msg_src_droppable(&tsk->phdr) != 0;
165 }
166 
167 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
168 {
169 	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
170 }
171 
172 static bool tsk_unreturnable(struct tipc_sock *tsk)
173 {
174 	return msg_dest_droppable(&tsk->phdr) != 0;
175 }
176 
177 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
178 {
179 	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
180 }
181 
182 static int tsk_importance(struct tipc_sock *tsk)
183 {
184 	return msg_importance(&tsk->phdr);
185 }
186 
187 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
188 {
189 	if (imp > TIPC_CRITICAL_IMPORTANCE)
190 		return -EINVAL;
191 	msg_set_importance(&tsk->phdr, (u32)imp);
192 	return 0;
193 }
194 
195 static struct tipc_sock *tipc_sk(const struct sock *sk)
196 {
197 	return container_of(sk, struct tipc_sock, sk);
198 }
199 
200 static bool tsk_conn_cong(struct tipc_sock *tsk)
201 {
202 	return tsk->snt_unacked > tsk->snd_win;
203 }
204 
205 static u16 tsk_blocks(int len)
206 {
207 	return ((len / FLOWCTL_BLK_SZ) + 1);
208 }
209 
210 /* tsk_adv_blocks(): translate a buffer size in bytes to number of
211  * advertisable blocks, taking into account the ratio truesize(len)/len
212  * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
213  */
214 static u16 tsk_adv_blocks(int len)
215 {
216 	return len / FLOWCTL_BLK_SZ / 4;
217 }
218 
219 /* tsk_inc(): increment counter for sent or received data
220  * - If block based flow control is not supported by the peer we
221  *   fall back to message based flow control, counting one unit per message
222  */
223 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
224 {
225 	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
226 		return ((msglen / FLOWCTL_BLK_SZ) + 1);
227 	return 1;
228 }
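
/* Worked example of the block accounting above, a sketch assuming
 * FLOWCTL_BLK_SZ is 1024 as defined in socket.h: a socket with a
 * 66000 byte receive buffer advertises tsk_adv_blocks(66000)
 * = 66000 / 1024 / 4 = 16 blocks, keeping a 4x margin for skb
 * truesize overhead, while a peer sending a 5000 byte message
 * charges tsk_inc() = 5000 / 1024 + 1 = 5 blocks against its send
 * window. Peers without TIPC_BLOCK_FLOWCTL charge one unit per
 * message instead.
 */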
229 
230 /**
231  * tsk_advance_rx_queue - discard first buffer in socket receive queue
232  *
233  * Caller must hold socket lock
234  */
235 static void tsk_advance_rx_queue(struct sock *sk)
236 {
237 	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
238 	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
239 }
240 
241 /* tipc_sk_respond() : send response message back to sender
242  */
243 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
244 {
245 	u32 selector;
246 	u32 dnode;
247 	u32 onode = tipc_own_addr(sock_net(sk));
248 
249 	if (!tipc_msg_reverse(onode, &skb, err))
250 		return;
251 
252 	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
253 	dnode = msg_destnode(buf_msg(skb));
254 	selector = msg_origport(buf_msg(skb));
255 	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
256 }
257 
258 /**
259  * tsk_rej_rx_queue - reject all buffers in socket receive queue
260  *
261  * Caller must hold socket lock
262  */
263 static void tsk_rej_rx_queue(struct sock *sk)
264 {
265 	struct sk_buff *skb;
266 
267 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
268 		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
269 }
270 
271 static bool tipc_sk_connected(struct sock *sk)
272 {
273 	return sk->sk_state == TIPC_ESTABLISHED;
274 }
275 
276 /* tipc_sk_type_connectionless - check if the socket is datagram socket
277  * @sk: socket
278  *
279  * Returns true if connectionless, false otherwise
280  */
281 static bool tipc_sk_type_connectionless(struct sock *sk)
282 {
283 	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
284 }
285 
286 /* tsk_peer_msg - verify if message was sent by connected port's peer
287  *
288  * Handles cases where the node's network address has changed from
289  * the default of <0.0.0> to its configured setting.
290  */
291 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
292 {
293 	struct sock *sk = &tsk->sk;
294 	u32 self = tipc_own_addr(sock_net(sk));
295 	u32 peer_port = tsk_peer_port(tsk);
296 	u32 orig_node, peer_node;
297 
298 	if (unlikely(!tipc_sk_connected(sk)))
299 		return false;
300 
301 	if (unlikely(msg_origport(msg) != peer_port))
302 		return false;
303 
304 	orig_node = msg_orignode(msg);
305 	peer_node = tsk_peer_node(tsk);
306 
307 	if (likely(orig_node == peer_node))
308 		return true;
309 
310 	if (!orig_node && peer_node == self)
311 		return true;
312 
313 	if (!peer_node && orig_node == self)
314 		return true;
315 
316 	return false;
317 }
318 
319 /* tipc_set_sk_state - set the sk_state of the socket
320  * @sk: socket
321  *
322  * Caller must hold socket lock
323  *
324  * Returns 0 on success, errno otherwise
325  */
326 static int tipc_set_sk_state(struct sock *sk, int state)
327 {
328 	int oldsk_state = sk->sk_state;
329 	int res = -EINVAL;
330 
331 	switch (state) {
332 	case TIPC_OPEN:
333 		res = 0;
334 		break;
335 	case TIPC_LISTEN:
336 	case TIPC_CONNECTING:
337 		if (oldsk_state == TIPC_OPEN)
338 			res = 0;
339 		break;
340 	case TIPC_ESTABLISHED:
341 		if (oldsk_state == TIPC_CONNECTING ||
342 		    oldsk_state == TIPC_OPEN)
343 			res = 0;
344 		break;
345 	case TIPC_DISCONNECTING:
346 		if (oldsk_state == TIPC_CONNECTING ||
347 		    oldsk_state == TIPC_ESTABLISHED)
348 			res = 0;
349 		break;
350 	}
351 
352 	if (!res)
353 		sk->sk_state = state;
354 
355 	return res;
356 }
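
/* For reference, the transitions permitted by the switch above:
 *
 *	(any state)                        -> TIPC_OPEN
 *	TIPC_OPEN                          -> TIPC_LISTEN, TIPC_CONNECTING
 *	TIPC_OPEN, TIPC_CONNECTING         -> TIPC_ESTABLISHED
 *	TIPC_CONNECTING, TIPC_ESTABLISHED  -> TIPC_DISCONNECTING
 */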
357 
358 static int tipc_sk_sock_err(struct socket *sock, long *timeout)
359 {
360 	struct sock *sk = sock->sk;
361 	int err = sock_error(sk);
362 	int typ = sock->type;
363 
364 	if (err)
365 		return err;
366 	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
367 		if (sk->sk_state == TIPC_DISCONNECTING)
368 			return -EPIPE;
369 		else if (!tipc_sk_connected(sk))
370 			return -ENOTCONN;
371 	}
372 	if (!*timeout)
373 		return -EAGAIN;
374 	if (signal_pending(current))
375 		return sock_intr_errno(*timeout);
376 
377 	return 0;
378 }
379 
380 #define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
381 ({                                                                             \
382 	struct sock *sk_;						       \
383 	int rc_;							       \
384 									       \
385 	while ((rc_ = !(condition_))) {					       \
386 		DEFINE_WAIT_FUNC(wait_, woken_wake_function);	               \
387 		sk_ = (sock_)->sk;					       \
388 		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
389 		if (rc_)						       \
390 			break;						       \
391 		add_wait_queue(sk_sleep(sk_), &wait_);                         \
392 		release_sock(sk_);					       \
393 		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
394 		sched_annotate_sleep();				               \
395 		lock_sock(sk_);						       \
396 		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
397 	}								       \
398 	rc_;								       \
399 })
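
/* Typical use, as in tipc_sendmcast() below: sleep with the socket lock
 * released until the condition holds, a socket error occurs, the timeout
 * expires or a signal arrives:
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (unlikely(rc))
 *		return rc;
 */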
400 
401 /**
402  * tipc_sk_create - create a TIPC socket
403  * @net: network namespace (must be default network)
404  * @sock: pre-allocated socket structure
405  * @protocol: protocol indicator (must be 0)
406  * @kern: caused by kernel or by userspace?
407  *
408  * This routine creates additional data structures used by the TIPC socket,
409  * initializes them, and links them together.
410  *
411  * Returns 0 on success, errno otherwise
412  */
413 static int tipc_sk_create(struct net *net, struct socket *sock,
414 			  int protocol, int kern)
415 {
416 	const struct proto_ops *ops;
417 	struct sock *sk;
418 	struct tipc_sock *tsk;
419 	struct tipc_msg *msg;
420 
421 	/* Validate arguments */
422 	if (unlikely(protocol != 0))
423 		return -EPROTONOSUPPORT;
424 
425 	switch (sock->type) {
426 	case SOCK_STREAM:
427 		ops = &stream_ops;
428 		break;
429 	case SOCK_SEQPACKET:
430 		ops = &packet_ops;
431 		break;
432 	case SOCK_DGRAM:
433 	case SOCK_RDM:
434 		ops = &msg_ops;
435 		break;
436 	default:
437 		return -EPROTOTYPE;
438 	}
439 
440 	/* Allocate socket's protocol area */
441 	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
442 	if (sk == NULL)
443 		return -ENOMEM;
444 
445 	tsk = tipc_sk(sk);
446 	tsk->max_pkt = MAX_PKT_DEFAULT;
447 	INIT_LIST_HEAD(&tsk->publications);
448 	INIT_LIST_HEAD(&tsk->cong_links);
449 	msg = &tsk->phdr;
450 
451 	/* Finish initializing socket data structures */
452 	sock->ops = ops;
453 	sock_init_data(sock, sk);
454 	tipc_set_sk_state(sk, TIPC_OPEN);
455 	if (tipc_sk_insert(tsk)) {
456 		pr_warn("Socket create failed; port number exhausted\n");
457 		return -EINVAL;
458 	}
459 
460 	/* Ensure tsk is visible before we read own_addr. */
461 	smp_mb();
462 
463 	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
464 		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
465 
466 	msg_set_origport(msg, tsk->portid);
467 	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
468 	sk->sk_shutdown = 0;
469 	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
470 	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
471 	sk->sk_data_ready = tipc_data_ready;
472 	sk->sk_write_space = tipc_write_space;
473 	sk->sk_destruct = tipc_sock_destruct;
474 	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
475 	tsk->group_is_open = true;
476 	atomic_set(&tsk->dupl_rcvcnt, 0);
477 
478 	/* Start out with safe limits until we receive an advertised window */
479 	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
480 	tsk->rcv_win = tsk->snd_win;
481 
482 	if (tipc_sk_type_connectionless(sk)) {
483 		tsk_set_unreturnable(tsk, true);
484 		if (sock->type == SOCK_DGRAM)
485 			tsk_set_unreliable(tsk, true);
486 	}
487 
488 	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
489 	return 0;
490 }
491 
492 static void tipc_sk_callback(struct rcu_head *head)
493 {
494 	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
495 
496 	sock_put(&tsk->sk);
497 }
498 
499 /* Caller should hold socket lock for the socket. */
500 static void __tipc_shutdown(struct socket *sock, int error)
501 {
502 	struct sock *sk = sock->sk;
503 	struct tipc_sock *tsk = tipc_sk(sk);
504 	struct net *net = sock_net(sk);
505 	long timeout = CONN_TIMEOUT_DEFAULT;
506 	u32 dnode = tsk_peer_node(tsk);
507 	struct sk_buff *skb;
508 
509 	/* Prevent hi-prio shutdown msgs from bypassing msgs in link wakeup queue */
510 	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
511 					    !tsk_conn_cong(tsk)));
512 
513 	/* Remove any pending SYN message */
514 	__skb_queue_purge(&sk->sk_write_queue);
515 
516 	/* Reject all unreceived messages, except on an active connection
517 	 * (which disconnects locally & sends a 'FIN+' to peer).
518 	 */
519 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
520 		if (TIPC_SKB_CB(skb)->bytes_read) {
521 			kfree_skb(skb);
522 			continue;
523 		}
524 		if (!tipc_sk_type_connectionless(sk) &&
525 		    sk->sk_state != TIPC_DISCONNECTING) {
526 			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
527 			tipc_node_remove_conn(net, dnode, tsk->portid);
528 		}
529 		tipc_sk_respond(sk, skb, error);
530 	}
531 
532 	if (tipc_sk_type_connectionless(sk))
533 		return;
534 
535 	if (sk->sk_state != TIPC_DISCONNECTING) {
536 		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
537 				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
538 				      tsk_own_node(tsk), tsk_peer_port(tsk),
539 				      tsk->portid, error);
540 		if (skb)
541 			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
542 		tipc_node_remove_conn(net, dnode, tsk->portid);
543 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
544 	}
545 }
546 
547 /**
548  * tipc_release - destroy a TIPC socket
549  * @sock: socket to destroy
550  *
551  * This routine cleans up any messages that are still queued on the socket.
552  * For DGRAM and RDM socket types, all queued messages are rejected.
553  * For SEQPACKET and STREAM socket types, the first message is rejected
554  * and any others are discarded.  (If the first message on a STREAM socket
555  * is partially-read, it is discarded and the next one is rejected instead.)
556  *
557  * NOTE: Rejected messages are not necessarily returned to the sender!  They
558  * are returned or discarded according to the "destination droppable" setting
559  * specified for the message by the sender.
560  *
561  * Returns 0 on success, errno otherwise
562  */
563 static int tipc_release(struct socket *sock)
564 {
565 	struct sock *sk = sock->sk;
566 	struct tipc_sock *tsk;
567 
568 	/*
569 	 * Exit if socket isn't fully initialized (occurs when a failed accept()
570 	 * releases a pre-allocated child socket that was never used)
571 	 */
572 	if (sk == NULL)
573 		return 0;
574 
575 	tsk = tipc_sk(sk);
576 	lock_sock(sk);
577 
578 	trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
579 	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
580 	sk->sk_shutdown = SHUTDOWN_MASK;
581 	tipc_sk_leave(tsk);
582 	tipc_sk_withdraw(tsk, 0, NULL);
583 	sk_stop_timer(sk, &sk->sk_timer);
584 	tipc_sk_remove(tsk);
585 
586 	sock_orphan(sk);
587 	/* Reject any messages that accumulated in backlog queue */
588 	release_sock(sk);
589 	tipc_dest_list_purge(&tsk->cong_links);
590 	tsk->cong_link_cnt = 0;
591 	call_rcu(&tsk->rcu, tipc_sk_callback);
592 	sock->sk = NULL;
593 
594 	return 0;
595 }
596 
597 /**
598  * tipc_bind - associate or disassociate TIPC name(s) with a socket
599  * @sock: socket structure
600  * @uaddr: socket address describing name(s) and desired operation
601  * @uaddr_len: size of socket address data structure
602  *
603  * Name and name sequence binding is indicated using a positive scope value;
604  * a negative scope value unbinds the specified name.  Specifying no name
605  * (i.e. a socket address length of 0) unbinds all names from the socket.
606  *
607  * Returns 0 on success, errno otherwise
608  *
609  * NOTE: This routine doesn't need to take the socket lock since it doesn't
610  *       access any non-constant socket information.
611  */
612 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
613 		     int uaddr_len)
614 {
615 	struct sock *sk = sock->sk;
616 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
617 	struct tipc_sock *tsk = tipc_sk(sk);
618 	int res = -EINVAL;
619 
620 	lock_sock(sk);
621 	if (unlikely(!uaddr_len)) {
622 		res = tipc_sk_withdraw(tsk, 0, NULL);
623 		goto exit;
624 	}
625 	if (tsk->group) {
626 		res = -EACCES;
627 		goto exit;
628 	}
629 	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
630 		res = -EINVAL;
631 		goto exit;
632 	}
633 	if (addr->family != AF_TIPC) {
634 		res = -EAFNOSUPPORT;
635 		goto exit;
636 	}
637 
638 	if (addr->addrtype == TIPC_ADDR_NAME)
639 		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
640 	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
641 		res = -EAFNOSUPPORT;
642 		goto exit;
643 	}
644 
645 	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
646 	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
647 	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
648 		res = -EACCES;
649 		goto exit;
650 	}
651 
652 	res = (addr->scope >= 0) ?
653 		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
654 		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
655 exit:
656 	release_sock(sk);
657 	return res;
658 }
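
/* User space counterpart of the above, a minimal sketch assuming the
 * uapi definitions from <linux/tipc.h> (service type 1000 is an
 * arbitrary example value):
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 0, .upper = 99 },
 *	};
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));
 *
 * Rebinding with a negated scope withdraws the same name sequence.
 */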
659 
660 /**
661  * tipc_getname - get port ID of socket or peer socket
662  * @sock: socket structure
663  * @uaddr: area for returned socket address
664  * @uaddr_len: area for returned length of socket address
665  * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
666  *
667  * Returns 0 on success, errno otherwise
668  *
669  * NOTE: This routine doesn't need to take the socket lock since it only
670  *       accesses socket information that is unchanging (or which changes in
671  *       a completely predictable manner).
672  */
673 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
674 			int peer)
675 {
676 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
677 	struct sock *sk = sock->sk;
678 	struct tipc_sock *tsk = tipc_sk(sk);
679 
680 	memset(addr, 0, sizeof(*addr));
681 	if (peer) {
682 		if ((!tipc_sk_connected(sk)) &&
683 		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
684 			return -ENOTCONN;
685 		addr->addr.id.ref = tsk_peer_port(tsk);
686 		addr->addr.id.node = tsk_peer_node(tsk);
687 	} else {
688 		addr->addr.id.ref = tsk->portid;
689 		addr->addr.id.node = tipc_own_addr(sock_net(sk));
690 	}
691 
692 	addr->addrtype = TIPC_ADDR_ID;
693 	addr->family = AF_TIPC;
694 	addr->scope = 0;
695 	addr->addr.name.domain = 0;
696 
697 	return sizeof(*addr);
698 }
699 
700 /**
701  * tipc_poll - read and possibly block on pollmask
702  * @file: file structure associated with the socket
703  * @sock: socket for which to calculate the poll bits
704  * @wait: poll table entry
705  *
706  * Returns pollmask value
707  *
708  * COMMENTARY:
709  * It appears that the usual socket locking mechanisms are not useful here
710  * since the pollmask info is potentially out-of-date the moment this routine
711  * exits.  TCP and other protocols seem to rely on higher level poll routines
712  * to handle any preventable race conditions, so TIPC will do the same ...
713  *
714  * IMPORTANT: The fact that a read or write operation is indicated does NOT
715  * imply that the operation will succeed, merely that it should be performed
716  * and will not block.
717  */
718 static __poll_t tipc_poll(struct file *file, struct socket *sock,
719 			      poll_table *wait)
720 {
721 	struct sock *sk = sock->sk;
722 	struct tipc_sock *tsk = tipc_sk(sk);
723 	__poll_t revents = 0;
724 
725 	sock_poll_wait(file, sock, wait);
726 	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
727 
728 	if (sk->sk_shutdown & RCV_SHUTDOWN)
729 		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
730 	if (sk->sk_shutdown == SHUTDOWN_MASK)
731 		revents |= EPOLLHUP;
732 
733 	switch (sk->sk_state) {
734 	case TIPC_ESTABLISHED:
735 	case TIPC_CONNECTING:
736 		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
737 			revents |= EPOLLOUT;
738 		/* fall through */
739 	case TIPC_LISTEN:
740 		if (!skb_queue_empty(&sk->sk_receive_queue))
741 			revents |= EPOLLIN | EPOLLRDNORM;
742 		break;
743 	case TIPC_OPEN:
744 		if (tsk->group_is_open && !tsk->cong_link_cnt)
745 			revents |= EPOLLOUT;
746 		if (!tipc_sk_type_connectionless(sk))
747 			break;
748 		if (skb_queue_empty(&sk->sk_receive_queue))
749 			break;
750 		revents |= EPOLLIN | EPOLLRDNORM;
751 		break;
752 	case TIPC_DISCONNECTING:
753 		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
754 		break;
755 	}
756 	return revents;
757 }
758 
759 /**
760  * tipc_sendmcast - send multicast message
761  * @sock: socket structure
762  * @seq: destination address
763  * @msg: message to send
764  * @dlen: length of data to send
765  * @timeout: timeout to wait for wakeup
766  *
767  * Called from function tipc_sendmsg(), which has done all sanity checks
768  * Returns the number of bytes sent on success, or errno
769  */
770 static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
771 			  struct msghdr *msg, size_t dlen, long timeout)
772 {
773 	struct sock *sk = sock->sk;
774 	struct tipc_sock *tsk = tipc_sk(sk);
775 	struct tipc_msg *hdr = &tsk->phdr;
776 	struct net *net = sock_net(sk);
777 	int mtu = tipc_bcast_get_mtu(net);
778 	struct tipc_mc_method *method = &tsk->mc_method;
779 	struct sk_buff_head pkts;
780 	struct tipc_nlist dsts;
781 	int rc;
782 
783 	if (tsk->group)
784 		return -EACCES;
785 
786 	/* Block or return if any destination link is congested */
787 	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
788 	if (unlikely(rc))
789 		return rc;
790 
791 	/* Lookup destination nodes */
792 	tipc_nlist_init(&dsts, tipc_own_addr(net));
793 	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
794 				      seq->upper, &dsts);
795 	if (!dsts.local && !dsts.remote)
796 		return -EHOSTUNREACH;
797 
798 	/* Build message header */
799 	msg_set_type(hdr, TIPC_MCAST_MSG);
800 	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
801 	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
802 	msg_set_destport(hdr, 0);
803 	msg_set_destnode(hdr, 0);
804 	msg_set_nametype(hdr, seq->type);
805 	msg_set_namelower(hdr, seq->lower);
806 	msg_set_nameupper(hdr, seq->upper);
807 
808 	/* Build message as chain of buffers */
809 	skb_queue_head_init(&pkts);
810 	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
811 
812 	/* Send message if build was successful */
813 	if (unlikely(rc == dlen)) {
814 		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
815 					TIPC_DUMP_SK_SNDQ, " ");
816 		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
817 				     &tsk->cong_link_cnt);
818 	}
819 
820 	tipc_nlist_purge(&dsts);
821 
822 	return rc ? rc : dlen;
823 }
824 
825 /**
826  * tipc_send_group_msg - send a message to a member in the group
827  * @net: network namespace
828  * @m: message to send
829  * @mb: group member
830  * @dnode: destination node
831  * @dport: destination port
832  * @dlen: total length of message data
833  */
834 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
835 			       struct msghdr *m, struct tipc_member *mb,
836 			       u32 dnode, u32 dport, int dlen)
837 {
838 	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
839 	struct tipc_mc_method *method = &tsk->mc_method;
840 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
841 	struct tipc_msg *hdr = &tsk->phdr;
842 	struct sk_buff_head pkts;
843 	int mtu, rc;
844 
845 	/* Complete message header */
846 	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
847 	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
848 	msg_set_destport(hdr, dport);
849 	msg_set_destnode(hdr, dnode);
850 	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
851 
852 	/* Build message as chain of buffers */
853 	skb_queue_head_init(&pkts);
854 	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
855 	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
856 	if (unlikely(rc != dlen))
857 		return rc;
858 
859 	/* Send message */
860 	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
861 	if (unlikely(rc == -ELINKCONG)) {
862 		tipc_dest_push(&tsk->cong_links, dnode, 0);
863 		tsk->cong_link_cnt++;
864 	}
865 
866 	/* Update send window */
867 	tipc_group_update_member(mb, blks);
868 
869 	/* A broadcast sent within next EXPIRE period must follow same path */
870 	method->rcast = true;
871 	method->mandatory = true;
872 	return dlen;
873 }
874 
875 /**
876  * tipc_send_group_unicast - send message to a member in the group
877  * @sock: socket structure
878  * @m: message to send
879  * @dlen: total length of message data
880  * @timeout: timeout to wait for wakeup
881  *
882  * Called from function tipc_sendmsg(), which has done all sanity checks
883  * Returns the number of bytes sent on success, or errno
884  */
885 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
886 				   int dlen, long timeout)
887 {
888 	struct sock *sk = sock->sk;
889 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
890 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
891 	struct tipc_sock *tsk = tipc_sk(sk);
892 	struct net *net = sock_net(sk);
893 	struct tipc_member *mb = NULL;
894 	u32 node, port;
895 	int rc;
896 
897 	node = dest->addr.id.node;
898 	port = dest->addr.id.ref;
899 	if (!port && !node)
900 		return -EHOSTUNREACH;
901 
902 	/* Block or return if destination link or member is congested */
903 	rc = tipc_wait_for_cond(sock, &timeout,
904 				!tipc_dest_find(&tsk->cong_links, node, 0) &&
905 				tsk->group &&
906 				!tipc_group_cong(tsk->group, node, port, blks,
907 						 &mb));
908 	if (unlikely(rc))
909 		return rc;
910 
911 	if (unlikely(!mb))
912 		return -EHOSTUNREACH;
913 
914 	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
915 
916 	return rc ? rc : dlen;
917 }
918 
919 /**
920  * tipc_send_group_anycast - send message to any member with given identity
921  * @sock: socket structure
922  * @m: message to send
923  * @dlen: total length of message data
924  * @timeout: timeout to wait for wakeup
925  *
926  * Called from function tipc_sendmsg(), which has done all sanity checks
927  * Returns the number of bytes sent on success, or errno
928  */
929 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
930 				   int dlen, long timeout)
931 {
932 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
933 	struct sock *sk = sock->sk;
934 	struct tipc_sock *tsk = tipc_sk(sk);
935 	struct list_head *cong_links = &tsk->cong_links;
936 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
937 	struct tipc_msg *hdr = &tsk->phdr;
938 	struct tipc_member *first = NULL;
939 	struct tipc_member *mbr = NULL;
940 	struct net *net = sock_net(sk);
941 	u32 node, port, exclude;
942 	struct list_head dsts;
943 	u32 type, inst, scope;
944 	int lookups = 0;
945 	int dstcnt, rc;
946 	bool cong;
947 
948 	INIT_LIST_HEAD(&dsts);
949 
950 	type = msg_nametype(hdr);
951 	inst = dest->addr.name.name.instance;
952 	scope = msg_lookup_scope(hdr);
953 
954 	while (++lookups < 4) {
955 		exclude = tipc_group_exclude(tsk->group);
956 
957 		first = NULL;
958 
959 		/* Look for a non-congested destination member, if any */
960 		while (1) {
961 			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
962 						 &dstcnt, exclude, false))
963 				return -EHOSTUNREACH;
964 			tipc_dest_pop(&dsts, &node, &port);
965 			cong = tipc_group_cong(tsk->group, node, port, blks,
966 					       &mbr);
967 			if (!cong)
968 				break;
969 			if (mbr == first)
970 				break;
971 			if (!first)
972 				first = mbr;
973 		}
974 
975 		/* Start over if destination was not in member list */
976 		if (unlikely(!mbr))
977 			continue;
978 
979 		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
980 			break;
981 
982 		/* Block or return if destination link or member is congested */
983 		rc = tipc_wait_for_cond(sock, &timeout,
984 					!tipc_dest_find(cong_links, node, 0) &&
985 					tsk->group &&
986 					!tipc_group_cong(tsk->group, node, port,
987 							 blks, &mbr));
988 		if (unlikely(rc))
989 			return rc;
990 
991 		/* Send, unless destination disappeared while waiting */
992 		if (likely(mbr))
993 			break;
994 	}
995 
996 	if (unlikely(lookups >= 4))
997 		return -EHOSTUNREACH;
998 
999 	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
1000 
1001 	return rc ? rc : dlen;
1002 }
1003 
1004 /**
1005  * tipc_send_group_bcast - send message to all members in communication group
1006  * @sock: socket structure
1007  * @m: message to send
1008  * @dlen: total length of message data
1009  * @timeout: timeout to wait for wakeup
1010  *
1011  * Called from function tipc_sendmsg(), which has done all sanity checks
1012  * Returns the number of bytes sent on success, or errno
1013  */
1014 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1015 				 int dlen, long timeout)
1016 {
1017 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1018 	struct sock *sk = sock->sk;
1019 	struct net *net = sock_net(sk);
1020 	struct tipc_sock *tsk = tipc_sk(sk);
1021 	struct tipc_nlist *dsts;
1022 	struct tipc_mc_method *method = &tsk->mc_method;
1023 	bool ack = method->mandatory && method->rcast;
1024 	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1025 	struct tipc_msg *hdr = &tsk->phdr;
1026 	int mtu = tipc_bcast_get_mtu(net);
1027 	struct sk_buff_head pkts;
1028 	int rc = -EHOSTUNREACH;
1029 
1030 	/* Block or return if any destination link or member is congested */
1031 	rc = tipc_wait_for_cond(sock, &timeout,
1032 				!tsk->cong_link_cnt && tsk->group &&
1033 				!tipc_group_bc_cong(tsk->group, blks));
1034 	if (unlikely(rc))
1035 		return rc;
1036 
1037 	dsts = tipc_group_dests(tsk->group);
1038 	if (!dsts->local && !dsts->remote)
1039 		return -EHOSTUNREACH;
1040 
1041 	/* Complete message header */
1042 	if (dest) {
1043 		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1044 		msg_set_nameinst(hdr, dest->addr.name.name.instance);
1045 	} else {
1046 		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1047 		msg_set_nameinst(hdr, 0);
1048 	}
1049 	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1050 	msg_set_destport(hdr, 0);
1051 	msg_set_destnode(hdr, 0);
1052 	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1053 
1054 	/* Avoid getting stuck with repeated forced replicasts */
1055 	msg_set_grp_bc_ack_req(hdr, ack);
1056 
1057 	/* Build message as chain of buffers */
1058 	skb_queue_head_init(&pkts);
1059 	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1060 	if (unlikely(rc != dlen))
1061 		return rc;
1062 
1063 	/* Send message */
1064 	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1065 	if (unlikely(rc))
1066 		return rc;
1067 
1068 	/* Update broadcast sequence number and send windows */
1069 	tipc_group_update_bc_members(tsk->group, blks, ack);
1070 
1071 	/* Broadcast link is now free to choose method for next broadcast */
1072 	method->mandatory = false;
1073 	method->expires = jiffies;
1074 
1075 	return dlen;
1076 }
1077 
1078 /**
1079  * tipc_send_group_mcast - send message to all members with given identity
1080  * @sock: socket structure
1081  * @m: message to send
1082  * @dlen: total length of message data
1083  * @timeout: timeout to wait for wakeup
1084  *
1085  * Called from function tipc_sendmsg(), which has done all sanity checks
1086  * Returns the number of bytes sent on success, or errno
1087  */
1088 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1089 				 int dlen, long timeout)
1090 {
1091 	struct sock *sk = sock->sk;
1092 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1093 	struct tipc_sock *tsk = tipc_sk(sk);
1094 	struct tipc_group *grp = tsk->group;
1095 	struct tipc_msg *hdr = &tsk->phdr;
1096 	struct net *net = sock_net(sk);
1097 	u32 type, inst, scope, exclude;
1098 	struct list_head dsts;
1099 	u32 dstcnt;
1100 
1101 	INIT_LIST_HEAD(&dsts);
1102 
1103 	type = msg_nametype(hdr);
1104 	inst = dest->addr.name.name.instance;
1105 	scope = msg_lookup_scope(hdr);
1106 	exclude = tipc_group_exclude(grp);
1107 
1108 	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
1109 				 &dstcnt, exclude, true))
1110 		return -EHOSTUNREACH;
1111 
1112 	if (dstcnt == 1) {
1113 		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
1114 		return tipc_send_group_unicast(sock, m, dlen, timeout);
1115 	}
1116 
1117 	tipc_dest_list_purge(&dsts);
1118 	return tipc_send_group_bcast(sock, m, dlen, timeout);
1119 }
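
/* Group send dispatch, as wired up in __tipc_sendmsg() below:
 *
 *	no destination address  -> tipc_send_group_bcast()
 *	TIPC_ADDR_NAME          -> tipc_send_group_anycast()
 *	TIPC_ADDR_ID            -> tipc_send_group_unicast()
 *	TIPC_ADDR_MCAST         -> tipc_send_group_mcast()
 */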
1120 
1121 /**
1122  * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
1123  * @arrvq: queue with arriving messages, to be cloned after destination lookup
1124  * @inputq: queue with cloned messages, delivered to socket after dest lookup
1125  *
1126  * Multi-threaded: parallel calls with reference to same queues may occur
1127  */
1128 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1129 		       struct sk_buff_head *inputq)
1130 {
1131 	u32 self = tipc_own_addr(net);
1132 	u32 type, lower, upper, scope;
1133 	struct sk_buff *skb, *_skb;
1134 	u32 portid, onode;
1135 	struct sk_buff_head tmpq;
1136 	struct list_head dports;
1137 	struct tipc_msg *hdr;
1138 	int user, mtyp, hlen;
1139 	bool exact;
1140 
1141 	__skb_queue_head_init(&tmpq);
1142 	INIT_LIST_HEAD(&dports);
1143 
1144 	skb = tipc_skb_peek(arrvq, &inputq->lock);
1145 	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1146 		hdr = buf_msg(skb);
1147 		user = msg_user(hdr);
1148 		mtyp = msg_type(hdr);
1149 		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
1150 		onode = msg_orignode(hdr);
1151 		type = msg_nametype(hdr);
1152 
1153 		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1154 			spin_lock_bh(&inputq->lock);
1155 			if (skb_peek(arrvq) == skb) {
1156 				__skb_dequeue(arrvq);
1157 				__skb_queue_tail(inputq, skb);
1158 			}
1159 			kfree_skb(skb);
1160 			spin_unlock_bh(&inputq->lock);
1161 			continue;
1162 		}
1163 
1164 		/* Group messages require exact scope match */
1165 		if (msg_in_group(hdr)) {
1166 			lower = 0;
1167 			upper = ~0;
1168 			scope = msg_lookup_scope(hdr);
1169 			exact = true;
1170 		} else {
1171 			/* TIPC_NODE_SCOPE means "any scope" in this context */
1172 			if (onode == self)
1173 				scope = TIPC_NODE_SCOPE;
1174 			else
1175 				scope = TIPC_CLUSTER_SCOPE;
1176 			exact = false;
1177 			lower = msg_namelower(hdr);
1178 			upper = msg_nameupper(hdr);
1179 		}
1180 
1181 		/* Create destination port list: */
1182 		tipc_nametbl_mc_lookup(net, type, lower, upper,
1183 				       scope, exact, &dports);
1184 
1185 		/* Clone message per destination */
1186 		while (tipc_dest_pop(&dports, NULL, &portid)) {
1187 			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
1188 			if (_skb) {
1189 				msg_set_destport(buf_msg(_skb), portid);
1190 				__skb_queue_tail(&tmpq, _skb);
1191 				continue;
1192 			}
1193 			pr_warn("Failed to clone mcast rcv buffer\n");
1194 		}
1195 		/* Append to inputq if not already done by other thread */
1196 		spin_lock_bh(&inputq->lock);
1197 		if (skb_peek(arrvq) == skb) {
1198 			skb_queue_splice_tail_init(&tmpq, inputq);
1199 			kfree_skb(__skb_dequeue(arrvq));
1200 		}
1201 		spin_unlock_bh(&inputq->lock);
1202 		__skb_queue_purge(&tmpq);
1203 		kfree_skb(skb);
1204 	}
1205 	tipc_sk_rcv(net, inputq);
1206 }
1207 
1208 /**
1209  * tipc_sk_conn_proto_rcv - receive a connection manager protocol message
1210  * @tsk: receiving socket
1211  * @skb: pointer to message buffer.
1212  */
1213 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1214 				   struct sk_buff_head *inputq,
1215 				   struct sk_buff_head *xmitq)
1216 {
1217 	struct tipc_msg *hdr = buf_msg(skb);
1218 	u32 onode = tsk_own_node(tsk);
1219 	struct sock *sk = &tsk->sk;
1220 	int mtyp = msg_type(hdr);
1221 	bool conn_cong;
1222 
1223 	/* Ignore if connection cannot be validated: */
1224 	if (!tsk_peer_msg(tsk, hdr)) {
1225 		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
1226 		goto exit;
1227 	}
1228 
1229 	if (unlikely(msg_errcode(hdr))) {
1230 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1231 		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1232 				      tsk_peer_port(tsk));
1233 		sk->sk_state_change(sk);
1234 
1235 		/* State change is ignored if socket already awake,
1236 		 * - convert msg to abort msg and add to inqueue
1237 		 */
1238 		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
1239 		msg_set_type(hdr, TIPC_CONN_MSG);
1240 		msg_set_size(hdr, BASIC_H_SIZE);
1241 		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1242 		__skb_queue_tail(inputq, skb);
1243 		return;
1244 	}
1245 
1246 	tsk->probe_unacked = false;
1247 
1248 	if (mtyp == CONN_PROBE) {
1249 		msg_set_type(hdr, CONN_PROBE_REPLY);
1250 		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1251 			__skb_queue_tail(xmitq, skb);
1252 		return;
1253 	} else if (mtyp == CONN_ACK) {
1254 		conn_cong = tsk_conn_cong(tsk);
1255 		tsk->snt_unacked -= msg_conn_ack(hdr);
1256 		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1257 			tsk->snd_win = msg_adv_win(hdr);
1258 		if (conn_cong)
1259 			sk->sk_write_space(sk);
1260 	} else if (mtyp != CONN_PROBE_REPLY) {
1261 		pr_warn("Received unknown CONN_PROTO msg\n");
1262 	}
1263 exit:
1264 	kfree_skb(skb);
1265 }
1266 
1267 /**
1268  * tipc_sendmsg - send message in connectionless manner
1269  * @sock: socket structure
1270  * @m: message to send
1271  * @dsz: amount of user data to be sent
1272  *
1273  * Message must have a destination specified explicitly.
1274  * Used for SOCK_RDM and SOCK_DGRAM messages,
1275  * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
1276  * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
1277  *
1278  * Returns the number of bytes sent on success, or errno otherwise
1279  */
1280 static int tipc_sendmsg(struct socket *sock,
1281 			struct msghdr *m, size_t dsz)
1282 {
1283 	struct sock *sk = sock->sk;
1284 	int ret;
1285 
1286 	lock_sock(sk);
1287 	ret = __tipc_sendmsg(sock, m, dsz);
1288 	release_sock(sk);
1289 
1290 	return ret;
1291 }
1292 
1293 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1294 {
1295 	struct sock *sk = sock->sk;
1296 	struct net *net = sock_net(sk);
1297 	struct tipc_sock *tsk = tipc_sk(sk);
1298 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1299 	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1300 	struct list_head *clinks = &tsk->cong_links;
1301 	bool syn = !tipc_sk_type_connectionless(sk);
1302 	struct tipc_group *grp = tsk->group;
1303 	struct tipc_msg *hdr = &tsk->phdr;
1304 	struct tipc_name_seq *seq;
1305 	struct sk_buff_head pkts;
1306 	u32 dport, dnode = 0;
1307 	u32 type, inst;
1308 	int mtu, rc;
1309 
1310 	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
1311 		return -EMSGSIZE;
1312 
1313 	if (likely(dest)) {
1314 		if (unlikely(m->msg_namelen < sizeof(*dest)))
1315 			return -EINVAL;
1316 		if (unlikely(dest->family != AF_TIPC))
1317 			return -EINVAL;
1318 	}
1319 
1320 	if (grp) {
1321 		if (!dest)
1322 			return tipc_send_group_bcast(sock, m, dlen, timeout);
1323 		if (dest->addrtype == TIPC_ADDR_NAME)
1324 			return tipc_send_group_anycast(sock, m, dlen, timeout);
1325 		if (dest->addrtype == TIPC_ADDR_ID)
1326 			return tipc_send_group_unicast(sock, m, dlen, timeout);
1327 		if (dest->addrtype == TIPC_ADDR_MCAST)
1328 			return tipc_send_group_mcast(sock, m, dlen, timeout);
1329 		return -EINVAL;
1330 	}
1331 
1332 	if (unlikely(!dest)) {
1333 		dest = &tsk->peer;
1334 		if (!syn || dest->family != AF_TIPC)
1335 			return -EDESTADDRREQ;
1336 	}
1337 
1338 	if (unlikely(syn)) {
1339 		if (sk->sk_state == TIPC_LISTEN)
1340 			return -EPIPE;
1341 		if (sk->sk_state != TIPC_OPEN)
1342 			return -EISCONN;
1343 		if (tsk->published)
1344 			return -EOPNOTSUPP;
1345 		if (dest->addrtype == TIPC_ADDR_NAME) {
1346 			tsk->conn_type = dest->addr.name.name.type;
1347 			tsk->conn_instance = dest->addr.name.name.instance;
1348 		}
1349 		msg_set_syn(hdr, 1);
1350 	}
1351 
1352 	seq = &dest->addr.nameseq;
1353 	if (dest->addrtype == TIPC_ADDR_MCAST)
1354 		return tipc_sendmcast(sock, seq, m, dlen, timeout);
1355 
1356 	if (dest->addrtype == TIPC_ADDR_NAME) {
1357 		type = dest->addr.name.name.type;
1358 		inst = dest->addr.name.name.instance;
1359 		dnode = dest->addr.name.domain;
1360 		msg_set_type(hdr, TIPC_NAMED_MSG);
1361 		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1362 		msg_set_nametype(hdr, type);
1363 		msg_set_nameinst(hdr, inst);
1364 		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
1365 		dport = tipc_nametbl_translate(net, type, inst, &dnode);
1366 		msg_set_destnode(hdr, dnode);
1367 		msg_set_destport(hdr, dport);
1368 		if (unlikely(!dport && !dnode))
1369 			return -EHOSTUNREACH;
1370 	} else if (dest->addrtype == TIPC_ADDR_ID) {
1371 		dnode = dest->addr.id.node;
1372 		msg_set_type(hdr, TIPC_DIRECT_MSG);
1373 		msg_set_lookup_scope(hdr, 0);
1374 		msg_set_destnode(hdr, dnode);
1375 		msg_set_destport(hdr, dest->addr.id.ref);
1376 		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1377 	} else {
1378 		return -EINVAL;
1379 	}
1380 
1381 	/* Block or return if destination link is congested */
1382 	rc = tipc_wait_for_cond(sock, &timeout,
1383 				!tipc_dest_find(clinks, dnode, 0));
1384 	if (unlikely(rc))
1385 		return rc;
1386 
1387 	skb_queue_head_init(&pkts);
1388 	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
1389 	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1390 	if (unlikely(rc != dlen))
1391 		return rc;
1392 	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
1393 		return -ENOMEM;
1394 
1395 	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
1396 	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1397 	if (unlikely(rc == -ELINKCONG)) {
1398 		tipc_dest_push(clinks, dnode, 0);
1399 		tsk->cong_link_cnt++;
1400 		rc = 0;
1401 	}
1402 
1403 	if (unlikely(syn && !rc))
1404 		tipc_set_sk_state(sk, TIPC_CONNECTING);
1405 
1406 	return rc ? rc : dlen;
1407 }
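
/* User space counterpart of the TIPC_ADDR_NAME branch above, a sketch
 * (type 1000 and instance 42 are arbitrary example values; domain 0
 * requests cluster-wide lookup):
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 42 },
 *		.addr.name.domain = 0,
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */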
1408 
1409 /**
1410  * tipc_sendstream - send stream-oriented data
1411  * @sock: socket structure
1412  * @m: data to send
1413  * @dsz: total length of data to be transmitted
1414  *
1415  * Used for SOCK_STREAM data.
1416  *
1417  * Returns the number of bytes sent on success (or partial success),
1418  * or errno if no data sent
1419  */
1420 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1421 {
1422 	struct sock *sk = sock->sk;
1423 	int ret;
1424 
1425 	lock_sock(sk);
1426 	ret = __tipc_sendstream(sock, m, dsz);
1427 	release_sock(sk);
1428 
1429 	return ret;
1430 }
1431 
1432 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1433 {
1434 	struct sock *sk = sock->sk;
1435 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1436 	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1437 	struct tipc_sock *tsk = tipc_sk(sk);
1438 	struct tipc_msg *hdr = &tsk->phdr;
1439 	struct net *net = sock_net(sk);
1440 	struct sk_buff_head pkts;
1441 	u32 dnode = tsk_peer_node(tsk);
1442 	int send, sent = 0;
1443 	int rc = 0;
1444 
1445 	skb_queue_head_init(&pkts);
1446 
1447 	if (unlikely(dlen > INT_MAX))
1448 		return -EMSGSIZE;
1449 
1450 	/* Handle implicit connection setup */
1451 	if (unlikely(dest)) {
1452 		rc = __tipc_sendmsg(sock, m, dlen);
1453 		if (dlen && dlen == rc) {
1454 			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1455 			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1456 		}
1457 		return rc;
1458 	}
1459 
1460 	do {
1461 		rc = tipc_wait_for_cond(sock, &timeout,
1462 					(!tsk->cong_link_cnt &&
1463 					 !tsk_conn_cong(tsk) &&
1464 					 tipc_sk_connected(sk)));
1465 		if (unlikely(rc))
1466 			break;
1467 
1468 		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1469 		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
1470 		if (unlikely(rc != send))
1471 			break;
1472 
1473 		trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
1474 					 TIPC_DUMP_SK_SNDQ, " ");
1475 		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1476 		if (unlikely(rc == -ELINKCONG)) {
1477 			tsk->cong_link_cnt = 1;
1478 			rc = 0;
1479 		}
1480 		if (likely(!rc)) {
1481 			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
1482 			sent += send;
1483 		}
1484 	} while (sent < dlen && !rc);
1485 
1486 	return sent ? sent : rc;
1487 }
1488 
1489 /**
1490  * tipc_send_packet - send a connection-oriented message
1491  * @sock: socket structure
1492  * @m: message to send
1493  * @dsz: length of data to be transmitted
1494  *
1495  * Used for SOCK_SEQPACKET messages.
1496  *
1497  * Returns the number of bytes sent on success, or errno otherwise
1498  */
1499 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1500 {
1501 	if (dsz > TIPC_MAX_USER_MSG_SIZE)
1502 		return -EMSGSIZE;
1503 
1504 	return tipc_sendstream(sock, m, dsz);
1505 }
1506 
1507 /* tipc_sk_finish_conn - complete the setup of a connection
1508  */
1509 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1510 				u32 peer_node)
1511 {
1512 	struct sock *sk = &tsk->sk;
1513 	struct net *net = sock_net(sk);
1514 	struct tipc_msg *msg = &tsk->phdr;
1515 
1516 	msg_set_syn(msg, 0);
1517 	msg_set_destnode(msg, peer_node);
1518 	msg_set_destport(msg, peer_port);
1519 	msg_set_type(msg, TIPC_CONN_MSG);
1520 	msg_set_lookup_scope(msg, 0);
1521 	msg_set_hdr_sz(msg, SHORT_H_SIZE);
1522 
1523 	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1524 	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1525 	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1526 	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1527 	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1528 	__skb_queue_purge(&sk->sk_write_queue);
1529 	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1530 		return;
1531 
1532 	/* Fall back to message based flow control */
1533 	tsk->rcv_win = FLOWCTL_MSG_WIN;
1534 	tsk->snd_win = FLOWCTL_MSG_WIN;
1535 }
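
/* Note on the fallback above: with TIPC_BLOCK_FLOWCTL both ends account
 * in FLOWCTL_BLK_SZ blocks, and the peer keeps refreshing our send
 * window via the advertised window carried in CONN_ACK messages (see
 * tipc_sk_conn_proto_rcv() and tipc_sk_send_ack()). Legacy peers charge
 * one unit per message against the fixed FLOWCTL_MSG_WIN window.
 */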
1536 
1537 /**
1538  * tipc_sk_set_orig_addr - capture sender's address for received message
1539  * @m: descriptor for message info
1540  * @skb: received message buffer
1541  *
1542  * Note: Address is not captured if not requested by receiver.
1543  */
1544 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1545 {
1546 	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1547 	struct tipc_msg *hdr = buf_msg(skb);
1548 
1549 	if (!srcaddr)
1550 		return;
1551 
1552 	srcaddr->sock.family = AF_TIPC;
1553 	srcaddr->sock.addrtype = TIPC_ADDR_ID;
1554 	srcaddr->sock.scope = 0;
1555 	srcaddr->sock.addr.id.ref = msg_origport(hdr);
1556 	srcaddr->sock.addr.id.node = msg_orignode(hdr);
1557 	srcaddr->sock.addr.name.domain = 0;
1558 	m->msg_namelen = sizeof(struct sockaddr_tipc);
1559 
1560 	if (!msg_in_group(hdr))
1561 		return;
1562 
1563 	/* Group message users may also want to know sending member's id */
1564 	srcaddr->member.family = AF_TIPC;
1565 	srcaddr->member.addrtype = TIPC_ADDR_NAME;
1566 	srcaddr->member.scope = 0;
1567 	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1568 	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1569 	srcaddr->member.addr.name.domain = 0;
1570 	m->msg_namelen = sizeof(*srcaddr);
1571 }
1572 
1573 /**
1574  * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1575  * @m: descriptor for message info
1576  * @skb: received message buffer
1577  * @tsk: TIPC port associated with message
1578  *
1579  * Note: Ancillary data is not captured if not requested by receiver.
1580  *
1581  * Returns 0 if successful, otherwise errno
1582  */
1583 static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1584 				 struct tipc_sock *tsk)
1585 {
1586 	struct tipc_msg *msg;
1587 	u32 anc_data[3];
1588 	u32 err;
1589 	u32 dest_type;
1590 	int has_name;
1591 	int res;
1592 
1593 	if (likely(m->msg_controllen == 0))
1594 		return 0;
1595 	msg = buf_msg(skb);
1596 
1597 	/* Optionally capture errored message object(s) */
1598 	err = msg ? msg_errcode(msg) : 0;
1599 	if (unlikely(err)) {
1600 		anc_data[0] = err;
1601 		anc_data[1] = msg_data_sz(msg);
1602 		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1603 		if (res)
1604 			return res;
1605 		if (anc_data[1]) {
1606 			if (skb_linearize(skb))
1607 				return -ENOMEM;
1608 			msg = buf_msg(skb);
1609 			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1610 				       msg_data(msg));
1611 			if (res)
1612 				return res;
1613 		}
1614 	}
1615 
1616 	/* Optionally capture message destination object */
1617 	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1618 	switch (dest_type) {
1619 	case TIPC_NAMED_MSG:
1620 		has_name = 1;
1621 		anc_data[0] = msg_nametype(msg);
1622 		anc_data[1] = msg_namelower(msg);
1623 		anc_data[2] = msg_namelower(msg);
1624 		break;
1625 	case TIPC_MCAST_MSG:
1626 		has_name = 1;
1627 		anc_data[0] = msg_nametype(msg);
1628 		anc_data[1] = msg_namelower(msg);
1629 		anc_data[2] = msg_nameupper(msg);
1630 		break;
1631 	case TIPC_CONN_MSG:
1632 		has_name = (tsk->conn_type != 0);
1633 		anc_data[0] = tsk->conn_type;
1634 		anc_data[1] = tsk->conn_instance;
1635 		anc_data[2] = tsk->conn_instance;
1636 		break;
1637 	default:
1638 		has_name = 0;
1639 	}
1640 	if (has_name) {
1641 		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1642 		if (res)
1643 			return res;
1644 	}
1645 
1646 	return 0;
1647 }
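
/* User space counterpart, a sketch of reading the ancillary items built
 * above; TIPC_ERRINFO carries two u32s and TIPC_DESTNAME three, the
 * TIPC_RETDATA space shown is an arbitrary example size:
 *
 *	char cbuf[CMSG_SPACE(8) + CMSG_SPACE(12) + CMSG_SPACE(1024)];
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf,
 *			    .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c;
 *	u32 dname[3];
 *
 *	recvmsg(sd, &m, 0);
 *	for (c = CMSG_FIRSTHDR(&m); c; c = CMSG_NXTHDR(&m, c))
 *		if (c->cmsg_level == SOL_TIPC && c->cmsg_type == TIPC_DESTNAME)
 *			memcpy(dname, CMSG_DATA(c), sizeof(dname));
 */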
1648 
1649 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1650 {
1651 	struct sock *sk = &tsk->sk;
1652 	struct net *net = sock_net(sk);
1653 	struct sk_buff *skb = NULL;
1654 	struct tipc_msg *msg;
1655 	u32 peer_port = tsk_peer_port(tsk);
1656 	u32 dnode = tsk_peer_node(tsk);
1657 
1658 	if (!tipc_sk_connected(sk))
1659 		return;
1660 	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1661 			      dnode, tsk_own_node(tsk), peer_port,
1662 			      tsk->portid, TIPC_OK);
1663 	if (!skb)
1664 		return;
1665 	msg = buf_msg(skb);
1666 	msg_set_conn_ack(msg, tsk->rcv_unacked);
1667 	tsk->rcv_unacked = 0;
1668 
1669 	/* Adjust to and advertise the correct window limit */
1670 	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1671 		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1672 		msg_set_adv_win(msg, tsk->rcv_win);
1673 	}
1674 	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1675 }
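
/* Acks are deliberately not sent per message: tipc_recvmsg() only calls
 * this once rcv_unacked reaches rcv_win / TIPC_ACK_RATE, i.e. after
 * roughly a quarter of the advertised receive window has been consumed.
 */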
1676 
1677 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1678 {
1679 	struct sock *sk = sock->sk;
1680 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1681 	long timeo = *timeop;
1682 	int err = sock_error(sk);
1683 
1684 	if (err)
1685 		return err;
1686 
1687 	for (;;) {
1688 		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1689 			if (sk->sk_shutdown & RCV_SHUTDOWN) {
1690 				err = -ENOTCONN;
1691 				break;
1692 			}
1693 			add_wait_queue(sk_sleep(sk), &wait);
1694 			release_sock(sk);
1695 			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1696 			sched_annotate_sleep();
1697 			lock_sock(sk);
1698 			remove_wait_queue(sk_sleep(sk), &wait);
1699 		}
1700 		err = 0;
1701 		if (!skb_queue_empty(&sk->sk_receive_queue))
1702 			break;
1703 		err = -EAGAIN;
1704 		if (!timeo)
1705 			break;
1706 		err = sock_intr_errno(timeo);
1707 		if (signal_pending(current))
1708 			break;
1709 
1710 		err = sock_error(sk);
1711 		if (err)
1712 			break;
1713 	}
1714 	*timeop = timeo;
1715 	return err;
1716 }
1717 
1718 /**
1719  * tipc_recvmsg - receive packet-oriented message
1720  * @m: descriptor for message info
1721  * @buflen: length of user buffer area
1722  * @flags: receive flags
1723  *
1724  * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1725  * If the complete message doesn't fit in user area, truncate it.
1726  *
1727  * Returns size of returned message data, errno otherwise
1728  */
1729 static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1730 			size_t buflen,	int flags)
1731 {
1732 	struct sock *sk = sock->sk;
1733 	bool connected = !tipc_sk_type_connectionless(sk);
1734 	struct tipc_sock *tsk = tipc_sk(sk);
1735 	int rc, err, hlen, dlen, copy;
1736 	struct sk_buff_head xmitq;
1737 	struct tipc_msg *hdr;
1738 	struct sk_buff *skb;
1739 	bool grp_evt;
1740 	long timeout;
1741 
1742 	/* Catch invalid receive requests */
1743 	if (unlikely(!buflen))
1744 		return -EINVAL;
1745 
1746 	lock_sock(sk);
1747 	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1748 		rc = -ENOTCONN;
1749 		goto exit;
1750 	}
1751 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1752 
1753 	/* Step rcv queue to first msg with data or error; wait if necessary */
1754 	do {
1755 		rc = tipc_wait_for_rcvmsg(sock, &timeout);
1756 		if (unlikely(rc))
1757 			goto exit;
1758 		skb = skb_peek(&sk->sk_receive_queue);
1759 		hdr = buf_msg(skb);
1760 		dlen = msg_data_sz(hdr);
1761 		hlen = msg_hdr_sz(hdr);
1762 		err = msg_errcode(hdr);
1763 		grp_evt = msg_is_grp_evt(hdr);
1764 		if (likely(dlen || err))
1765 			break;
1766 		tsk_advance_rx_queue(sk);
1767 	} while (1);
1768 
1769 	/* Collect msg meta data, including error code and rejected data */
1770 	tipc_sk_set_orig_addr(m, skb);
1771 	rc = tipc_sk_anc_data_recv(m, skb, tsk);
1772 	if (unlikely(rc))
1773 		goto exit;
1774 	hdr = buf_msg(skb);
1775 
1776 	/* Capture data if non-error msg, otherwise just set return value */
1777 	if (likely(!err)) {
1778 		copy = min_t(int, dlen, buflen);
1779 		if (unlikely(copy != dlen))
1780 			m->msg_flags |= MSG_TRUNC;
1781 		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
1782 	} else {
1783 		copy = 0;
1784 		rc = 0;
1785 		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
1786 			rc = -ECONNRESET;
1787 	}
1788 	if (unlikely(rc))
1789 		goto exit;
1790 
1791 	/* Mark message as group event if applicable */
1792 	if (unlikely(grp_evt)) {
1793 		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1794 			m->msg_flags |= MSG_EOR;
1795 		m->msg_flags |= MSG_OOB;
1796 		copy = 0;
1797 	}
1798 
1799 	/* Capture of data or error code/rejected data was successful */
1800 	if (unlikely(flags & MSG_PEEK))
1801 		goto exit;
1802 
1803 	/* Send group flow control advertisement when applicable */
1804 	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1805 		skb_queue_head_init(&xmitq);
1806 		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1807 					  msg_orignode(hdr), msg_origport(hdr),
1808 					  &xmitq);
1809 		tipc_node_distr_xmit(sock_net(sk), &xmitq);
1810 	}
1811 
1812 	tsk_advance_rx_queue(sk);
1813 
1814 	if (likely(!connected))
1815 		goto exit;
1816 
1817 	/* Send connection flow control advertisement when applicable */
1818 	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1819 	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1820 		tipc_sk_send_ack(tsk);
1821 exit:
1822 	release_sock(sk);
1823 	return rc ? rc : copy;
1824 }
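/*
 * Illustrative userspace sketch (not part of this file's build):
 * receiving one datagram on an already created and bound SOCK_RDM
 * socket 'sd' (hypothetical), and detecting the truncation signalled
 * via MSG_TRUNC above.
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/tipc.h>

static ssize_t recv_one(int sd)
{
	char buf[1024];
	struct sockaddr_tipc src;
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr m = {
		.msg_name = &src, .msg_namelen = sizeof(src),
		.msg_iov = &iov, .msg_iovlen = 1,
	};
	ssize_t n = recvmsg(sd, &m, 0);

	if (n >= 0 && (m.msg_flags & MSG_TRUNC))
		fprintf(stderr, "datagram truncated to %zd bytes\n", n);
	return n;
}
#endif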
1825 
1826 /**
1827  * tipc_recvstream - receive stream-oriented data
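 * @sock: network socket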
1828  * @m: descriptor for message info
1829  * @buflen: total size of user buffer area
1830  * @flags: receive flags
1831  *
1832  * Used for SOCK_STREAM messages only.  If not enough data is available,
1833  * the call optionally waits for more; data is never truncated.
1834  *
1835  * Returns size of returned message data, errno otherwise
1836  */
1837 static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1838 			   size_t buflen, int flags)
1839 {
1840 	struct sock *sk = sock->sk;
1841 	struct tipc_sock *tsk = tipc_sk(sk);
1842 	struct sk_buff *skb;
1843 	struct tipc_msg *hdr;
1844 	struct tipc_skb_cb *skb_cb;
1845 	bool peek = flags & MSG_PEEK;
1846 	int offset, required, copy, copied = 0;
1847 	int hlen, dlen, err, rc;
1848 	long timeout;
1849 
1850 	/* Catch invalid receive attempts */
1851 	if (unlikely(!buflen))
1852 		return -EINVAL;
1853 
1854 	lock_sock(sk);
1855 
1856 	if (unlikely(sk->sk_state == TIPC_OPEN)) {
1857 		rc = -ENOTCONN;
1858 		goto exit;
1859 	}
1860 	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
1861 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1862 
1863 	do {
1864 		/* Look at first msg in receive queue; wait if necessary */
1865 		rc = tipc_wait_for_rcvmsg(sock, &timeout);
1866 		if (unlikely(rc))
1867 			break;
1868 		skb = skb_peek(&sk->sk_receive_queue);
1869 		skb_cb = TIPC_SKB_CB(skb);
1870 		hdr = buf_msg(skb);
1871 		dlen = msg_data_sz(hdr);
1872 		hlen = msg_hdr_sz(hdr);
1873 		err = msg_errcode(hdr);
1874 
1875 		/* Discard any empty non-errored (SYN-) message */
1876 		if (unlikely(!dlen && !err)) {
1877 			tsk_advance_rx_queue(sk);
1878 			continue;
1879 		}
1880 
1881 		/* Collect msg meta data, incl. error code and rejected data */
1882 		if (!copied) {
1883 			tipc_sk_set_orig_addr(m, skb);
1884 			rc = tipc_sk_anc_data_recv(m, skb, tsk);
1885 			if (rc)
1886 				break;
1887 			hdr = buf_msg(skb);
1888 		}
1889 
1890 		/* Copy data if msg ok, otherwise return error/partial data */
1891 		if (likely(!err)) {
1892 			offset = skb_cb->bytes_read;
1893 			copy = min_t(int, dlen - offset, buflen - copied);
1894 			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1895 			if (unlikely(rc))
1896 				break;
1897 			copied += copy;
1898 			offset += copy;
1899 			if (unlikely(offset < dlen)) {
1900 				if (!peek)
1901 					skb_cb->bytes_read = offset;
1902 				break;
1903 			}
1904 		} else {
1905 			rc = 0;
1906 			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
1907 				rc = -ECONNRESET;
1908 			if (copied || rc)
1909 				break;
1910 		}
1911 
1912 		if (unlikely(peek))
1913 			break;
1914 
1915 		tsk_advance_rx_queue(sk);
1916 
1917 		/* Send connection flow control advertisement when applicable */
1918 		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1919 		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
1920 			tipc_sk_send_ack(tsk);
1921 
1922 		/* Exit if all requested data or FIN/error received */
1923 		if (copied == buflen || err)
1924 			break;
1925 
1926 	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
1927 exit:
1928 	release_sock(sk);
1929 	return copied ? copied : rc;
1930 }
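/*
 * Illustrative userspace sketch (not part of this file's build): on a
 * connected SOCK_STREAM socket, MSG_WAITALL raises the 'required'
 * threshold computed above to the full buffer length, so the call only
 * returns once the buffer is filled or an error/FIN arrives. 'sd' is a
 * hypothetical connected descriptor.
 */
#if 0
#include <sys/socket.h>

static ssize_t read_exact(int sd, void *buf, size_t len)
{
	/* blocks until all 'len' bytes have arrived; never truncates */
	return recv(sd, buf, len, MSG_WAITALL);
}
#endif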
1931 
1932 /**
1933  * tipc_write_space - wake up thread if port congestion is released
1934  * @sk: socket
1935  */
1936 static void tipc_write_space(struct sock *sk)
1937 {
1938 	struct socket_wq *wq;
1939 
1940 	rcu_read_lock();
1941 	wq = rcu_dereference(sk->sk_wq);
1942 	if (skwq_has_sleeper(wq))
1943 		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
1944 						EPOLLWRNORM | EPOLLWRBAND);
1945 	rcu_read_unlock();
1946 }
1947 
1948 /**
1949  * tipc_data_ready - wake up threads to indicate messages have been received
1950  * @sk: socket
1952  */
1953 static void tipc_data_ready(struct sock *sk)
1954 {
1955 	struct socket_wq *wq;
1956 
1957 	rcu_read_lock();
1958 	wq = rcu_dereference(sk->sk_wq);
1959 	if (skwq_has_sleeper(wq))
1960 		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
1961 						EPOLLRDNORM | EPOLLRDBAND);
1962 	rcu_read_unlock();
1963 }
1964 
1965 static void tipc_sock_destruct(struct sock *sk)
1966 {
1967 	__skb_queue_purge(&sk->sk_receive_queue);
1968 }
1969 
1970 static void tipc_sk_proto_rcv(struct sock *sk,
1971 			      struct sk_buff_head *inputq,
1972 			      struct sk_buff_head *xmitq)
1973 {
1974 	struct sk_buff *skb = __skb_dequeue(inputq);
1975 	struct tipc_sock *tsk = tipc_sk(sk);
1976 	struct tipc_msg *hdr = buf_msg(skb);
1977 	struct tipc_group *grp = tsk->group;
1978 	bool wakeup = false;
1979 
1980 	switch (msg_user(hdr)) {
1981 	case CONN_MANAGER:
1982 		tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
1983 		return;
1984 	case SOCK_WAKEUP:
1985 		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
1986 		tsk->cong_link_cnt--;
1987 		wakeup = true;
1988 		break;
1989 	case GROUP_PROTOCOL:
1990 		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
1991 		break;
1992 	case TOP_SRV:
1993 		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
1994 				      hdr, inputq, xmitq);
1995 		break;
1996 	default:
1997 		break;
1998 	}
1999 
2000 	if (wakeup)
2001 		sk->sk_write_space(sk);
2002 
2003 	kfree_skb(skb);
2004 }
2005 
2006 /**
2007  * tipc_sk_filter_connect - check incoming message for a connection-based socket
2008  * @tsk: TIPC socket
2009  * @skb: pointer to message buffer.
2010  * Returns true if message should be added to receive queue, false otherwise
2011  */
2012 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
2013 {
2014 	struct sock *sk = &tsk->sk;
2015 	struct net *net = sock_net(sk);
2016 	struct tipc_msg *hdr = buf_msg(skb);
2017 	bool con_msg = msg_connected(hdr);
2018 	u32 pport = tsk_peer_port(tsk);
2019 	u32 pnode = tsk_peer_node(tsk);
2020 	u32 oport = msg_origport(hdr);
2021 	u32 onode = msg_orignode(hdr);
2022 	int err = msg_errcode(hdr);
2023 	unsigned long delay;
2024 
2025 	if (unlikely(msg_mcast(hdr)))
2026 		return false;
2027 
2028 	switch (sk->sk_state) {
2029 	case TIPC_CONNECTING:
2030 		/* Setup ACK */
2031 		if (likely(con_msg)) {
2032 			if (err)
2033 				break;
2034 			tipc_sk_finish_conn(tsk, oport, onode);
2035 			msg_set_importance(&tsk->phdr, msg_importance(hdr));
2036 			/* ACK+ message with data is added to receive queue */
2037 			if (msg_data_sz(hdr))
2038 				return true;
2039 			/* Empty ACK-: wake up sleeping connect() and drop */
2040 			sk->sk_data_ready(sk);
2041 			msg_set_dest_droppable(hdr, 1);
2042 			return false;
2043 		}
2044 		/* Ignore connectionless message if not from listening socket */
2045 		if (oport != pport || onode != pnode)
2046 			return false;
2047 
2048 		/* Rejected SYN */
2049 		if (err != TIPC_ERR_OVERLOAD)
2050 			break;
2051 
2052 		/* Prepare for new setup attempt if we have a SYN clone */
2053 		if (skb_queue_empty(&sk->sk_write_queue))
2054 			break;
2055 		get_random_bytes(&delay, 2);
2056 		delay %= (tsk->conn_timeout / 4);
2057 		delay = msecs_to_jiffies(delay + 100);
2058 		sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2059 		return false;
2060 	case TIPC_OPEN:
2061 	case TIPC_DISCONNECTING:
2062 		return false;
2063 	case TIPC_LISTEN:
2064 		/* Accept only SYN message */
2065 		if (!msg_is_syn(hdr) &&
2066 		    tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2067 			return false;
2068 		if (!con_msg && !err)
2069 			return true;
2070 		return false;
2071 	case TIPC_ESTABLISHED:
2072 		/* Accept only connection-based messages sent by peer */
2073 		if (likely(con_msg && !err && pport == oport && pnode == onode))
2074 			return true;
2075 		if (!tsk_peer_msg(tsk, hdr))
2076 			return false;
2077 		if (!err)
2078 			return true;
2079 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2080 		tipc_node_remove_conn(net, pnode, tsk->portid);
2081 		sk->sk_state_change(sk);
2082 		return true;
2083 	default:
2084 		pr_err("Unknown sk_state %u\n", sk->sk_state);
2085 	}
2086 	/* Abort connection setup attempt */
2087 	tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2088 	sk->sk_err = ECONNREFUSED;
2089 	sk->sk_state_change(sk);
2090 	return true;
2091 }
2092 
2093 /**
2094  * rcvbuf_limit - get proper overload limit of socket receive queue
2095  * @sk: socket
2096  * @skb: message
2097  *
2098  * For connection-oriented messages, irrespective of importance,
2099  * the default queue limit is 2 MB.
2100  *
2101  * For connectionless messages, queue limits are based on message
2102  * importance as follows:
2103  *
2104  * TIPC_LOW_IMPORTANCE       (2 MB)
2105  * TIPC_MEDIUM_IMPORTANCE    (4 MB)
2106  * TIPC_HIGH_IMPORTANCE      (8 MB)
2107  * TIPC_CRITICAL_IMPORTANCE  (16 MB)
2108  *
2109  * Returns overload limit according to corresponding message importance
2110  */
2111 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2112 {
2113 	struct tipc_sock *tsk = tipc_sk(sk);
2114 	struct tipc_msg *hdr = buf_msg(skb);
2115 
2116 	if (unlikely(msg_in_group(hdr)))
2117 		return sk->sk_rcvbuf;
2118 
2119 	if (unlikely(!msg_connected(hdr)))
2120 		return sk->sk_rcvbuf << msg_importance(hdr);
2121 
2122 	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2123 		return sk->sk_rcvbuf;
2124 
2125 	return FLOWCTL_MSG_LIM;
2126 }
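/*
 * A minimal sketch of the connectionless branch above, assuming the
 * documented 2 MB base limit in sk_rcvbuf: shifting by the message
 * importance reproduces the 2/4/8/16 MB table from the comment.
 */
#if 0
static unsigned int example_rcvbuf_limit(unsigned int base, int imp)
{
	/* imp 0..3 (low..critical) => 2 MB, 4 MB, 8 MB, 16 MB */
	return base << imp;
}
#endif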
2127 
2128 /**
2129  * tipc_sk_filter_rcv - validate incoming message
2130  * @sk: socket
2131  * @skb: pointer to message.
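 * @xmitq: output queue for messages to be sent, e.g. rejected ones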
2132  *
2133  * Enqueues message on receive queue if acceptable; optionally handles
2134  * disconnect indication for a connected socket.
2135  *
2136  * Called with socket lock already taken
2137  *
2138  */
2139 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2140 			       struct sk_buff_head *xmitq)
2141 {
2142 	bool sk_conn = !tipc_sk_type_connectionless(sk);
2143 	struct tipc_sock *tsk = tipc_sk(sk);
2144 	struct tipc_group *grp = tsk->group;
2145 	struct tipc_msg *hdr = buf_msg(skb);
2146 	struct net *net = sock_net(sk);
2147 	struct sk_buff_head inputq;
2148 	int limit, err = TIPC_OK;
2149 
2150 	trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
2151 	TIPC_SKB_CB(skb)->bytes_read = 0;
2152 	__skb_queue_head_init(&inputq);
2153 	__skb_queue_tail(&inputq, skb);
2154 
2155 	if (unlikely(!msg_isdata(hdr)))
2156 		tipc_sk_proto_rcv(sk, &inputq, xmitq);
2157 
2158 	if (unlikely(grp))
2159 		tipc_group_filter_msg(grp, &inputq, xmitq);
2160 
2161 	/* Validate and add to receive buffer if there is space */
2162 	while ((skb = __skb_dequeue(&inputq))) {
2163 		hdr = buf_msg(skb);
2164 		limit = rcvbuf_limit(sk, skb);
2165 		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
2166 		    (!sk_conn && msg_connected(hdr)) ||
2167 		    (!grp && msg_in_group(hdr)))
2168 			err = TIPC_ERR_NO_PORT;
2169 		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2170 			trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
2171 					   "err_overload2!");
2172 			atomic_inc(&sk->sk_drops);
2173 			err = TIPC_ERR_OVERLOAD;
2174 		}
2175 
2176 		if (unlikely(err)) {
2177 			if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
2178 				trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
2179 						      "@filter_rcv!");
2180 				__skb_queue_tail(xmitq, skb);
2181 			}
2182 			err = TIPC_OK;
2183 			continue;
2184 		}
2185 		__skb_queue_tail(&sk->sk_receive_queue, skb);
2186 		skb_set_owner_r(skb, sk);
2187 		trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
2188 					 "rcvq >90% allocated!");
2189 		sk->sk_data_ready(sk);
2190 	}
2191 }
2192 
2193 /**
2194  * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2195  * @sk: socket
2196  * @skb: message
2197  *
2198  * Caller must hold socket lock
2199  */
2200 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2201 {
2202 	unsigned int before = sk_rmem_alloc_get(sk);
2203 	struct sk_buff_head xmitq;
2204 	unsigned int added;
2205 
2206 	__skb_queue_head_init(&xmitq);
2207 
2208 	tipc_sk_filter_rcv(sk, skb, &xmitq);
2209 	added = sk_rmem_alloc_get(sk) - before;
2210 	atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2211 
2212 	/* Send pending response/rejected messages, if any */
2213 	tipc_node_distr_xmit(sock_net(sk), &xmitq);
2214 	return 0;
2215 }
2216 
2217 /**
2218  * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2219  *                   inputq and try adding them to socket or backlog queue
2220  * @inputq: list of incoming buffers with potentially different destinations
2221  * @sk: socket where the buffers should be enqueued
2222  * @dport: port number for the socket
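 * @xmitq: output queue for response/rejected messages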
2223  *
2224  * Caller must hold socket lock
2225  */
2226 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2227 			    u32 dport, struct sk_buff_head *xmitq)
2228 {
2229 	unsigned long time_limit = jiffies + 2;
2230 	struct sk_buff *skb;
2231 	unsigned int lim;
2232 	atomic_t *dcnt;
2233 	u32 onode;
2234 
2235 	while (skb_queue_len(inputq)) {
2236 		if (unlikely(time_after_eq(jiffies, time_limit)))
2237 			return;
2238 
2239 		skb = tipc_skb_dequeue(inputq, dport);
2240 		if (unlikely(!skb))
2241 			return;
2242 
2243 		/* Add message directly to receive queue if possible */
2244 		if (!sock_owned_by_user(sk)) {
2245 			tipc_sk_filter_rcv(sk, skb, xmitq);
2246 			continue;
2247 		}
2248 
2249 		/* Try backlog, compensating for double-counted bytes */
2250 		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2251 		if (!sk->sk_backlog.len)
2252 			atomic_set(dcnt, 0);
2253 		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2254 		if (likely(!sk_add_backlog(sk, skb, lim))) {
2255 			trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
2256 						 "bklg & rcvq >90% allocated!");
2257 			continue;
2258 		}
2259 
2260 		trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
2261 		/* Overload => reject message back to sender */
2262 		onode = tipc_own_addr(sock_net(sk));
2263 		atomic_inc(&sk->sk_drops);
2264 		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
2265 			trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
2266 					      "@sk_enqueue!");
2267 			__skb_queue_tail(xmitq, skb);
2268 		}
2269 		break;
2270 	}
2271 }
2272 
2273 /**
2274  * tipc_sk_rcv - handle a chain of incoming buffers
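 * @net: network namespace the buffers belong to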
2275  * @inputq: buffer list containing the buffers
2276  * Consumes all buffers in list until inputq is empty
2277  * Note: may be called in multiple threads referring to the same queue
2278  */
2279 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2280 {
2281 	struct sk_buff_head xmitq;
2282 	u32 dnode, dport = 0;
2283 	int err;
2284 	struct tipc_sock *tsk;
2285 	struct sock *sk;
2286 	struct sk_buff *skb;
2287 
2288 	__skb_queue_head_init(&xmitq);
2289 	while (skb_queue_len(inputq)) {
2290 		dport = tipc_skb_peek_port(inputq, dport);
2291 		tsk = tipc_sk_lookup(net, dport);
2292 
2293 		if (likely(tsk)) {
2294 			sk = &tsk->sk;
2295 			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2296 				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2297 				spin_unlock_bh(&sk->sk_lock.slock);
2298 			}
2299 			/* Send pending response/rejected messages, if any */
2300 			tipc_node_distr_xmit(sock_net(sk), &xmitq);
2301 			sock_put(sk);
2302 			continue;
2303 		}
2304 		/* No destination socket => dequeue skb if still there */
2305 		skb = tipc_skb_dequeue(inputq, dport);
2306 		if (!skb)
2307 			return;
2308 
2309 		/* Try secondary lookup if unresolved named message */
2310 		err = TIPC_ERR_NO_PORT;
2311 		if (tipc_msg_lookup_dest(net, skb, &err))
2312 			goto xmit;
2313 
2314 		/* Prepare for message rejection */
2315 		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2316 			continue;
2317 
2318 		trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
2319 xmit:
2320 		dnode = msg_destnode(buf_msg(skb));
2321 		tipc_node_xmit_skb(net, skb, dnode, dport);
2322 	}
2323 }
2324 
2325 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2326 {
2327 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
2328 	struct sock *sk = sock->sk;
2329 	int done;
2330 
2331 	do {
2332 		int err = sock_error(sk);
2333 		if (err)
2334 			return err;
2335 		if (!*timeo_p)
2336 			return -ETIMEDOUT;
2337 		if (signal_pending(current))
2338 			return sock_intr_errno(*timeo_p);
2339 
2340 		add_wait_queue(sk_sleep(sk), &wait);
2341 		done = sk_wait_event(sk, timeo_p,
2342 				     sk->sk_state != TIPC_CONNECTING, &wait);
2343 		remove_wait_queue(sk_sleep(sk), &wait);
2344 	} while (!done);
2345 	return 0;
2346 }
2347 
2348 /**
2349  * tipc_connect - establish a connection to another TIPC port
2350  * @sock: socket structure
2351  * @dest: socket address for destination port
2352  * @destlen: size of socket address data structure
2353  * @flags: file-related flags associated with socket
2354  *
2355  * Returns 0 on success, errno otherwise
2356  */
2357 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2358 			int destlen, int flags)
2359 {
2360 	struct sock *sk = sock->sk;
2361 	struct tipc_sock *tsk = tipc_sk(sk);
2362 	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2363 	struct msghdr m = {NULL,};
2364 	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2365 	int previous;
2366 	int res = 0;
2367 
2368 	if (destlen != sizeof(struct sockaddr_tipc))
2369 		return -EINVAL;
2370 
2371 	lock_sock(sk);
2372 
2373 	if (tsk->group) {
2374 		res = -EINVAL;
2375 		goto exit;
2376 	}
2377 
2378 	if (dst->family == AF_UNSPEC) {
2379 		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2380 		if (!tipc_sk_type_connectionless(sk))
2381 			res = -EINVAL;
2382 		goto exit;
2383 	} else if (dst->family != AF_TIPC) {
2384 		res = -EINVAL;
2385 	}
2386 	if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
2387 		res = -EINVAL;
2388 	if (res)
2389 		goto exit;
2390 
2391 	/* DGRAM/RDM connect(), just save the destaddr */
2392 	if (tipc_sk_type_connectionless(sk)) {
2393 		memcpy(&tsk->peer, dest, destlen);
2394 		goto exit;
2395 	}
2396 
2397 	previous = sk->sk_state;
2398 
2399 	switch (sk->sk_state) {
2400 	case TIPC_OPEN:
2401 		/* Send a 'SYN-' to destination */
2402 		m.msg_name = dest;
2403 		m.msg_namelen = destlen;
2404 
2405 		/* For a non-blocking connect(), set MSG_DONTWAIT so that
2406 		 * __tipc_sendmsg() never blocks.
2407 		 */
2408 		if (!timeout)
2409 			m.msg_flags = MSG_DONTWAIT;
2410 
2411 		res = __tipc_sendmsg(sock, &m, 0);
2412 		if ((res < 0) && (res != -EWOULDBLOCK))
2413 			goto exit;
2414 
2415 		/* Just entered TIPC_CONNECTING state; the only
2416 		 * difference is that the return value in the non-blocking
2417 		 * case is EINPROGRESS rather than EALREADY.
2418 		 */
2419 		res = -EINPROGRESS;
2420 		/* fall thru' */
2421 	case TIPC_CONNECTING:
2422 		if (!timeout) {
2423 			if (previous == TIPC_CONNECTING)
2424 				res = -EALREADY;
2425 			goto exit;
2426 		}
2427 		timeout = msecs_to_jiffies(timeout);
2428 		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2429 		res = tipc_wait_for_connect(sock, &timeout);
2430 		break;
2431 	case TIPC_ESTABLISHED:
2432 		res = -EISCONN;
2433 		break;
2434 	default:
2435 		res = -EINVAL;
2436 	}
2437 
2438 exit:
2439 	release_sock(sk);
2440 	return res;
2441 }
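/*
 * Illustrative userspace sketch (not part of this file's build): a
 * blocking connect() to a service name, the case validated above. The
 * service type 18888 and instance 17 are hypothetical; a zero domain
 * means the name is looked up in the whole cluster.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int connect_by_name(int sd)
{
	struct sockaddr_tipc dst;

	memset(&dst, 0, sizeof(dst));
	dst.family = AF_TIPC;
	dst.addrtype = TIPC_ADDR_NAME;
	dst.addr.name.name.type = 18888;
	dst.addr.name.name.instance = 17;
	dst.addr.name.domain = 0;

	return connect(sd, (struct sockaddr *)&dst, sizeof(dst));
}
#endif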
2442 
2443 /**
2444  * tipc_listen - allow socket to listen for incoming connections
2445  * @sock: socket structure
2446  * @len: (unused)
2447  *
2448  * Returns 0 on success, errno otherwise
2449  */
2450 static int tipc_listen(struct socket *sock, int len)
2451 {
2452 	struct sock *sk = sock->sk;
2453 	int res;
2454 
2455 	lock_sock(sk);
2456 	res = tipc_set_sk_state(sk, TIPC_LISTEN);
2457 	release_sock(sk);
2458 
2459 	return res;
2460 }
2461 
2462 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2463 {
2464 	struct sock *sk = sock->sk;
2465 	DEFINE_WAIT(wait);
2466 	int err;
2467 
2468 	/* True wake-one mechanism for incoming connections: only
2469 	 * one process gets woken up, not the 'whole herd'.
2470 	 * Since we do not 'race & poll' for established sockets
2471 	 * anymore, the common case will execute the loop only once.
2472 	 */
2473 	for (;;) {
2474 		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2475 					  TASK_INTERRUPTIBLE);
2476 		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2477 			release_sock(sk);
2478 			timeo = schedule_timeout(timeo);
2479 			lock_sock(sk);
2480 		}
2481 		err = 0;
2482 		if (!skb_queue_empty(&sk->sk_receive_queue))
2483 			break;
2484 		err = -EAGAIN;
2485 		if (!timeo)
2486 			break;
2487 		err = sock_intr_errno(timeo);
2488 		if (signal_pending(current))
2489 			break;
2490 	}
2491 	finish_wait(sk_sleep(sk), &wait);
2492 	return err;
2493 }
2494 
2495 /**
2496  * tipc_accept - wait for connection request
2497  * @sock: listening socket
2498  * @new_sock: new socket that is to be connected
2499  * @flags: file-related flags associated with socket
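 * @kern: caused by kernel or by userspace?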
2500  *
2501  * Returns 0 on success, errno otherwise
2502  */
2503 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2504 		       bool kern)
2505 {
2506 	struct sock *new_sk, *sk = sock->sk;
2507 	struct sk_buff *buf;
2508 	struct tipc_sock *new_tsock;
2509 	struct tipc_msg *msg;
2510 	long timeo;
2511 	int res;
2512 
2513 	lock_sock(sk);
2514 
2515 	if (sk->sk_state != TIPC_LISTEN) {
2516 		res = -EINVAL;
2517 		goto exit;
2518 	}
2519 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2520 	res = tipc_wait_for_accept(sock, timeo);
2521 	if (res)
2522 		goto exit;
2523 
2524 	buf = skb_peek(&sk->sk_receive_queue);
2525 
2526 	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2527 	if (res)
2528 		goto exit;
2529 	security_sk_clone(sock->sk, new_sock->sk);
2530 
2531 	new_sk = new_sock->sk;
2532 	new_tsock = tipc_sk(new_sk);
2533 	msg = buf_msg(buf);
2534 
2535 	/* we lock on new_sk; but lockdep sees the lock on sk */
2536 	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2537 
2538 	/*
2539 	 * Reject any stray messages received by new socket
2540 	 * before the socket lock was taken (very, very unlikely)
2541 	 */
2542 	tsk_rej_rx_queue(new_sk);
2543 
2544 	/* Connect new socket to its peer */
2545 	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2546 
2547 	tsk_set_importance(new_tsock, msg_importance(msg));
2548 	if (msg_named(msg)) {
2549 		new_tsock->conn_type = msg_nametype(msg);
2550 		new_tsock->conn_instance = msg_nameinst(msg);
2551 	}
2552 
2553 	/*
2554 	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
2555 	 * Respond to 'SYN+' by queuing it on new socket.
2556 	 */
2557 	if (!msg_data_sz(msg)) {
2558 		struct msghdr m = {NULL,};
2559 
2560 		tsk_advance_rx_queue(sk);
2561 		__tipc_sendstream(new_sock, &m, 0);
2562 	} else {
2563 		__skb_dequeue(&sk->sk_receive_queue);
2564 		__skb_queue_head(&new_sk->sk_receive_queue, buf);
2565 		skb_set_owner_r(buf, new_sk);
2566 	}
2567 	release_sock(new_sk);
2568 exit:
2569 	release_sock(sk);
2570 	return res;
2571 }
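/*
 * Illustrative userspace sketch (not part of this file's build): the
 * server-side sequence that ends up in the accept logic above. The
 * service type 18888 and instance 17 are hypothetical.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int serve_one(void)
{
	struct sockaddr_tipc addr;
	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);

	if (sd < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.family = AF_TIPC;
	addr.addrtype = TIPC_ADDR_NAMESEQ;
	addr.scope = TIPC_CLUSTER_SCOPE;
	addr.addr.nameseq.type = 18888;
	addr.addr.nameseq.lower = 17;
	addr.addr.nameseq.upper = 17;

	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) ||
	    listen(sd, 0))
		return -1;
	return accept(sd, NULL, NULL);	/* new connected socket */
}
#endif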
2572 
2573 /**
2574  * tipc_shutdown - shutdown socket connection
2575  * @sock: socket structure
2576  * @how: direction to close (must be SHUT_RDWR)
2577  *
2578  * Terminates connection (if necessary), then purges socket's receive queue.
2579  *
2580  * Returns 0 on success, errno otherwise
2581  */
2582 static int tipc_shutdown(struct socket *sock, int how)
2583 {
2584 	struct sock *sk = sock->sk;
2585 	int res;
2586 
2587 	if (how != SHUT_RDWR)
2588 		return -EINVAL;
2589 
2590 	lock_sock(sk);
2591 
2592 	trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2593 	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2594 	sk->sk_shutdown = SEND_SHUTDOWN;
2595 
2596 	if (sk->sk_state == TIPC_DISCONNECTING) {
2597 		/* Discard any unreceived messages */
2598 		__skb_queue_purge(&sk->sk_receive_queue);
2599 
2600 		/* Wake up anyone sleeping in poll */
2601 		sk->sk_state_change(sk);
2602 		res = 0;
2603 	} else {
2604 		res = -ENOTCONN;
2605 	}
2606 
2607 	release_sock(sk);
2608 	return res;
2609 }
2610 
2611 static void tipc_sk_check_probing_state(struct sock *sk,
2612 					struct sk_buff_head *list)
2613 {
2614 	struct tipc_sock *tsk = tipc_sk(sk);
2615 	u32 pnode = tsk_peer_node(tsk);
2616 	u32 pport = tsk_peer_port(tsk);
2617 	u32 self = tsk_own_node(tsk);
2618 	u32 oport = tsk->portid;
2619 	struct sk_buff *skb;
2620 
2621 	if (tsk->probe_unacked) {
2622 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2623 		sk->sk_err = ECONNABORTED;
2624 		tipc_node_remove_conn(sock_net(sk), pnode, pport);
2625 		sk->sk_state_change(sk);
2626 		return;
2627 	}
2628 	/* Prepare new probe */
2629 	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2630 			      pnode, self, pport, oport, TIPC_OK);
2631 	if (skb)
2632 		__skb_queue_tail(list, skb);
2633 	tsk->probe_unacked = true;
2634 	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2635 }
2636 
2637 static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2638 {
2639 	struct tipc_sock *tsk = tipc_sk(sk);
2640 
2641 	/* Try again later if dest link is congested */
2642 	if (tsk->cong_link_cnt) {
2643 		sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
2644 		return;
2645 	}
2646 	/* Prepare SYN for retransmit */
2647 	tipc_msg_skb_clone(&sk->sk_write_queue, list);
2648 }
2649 
2650 static void tipc_sk_timeout(struct timer_list *t)
2651 {
2652 	struct sock *sk = from_timer(sk, t, sk_timer);
2653 	struct tipc_sock *tsk = tipc_sk(sk);
2654 	u32 pnode = tsk_peer_node(tsk);
2655 	struct sk_buff_head list;
2656 	int rc = 0;
2657 
2658 	skb_queue_head_init(&list);
2659 	bh_lock_sock(sk);
2660 
2661 	/* Try again later if socket is busy */
2662 	if (sock_owned_by_user(sk)) {
2663 		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2664 		bh_unlock_sock(sk);
2665 		return;
2666 	}
2667 
2668 	if (sk->sk_state == TIPC_ESTABLISHED)
2669 		tipc_sk_check_probing_state(sk, &list);
2670 	else if (sk->sk_state == TIPC_CONNECTING)
2671 		tipc_sk_retry_connect(sk, &list);
2672 
2673 	bh_unlock_sock(sk);
2674 
2675 	if (!skb_queue_empty(&list))
2676 		rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2677 
2678 	/* SYN messages may cause link congestion */
2679 	if (rc == -ELINKCONG) {
2680 		tipc_dest_push(&tsk->cong_links, pnode, 0);
2681 		tsk->cong_link_cnt = 1;
2682 	}
2683 	sock_put(sk);
2684 }
2685 
2686 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2687 			   struct tipc_name_seq const *seq)
2688 {
2689 	struct sock *sk = &tsk->sk;
2690 	struct net *net = sock_net(sk);
2691 	struct publication *publ;
2692 	u32 key;
2693 
2694 	if (scope != TIPC_NODE_SCOPE)
2695 		scope = TIPC_CLUSTER_SCOPE;
2696 
2697 	if (tipc_sk_connected(sk))
2698 		return -EINVAL;
2699 	key = tsk->portid + tsk->pub_count + 1;
2700 	if (key == tsk->portid)
2701 		return -EADDRINUSE;
2702 
2703 	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2704 				    scope, tsk->portid, key);
2705 	if (unlikely(!publ))
2706 		return -EINVAL;
2707 
2708 	list_add(&publ->binding_sock, &tsk->publications);
2709 	tsk->pub_count++;
2710 	tsk->published = 1;
2711 	return 0;
2712 }
2713 
2714 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2715 			    struct tipc_name_seq const *seq)
2716 {
2717 	struct net *net = sock_net(&tsk->sk);
2718 	struct publication *publ;
2719 	struct publication *safe;
2720 	int rc = -EINVAL;
2721 
2722 	if (scope != TIPC_NODE_SCOPE)
2723 		scope = TIPC_CLUSTER_SCOPE;
2724 
2725 	list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
2726 		if (seq) {
2727 			if (publ->scope != scope)
2728 				continue;
2729 			if (publ->type != seq->type)
2730 				continue;
2731 			if (publ->lower != seq->lower)
2732 				continue;
2733 			if (publ->upper != seq->upper)
2734 				break;
2735 			tipc_nametbl_withdraw(net, publ->type, publ->lower,
2736 					      publ->upper, publ->key);
2737 			rc = 0;
2738 			break;
2739 		}
2740 		tipc_nametbl_withdraw(net, publ->type, publ->lower,
2741 				      publ->upper, publ->key);
2742 		rc = 0;
2743 	}
2744 	if (list_empty(&tsk->publications))
2745 		tsk->published = 0;
2746 	return rc;
2747 }
2748 
2749 /* tipc_sk_reinit: set non-zero address in all existing sockets
2750  *                 when we go from standalone to network mode.
2751  */
2752 void tipc_sk_reinit(struct net *net)
2753 {
2754 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2755 	struct rhashtable_iter iter;
2756 	struct tipc_sock *tsk;
2757 	struct tipc_msg *msg;
2758 
2759 	rhashtable_walk_enter(&tn->sk_rht, &iter);
2760 
2761 	do {
2762 		rhashtable_walk_start(&iter);
2763 
2764 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2765 			sock_hold(&tsk->sk);
2766 			rhashtable_walk_stop(&iter);
2767 			lock_sock(&tsk->sk);
2768 			msg = &tsk->phdr;
2769 			msg_set_prevnode(msg, tipc_own_addr(net));
2770 			msg_set_orignode(msg, tipc_own_addr(net));
2771 			release_sock(&tsk->sk);
2772 			rhashtable_walk_start(&iter);
2773 			sock_put(&tsk->sk);
2774 		}
2775 
2776 		rhashtable_walk_stop(&iter);
2777 	} while (tsk == ERR_PTR(-EAGAIN));
2778 
2779 	rhashtable_walk_exit(&iter);
2780 }
2781 
2782 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2783 {
2784 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2785 	struct tipc_sock *tsk;
2786 
2787 	rcu_read_lock();
2788 	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2789 	if (tsk)
2790 		sock_hold(&tsk->sk);
2791 	rcu_read_unlock();
2792 
2793 	return tsk;
2794 }
2795 
2796 static int tipc_sk_insert(struct tipc_sock *tsk)
2797 {
2798 	struct sock *sk = &tsk->sk;
2799 	struct net *net = sock_net(sk);
2800 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2801 	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2802 	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2803 
2804 	while (remaining--) {
2805 		portid++;
2806 		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2807 			portid = TIPC_MIN_PORT;
2808 		tsk->portid = portid;
2809 		sock_hold(&tsk->sk);
2810 		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2811 						   tsk_rht_params))
2812 			return 0;
2813 		sock_put(&tsk->sk);
2814 	}
2815 
2816 	return -1;
2817 }
2818 
2819 static void tipc_sk_remove(struct tipc_sock *tsk)
2820 {
2821 	struct sock *sk = &tsk->sk;
2822 	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2823 
2824 	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2825 		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2826 		__sock_put(sk);
2827 	}
2828 }
2829 
2830 static const struct rhashtable_params tsk_rht_params = {
2831 	.nelem_hint = 192,
2832 	.head_offset = offsetof(struct tipc_sock, node),
2833 	.key_offset = offsetof(struct tipc_sock, portid),
2834 	.key_len = sizeof(u32), /* portid */
2835 	.max_size = 1048576,
2836 	.min_size = 256,
2837 	.automatic_shrinking = true,
2838 };
2839 
2840 int tipc_sk_rht_init(struct net *net)
2841 {
2842 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2843 
2844 	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2845 }
2846 
2847 void tipc_sk_rht_destroy(struct net *net)
2848 {
2849 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2850 
2851 	/* Wait for socket readers to complete */
2852 	synchronize_net();
2853 
2854 	rhashtable_destroy(&tn->sk_rht);
2855 }
2856 
2857 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2858 {
2859 	struct net *net = sock_net(&tsk->sk);
2860 	struct tipc_group *grp = tsk->group;
2861 	struct tipc_msg *hdr = &tsk->phdr;
2862 	struct tipc_name_seq seq;
2863 	int rc;
2864 
2865 	if (mreq->type < TIPC_RESERVED_TYPES)
2866 		return -EACCES;
2867 	if (mreq->scope > TIPC_NODE_SCOPE)
2868 		return -EINVAL;
2869 	if (grp)
2870 		return -EACCES;
2871 	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
2872 	if (!grp)
2873 		return -ENOMEM;
2874 	tsk->group = grp;
2875 	msg_set_lookup_scope(hdr, mreq->scope);
2876 	msg_set_nametype(hdr, mreq->type);
2877 	msg_set_dest_droppable(hdr, true);
2878 	seq.type = mreq->type;
2879 	seq.lower = mreq->instance;
2880 	seq.upper = seq.lower;
2881 	tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
2882 	rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2883 	if (rc) {
2884 		tipc_group_delete(net, grp);
2885 		tsk->group = NULL;
2886 		return rc;
2887 	}
2888 	/* Eliminate any risk that a broadcast overtakes sent JOINs */
2889 	tsk->mc_method.rcast = true;
2890 	tsk->mc_method.mandatory = true;
2891 	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
2892 	return rc;
2893 }
2894 
2895 static int tipc_sk_leave(struct tipc_sock *tsk)
2896 {
2897 	struct net *net = sock_net(&tsk->sk);
2898 	struct tipc_group *grp = tsk->group;
2899 	struct tipc_name_seq seq;
2900 	int scope;
2901 
2902 	if (!grp)
2903 		return -EINVAL;
2904 	tipc_group_self(grp, &seq, &scope);
2905 	tipc_group_delete(net, grp);
2906 	tsk->group = NULL;
2907 	tipc_sk_withdraw(tsk, scope, &seq);
2908 	return 0;
2909 }
2910 
2911 /**
2912  * tipc_setsockopt - set socket option
2913  * @sock: socket structure
2914  * @lvl: option level
2915  * @opt: option identifier
2916  * @ov: pointer to new option value
2917  * @ol: length of option value
2918  *
2919  * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2920  * (to ease compatibility).
2921  *
2922  * Returns 0 on success, errno otherwise
2923  */
2924 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2925 			   char __user *ov, unsigned int ol)
2926 {
2927 	struct sock *sk = sock->sk;
2928 	struct tipc_sock *tsk = tipc_sk(sk);
2929 	struct tipc_group_req mreq;
2930 	u32 value = 0;
2931 	int res = 0;
2932 
2933 	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2934 		return 0;
2935 	if (lvl != SOL_TIPC)
2936 		return -ENOPROTOOPT;
2937 
2938 	switch (opt) {
2939 	case TIPC_IMPORTANCE:
2940 	case TIPC_SRC_DROPPABLE:
2941 	case TIPC_DEST_DROPPABLE:
2942 	case TIPC_CONN_TIMEOUT:
2943 		if (ol < sizeof(value))
2944 			return -EINVAL;
2945 		if (get_user(value, (u32 __user *)ov))
2946 			return -EFAULT;
2947 		break;
2948 	case TIPC_GROUP_JOIN:
2949 		if (ol < sizeof(mreq))
2950 			return -EINVAL;
2951 		if (copy_from_user(&mreq, ov, sizeof(mreq)))
2952 			return -EFAULT;
2953 		break;
2954 	default:
2955 		if (ov || ol)
2956 			return -EINVAL;
2957 	}
2958 
2959 	lock_sock(sk);
2960 
2961 	switch (opt) {
2962 	case TIPC_IMPORTANCE:
2963 		res = tsk_set_importance(tsk, value);
2964 		break;
2965 	case TIPC_SRC_DROPPABLE:
2966 		if (sock->type != SOCK_STREAM)
2967 			tsk_set_unreliable(tsk, value);
2968 		else
2969 			res = -ENOPROTOOPT;
2970 		break;
2971 	case TIPC_DEST_DROPPABLE:
2972 		tsk_set_unreturnable(tsk, value);
2973 		break;
2974 	case TIPC_CONN_TIMEOUT:
2975 		tipc_sk(sk)->conn_timeout = value;
2976 		break;
2977 	case TIPC_MCAST_BROADCAST:
2978 		tsk->mc_method.rcast = false;
2979 		tsk->mc_method.mandatory = true;
2980 		break;
2981 	case TIPC_MCAST_REPLICAST:
2982 		tsk->mc_method.rcast = true;
2983 		tsk->mc_method.mandatory = true;
2984 		break;
2985 	case TIPC_GROUP_JOIN:
2986 		res = tipc_sk_join(tsk, &mreq);
2987 		break;
2988 	case TIPC_GROUP_LEAVE:
2989 		res = tipc_sk_leave(tsk);
2990 		break;
2991 	default:
2992 		res = -EINVAL;
2993 	}
2994 
2995 	release_sock(sk);
2996 
2997 	return res;
2998 }
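/*
 * Illustrative userspace sketch (not part of this file's build): the
 * two option shapes accepted above, a plain u32 (TIPC_CONN_TIMEOUT, in
 * milliseconds) and a struct tipc_group_req (TIPC_GROUP_JOIN). The
 * group type 4711 and instance 42 are hypothetical.
 */
#if 0
#include <sys/socket.h>
#include <linux/tipc.h>

static int tune_socket(int sd)
{
	__u32 timeout_ms = 10000;
	struct tipc_group_req mreq = {
		.type = 4711,
		.instance = 42,
		.scope = TIPC_CLUSTER_SCOPE,
	};

	if (setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT,
		       &timeout_ms, sizeof(timeout_ms)))
		return -1;
	return setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN,
			  &mreq, sizeof(mreq));
}
#endif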
2999 
3000 /**
3001  * tipc_getsockopt - get socket option
3002  * @sock: socket structure
3003  * @lvl: option level
3004  * @opt: option identifier
3005  * @ov: receptacle for option value
3006  * @ol: receptacle for length of option value
3007  *
3008  * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
3009  * (to ease compatibility).
3010  *
3011  * Returns 0 on success, errno otherwise
3012  */
3013 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
3014 			   char __user *ov, int __user *ol)
3015 {
3016 	struct sock *sk = sock->sk;
3017 	struct tipc_sock *tsk = tipc_sk(sk);
3018 	struct tipc_name_seq seq;
3019 	int len, scope;
3020 	u32 value;
3021 	int res;
3022 
3023 	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3024 		return put_user(0, ol);
3025 	if (lvl != SOL_TIPC)
3026 		return -ENOPROTOOPT;
3027 	res = get_user(len, ol);
3028 	if (res)
3029 		return res;
3030 
3031 	lock_sock(sk);
3032 
3033 	switch (opt) {
3034 	case TIPC_IMPORTANCE:
3035 		value = tsk_importance(tsk);
3036 		break;
3037 	case TIPC_SRC_DROPPABLE:
3038 		value = tsk_unreliable(tsk);
3039 		break;
3040 	case TIPC_DEST_DROPPABLE:
3041 		value = tsk_unreturnable(tsk);
3042 		break;
3043 	case TIPC_CONN_TIMEOUT:
3044 		value = tsk->conn_timeout;
3045 		/* no need to set "res", since already 0 at this point */
3046 		break;
3047 	case TIPC_NODE_RECVQ_DEPTH:
3048 		value = 0; /* was tipc_queue_size, now obsolete */
3049 		break;
3050 	case TIPC_SOCK_RECVQ_DEPTH:
3051 		value = skb_queue_len(&sk->sk_receive_queue);
3052 		break;
3053 	case TIPC_GROUP_JOIN:
3054 		seq.type = 0;
3055 		if (tsk->group)
3056 			tipc_group_self(tsk->group, &seq, &scope);
3057 		value = seq.type;
3058 		break;
3059 	default:
3060 		res = -EINVAL;
3061 	}
3062 
3063 	release_sock(sk);
3064 
3065 	if (res)
3066 		return res;	/* "get" failed */
3067 
3068 	if (len < sizeof(value))
3069 		return -EINVAL;
3070 
3071 	if (copy_to_user(ov, &value, sizeof(value)))
3072 		return -EFAULT;
3073 
3074 	return put_user(sizeof(value), ol);
3075 }
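/*
 * Illustrative userspace sketch (not part of this file's build): every
 * option above is read back as a single u32.
 */
#if 0
#include <sys/socket.h>
#include <linux/tipc.h>

static int conn_timeout_ms(int sd)
{
	__u32 value = 0;
	socklen_t len = sizeof(value);

	if (getsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &value, &len))
		return -1;
	return value;
}
#endif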
3076 
3077 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3078 {
3079 	struct net *net = sock_net(sock->sk);
3080 	struct tipc_sioc_nodeid_req nr = {0};
3081 	struct tipc_sioc_ln_req lnr;
3082 	void __user *argp = (void __user *)arg;
3083 
3084 	switch (cmd) {
3085 	case SIOCGETLINKNAME:
3086 		if (copy_from_user(&lnr, argp, sizeof(lnr)))
3087 			return -EFAULT;
3088 		if (!tipc_node_get_linkname(net,
3089 					    lnr.bearer_id & 0xffff, lnr.peer,
3090 					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
3091 			if (copy_to_user(argp, &lnr, sizeof(lnr)))
3092 				return -EFAULT;
3093 			return 0;
3094 		}
3095 		return -EADDRNOTAVAIL;
3096 	case SIOCGETNODEID:
3097 		if (copy_from_user(&nr, argp, sizeof(nr)))
3098 			return -EFAULT;
3099 		if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3100 			return -EADDRNOTAVAIL;
3101 		if (copy_to_user(argp, &nr, sizeof(nr)))
3102 			return -EFAULT;
3103 		return 0;
3104 	default:
3105 		return -ENOIOCTLCMD;
3106 	}
3107 }
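/*
 * Illustrative userspace sketch (not part of this file's build):
 * querying a peer's node identity via the SIOCGETNODEID branch above.
 * 'peer_addr' is a hypothetical node hash address.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>
#include <linux/tipc.h>

static int get_node_id(int sd, __u32 peer_addr, char id[TIPC_NODEID_LEN])
{
	struct tipc_sioc_nodeid_req nr;

	memset(&nr, 0, sizeof(nr));
	nr.peer = peer_addr;
	if (ioctl(sd, SIOCGETNODEID, &nr))
		return -1;
	memcpy(id, nr.node_id, TIPC_NODEID_LEN);
	return 0;
}
#endif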
3108 
3109 static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3110 {
3111 	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3112 	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3113 	u32 onode = tipc_own_addr(sock_net(sock1->sk));
3114 
3115 	tsk1->peer.family = AF_TIPC;
3116 	tsk1->peer.addrtype = TIPC_ADDR_ID;
3117 	tsk1->peer.scope = TIPC_NODE_SCOPE;
3118 	tsk1->peer.addr.id.ref = tsk2->portid;
3119 	tsk1->peer.addr.id.node = onode;
3120 	tsk2->peer.family = AF_TIPC;
3121 	tsk2->peer.addrtype = TIPC_ADDR_ID;
3122 	tsk2->peer.scope = TIPC_NODE_SCOPE;
3123 	tsk2->peer.addr.id.ref = tsk1->portid;
3124 	tsk2->peer.addr.id.node = onode;
3125 
3126 	tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3127 	tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3128 	return 0;
3129 }
3130 
3131 /* Protocol switches for the various types of TIPC sockets */
3132 
3133 static const struct proto_ops msg_ops = {
3134 	.owner		= THIS_MODULE,
3135 	.family		= AF_TIPC,
3136 	.release	= tipc_release,
3137 	.bind		= tipc_bind,
3138 	.connect	= tipc_connect,
3139 	.socketpair	= tipc_socketpair,
3140 	.accept		= sock_no_accept,
3141 	.getname	= tipc_getname,
3142 	.poll		= tipc_poll,
3143 	.ioctl		= tipc_ioctl,
3144 	.listen		= sock_no_listen,
3145 	.shutdown	= tipc_shutdown,
3146 	.setsockopt	= tipc_setsockopt,
3147 	.getsockopt	= tipc_getsockopt,
3148 	.sendmsg	= tipc_sendmsg,
3149 	.recvmsg	= tipc_recvmsg,
3150 	.mmap		= sock_no_mmap,
3151 	.sendpage	= sock_no_sendpage
3152 };
3153 
3154 static const struct proto_ops packet_ops = {
3155 	.owner		= THIS_MODULE,
3156 	.family		= AF_TIPC,
3157 	.release	= tipc_release,
3158 	.bind		= tipc_bind,
3159 	.connect	= tipc_connect,
3160 	.socketpair	= tipc_socketpair,
3161 	.accept		= tipc_accept,
3162 	.getname	= tipc_getname,
3163 	.poll		= tipc_poll,
3164 	.ioctl		= tipc_ioctl,
3165 	.listen		= tipc_listen,
3166 	.shutdown	= tipc_shutdown,
3167 	.setsockopt	= tipc_setsockopt,
3168 	.getsockopt	= tipc_getsockopt,
3169 	.sendmsg	= tipc_send_packet,
3170 	.recvmsg	= tipc_recvmsg,
3171 	.mmap		= sock_no_mmap,
3172 	.sendpage	= sock_no_sendpage
3173 };
3174 
3175 static const struct proto_ops stream_ops = {
3176 	.owner		= THIS_MODULE,
3177 	.family		= AF_TIPC,
3178 	.release	= tipc_release,
3179 	.bind		= tipc_bind,
3180 	.connect	= tipc_connect,
3181 	.socketpair	= tipc_socketpair,
3182 	.accept		= tipc_accept,
3183 	.getname	= tipc_getname,
3184 	.poll		= tipc_poll,
3185 	.ioctl		= tipc_ioctl,
3186 	.listen		= tipc_listen,
3187 	.shutdown	= tipc_shutdown,
3188 	.setsockopt	= tipc_setsockopt,
3189 	.getsockopt	= tipc_getsockopt,
3190 	.sendmsg	= tipc_sendstream,
3191 	.recvmsg	= tipc_recvstream,
3192 	.mmap		= sock_no_mmap,
3193 	.sendpage	= sock_no_sendpage
3194 };
3195 
3196 static const struct net_proto_family tipc_family_ops = {
3197 	.owner		= THIS_MODULE,
3198 	.family		= AF_TIPC,
3199 	.create		= tipc_sk_create
3200 };
3201 
3202 static struct proto tipc_proto = {
3203 	.name		= "TIPC",
3204 	.owner		= THIS_MODULE,
3205 	.obj_size	= sizeof(struct tipc_sock),
3206 	.sysctl_rmem	= sysctl_tipc_rmem
3207 };
3208 
3209 /**
3210  * tipc_socket_init - initialize TIPC socket interface
3211  *
3212  * Returns 0 on success, errno otherwise
3213  */
3214 int tipc_socket_init(void)
3215 {
3216 	int res;
3217 
3218 	res = proto_register(&tipc_proto, 1);
3219 	if (res) {
3220 		pr_err("Failed to register TIPC protocol type\n");
3221 		goto out;
3222 	}
3223 
3224 	res = sock_register(&tipc_family_ops);
3225 	if (res) {
3226 		pr_err("Failed to register TIPC socket type\n");
3227 		proto_unregister(&tipc_proto);
3228 		goto out;
3229 	}
3230  out:
3231 	return res;
3232 }
3233 
3234 /**
3235  * tipc_socket_stop - stop TIPC socket interface
3236  */
3237 void tipc_socket_stop(void)
3238 {
3239 	sock_unregister(tipc_family_ops.family);
3240 	proto_unregister(&tipc_proto);
3241 }
3242 
3243 /* Caller should hold socket lock for the passed tipc socket. */
3244 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3245 {
3246 	u32 peer_node;
3247 	u32 peer_port;
3248 	struct nlattr *nest;
3249 
3250 	peer_node = tsk_peer_node(tsk);
3251 	peer_port = tsk_peer_port(tsk);
3252 
3253 	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
3254 
3255 	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3256 		goto msg_full;
3257 	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3258 		goto msg_full;
3259 
3260 	if (tsk->conn_type != 0) {
3261 		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3262 			goto msg_full;
3263 		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3264 			goto msg_full;
3265 		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3266 			goto msg_full;
3267 	}
3268 	nla_nest_end(skb, nest);
3269 
3270 	return 0;
3271 
3272 msg_full:
3273 	nla_nest_cancel(skb, nest);
3274 
3275 	return -EMSGSIZE;
3276 }
3277 
3278 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock *tsk)
3280 {
3281 	struct net *net = sock_net(skb->sk);
3282 	struct sock *sk = &tsk->sk;
3283 
3284 	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3285 	    nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3286 		return -EMSGSIZE;
3287 
3288 	if (tipc_sk_connected(sk)) {
3289 		if (__tipc_nl_add_sk_con(skb, tsk))
3290 			return -EMSGSIZE;
3291 	} else if (!list_empty(&tsk->publications)) {
3292 		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3293 			return -EMSGSIZE;
3294 	}
3295 	return 0;
3296 }
3297 
3298 /* Caller should hold socket lock for the passed tipc socket. */
3299 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3300 			    struct tipc_sock *tsk)
3301 {
3302 	struct nlattr *attrs;
3303 	void *hdr;
3304 
3305 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3306 			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3307 	if (!hdr)
3308 		goto msg_cancel;
3309 
3310 	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3311 	if (!attrs)
3312 		goto genlmsg_cancel;
3313 
3314 	if (__tipc_nl_add_sk_info(skb, tsk))
3315 		goto attr_msg_cancel;
3316 
3317 	nla_nest_end(skb, attrs);
3318 	genlmsg_end(skb, hdr);
3319 
3320 	return 0;
3321 
3322 attr_msg_cancel:
3323 	nla_nest_cancel(skb, attrs);
3324 genlmsg_cancel:
3325 	genlmsg_cancel(skb, hdr);
3326 msg_cancel:
3327 	return -EMSGSIZE;
3328 }
3329 
3330 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3331 		    int (*skb_handler)(struct sk_buff *skb,
3332 				       struct netlink_callback *cb,
3333 				       struct tipc_sock *tsk))
3334 {
3335 	struct rhashtable_iter *iter = (void *)cb->args[4];
3336 	struct tipc_sock *tsk;
3337 	int err;
3338 
3339 	rhashtable_walk_start(iter);
3340 	while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3341 		if (IS_ERR(tsk)) {
3342 			err = PTR_ERR(tsk);
3343 			if (err == -EAGAIN) {
3344 				err = 0;
3345 				continue;
3346 			}
3347 			break;
3348 		}
3349 
3350 		sock_hold(&tsk->sk);
3351 		rhashtable_walk_stop(iter);
3352 		lock_sock(&tsk->sk);
3353 		err = skb_handler(skb, cb, tsk);
3354 		if (err) {
3355 			release_sock(&tsk->sk);
3356 			sock_put(&tsk->sk);
3357 			goto out;
3358 		}
3359 		release_sock(&tsk->sk);
3360 		rhashtable_walk_start(iter);
3361 		sock_put(&tsk->sk);
3362 	}
3363 	rhashtable_walk_stop(iter);
3364 out:
3365 	return skb->len;
3366 }
3367 EXPORT_SYMBOL(tipc_nl_sk_walk);
3368 
3369 int tipc_dump_start(struct netlink_callback *cb)
3370 {
3371 	return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3372 }
3373 EXPORT_SYMBOL(tipc_dump_start);
3374 
3375 int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3376 {
3377 	/* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3378 	struct rhashtable_iter *iter = (void *)cb->args[4];
3379 	struct tipc_net *tn = tipc_net(net);
3380 
3381 	if (!iter) {
3382 		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3383 		if (!iter)
3384 			return -ENOMEM;
3385 
3386 		cb->args[4] = (long)iter;
3387 	}
3388 
3389 	rhashtable_walk_enter(&tn->sk_rht, iter);
3390 	return 0;
3391 }
3392 
3393 int tipc_dump_done(struct netlink_callback *cb)
3394 {
3395 	struct rhashtable_iter *hti = (void *)cb->args[4];
3396 
3397 	rhashtable_walk_exit(hti);
3398 	kfree(hti);
3399 	return 0;
3400 }
3401 EXPORT_SYMBOL(tipc_dump_done);
3402 
3403 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3404 			   struct tipc_sock *tsk, u32 sk_filter_state,
3405 			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3406 {
3407 	struct sock *sk = &tsk->sk;
3408 	struct nlattr *attrs;
3409 	struct nlattr *stat;
3410 
3411 	/* filter response w.r.t. sk_state */
3412 	if (!(sk_filter_state & (1 << sk->sk_state)))
3413 		return 0;
3414 
3415 	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3416 	if (!attrs)
3417 		goto msg_cancel;
3418 
3419 	if (__tipc_nl_add_sk_info(skb, tsk))
3420 		goto attr_msg_cancel;
3421 
3422 	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3423 	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3424 	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3425 	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3426 			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3427 					 sock_i_uid(sk))) ||
3428 	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3429 			      tipc_diag_gen_cookie(sk),
3430 			      TIPC_NLA_SOCK_PAD))
3431 		goto attr_msg_cancel;
3432 
3433 	stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT);
3434 	if (!stat)
3435 		goto attr_msg_cancel;
3436 
3437 	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3438 			skb_queue_len(&sk->sk_receive_queue)) ||
3439 	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3440 			skb_queue_len(&sk->sk_write_queue)) ||
3441 	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3442 			atomic_read(&sk->sk_drops)))
3443 		goto stat_msg_cancel;
3444 
3445 	if (tsk->cong_link_cnt &&
3446 	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3447 		goto stat_msg_cancel;
3448 
3449 	if (tsk_conn_cong(tsk) &&
3450 	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3451 		goto stat_msg_cancel;
3452 
3453 	nla_nest_end(skb, stat);
3454 
3455 	if (tsk->group)
3456 		if (tipc_group_fill_sock_diag(tsk->group, skb))
3457 			goto stat_msg_cancel;
3458 
3459 	nla_nest_end(skb, attrs);
3460 
3461 	return 0;
3462 
3463 stat_msg_cancel:
3464 	nla_nest_cancel(skb, stat);
3465 attr_msg_cancel:
3466 	nla_nest_cancel(skb, attrs);
3467 msg_cancel:
3468 	return -EMSGSIZE;
3469 }
3470 EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3471 
3472 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3473 {
3474 	return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3475 }
3476 
3477 /* Caller should hold socket lock for the passed tipc socket. */
3478 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3479 				 struct netlink_callback *cb,
3480 				 struct publication *publ)
3481 {
3482 	void *hdr;
3483 	struct nlattr *attrs;
3484 
3485 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3486 			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3487 	if (!hdr)
3488 		goto msg_cancel;
3489 
3490 	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
3491 	if (!attrs)
3492 		goto genlmsg_cancel;
3493 
3494 	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3495 		goto attr_msg_cancel;
3496 	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3497 		goto attr_msg_cancel;
3498 	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3499 		goto attr_msg_cancel;
3500 	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3501 		goto attr_msg_cancel;
3502 
3503 	nla_nest_end(skb, attrs);
3504 	genlmsg_end(skb, hdr);
3505 
3506 	return 0;
3507 
3508 attr_msg_cancel:
3509 	nla_nest_cancel(skb, attrs);
3510 genlmsg_cancel:
3511 	genlmsg_cancel(skb, hdr);
3512 msg_cancel:
3513 	return -EMSGSIZE;
3514 }
3515 
3516 /* Caller should hold socket lock for the passed tipc socket. */
3517 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3518 				  struct netlink_callback *cb,
3519 				  struct tipc_sock *tsk, u32 *last_publ)
3520 {
3521 	int err;
3522 	struct publication *p;
3523 
3524 	if (*last_publ) {
3525 		list_for_each_entry(p, &tsk->publications, binding_sock) {
3526 			if (p->key == *last_publ)
3527 				break;
3528 		}
3529 		if (p->key != *last_publ) {
3530 			/* We never set seq or call nl_dump_check_consistent(),
3531 			 * which means that setting prev_seq here will cause the
3532 			 * consistency check to fail in the netlink callback
3533 			 * handler, resulting in the last NLMSG_DONE message
3534 			 * having the NLM_F_DUMP_INTR flag set.
3535 			 */
3536 			cb->prev_seq = 1;
3537 			*last_publ = 0;
3538 			return -EPIPE;
3539 		}
3540 	} else {
3541 		p = list_first_entry(&tsk->publications, struct publication,
3542 				     binding_sock);
3543 	}
3544 
3545 	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3546 		err = __tipc_nl_add_sk_publ(skb, cb, p);
3547 		if (err) {
3548 			*last_publ = p->key;
3549 			return err;
3550 		}
3551 	}
3552 	*last_publ = 0;
3553 
3554 	return 0;
3555 }
3556 
3557 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3558 {
3559 	int err;
3560 	u32 tsk_portid = cb->args[0];
3561 	u32 last_publ = cb->args[1];
3562 	u32 done = cb->args[2];
3563 	struct net *net = sock_net(skb->sk);
3564 	struct tipc_sock *tsk;
3565 
3566 	if (!tsk_portid) {
3567 		struct nlattr **attrs;
3568 		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3569 
3570 		err = tipc_nlmsg_parse(cb->nlh, &attrs);
3571 		if (err)
3572 			return err;
3573 
3574 		if (!attrs[TIPC_NLA_SOCK])
3575 			return -EINVAL;
3576 
3577 		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
3578 				       attrs[TIPC_NLA_SOCK],
3579 				       tipc_nl_sock_policy, NULL);
3580 		if (err)
3581 			return err;
3582 
3583 		if (!sock[TIPC_NLA_SOCK_REF])
3584 			return -EINVAL;
3585 
3586 		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3587 	}
3588 
3589 	if (done)
3590 		return 0;
3591 
3592 	tsk = tipc_sk_lookup(net, tsk_portid);
3593 	if (!tsk)
3594 		return -EINVAL;
3595 
3596 	lock_sock(&tsk->sk);
3597 	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3598 	if (!err)
3599 		done = 1;
3600 	release_sock(&tsk->sk);
3601 	sock_put(&tsk->sk);
3602 
3603 	cb->args[0] = tsk_portid;
3604 	cb->args[1] = last_publ;
3605 	cb->args[2] = done;
3606 
3607 	return skb->len;
3608 }
3609 
3610 /**
3611  * tipc_sk_filtering - check if a socket should be traced
3612  * @sk: the socket to be examined
3613  * @sysctl_tipc_sk_filter[]: the socket tuple for filtering,
3614  *  (portid, sock type, name type, name lower, name upper)
3615  *
3616  * Returns true if the socket matches the tuple (where a field value
3617  * of 0 means 'any'), or when no tuple is set at all (all fields 0);
3618  * otherwise false
3619  */
3620 bool tipc_sk_filtering(struct sock *sk)
3621 {
3622 	struct tipc_sock *tsk;
3623 	struct publication *p;
3624 	u32 _port, _sktype, _type, _lower, _upper;
3625 	u32 type = 0, lower = 0, upper = 0;
3626 
3627 	if (!sk)
3628 		return true;
3629 
3630 	tsk = tipc_sk(sk);
3631 
3632 	_port = sysctl_tipc_sk_filter[0];
3633 	_sktype = sysctl_tipc_sk_filter[1];
3634 	_type = sysctl_tipc_sk_filter[2];
3635 	_lower = sysctl_tipc_sk_filter[3];
3636 	_upper = sysctl_tipc_sk_filter[4];
3637 
3638 	if (!_port && !_sktype && !_type && !_lower && !_upper)
3639 		return true;
3640 
3641 	if (_port)
3642 		return (_port == tsk->portid);
3643 
3644 	if (_sktype && _sktype != sk->sk_type)
3645 		return false;
3646 
3647 	if (tsk->published) {
3648 		p = list_first_entry_or_null(&tsk->publications,
3649 					     struct publication, binding_sock);
3650 		if (p) {
3651 			type = p->type;
3652 			lower = p->lower;
3653 			upper = p->upper;
3654 		}
3655 	}
3656 
3657 	if (!tipc_sk_type_connectionless(sk)) {
3658 		type = tsk->conn_type;
3659 		lower = tsk->conn_instance;
3660 		upper = tsk->conn_instance;
3661 	}
3662 
3663 	if ((_type && _type != type) || (_lower && _lower != lower) ||
3664 	    (_upper && _upper != upper))
3665 		return false;
3666 
3667 	return true;
3668 }
3669 
3670 u32 tipc_sock_get_portid(struct sock *sk)
3671 {
3672 	return (sk) ? (tipc_sk(sk))->portid : 0;
3673 }
3674 
3675 /**
3676  * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded,
3677  *			both the rcv and backlog queues are considered
3678  * @sk: tipc sk to be checked
3679  * @skb: tipc msg to be checked
3680  *
3681  * Returns true if the socket rx queue allocation is > 90%, otherwise false
3682  */
3683 
3684 bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3685 {
3686 	atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3687 	unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3688 	unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3689 
3690 	return (qsize > lim * 90 / 100);
3691 }
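
/* Worked example: with rcvbuf_limit() plus the duplicate budget giving
 * lim = 2200000 bytes, the check fires once the rcv and backlog queues
 * together hold more than 1980000 bytes (90%, in integer arithmetic).
 */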
3692 
3693 /**
3694  * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded;
3695  *			only the rcv queue is considered
3696  * @sk: tipc sk to be checked
3697  * @skb: tipc msg to be checked
3698  *
3699  * Returns true if the socket rx queue fill level is > 90% of the limit, otherwise false
3700  */
3701 
3702 bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3703 {
3704 	unsigned int lim = rcvbuf_limit(sk, skb);
3705 	unsigned int qsize = sk_rmem_alloc_get(sk);
3706 
3707 	return (qsize > lim * 90 / 100);
3708 }
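
/* A self-contained userspace sketch of the same 90% test, with made-up
 * queue sizes standing in for rcvbuf_limit()/sk_rmem_alloc_get(); kept
 * under '#if 0' since it is an illustration, not kernel code.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool over_90_percent(unsigned int qsize, unsigned int lim)
{
	/* same integer arithmetic as tipc_sk_overlimit1/2() above */
	return qsize > lim * 90 / 100;
}

int main(void)
{
	printf("%d\n", over_90_percent(1900000, 2097152)); /* 1: over 90% */
	printf("%d\n", over_90_percent(1700000, 2097152)); /* 0: under */
	return 0;
}
#endif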
3709 
3710 /**
3711  * tipc_sk_dump - dump TIPC socket
3712  * @sk: tipc sk to be dumped
3713  * @dqueues: bitmask selecting which socket queues to dump:
3714  *           - TIPC_DUMP_NONE: don't dump socket queues
3715  *           - TIPC_DUMP_SK_SNDQ: dump socket send queue
3716  *           - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3717  *           - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3718  *           - TIPC_DUMP_ALL: dump all the socket queues above
3719  * @buf: buffer the formatted dump data is written to
3720  */
3721 int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3722 {
3723 	int i = 0;
3724 	size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3725 	struct tipc_sock *tsk;
3726 	struct publication *p;
3727 	bool tsk_connected;
3728 
3729 	if (!sk) {
3730 		i += scnprintf(buf, sz, "sk data: (null)\n");
3731 		return i;
3732 	}
3733 
3734 	tsk = tipc_sk(sk);
3735 	tsk_connected = !tipc_sk_type_connectionless(sk);
3736 
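	/* The dump is one line of space-separated fields, with '|' marking
	 * the groups: basic sk data, connection info, publication info,
	 * window/congestion counters, and memory/queue accounting.
	 */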
3737 	i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3738 	i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3739 	i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3740 	i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3741 	i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3742 	if (tsk_connected) {
3743 		i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3744 		i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3745 		i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
3746 		i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
3747 	}
3748 	i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3749 	if (tsk->published) {
3750 		p = list_first_entry_or_null(&tsk->publications,
3751 					     struct publication, binding_sock);
3752 		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0);
3753 		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0);
3754 		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0);
3755 	}
3756 	i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3757 	i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3758 	i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3759 	i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3760 	i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3761 	i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3762 	i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3763 	i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3764 	i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3765 	i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3766 	i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3767 	i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3768 	i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3769 	i += scnprintf(buf + i, sz - i, " | %d\n", sk->sk_backlog.len);
3770 
3771 	if (dqueues & TIPC_DUMP_SK_SNDQ) {
3772 		i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3773 		i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
3774 	}
3775 
3776 	if (dqueues & TIPC_DUMP_SK_RCVQ) {
3777 		i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
3778 		i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
3779 	}
3780 
3781 	if (dqueues & TIPC_DUMP_SK_BKLGQ) {
3782 		i += scnprintf(buf + i, sz - i, "sk_backlog:\n  head ");
3783 		i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
3784 		if (sk->sk_backlog.tail != sk->sk_backlog.head) {
3785 			i += scnprintf(buf + i, sz - i, "  tail ");
3786 			i += tipc_skb_dump(sk->sk_backlog.tail, false,
3787 					   buf + i);
3788 		}
3789 	}
3790 
3791 	return i;
3792 }
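
/* Hedged usage sketch (not from this file): a caller could dump a socket
 * and all of its queues into a stack buffer sized for the largest dump:
 *
 *	char buf[SK_LMAX];
 *
 *	tipc_sk_dump(sk, TIPC_DUMP_ALL, buf);
 *	pr_info("%s", buf);
 *
 * which is essentially how the tipc trace events in trace.h consume
 * this helper.
 */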
3793