xref: /openbmc/linux/net/netlink/af_netlink.c (revision 861e10be)
1 /*
2  * NETLINK      Kernel-user communication protocol.
3  *
4  * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6  *
7  *		This program is free software; you can redistribute it and/or
8  *		modify it under the terms of the GNU General Public License
9  *		as published by the Free Software Foundation; either version
10  *		2 of the License, or (at your option) any later version.
11  *
12  * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
13  *                               added netlink_proto_exit
14  * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
15  * 				 use nlk_sk, as sk->protinfo is on a diet 8)
16  * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
17  * 				 - inc module use count of module that owns
18  * 				   the kernel socket in case userspace opens
19  * 				   socket of same protocol
20  * 				 - remove all module support, since netlink is
21  * 				   mandatory if CONFIG_NET=y these days
22  */
23 
24 #include <linux/module.h>
25 
26 #include <linux/capability.h>
27 #include <linux/kernel.h>
28 #include <linux/init.h>
29 #include <linux/signal.h>
30 #include <linux/sched.h>
31 #include <linux/errno.h>
32 #include <linux/string.h>
33 #include <linux/stat.h>
34 #include <linux/socket.h>
35 #include <linux/un.h>
36 #include <linux/fcntl.h>
37 #include <linux/termios.h>
38 #include <linux/sockios.h>
39 #include <linux/net.h>
40 #include <linux/fs.h>
41 #include <linux/slab.h>
42 #include <asm/uaccess.h>
43 #include <linux/skbuff.h>
44 #include <linux/netdevice.h>
45 #include <linux/rtnetlink.h>
46 #include <linux/proc_fs.h>
47 #include <linux/seq_file.h>
48 #include <linux/notifier.h>
49 #include <linux/security.h>
50 #include <linux/jhash.h>
51 #include <linux/jiffies.h>
52 #include <linux/random.h>
53 #include <linux/bitops.h>
54 #include <linux/mm.h>
55 #include <linux/types.h>
56 #include <linux/audit.h>
57 #include <linux/mutex.h>
58 
59 #include <net/net_namespace.h>
60 #include <net/sock.h>
61 #include <net/scm.h>
62 #include <net/netlink.h>
63 
64 #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
65 #define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
66 
67 struct netlink_sock {
68 	/* struct sock has to be the first member of netlink_sock */
69 	struct sock		sk;
70 	u32			portid;
71 	u32			dst_portid;
72 	u32			dst_group;
73 	u32			flags;
74 	u32			subscriptions;
75 	u32			ngroups;
76 	unsigned long		*groups;
77 	unsigned long		state;
78 	wait_queue_head_t	wait;
79 	struct netlink_callback	*cb;
80 	struct mutex		*cb_mutex;
81 	struct mutex		cb_def_mutex;
82 	void			(*netlink_rcv)(struct sk_buff *skb);
83 	void			(*netlink_bind)(int group);
84 	struct module		*module;
85 };
86 
87 struct listeners {
88 	struct rcu_head		rcu;
89 	unsigned long		masks[0];
90 };
91 
92 #define NETLINK_KERNEL_SOCKET	0x1
93 #define NETLINK_RECV_PKTINFO	0x2
94 #define NETLINK_BROADCAST_SEND_ERROR	0x4
95 #define NETLINK_RECV_NO_ENOBUFS	0x8
96 
97 static inline struct netlink_sock *nlk_sk(struct sock *sk)
98 {
99 	return container_of(sk, struct netlink_sock, sk);
100 }
101 
102 static inline int netlink_is_kernel(struct sock *sk)
103 {
104 	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
105 }
106 
107 struct nl_portid_hash {
108 	struct hlist_head	*table;
109 	unsigned long		rehash_time;
110 
111 	unsigned int		mask;
112 	unsigned int		shift;
113 
114 	unsigned int		entries;
115 	unsigned int		max_shift;
116 
117 	u32			rnd;
118 };
119 
120 struct netlink_table {
121 	struct nl_portid_hash	hash;
122 	struct hlist_head	mc_list;
123 	struct listeners __rcu	*listeners;
124 	unsigned int		flags;
125 	unsigned int		groups;
126 	struct mutex		*cb_mutex;
127 	struct module		*module;
128 	void			(*bind)(int group);
129 	int			registered;
130 };
131 
132 static struct netlink_table *nl_table;
133 
134 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
135 
136 static int netlink_dump(struct sock *sk);
137 
138 static DEFINE_RWLOCK(nl_table_lock);
139 static atomic_t nl_table_users = ATOMIC_INIT(0);
140 
141 #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))
142 
143 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
144 
145 static inline u32 netlink_group_mask(u32 group)
146 {
147 	return group ? 1 << (group - 1) : 0;
148 }
149 
150 static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
151 {
152 	return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
153 }
154 
155 static void netlink_destroy_callback(struct netlink_callback *cb)
156 {
157 	kfree_skb(cb->skb);
158 	kfree(cb);
159 }
160 
161 static void netlink_consume_callback(struct netlink_callback *cb)
162 {
163 	consume_skb(cb->skb);
164 	kfree(cb);
165 }
166 
167 static void netlink_sock_destruct(struct sock *sk)
168 {
169 	struct netlink_sock *nlk = nlk_sk(sk);
170 
171 	if (nlk->cb) {
172 		if (nlk->cb->done)
173 			nlk->cb->done(nlk->cb);
174 
175 		module_put(nlk->cb->module);
176 		netlink_destroy_callback(nlk->cb);
177 	}
178 
179 	skb_queue_purge(&sk->sk_receive_queue);
180 
181 	if (!sock_flag(sk, SOCK_DEAD)) {
182 		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
183 		return;
184 	}
185 
186 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
187 	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
188 	WARN_ON(nlk_sk(sk)->groups);
189 }
190 
191 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
192  * SMP. Look, when several writers sleep and a reader wakes them up, all but
193  * one immediately hits the write lock and grabs all the CPUs. Exclusive sleep
194  * solves this, _but_ remember, it adds useless work on UP machines.
195  */
196 
197 void netlink_table_grab(void)
198 	__acquires(nl_table_lock)
199 {
200 	might_sleep();
201 
202 	write_lock_irq(&nl_table_lock);
203 
204 	if (atomic_read(&nl_table_users)) {
205 		DECLARE_WAITQUEUE(wait, current);
206 
207 		add_wait_queue_exclusive(&nl_table_wait, &wait);
208 		for (;;) {
209 			set_current_state(TASK_UNINTERRUPTIBLE);
210 			if (atomic_read(&nl_table_users) == 0)
211 				break;
212 			write_unlock_irq(&nl_table_lock);
213 			schedule();
214 			write_lock_irq(&nl_table_lock);
215 		}
216 
217 		__set_current_state(TASK_RUNNING);
218 		remove_wait_queue(&nl_table_wait, &wait);
219 	}
220 }
221 
222 void netlink_table_ungrab(void)
223 	__releases(nl_table_lock)
224 {
225 	write_unlock_irq(&nl_table_lock);
226 	wake_up(&nl_table_wait);
227 }
228 
229 static inline void
230 netlink_lock_table(void)
231 {
232 	/* read_lock() synchronizes us with netlink_table_grab() */
233 
234 	read_lock(&nl_table_lock);
235 	atomic_inc(&nl_table_users);
236 	read_unlock(&nl_table_lock);
237 }
238 
239 static inline void
240 netlink_unlock_table(void)
241 {
242 	if (atomic_dec_and_test(&nl_table_users))
243 		wake_up(&nl_table_wait);
244 }
245 
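/* Find the socket bound to @portid for @protocol and return it with a
 * reference held, or NULL if no socket is bound to that portid.  The
 * hash table is walked under nl_table_lock.
 */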
246 static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
247 {
248 	struct nl_portid_hash *hash = &nl_table[protocol].hash;
249 	struct hlist_head *head;
250 	struct sock *sk;
251 	struct hlist_node *node;
252 
253 	read_lock(&nl_table_lock);
254 	head = nl_portid_hashfn(hash, portid);
255 	sk_for_each(sk, node, head) {
256 		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
257 			sock_hold(sk);
258 			goto found;
259 		}
260 	}
261 	sk = NULL;
262 found:
263 	read_unlock(&nl_table_lock);
264 	return sk;
265 }
266 
267 static struct hlist_head *nl_portid_hash_zalloc(size_t size)
268 {
269 	if (size <= PAGE_SIZE)
270 		return kzalloc(size, GFP_ATOMIC);
271 	else
272 		return (struct hlist_head *)
273 			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
274 					 get_order(size));
275 }
276 
277 static void nl_portid_hash_free(struct hlist_head *table, size_t size)
278 {
279 	if (size <= PAGE_SIZE)
280 		kfree(table);
281 	else
282 		free_pages((unsigned long)table, get_order(size));
283 }
284 
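/* Replace the hash table with one twice as large (if @grow) or of the
 * same size but with a fresh random seed, and rehash every socket into
 * it.  Returns 0 if the table may not grow further or the allocation
 * fails, 1 on success.
 */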
285 static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
286 {
287 	unsigned int omask, mask, shift;
288 	size_t osize, size;
289 	struct hlist_head *otable, *table;
290 	int i;
291 
292 	omask = mask = hash->mask;
293 	osize = size = (mask + 1) * sizeof(*table);
294 	shift = hash->shift;
295 
296 	if (grow) {
297 		if (++shift > hash->max_shift)
298 			return 0;
299 		mask = mask * 2 + 1;
300 		size *= 2;
301 	}
302 
303 	table = nl_portid_hash_zalloc(size);
304 	if (!table)
305 		return 0;
306 
307 	otable = hash->table;
308 	hash->table = table;
309 	hash->mask = mask;
310 	hash->shift = shift;
311 	get_random_bytes(&hash->rnd, sizeof(hash->rnd));
312 
313 	for (i = 0; i <= omask; i++) {
314 		struct sock *sk;
315 		struct hlist_node *node, *tmp;
316 
317 		sk_for_each_safe(sk, node, tmp, &otable[i])
318 			__sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
319 	}
320 
321 	nl_portid_hash_free(otable, osize);
322 	hash->rehash_time = jiffies + 10 * 60 * HZ;
323 	return 1;
324 }
325 
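/* Grow the hash table once the average chain length exceeds one, or
 * reseed and rehash when a single chain has grown longer than average
 * and enough time has passed since the last rehash.  Returns 1 when the
 * caller must look up its bucket again, 0 otherwise.
 */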
326 static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
327 {
328 	int avg = hash->entries >> hash->shift;
329 
330 	if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
331 		return 1;
332 
333 	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
334 		nl_portid_hash_rehash(hash, 0);
335 		return 1;
336 	}
337 
338 	return 0;
339 }
340 
341 static const struct proto_ops netlink_ops;
342 
343 static void
344 netlink_update_listeners(struct sock *sk)
345 {
346 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
347 	struct hlist_node *node;
348 	unsigned long mask;
349 	unsigned int i;
350 	struct listeners *listeners;
351 
352 	listeners = nl_deref_protected(tbl->listeners);
353 	if (!listeners)
354 		return;
355 
356 	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
357 		mask = 0;
358 		sk_for_each_bound(sk, node, &tbl->mc_list) {
359 			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
360 				mask |= nlk_sk(sk)->groups[i];
361 		}
362 		listeners->masks[i] = mask;
363 	}
364 	/* this function is only called with the netlink table "grabbed", which
365 	 * makes sure updates are visible before bind or setsockopt return. */
366 }
367 
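/* Bind @sk to @portid in the per-protocol hash table.  Fails with
 * -EADDRINUSE if another socket in the same net already owns the
 * portid, and with -EBUSY if @sk is already bound.
 */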
368 static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
369 {
370 	struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
371 	struct hlist_head *head;
372 	int err = -EADDRINUSE;
373 	struct sock *osk;
374 	struct hlist_node *node;
375 	int len;
376 
377 	netlink_table_grab();
378 	head = nl_portid_hashfn(hash, portid);
379 	len = 0;
380 	sk_for_each(osk, node, head) {
381 		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
382 			break;
383 		len++;
384 	}
385 	if (node)
386 		goto err;
387 
388 	err = -EBUSY;
389 	if (nlk_sk(sk)->portid)
390 		goto err;
391 
392 	err = -ENOMEM;
393 	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
394 		goto err;
395 
396 	if (len && nl_portid_hash_dilute(hash, len))
397 		head = nl_portid_hashfn(hash, portid);
398 	hash->entries++;
399 	nlk_sk(sk)->portid = portid;
400 	sk_add_node(sk, head);
401 	err = 0;
402 
403 err:
404 	netlink_table_ungrab();
405 	return err;
406 }
407 
408 static void netlink_remove(struct sock *sk)
409 {
410 	netlink_table_grab();
411 	if (sk_del_node_init(sk))
412 		nl_table[sk->sk_protocol].hash.entries--;
413 	if (nlk_sk(sk)->subscriptions)
414 		__sk_del_bind_node(sk);
415 	netlink_table_ungrab();
416 }
417 
418 static struct proto netlink_proto = {
419 	.name	  = "NETLINK",
420 	.owner	  = THIS_MODULE,
421 	.obj_size = sizeof(struct netlink_sock),
422 };
423 
424 static int __netlink_create(struct net *net, struct socket *sock,
425 			    struct mutex *cb_mutex, int protocol)
426 {
427 	struct sock *sk;
428 	struct netlink_sock *nlk;
429 
430 	sock->ops = &netlink_ops;
431 
432 	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
433 	if (!sk)
434 		return -ENOMEM;
435 
436 	sock_init_data(sock, sk);
437 
438 	nlk = nlk_sk(sk);
439 	if (cb_mutex) {
440 		nlk->cb_mutex = cb_mutex;
441 	} else {
442 		nlk->cb_mutex = &nlk->cb_def_mutex;
443 		mutex_init(nlk->cb_mutex);
444 	}
445 	init_waitqueue_head(&nlk->wait);
446 
447 	sk->sk_destruct = netlink_sock_destruct;
448 	sk->sk_protocol = protocol;
449 	return 0;
450 }
451 
452 static int netlink_create(struct net *net, struct socket *sock, int protocol,
453 			  int kern)
454 {
455 	struct module *module = NULL;
456 	struct mutex *cb_mutex;
457 	struct netlink_sock *nlk;
458 	void (*bind)(int group);
459 	int err = 0;
460 
461 	sock->state = SS_UNCONNECTED;
462 
463 	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
464 		return -ESOCKTNOSUPPORT;
465 
466 	if (protocol < 0 || protocol >= MAX_LINKS)
467 		return -EPROTONOSUPPORT;
468 
469 	netlink_lock_table();
470 #ifdef CONFIG_MODULES
471 	if (!nl_table[protocol].registered) {
472 		netlink_unlock_table();
473 		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
474 		netlink_lock_table();
475 	}
476 #endif
477 	if (nl_table[protocol].registered &&
478 	    try_module_get(nl_table[protocol].module))
479 		module = nl_table[protocol].module;
480 	else
481 		err = -EPROTONOSUPPORT;
482 	cb_mutex = nl_table[protocol].cb_mutex;
483 	bind = nl_table[protocol].bind;
484 	netlink_unlock_table();
485 
486 	if (err < 0)
487 		goto out;
488 
489 	err = __netlink_create(net, sock, cb_mutex, protocol);
490 	if (err < 0)
491 		goto out_module;
492 
493 	local_bh_disable();
494 	sock_prot_inuse_add(net, &netlink_proto, 1);
495 	local_bh_enable();
496 
497 	nlk = nlk_sk(sock->sk);
498 	nlk->module = module;
499 	nlk->netlink_bind = bind;
500 out:
501 	return err;
502 
503 out_module:
504 	module_put(module);
505 	goto out;
506 }
507 
508 static int netlink_release(struct socket *sock)
509 {
510 	struct sock *sk = sock->sk;
511 	struct netlink_sock *nlk;
512 
513 	if (!sk)
514 		return 0;
515 
516 	netlink_remove(sk);
517 	sock_orphan(sk);
518 	nlk = nlk_sk(sk);
519 
520 	/*
521 	 * OK. Socket is unlinked, any packets that arrive now
522 	 * will be purged.
523 	 */
524 
525 	sock->sk = NULL;
526 	wake_up_interruptible_all(&nlk->wait);
527 
528 	skb_queue_purge(&sk->sk_write_queue);
529 
530 	if (nlk->portid) {
531 		struct netlink_notify n = {
532 						.net = sock_net(sk),
533 						.protocol = sk->sk_protocol,
534 						.portid = nlk->portid,
535 					  };
536 		atomic_notifier_call_chain(&netlink_chain,
537 				NETLINK_URELEASE, &n);
538 	}
539 
540 	module_put(nlk->module);
541 
542 	netlink_table_grab();
543 	if (netlink_is_kernel(sk)) {
544 		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
545 		if (--nl_table[sk->sk_protocol].registered == 0) {
546 			struct listeners *old;
547 
548 			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
549 			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
550 			kfree_rcu(old, rcu);
551 			nl_table[sk->sk_protocol].module = NULL;
552 			nl_table[sk->sk_protocol].bind = NULL;
553 			nl_table[sk->sk_protocol].flags = 0;
554 			nl_table[sk->sk_protocol].registered = 0;
555 		}
556 	} else if (nlk->subscriptions) {
557 		netlink_update_listeners(sk);
558 	}
559 	netlink_table_ungrab();
560 
561 	kfree(nlk->groups);
562 	nlk->groups = NULL;
563 
564 	local_bh_disable();
565 	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
566 	local_bh_enable();
567 	sock_put(sk);
568 	return 0;
569 }
570 
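/* Choose a portid for an unbound socket: try the thread group id first
 * and fall back to negative values (starting at -4097) on collision,
 * which keeps kernel-chosen ids away from the pid range.
 */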
571 static int netlink_autobind(struct socket *sock)
572 {
573 	struct sock *sk = sock->sk;
574 	struct net *net = sock_net(sk);
575 	struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
576 	struct hlist_head *head;
577 	struct sock *osk;
578 	struct hlist_node *node;
579 	s32 portid = task_tgid_vnr(current);
580 	int err;
581 	static s32 rover = -4097;
582 
583 retry:
584 	cond_resched();
585 	netlink_table_grab();
586 	head = nl_portid_hashfn(hash, portid);
587 	sk_for_each(osk, node, head) {
588 		if (!net_eq(sock_net(osk), net))
589 			continue;
590 		if (nlk_sk(osk)->portid == portid) {
591 			/* Bind collision, search negative portid values. */
592 			portid = rover--;
593 			if (rover > -4097)
594 				rover = -4097;
595 			netlink_table_ungrab();
596 			goto retry;
597 		}
598 	}
599 	netlink_table_ungrab();
600 
601 	err = netlink_insert(sk, net, portid);
602 	if (err == -EADDRINUSE)
603 		goto retry;
604 
605 	/* If 2 threads race to autobind, that is fine.  */
606 	if (err == -EBUSY)
607 		err = 0;
608 
609 	return err;
610 }
611 
612 static inline int netlink_capable(const struct socket *sock, unsigned int flag)
613 {
614 	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
615 		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
616 }
617 
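/* Add the socket to or remove it from the per-protocol mc_list as its
 * multicast subscription count moves between zero and non-zero.
 */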
618 static void
619 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
620 {
621 	struct netlink_sock *nlk = nlk_sk(sk);
622 
623 	if (nlk->subscriptions && !subscriptions)
624 		__sk_del_bind_node(sk);
625 	else if (!nlk->subscriptions && subscriptions)
626 		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
627 	nlk->subscriptions = subscriptions;
628 }
629 
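/* Grow nlk->groups so that it covers every multicast group currently
 * defined for the socket's protocol.  Fails with -ENOENT if the
 * protocol is not registered.
 */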
630 static int netlink_realloc_groups(struct sock *sk)
631 {
632 	struct netlink_sock *nlk = nlk_sk(sk);
633 	unsigned int groups;
634 	unsigned long *new_groups;
635 	int err = 0;
636 
637 	netlink_table_grab();
638 
639 	groups = nl_table[sk->sk_protocol].groups;
640 	if (!nl_table[sk->sk_protocol].registered) {
641 		err = -ENOENT;
642 		goto out_unlock;
643 	}
644 
645 	if (nlk->ngroups >= groups)
646 		goto out_unlock;
647 
648 	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
649 	if (new_groups == NULL) {
650 		err = -ENOMEM;
651 		goto out_unlock;
652 	}
653 	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
654 	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
655 
656 	nlk->groups = new_groups;
657 	nlk->ngroups = groups;
658  out_unlock:
659 	netlink_table_ungrab();
660 	return err;
661 }
662 
663 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
664 			int addr_len)
665 {
666 	struct sock *sk = sock->sk;
667 	struct net *net = sock_net(sk);
668 	struct netlink_sock *nlk = nlk_sk(sk);
669 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
670 	int err;
671 
672 	if (addr_len < sizeof(struct sockaddr_nl))
673 		return -EINVAL;
674 
675 	if (nladdr->nl_family != AF_NETLINK)
676 		return -EINVAL;
677 
678 	/* Only the superuser is allowed to listen to multicasts */
679 	if (nladdr->nl_groups) {
680 		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
681 			return -EPERM;
682 		err = netlink_realloc_groups(sk);
683 		if (err)
684 			return err;
685 	}
686 
687 	if (nlk->portid) {
688 		if (nladdr->nl_pid != nlk->portid)
689 			return -EINVAL;
690 	} else {
691 		err = nladdr->nl_pid ?
692 			netlink_insert(sk, net, nladdr->nl_pid) :
693 			netlink_autobind(sock);
694 		if (err)
695 			return err;
696 	}
697 
698 	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
699 		return 0;
700 
701 	netlink_table_grab();
702 	netlink_update_subscriptions(sk, nlk->subscriptions +
703 					 hweight32(nladdr->nl_groups) -
704 					 hweight32(nlk->groups[0]));
705 	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
706 	netlink_update_listeners(sk);
707 	netlink_table_ungrab();
708 
709 	if (nlk->netlink_bind && nlk->groups[0]) {
710 		int i;
711 
712 		for (i = 0; i < nlk->ngroups; i++) {
713 			if (test_bit(i, nlk->groups))
714 				nlk->netlink_bind(i);
715 		}
716 	}
717 
718 	return 0;
719 }
720 
721 static int netlink_connect(struct socket *sock, struct sockaddr *addr,
722 			   int alen, int flags)
723 {
724 	int err = 0;
725 	struct sock *sk = sock->sk;
726 	struct netlink_sock *nlk = nlk_sk(sk);
727 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
728 
729 	if (alen < sizeof(addr->sa_family))
730 		return -EINVAL;
731 
732 	if (addr->sa_family == AF_UNSPEC) {
733 		sk->sk_state	= NETLINK_UNCONNECTED;
734 		nlk->dst_portid	= 0;
735 		nlk->dst_group  = 0;
736 		return 0;
737 	}
738 	if (addr->sa_family != AF_NETLINK)
739 		return -EINVAL;

	/* reject truncated addresses; nl_pid and nl_groups are read below */
	if (alen < sizeof(struct sockaddr_nl))
		return -EINVAL;
740 
741 	/* Only the superuser is allowed to send multicasts */
742 	if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
743 		return -EPERM;
744 
745 	if (!nlk->portid)
746 		err = netlink_autobind(sock);
747 
748 	if (err == 0) {
749 		sk->sk_state	= NETLINK_CONNECTED;
750 		nlk->dst_portid = nladdr->nl_pid;
751 		nlk->dst_group  = ffs(nladdr->nl_groups);
752 	}
753 
754 	return err;
755 }
756 
757 static int netlink_getname(struct socket *sock, struct sockaddr *addr,
758 			   int *addr_len, int peer)
759 {
760 	struct sock *sk = sock->sk;
761 	struct netlink_sock *nlk = nlk_sk(sk);
762 	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
763 
764 	nladdr->nl_family = AF_NETLINK;
765 	nladdr->nl_pad = 0;
766 	*addr_len = sizeof(*nladdr);
767 
768 	if (peer) {
769 		nladdr->nl_pid = nlk->dst_portid;
770 		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
771 	} else {
772 		nladdr->nl_pid = nlk->portid;
773 		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
774 	}
775 	return 0;
776 }
777 
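/* Account a failed delivery: bump sk_drops and, unless the receiver
 * opted out via NETLINK_NO_ENOBUFS, mark the socket congested and
 * report ENOBUFS through sk_error_report() once per congestion episode.
 */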
778 static void netlink_overrun(struct sock *sk)
779 {
780 	struct netlink_sock *nlk = nlk_sk(sk);
781 
782 	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
783 		if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
784 			sk->sk_err = ENOBUFS;
785 			sk->sk_error_report(sk);
786 		}
787 	}
788 	atomic_inc(&sk->sk_drops);
789 }
790 
791 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
792 {
793 	struct sock *sock;
794 	struct netlink_sock *nlk;
795 
796 	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
797 	if (!sock)
798 		return ERR_PTR(-ECONNREFUSED);
799 
800 	/* Don't bother queuing skb if kernel socket has no input function */
801 	nlk = nlk_sk(sock);
802 	if (sock->sk_state == NETLINK_CONNECTED &&
803 	    nlk->dst_portid != nlk_sk(ssk)->portid) {
804 		sock_put(sock);
805 		return ERR_PTR(-ECONNREFUSED);
806 	}
807 	return sock;
808 }
809 
810 struct sock *netlink_getsockbyfilp(struct file *filp)
811 {
812 	struct inode *inode = filp->f_path.dentry->d_inode;
813 	struct sock *sock;
814 
815 	if (!S_ISSOCK(inode->i_mode))
816 		return ERR_PTR(-ENOTSOCK);
817 
818 	sock = SOCKET_I(inode)->sk;
819 	if (sock->sk_family != AF_NETLINK)
820 		return ERR_PTR(-EINVAL);
821 
822 	sock_hold(sock);
823 	return sock;
824 }
825 
826 /*
827  * Attach a skb to a netlink socket.
828  * The caller must hold a reference to the destination socket. On error, the
829  * reference is dropped. The skb is not sent to the destination; only the
830  * error checks are performed and memory in the queue is reserved.
831  * Return values:
832  * < 0: error. skb freed, reference to sock dropped.
833  * 0: continue
834  * 1: repeat lookup - reference dropped while waiting for socket memory.
835  */
836 int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
837 		      long *timeo, struct sock *ssk)
838 {
839 	struct netlink_sock *nlk;
840 
841 	nlk = nlk_sk(sk);
842 
843 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
844 	    test_bit(0, &nlk->state)) {
845 		DECLARE_WAITQUEUE(wait, current);
846 		if (!*timeo) {
847 			if (!ssk || netlink_is_kernel(ssk))
848 				netlink_overrun(sk);
849 			sock_put(sk);
850 			kfree_skb(skb);
851 			return -EAGAIN;
852 		}
853 
854 		__set_current_state(TASK_INTERRUPTIBLE);
855 		add_wait_queue(&nlk->wait, &wait);
856 
857 		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
858 		     test_bit(0, &nlk->state)) &&
859 		    !sock_flag(sk, SOCK_DEAD))
860 			*timeo = schedule_timeout(*timeo);
861 
862 		__set_current_state(TASK_RUNNING);
863 		remove_wait_queue(&nlk->wait, &wait);
864 		sock_put(sk);
865 
866 		if (signal_pending(current)) {
867 			kfree_skb(skb);
868 			return sock_intr_errno(*timeo);
869 		}
870 		return 1;
871 	}
872 	skb_set_owner_r(skb, sk);
873 	return 0;
874 }
875 
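/* Queue @skb on the receive queue and notify the reader.  The caller
 * must already have charged the skb to @sk via skb_set_owner_r().
 */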
876 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
877 {
878 	int len = skb->len;
879 
880 	skb_queue_tail(&sk->sk_receive_queue, skb);
881 	sk->sk_data_ready(sk, len);
882 	return len;
883 }
884 
885 int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
886 {
887 	int len = __netlink_sendskb(sk, skb);
888 
889 	sock_put(sk);
890 	return len;
891 }
892 
893 void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
894 {
895 	kfree_skb(skb);
896 	sock_put(sk);
897 }
898 
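/* Shrink an skb whose tailroom accounts for more than half of its
 * truesize so that queued netlink messages do not pin excess memory.
 * Works on a clone if the skb is shared; returns the skb untrimmed if
 * no trimming is needed or the reallocation fails.
 */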
899 static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
900 {
901 	int delta;
902 
903 	skb_orphan(skb);
904 
905 	delta = skb->end - skb->tail;
906 	if (delta * 2 < skb->truesize)
907 		return skb;
908 
909 	if (skb_shared(skb)) {
910 		struct sk_buff *nskb = skb_clone(skb, allocation);
911 		if (!nskb)
912 			return skb;
913 		consume_skb(skb);
914 		skb = nskb;
915 	}
916 
917 	if (!pskb_expand_head(skb, 0, -delta, allocation))
918 		skb->truesize -= delta;
919 
920 	return skb;
921 }
922 
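/* Clear the congestion bit once the receive queue has drained and wake
 * up senders sleeping in netlink_attachskb().
 */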
923 static void netlink_rcv_wake(struct sock *sk)
924 {
925 	struct netlink_sock *nlk = nlk_sk(sk);
926 
927 	if (skb_queue_empty(&sk->sk_receive_queue))
928 		clear_bit(0, &nlk->state);
929 	if (!test_bit(0, &nlk->state))
930 		wake_up_interruptible(&nlk->wait);
931 }
932 
933 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
934 				  struct sock *ssk)
935 {
936 	int ret;
937 	struct netlink_sock *nlk = nlk_sk(sk);
938 
939 	ret = -ECONNREFUSED;
940 	if (nlk->netlink_rcv != NULL) {
941 		ret = skb->len;
942 		skb_set_owner_r(skb, sk);
943 		NETLINK_CB(skb).ssk = ssk;
944 		nlk->netlink_rcv(skb);
945 		consume_skb(skb);
946 	} else {
947 		kfree_skb(skb);
948 	}
949 	sock_put(sk);
950 	return ret;
951 }
952 
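/**
 * netlink_unicast - deliver a message to a single bound socket
 * @ssk: sending socket
 * @skb: message to deliver; consumed regardless of the outcome
 * @portid: netlink portid of the destination socket
 * @nonblock: do not wait for receive-buffer space; fail with -EAGAIN instead
 *
 * Returns the message length on success or a negative error code.
 */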
953 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
954 		    u32 portid, int nonblock)
955 {
956 	struct sock *sk;
957 	int err;
958 	long timeo;
959 
960 	skb = netlink_trim(skb, gfp_any());
961 
962 	timeo = sock_sndtimeo(ssk, nonblock);
963 retry:
964 	sk = netlink_getsockbyportid(ssk, portid);
965 	if (IS_ERR(sk)) {
966 		kfree_skb(skb);
967 		return PTR_ERR(sk);
968 	}
969 	if (netlink_is_kernel(sk))
970 		return netlink_unicast_kernel(sk, skb, ssk);
971 
972 	if (sk_filter(sk, skb)) {
973 		err = skb->len;
974 		kfree_skb(skb);
975 		sock_put(sk);
976 		return err;
977 	}
978 
979 	err = netlink_attachskb(sk, skb, &timeo, ssk);
980 	if (err == 1)
981 		goto retry;
982 	if (err)
983 		return err;
984 
985 	return netlink_sendskb(sk, skb);
986 }
987 EXPORT_SYMBOL(netlink_unicast);
988 
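/* Return true if any socket is currently subscribed to @group on the
 * protocol served by kernel socket @sk.  The lookup is done under RCU,
 * so the answer may already be stale when the caller uses it.
 */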
989 int netlink_has_listeners(struct sock *sk, unsigned int group)
990 {
991 	int res = 0;
992 	struct listeners *listeners;
993 
994 	BUG_ON(!netlink_is_kernel(sk));
995 
996 	rcu_read_lock();
997 	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
998 
999 	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
1000 		res = test_bit(group - 1, listeners->masks);
1001 
1002 	rcu_read_unlock();
1003 
1004 	return res;
1005 }
1006 EXPORT_SYMBOL_GPL(netlink_has_listeners);
1007 
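/* Try to queue a broadcast skb on one subscriber.  Returns -1 if the
 * socket is congested or over its receive budget; otherwise queues the
 * skb and returns whether the queue is now more than half full, which
 * the caller uses as a congestion hint.
 */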
1008 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1009 {
1010 	struct netlink_sock *nlk = nlk_sk(sk);
1011 
1012 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
1013 	    !test_bit(0, &nlk->state)) {
1014 		skb_set_owner_r(skb, sk);
1015 		__netlink_sendskb(sk, skb);
1016 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1017 	}
1018 	return -1;
1019 }
1020 
1021 struct netlink_broadcast_data {
1022 	struct sock *exclude_sk;
1023 	struct net *net;
1024 	u32 portid;
1025 	u32 group;
1026 	int failure;
1027 	int delivery_failure;
1028 	int congested;
1029 	int delivered;
1030 	gfp_t allocation;
1031 	struct sk_buff *skb, *skb2;
1032 	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1033 	void *tx_data;
1034 };
1035 
1036 static int do_one_broadcast(struct sock *sk,
1037 				   struct netlink_broadcast_data *p)
1038 {
1039 	struct netlink_sock *nlk = nlk_sk(sk);
1040 	int val;
1041 
1042 	if (p->exclude_sk == sk)
1043 		goto out;
1044 
1045 	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1046 	    !test_bit(p->group - 1, nlk->groups))
1047 		goto out;
1048 
1049 	if (!net_eq(sock_net(sk), p->net))
1050 		goto out;
1051 
1052 	if (p->failure) {
1053 		netlink_overrun(sk);
1054 		goto out;
1055 	}
1056 
1057 	sock_hold(sk);
1058 	if (p->skb2 == NULL) {
1059 		if (skb_shared(p->skb)) {
1060 			p->skb2 = skb_clone(p->skb, p->allocation);
1061 		} else {
1062 			p->skb2 = skb_get(p->skb);
1063 			/*
1064 			 * skb ownership may have been set when
1065 			 * delivered to a previous socket.
1066 			 */
1067 			skb_orphan(p->skb2);
1068 		}
1069 	}
1070 	if (p->skb2 == NULL) {
1071 		netlink_overrun(sk);
1072 		/* Clone failed. Notify ALL listeners. */
1073 		p->failure = 1;
1074 		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1075 			p->delivery_failure = 1;
1076 	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1077 		kfree_skb(p->skb2);
1078 		p->skb2 = NULL;
1079 	} else if (sk_filter(sk, p->skb2)) {
1080 		kfree_skb(p->skb2);
1081 		p->skb2 = NULL;
1082 	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1083 		netlink_overrun(sk);
1084 		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1085 			p->delivery_failure = 1;
1086 	} else {
1087 		p->congested |= val;
1088 		p->delivered = 1;
1089 		p->skb2 = NULL;
1090 	}
1091 	sock_put(sk);
1092 
1093 out:
1094 	return 0;
1095 }
1096 
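/**
 * netlink_broadcast_filtered - deliver a message to all subscribers of a group
 * @ssk: sending (kernel) socket; excluded from delivery
 * @skb: message to send; consumed regardless of the outcome
 * @portid: portid of a socket to skip, or 0
 * @group: multicast group to deliver to
 * @allocation: GFP mask used for per-receiver copies
 * @filter: optional callback; a non-zero return skips that receiver
 * @filter_data: opaque argument passed to @filter
 *
 * Returns 0 if at least one subscriber received the message, -ESRCH if
 * nobody did, or -ENOBUFS if delivery failed to a receiver that
 * requested error reporting.
 */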
1097 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
1098 	u32 group, gfp_t allocation,
1099 	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1100 	void *filter_data)
1101 {
1102 	struct net *net = sock_net(ssk);
1103 	struct netlink_broadcast_data info;
1104 	struct hlist_node *node;
1105 	struct sock *sk;
1106 
1107 	skb = netlink_trim(skb, allocation);
1108 
1109 	info.exclude_sk = ssk;
1110 	info.net = net;
1111 	info.portid = portid;
1112 	info.group = group;
1113 	info.failure = 0;
1114 	info.delivery_failure = 0;
1115 	info.congested = 0;
1116 	info.delivered = 0;
1117 	info.allocation = allocation;
1118 	info.skb = skb;
1119 	info.skb2 = NULL;
1120 	info.tx_filter = filter;
1121 	info.tx_data = filter_data;
1122 
1123 	/* While we sleep in clone, do not allow the socket list to change */
1124 
1125 	netlink_lock_table();
1126 
1127 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1128 		do_one_broadcast(sk, &info);
1129 
1130 	consume_skb(skb);
1131 
1132 	netlink_unlock_table();
1133 
1134 	if (info.delivery_failure) {
1135 		kfree_skb(info.skb2);
1136 		return -ENOBUFS;
1137 	}
1138 	consume_skb(info.skb2);
1139 
1140 	if (info.delivered) {
1141 		if (info.congested && (allocation & __GFP_WAIT))
1142 			yield();
1143 		return 0;
1144 	}
1145 	return -ESRCH;
1146 }
1147 EXPORT_SYMBOL(netlink_broadcast_filtered);
1148 
1149 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
1150 		      u32 group, gfp_t allocation)
1151 {
1152 	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
1153 		NULL, NULL);
1154 }
1155 EXPORT_SYMBOL(netlink_broadcast);
1156 
1157 struct netlink_set_err_data {
1158 	struct sock *exclude_sk;
1159 	u32 portid;
1160 	u32 group;
1161 	int code;
1162 };
1163 
1164 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1165 {
1166 	struct netlink_sock *nlk = nlk_sk(sk);
1167 	int ret = 0;
1168 
1169 	if (sk == p->exclude_sk)
1170 		goto out;
1171 
1172 	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
1173 		goto out;
1174 
1175 	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1176 	    !test_bit(p->group - 1, nlk->groups))
1177 		goto out;
1178 
1179 	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
1180 		ret = 1;
1181 		goto out;
1182 	}
1183 
1184 	sk->sk_err = p->code;
1185 	sk->sk_error_report(sk);
1186 out:
1187 	return ret;
1188 }
1189 
1190 /**
1191  * netlink_set_err - report error to broadcast listeners
1192  * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
1193  * @portid: the PORTID of a process that we want to skip (if any)
1194  * @group: the broadcast group that will notice the error
1195  * @code: error code, must be negative (as usual in kernelspace)
1196  *
1197  * This function returns the number of broadcast listeners that have set the
1198  * NETLINK_RECV_NO_ENOBUFS socket option.
1199  */
1200 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1201 {
1202 	struct netlink_set_err_data info;
1203 	struct hlist_node *node;
1204 	struct sock *sk;
1205 	int ret = 0;
1206 
1207 	info.exclude_sk = ssk;
1208 	info.portid = portid;
1209 	info.group = group;
1210 	/* sk->sk_err wants a positive error value */
1211 	info.code = -code;
1212 
1213 	read_lock(&nl_table_lock);
1214 
1215 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1216 		ret += do_one_set_err(sk, &info);
1217 
1218 	read_unlock(&nl_table_lock);
1219 	return ret;
1220 }
1221 EXPORT_SYMBOL(netlink_set_err);
1222 
1223 /* must be called with netlink table grabbed */
1224 static void netlink_update_socket_mc(struct netlink_sock *nlk,
1225 				     unsigned int group,
1226 				     int is_new)
1227 {
1228 	int old, new = !!is_new, subscriptions;
1229 
1230 	old = test_bit(group - 1, nlk->groups);
1231 	subscriptions = nlk->subscriptions - old + new;
1232 	if (new)
1233 		__set_bit(group - 1, nlk->groups);
1234 	else
1235 		__clear_bit(group - 1, nlk->groups);
1236 	netlink_update_subscriptions(&nlk->sk, subscriptions);
1237 	netlink_update_listeners(&nlk->sk);
1238 }
1239 
1240 static int netlink_setsockopt(struct socket *sock, int level, int optname,
1241 			      char __user *optval, unsigned int optlen)
1242 {
1243 	struct sock *sk = sock->sk;
1244 	struct netlink_sock *nlk = nlk_sk(sk);
1245 	unsigned int val = 0;
1246 	int err;
1247 
1248 	if (level != SOL_NETLINK)
1249 		return -ENOPROTOOPT;
1250 
1251 	if (optlen >= sizeof(int) &&
1252 	    get_user(val, (unsigned int __user *)optval))
1253 		return -EFAULT;
1254 
1255 	switch (optname) {
1256 	case NETLINK_PKTINFO:
1257 		if (val)
1258 			nlk->flags |= NETLINK_RECV_PKTINFO;
1259 		else
1260 			nlk->flags &= ~NETLINK_RECV_PKTINFO;
1261 		err = 0;
1262 		break;
1263 	case NETLINK_ADD_MEMBERSHIP:
1264 	case NETLINK_DROP_MEMBERSHIP: {
1265 		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
1266 			return -EPERM;
1267 		err = netlink_realloc_groups(sk);
1268 		if (err)
1269 			return err;
1270 		if (!val || val - 1 >= nlk->ngroups)
1271 			return -EINVAL;
1272 		netlink_table_grab();
1273 		netlink_update_socket_mc(nlk, val,
1274 					 optname == NETLINK_ADD_MEMBERSHIP);
1275 		netlink_table_ungrab();
1276 
1277 		if (nlk->netlink_bind)
1278 			nlk->netlink_bind(val);
1279 
1280 		err = 0;
1281 		break;
1282 	}
1283 	case NETLINK_BROADCAST_ERROR:
1284 		if (val)
1285 			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
1286 		else
1287 			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
1288 		err = 0;
1289 		break;
1290 	case NETLINK_NO_ENOBUFS:
1291 		if (val) {
1292 			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
1293 			clear_bit(0, &nlk->state);
1294 			wake_up_interruptible(&nlk->wait);
1295 		} else {
1296 			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
1297 		}
1298 		err = 0;
1299 		break;
1300 	default:
1301 		err = -ENOPROTOOPT;
1302 	}
1303 	return err;
1304 }
1305 
1306 static int netlink_getsockopt(struct socket *sock, int level, int optname,
1307 			      char __user *optval, int __user *optlen)
1308 {
1309 	struct sock *sk = sock->sk;
1310 	struct netlink_sock *nlk = nlk_sk(sk);
1311 	int len, val, err;
1312 
1313 	if (level != SOL_NETLINK)
1314 		return -ENOPROTOOPT;
1315 
1316 	if (get_user(len, optlen))
1317 		return -EFAULT;
1318 	if (len < 0)
1319 		return -EINVAL;
1320 
1321 	switch (optname) {
1322 	case NETLINK_PKTINFO:
1323 		if (len < sizeof(int))
1324 			return -EINVAL;
1325 		len = sizeof(int);
1326 		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
1327 		if (put_user(len, optlen) ||
1328 		    put_user(val, optval))
1329 			return -EFAULT;
1330 		err = 0;
1331 		break;
1332 	case NETLINK_BROADCAST_ERROR:
1333 		if (len < sizeof(int))
1334 			return -EINVAL;
1335 		len = sizeof(int);
1336 		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
1337 		if (put_user(len, optlen) ||
1338 		    put_user(val, optval))
1339 			return -EFAULT;
1340 		err = 0;
1341 		break;
1342 	case NETLINK_NO_ENOBUFS:
1343 		if (len < sizeof(int))
1344 			return -EINVAL;
1345 		len = sizeof(int);
1346 		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
1347 		if (put_user(len, optlen) ||
1348 		    put_user(val, optval))
1349 			return -EFAULT;
1350 		err = 0;
1351 		break;
1352 	default:
1353 		err = -ENOPROTOOPT;
1354 	}
1355 	return err;
1356 }
1357 
1358 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
1359 {
1360 	struct nl_pktinfo info;
1361 
1362 	info.group = NETLINK_CB(skb).dst_group;
1363 	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
1364 }
1365 
1366 static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1367 			   struct msghdr *msg, size_t len)
1368 {
1369 	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1370 	struct sock *sk = sock->sk;
1371 	struct netlink_sock *nlk = nlk_sk(sk);
1372 	struct sockaddr_nl *addr = msg->msg_name;
1373 	u32 dst_portid;
1374 	u32 dst_group;
1375 	struct sk_buff *skb;
1376 	int err;
1377 	struct scm_cookie scm;
1378 
1379 	if (msg->msg_flags&MSG_OOB)
1380 		return -EOPNOTSUPP;
1381 
1382 	if (NULL == siocb->scm)
1383 		siocb->scm = &scm;
1384 
1385 	err = scm_send(sock, msg, siocb->scm, true);
1386 	if (err < 0)
1387 		return err;
1388 
1389 	if (msg->msg_namelen) {
1390 		err = -EINVAL;
		/* a name shorter than struct sockaddr_nl cannot be valid */
		if (msg->msg_namelen < sizeof(struct sockaddr_nl))
			goto out;
1391 		if (addr->nl_family != AF_NETLINK)
1392 			goto out;
1393 		dst_portid = addr->nl_pid;
1394 		dst_group = ffs(addr->nl_groups);
1395 		err =  -EPERM;
1396 		if ((dst_group || dst_portid) &&
1397 		    !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
1398 			goto out;
1399 	} else {
1400 		dst_portid = nlk->dst_portid;
1401 		dst_group = nlk->dst_group;
1402 	}
1403 
1404 	if (!nlk->portid) {
1405 		err = netlink_autobind(sock);
1406 		if (err)
1407 			goto out;
1408 	}
1409 
1410 	err = -EMSGSIZE;
1411 	if (len > sk->sk_sndbuf - 32)
1412 		goto out;
1413 	err = -ENOBUFS;
1414 	skb = alloc_skb(len, GFP_KERNEL);
1415 	if (skb == NULL)
1416 		goto out;
1417 
1418 	NETLINK_CB(skb).portid	= nlk->portid;
1419 	NETLINK_CB(skb).dst_group = dst_group;
1420 	NETLINK_CB(skb).creds	= siocb->scm->creds;
1421 
1422 	err = -EFAULT;
1423 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
1424 		kfree_skb(skb);
1425 		goto out;
1426 	}
1427 
1428 	err = security_netlink_send(sk, skb);
1429 	if (err) {
1430 		kfree_skb(skb);
1431 		goto out;
1432 	}
1433 
1434 	if (dst_group) {
1435 		atomic_inc(&skb->users);
1436 		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
1437 	}
1438 	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
1439 
1440 out:
1441 	scm_destroy(siocb->scm);
1442 	return err;
1443 }
1444 
1445 static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1446 			   struct msghdr *msg, size_t len,
1447 			   int flags)
1448 {
1449 	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1450 	struct scm_cookie scm;
1451 	struct sock *sk = sock->sk;
1452 	struct netlink_sock *nlk = nlk_sk(sk);
1453 	int noblock = flags&MSG_DONTWAIT;
1454 	size_t copied;
1455 	struct sk_buff *skb, *data_skb;
1456 	int err, ret;
1457 
1458 	if (flags&MSG_OOB)
1459 		return -EOPNOTSUPP;
1460 
1461 	copied = 0;
1462 
1463 	skb = skb_recv_datagram(sk, flags, noblock, &err);
1464 	if (skb == NULL)
1465 		goto out;
1466 
1467 	data_skb = skb;
1468 
1469 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
1470 	if (unlikely(skb_shinfo(skb)->frag_list)) {
1471 		/*
1472 		 * If this skb has a frag_list, it means we have to use the
1473 		 * frag_list skb's data for compat tasks and the regular skb's
1474 		 * data for normal (non-compat) tasks.
1475 		 *
1476 		 * If we need to send the compat skb, assign it to the
1477 		 * 'data_skb' variable so that it will be used below for data
1478 		 * copying. We keep 'skb' for everything else, including
1479 		 * freeing both later.
1480 		 */
1481 		if (flags & MSG_CMSG_COMPAT)
1482 			data_skb = skb_shinfo(skb)->frag_list;
1483 	}
1484 #endif
1485 
1486 	msg->msg_namelen = 0;
1487 
1488 	copied = data_skb->len;
1489 	if (len < copied) {
1490 		msg->msg_flags |= MSG_TRUNC;
1491 		copied = len;
1492 	}
1493 
1494 	skb_reset_transport_header(data_skb);
1495 	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
1496 
1497 	if (msg->msg_name) {
1498 		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
1499 		addr->nl_family = AF_NETLINK;
1500 		addr->nl_pad    = 0;
1501 		addr->nl_pid	= NETLINK_CB(skb).portid;
1502 		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
1503 		msg->msg_namelen = sizeof(*addr);
1504 	}
1505 
1506 	if (nlk->flags & NETLINK_RECV_PKTINFO)
1507 		netlink_cmsg_recv_pktinfo(msg, skb);
1508 
1509 	if (NULL == siocb->scm) {
1510 		memset(&scm, 0, sizeof(scm));
1511 		siocb->scm = &scm;
1512 	}
1513 	siocb->scm->creds = *NETLINK_CREDS(skb);
1514 	if (flags & MSG_TRUNC)
1515 		copied = data_skb->len;
1516 
1517 	skb_free_datagram(sk, skb);
1518 
1519 	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
1520 		ret = netlink_dump(sk);
1521 		if (ret) {
1522 			sk->sk_err = ret;
1523 			sk->sk_error_report(sk);
1524 		}
1525 	}
1526 
1527 	scm_recv(sock, msg, siocb->scm, flags);
1528 out:
1529 	netlink_rcv_wake(sk);
1530 	return err ? : copied;
1531 }
1532 
1533 static void netlink_data_ready(struct sock *sk, int len)
1534 {
1535 	BUG();
1536 }
1537 
1538 /*
1539  *	We export these functions to other modules. They provide a
1540  *	complete set of kernel non-blocking support for message
1541  *	queueing.
1542  */
1543 
1544 struct sock *
1545 __netlink_kernel_create(struct net *net, int unit, struct module *module,
1546 			struct netlink_kernel_cfg *cfg)
1547 {
1548 	struct socket *sock;
1549 	struct sock *sk;
1550 	struct netlink_sock *nlk;
1551 	struct listeners *listeners = NULL;
1552 	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
1553 	unsigned int groups;
1554 
1555 	BUG_ON(!nl_table);
1556 
1557 	if (unit < 0 || unit >= MAX_LINKS)
1558 		return NULL;
1559 
1560 	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
1561 		return NULL;
1562 
1563 	/*
1564 	 * We only need a reference on the net from sk, without calling
1565 	 * get_net() on it. Besides, we cannot get and then put the net here.
1566 	 * So we create the socket inside init_net and then move it to net.
1567 	 */
1568 
1569 	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
1570 		goto out_sock_release_nosk;
1571 
1572 	sk = sock->sk;
1573 	sk_change_net(sk, net);
1574 
1575 	if (!cfg || cfg->groups < 32)
1576 		groups = 32;
1577 	else
1578 		groups = cfg->groups;
1579 
1580 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
1581 	if (!listeners)
1582 		goto out_sock_release;
1583 
1584 	sk->sk_data_ready = netlink_data_ready;
1585 	if (cfg && cfg->input)
1586 		nlk_sk(sk)->netlink_rcv = cfg->input;
1587 
1588 	if (netlink_insert(sk, net, 0))
1589 		goto out_sock_release;
1590 
1591 	nlk = nlk_sk(sk);
1592 	nlk->flags |= NETLINK_KERNEL_SOCKET;
1593 
1594 	netlink_table_grab();
1595 	if (!nl_table[unit].registered) {
1596 		nl_table[unit].groups = groups;
1597 		rcu_assign_pointer(nl_table[unit].listeners, listeners);
1598 		nl_table[unit].cb_mutex = cb_mutex;
1599 		nl_table[unit].module = module;
1600 		if (cfg) {
1601 			nl_table[unit].bind = cfg->bind;
1602 			nl_table[unit].flags = cfg->flags;
1603 		}
1604 		nl_table[unit].registered = 1;
1605 	} else {
1606 		kfree(listeners);
1607 		nl_table[unit].registered++;
1608 	}
1609 	netlink_table_ungrab();
1610 	return sk;
1611 
1612 out_sock_release:
1613 	kfree(listeners);
1614 	netlink_kernel_release(sk);
1615 	return NULL;
1616 
1617 out_sock_release_nosk:
1618 	sock_release(sock);
1619 	return NULL;
1620 }
1621 EXPORT_SYMBOL(__netlink_kernel_create);
1622 
1623 void
1624 netlink_kernel_release(struct sock *sk)
1625 {
1626 	sk_release_kernel(sk);
1627 }
1628 EXPORT_SYMBOL(netlink_kernel_release);
1629 
1630 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
1631 {
1632 	struct listeners *new, *old;
1633 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
1634 
1635 	if (groups < 32)
1636 		groups = 32;
1637 
1638 	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
1639 		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
1640 		if (!new)
1641 			return -ENOMEM;
1642 		old = nl_deref_protected(tbl->listeners);
1643 		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
1644 		rcu_assign_pointer(tbl->listeners, new);
1645 
1646 		kfree_rcu(old, rcu);
1647 	}
1648 	tbl->groups = groups;
1649 
1650 	return 0;
1651 }
1652 
1653 /**
1654  * netlink_change_ngroups - change number of multicast groups
1655  *
1656  * This changes the number of multicast groups that are available
1657  * on a certain netlink family. Note that it is not possible to
1658  * change the number of groups to below 32. Also note that it does
1659  * not implicitly call netlink_clear_multicast_users() when the
1660  * number of groups is reduced.
1661  *
1662  * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
1663  * @groups: The new number of groups.
1664  */
1665 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
1666 {
1667 	int err;
1668 
1669 	netlink_table_grab();
1670 	err = __netlink_change_ngroups(sk, groups);
1671 	netlink_table_ungrab();
1672 
1673 	return err;
1674 }
1675 
1676 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
1677 {
1678 	struct sock *sk;
1679 	struct hlist_node *node;
1680 	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
1681 
1682 	sk_for_each_bound(sk, node, &tbl->mc_list)
1683 		netlink_update_socket_mc(nlk_sk(sk), group, 0);
1684 }
1685 
1686 /**
1687  * netlink_clear_multicast_users - kick all listeners off a multicast group
1688  *
1689  * This function removes all listeners from the given group.
1690  * @ksk: The kernel netlink socket, as returned by
1691  *	netlink_kernel_create().
1692  * @group: The multicast group to clear.
1693  */
1694 void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
1695 {
1696 	netlink_table_grab();
1697 	__netlink_clear_multicast_users(ksk, group);
1698 	netlink_table_ungrab();
1699 }
1700 
1701 struct nlmsghdr *
1702 __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
1703 {
1704 	struct nlmsghdr *nlh;
1705 	int size = NLMSG_LENGTH(len);
1706 
1707 	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
1708 	nlh->nlmsg_type = type;
1709 	nlh->nlmsg_len = size;
1710 	nlh->nlmsg_flags = flags;
1711 	nlh->nlmsg_pid = portid;
1712 	nlh->nlmsg_seq = seq;
1713 	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
1714 		memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
1715 	return nlh;
1716 }
1717 EXPORT_SYMBOL(__nlmsg_put);
1718 
1719 /*
1720  * It looks a bit ugly.
1721  * It would be better to create a kernel thread.
1722  */
1723 
1724 static int netlink_dump(struct sock *sk)
1725 {
1726 	struct netlink_sock *nlk = nlk_sk(sk);
1727 	struct netlink_callback *cb;
1728 	struct sk_buff *skb = NULL;
1729 	struct nlmsghdr *nlh;
1730 	int len, err = -ENOBUFS;
1731 	int alloc_size;
1732 
1733 	mutex_lock(nlk->cb_mutex);
1734 
1735 	cb = nlk->cb;
1736 	if (cb == NULL) {
1737 		err = -EINVAL;
1738 		goto errout_skb;
1739 	}
1740 
1741 	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
1742 
1743 	skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
1744 	if (!skb)
1745 		goto errout_skb;
1746 
1747 	len = cb->dump(skb, cb);
1748 
1749 	if (len > 0) {
1750 		mutex_unlock(nlk->cb_mutex);
1751 
1752 		if (sk_filter(sk, skb))
1753 			kfree_skb(skb);
1754 		else
1755 			__netlink_sendskb(sk, skb);
1756 		return 0;
1757 	}
1758 
1759 	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
1760 	if (!nlh)
1761 		goto errout_skb;
1762 
1763 	nl_dump_check_consistent(cb, nlh);
1764 
1765 	memcpy(nlmsg_data(nlh), &len, sizeof(len));
1766 
1767 	if (sk_filter(sk, skb))
1768 		kfree_skb(skb);
1769 	else
1770 		__netlink_sendskb(sk, skb);
1771 
1772 	if (cb->done)
1773 		cb->done(cb);
1774 	nlk->cb = NULL;
1775 	mutex_unlock(nlk->cb_mutex);
1776 
1777 	module_put(cb->module);
1778 	netlink_consume_callback(cb);
1779 	return 0;
1780 
1781 errout_skb:
1782 	mutex_unlock(nlk->cb_mutex);
1783 	kfree_skb(skb);
1784 	return err;
1785 }
1786 
1787 int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1788 			 const struct nlmsghdr *nlh,
1789 			 struct netlink_dump_control *control)
1790 {
1791 	struct netlink_callback *cb;
1792 	struct sock *sk;
1793 	struct netlink_sock *nlk;
1794 	int ret;
1795 
1796 	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1797 	if (cb == NULL)
1798 		return -ENOBUFS;
1799 
1800 	cb->dump = control->dump;
1801 	cb->done = control->done;
1802 	cb->nlh = nlh;
1803 	cb->data = control->data;
1804 	cb->module = control->module;
1805 	cb->min_dump_alloc = control->min_dump_alloc;
1806 	atomic_inc(&skb->users);
1807 	cb->skb = skb;
1808 
1809 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
1810 	if (sk == NULL) {
1811 		netlink_destroy_callback(cb);
1812 		return -ECONNREFUSED;
1813 	}
1814 	nlk = nlk_sk(sk);
1815 
1816 	mutex_lock(nlk->cb_mutex);
1817 	/* A dump is in progress... */
1818 	if (nlk->cb) {
1819 		mutex_unlock(nlk->cb_mutex);
1820 		netlink_destroy_callback(cb);
1821 		ret = -EBUSY;
1822 		goto out;
1823 	}
1824 	/* take a reference on the module that cb->dump belongs to */
1825 	if (!try_module_get(cb->module)) {
1826 		mutex_unlock(nlk->cb_mutex);
1827 		netlink_destroy_callback(cb);
1828 		ret = -EPROTONOSUPPORT;
1829 		goto out;
1830 	}
1831 
1832 	nlk->cb = cb;
1833 	mutex_unlock(nlk->cb_mutex);
1834 
1835 	ret = netlink_dump(sk);
1836 out:
1837 	sock_put(sk);
1838 
1839 	if (ret)
1840 		return ret;
1841 
1842 	/* We successfully started a dump; by returning -EINTR we
1843 	 * signal that no ACK should be sent, even if one was requested.
1844 	 */
1845 	return -EINTR;
1846 }
1847 EXPORT_SYMBOL(__netlink_dump_start);
1848 
1849 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
1850 {
1851 	struct sk_buff *skb;
1852 	struct nlmsghdr *rep;
1853 	struct nlmsgerr *errmsg;
1854 	size_t payload = sizeof(*errmsg);
1855 
1856 	/* error messages get the original request appended */
1857 	if (err)
1858 		payload += nlmsg_len(nlh);
1859 
1860 	skb = nlmsg_new(payload, GFP_KERNEL);
1861 	if (!skb) {
1862 		struct sock *sk;
1863 
1864 		sk = netlink_lookup(sock_net(in_skb->sk),
1865 				    in_skb->sk->sk_protocol,
1866 				    NETLINK_CB(in_skb).portid);
1867 		if (sk) {
1868 			sk->sk_err = ENOBUFS;
1869 			sk->sk_error_report(sk);
1870 			sock_put(sk);
1871 		}
1872 		return;
1873 	}
1874 
1875 	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
1876 			  NLMSG_ERROR, payload, 0);
1877 	errmsg = nlmsg_data(rep);
1878 	errmsg->error = err;
1879 	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
1880 	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
1881 }
1882 EXPORT_SYMBOL(netlink_ack);
1883 
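/* Walk all complete netlink messages in @skb, handing each request to
 * @cb.  Non-request and control messages are skipped.  An ACK is sent
 * when a message asks for one or when @cb fails, except when @cb
 * returns -EINTR to signal that a dump has been started.
 */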
1884 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
1885 						     struct nlmsghdr *))
1886 {
1887 	struct nlmsghdr *nlh;
1888 	int err;
1889 
1890 	while (skb->len >= nlmsg_total_size(0)) {
1891 		int msglen;
1892 
1893 		nlh = nlmsg_hdr(skb);
1894 		err = 0;
1895 
1896 		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
1897 			return 0;
1898 
1899 		/* Only requests are handled by the kernel */
1900 		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
1901 			goto ack;
1902 
1903 		/* Skip control messages */
1904 		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
1905 			goto ack;
1906 
1907 		err = cb(skb, nlh);
1908 		if (err == -EINTR)
1909 			goto skip;
1910 
1911 ack:
1912 		if (nlh->nlmsg_flags & NLM_F_ACK || err)
1913 			netlink_ack(skb, nlh, err);
1914 
1915 skip:
1916 		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
1917 		if (msglen > skb->len)
1918 			msglen = skb->len;
1919 		skb_pull(skb, msglen);
1920 	}
1921 
1922 	return 0;
1923 }
1924 EXPORT_SYMBOL(netlink_rcv_skb);
1925 
1926 /**
1927  * nlmsg_notify - send a notification netlink message
1928  * @sk: netlink socket to use
1929  * @skb: notification message
1930  * @portid: destination netlink portid for reports or 0
1931  * @group: destination multicast group or 0
1932  * @report: 1 to report back, 0 to disable
1933  * @flags: allocation flags
1934  */
1935 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
1936 		 unsigned int group, int report, gfp_t flags)
1937 {
1938 	int err = 0;
1939 
1940 	if (group) {
1941 		int exclude_portid = 0;
1942 
1943 		if (report) {
1944 			atomic_inc(&skb->users);
1945 			exclude_portid = portid;
1946 		}
1947 
1948 		/* errors reported via destination sk->sk_err, but propagate
1949 		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
1950 		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
1951 	}
1952 
1953 	if (report) {
1954 		int err2;
1955 
1956 		err2 = nlmsg_unicast(sk, skb, portid);
1957 		if (!err || err == -ESRCH)
1958 			err = err2;
1959 	}
1960 
1961 	return err;
1962 }
1963 EXPORT_SYMBOL(nlmsg_notify);
1964 
1965 #ifdef CONFIG_PROC_FS
1966 struct nl_seq_iter {
1967 	struct seq_net_private p;
1968 	int link;
1969 	int hash_idx;
1970 };
1971 
1972 static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
1973 {
1974 	struct nl_seq_iter *iter = seq->private;
1975 	int i, j;
1976 	struct sock *s;
1977 	struct hlist_node *node;
1978 	loff_t off = 0;
1979 
1980 	for (i = 0; i < MAX_LINKS; i++) {
1981 		struct nl_portid_hash *hash = &nl_table[i].hash;
1982 
1983 		for (j = 0; j <= hash->mask; j++) {
1984 			sk_for_each(s, node, &hash->table[j]) {
1985 				if (sock_net(s) != seq_file_net(seq))
1986 					continue;
1987 				if (off == pos) {
1988 					iter->link = i;
1989 					iter->hash_idx = j;
1990 					return s;
1991 				}
1992 				++off;
1993 			}
1994 		}
1995 	}
1996 	return NULL;
1997 }
1998 
1999 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
2000 	__acquires(nl_table_lock)
2001 {
2002 	read_lock(&nl_table_lock);
2003 	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2004 }
2005 
2006 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2007 {
2008 	struct sock *s;
2009 	struct nl_seq_iter *iter;
2010 	int i, j;
2011 
2012 	++*pos;
2013 
2014 	if (v == SEQ_START_TOKEN)
2015 		return netlink_seq_socket_idx(seq, 0);
2016 
2017 	iter = seq->private;
2018 	s = v;
2019 	do {
2020 		s = sk_next(s);
2021 	} while (s && sock_net(s) != seq_file_net(seq));
2022 	if (s)
2023 		return s;
2024 
2025 	i = iter->link;
2026 	j = iter->hash_idx + 1;
2027 
2028 	do {
2029 		struct nl_portid_hash *hash = &nl_table[i].hash;
2030 
2031 		for (; j <= hash->mask; j++) {
2032 			s = sk_head(&hash->table[j]);
2033 			while (s && sock_net(s) != seq_file_net(seq))
2034 				s = sk_next(s);
2035 			if (s) {
2036 				iter->link = i;
2037 				iter->hash_idx = j;
2038 				return s;
2039 			}
2040 		}
2041 
2042 		j = 0;
2043 	} while (++i < MAX_LINKS);
2044 
2045 	return NULL;
2046 }
2047 
2048 static void netlink_seq_stop(struct seq_file *seq, void *v)
2049 	__releases(nl_table_lock)
2050 {
2051 	read_unlock(&nl_table_lock);
2052 }
2053 
2054 
2055 static int netlink_seq_show(struct seq_file *seq, void *v)
2056 {
2057 	if (v == SEQ_START_TOKEN) {
2058 		seq_puts(seq,
2059 			 "sk       Eth Pid    Groups   "
2060 			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
2061 	} else {
2062 		struct sock *s = v;
2063 		struct netlink_sock *nlk = nlk_sk(s);
2064 
2065 		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
2066 			   s,
2067 			   s->sk_protocol,
2068 			   nlk->portid,
2069 			   nlk->groups ? (u32)nlk->groups[0] : 0,
2070 			   sk_rmem_alloc_get(s),
2071 			   sk_wmem_alloc_get(s),
2072 			   nlk->cb,
2073 			   atomic_read(&s->sk_refcnt),
2074 			   atomic_read(&s->sk_drops),
2075 			   sock_i_ino(s)
2076 			);
2077 
2078 	}
2079 	return 0;
2080 }
2081 
2082 static const struct seq_operations netlink_seq_ops = {
2083 	.start  = netlink_seq_start,
2084 	.next   = netlink_seq_next,
2085 	.stop   = netlink_seq_stop,
2086 	.show   = netlink_seq_show,
2087 };
2088 
2089 
2090 static int netlink_seq_open(struct inode *inode, struct file *file)
2091 {
2092 	return seq_open_net(inode, file, &netlink_seq_ops,
2093 				sizeof(struct nl_seq_iter));
2094 }
2095 
2096 static const struct file_operations netlink_seq_fops = {
2097 	.owner		= THIS_MODULE,
2098 	.open		= netlink_seq_open,
2099 	.read		= seq_read,
2100 	.llseek		= seq_lseek,
2101 	.release	= seq_release_net,
2102 };
2103 
2104 #endif
2105 
2106 int netlink_register_notifier(struct notifier_block *nb)
2107 {
2108 	return atomic_notifier_chain_register(&netlink_chain, nb);
2109 }
2110 EXPORT_SYMBOL(netlink_register_notifier);
2111 
2112 int netlink_unregister_notifier(struct notifier_block *nb)
2113 {
2114 	return atomic_notifier_chain_unregister(&netlink_chain, nb);
2115 }
2116 EXPORT_SYMBOL(netlink_unregister_notifier);
2117 
2118 static const struct proto_ops netlink_ops = {
2119 	.family =	PF_NETLINK,
2120 	.owner =	THIS_MODULE,
2121 	.release =	netlink_release,
2122 	.bind =		netlink_bind,
2123 	.connect =	netlink_connect,
2124 	.socketpair =	sock_no_socketpair,
2125 	.accept =	sock_no_accept,
2126 	.getname =	netlink_getname,
2127 	.poll =		datagram_poll,
2128 	.ioctl =	sock_no_ioctl,
2129 	.listen =	sock_no_listen,
2130 	.shutdown =	sock_no_shutdown,
2131 	.setsockopt =	netlink_setsockopt,
2132 	.getsockopt =	netlink_getsockopt,
2133 	.sendmsg =	netlink_sendmsg,
2134 	.recvmsg =	netlink_recvmsg,
2135 	.mmap =		sock_no_mmap,
2136 	.sendpage =	sock_no_sendpage,
2137 };
2138 
2139 static const struct net_proto_family netlink_family_ops = {
2140 	.family = PF_NETLINK,
2141 	.create = netlink_create,
2142 	.owner	= THIS_MODULE,	/* for consistency 8) */
2143 };
2144 
2145 static int __net_init netlink_net_init(struct net *net)
2146 {
2147 #ifdef CONFIG_PROC_FS
2148 	if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops))
2149 		return -ENOMEM;
2150 #endif
2151 	return 0;
2152 }
2153 
2154 static void __net_exit netlink_net_exit(struct net *net)
2155 {
2156 #ifdef CONFIG_PROC_FS
2157 	proc_net_remove(net, "netlink");
2158 #endif
2159 }
2160 
2161 static void __init netlink_add_usersock_entry(void)
2162 {
2163 	struct listeners *listeners;
2164 	int groups = 32;
2165 
2166 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2167 	if (!listeners)
2168 		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
2169 
2170 	netlink_table_grab();
2171 
2172 	nl_table[NETLINK_USERSOCK].groups = groups;
2173 	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
2174 	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2175 	nl_table[NETLINK_USERSOCK].registered = 1;
2176 	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
2177 
2178 	netlink_table_ungrab();
2179 }
2180 
2181 static struct pernet_operations __net_initdata netlink_net_ops = {
2182 	.init = netlink_net_init,
2183 	.exit = netlink_net_exit,
2184 };
2185 
2186 static int __init netlink_proto_init(void)
2187 {
2188 	struct sk_buff *dummy_skb;
2189 	int i;
2190 	unsigned long limit;
2191 	unsigned int order;
2192 	int err = proto_register(&netlink_proto, 0);
2193 
2194 	if (err != 0)
2195 		goto out;
2196 
2197 	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));
2198 
2199 	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
2200 	if (!nl_table)
2201 		goto panic;
2202 
2203 	if (totalram_pages >= (128 * 1024))
2204 		limit = totalram_pages >> (21 - PAGE_SHIFT);
2205 	else
2206 		limit = totalram_pages >> (23 - PAGE_SHIFT);
2207 
2208 	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
2209 	limit = (1UL << order) / sizeof(struct hlist_head);
2210 	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
2211 
2212 	for (i = 0; i < MAX_LINKS; i++) {
2213 		struct nl_portid_hash *hash = &nl_table[i].hash;
2214 
2215 		hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
2216 		if (!hash->table) {
2217 			while (i-- > 0)
2218 				nl_portid_hash_free(nl_table[i].hash.table,
2219 						 1 * sizeof(*hash->table));
2220 			kfree(nl_table);
2221 			goto panic;
2222 		}
2223 		hash->max_shift = order;
2224 		hash->shift = 0;
2225 		hash->mask = 0;
2226 		hash->rehash_time = jiffies;
2227 	}
2228 
2229 	netlink_add_usersock_entry();
2230 
2231 	sock_register(&netlink_family_ops);
2232 	register_pernet_subsys(&netlink_net_ops);
2233 	/* The netlink device handler may be needed early. */
2234 	rtnetlink_init();
2235 out:
2236 	return err;
2237 panic:
2238 	panic("netlink_init: Cannot allocate nl_table\n");
2239 }
2240 
2241 core_initcall(netlink_proto_init);
2242