xref: /openbmc/linux/net/netlink/af_netlink.c (revision a31f2d17b331db970259e875b7223d3aba7e3821)
1 /*
2  * NETLINK      Kernel-user communication protocol.
3  *
4  * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6  *
7  *		This program is free software; you can redistribute it and/or
8  *		modify it under the terms of the GNU General Public License
9  *		as published by the Free Software Foundation; either version
10  *		2 of the License, or (at your option) any later version.
11  *
12  * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
13  *                               added netlink_proto_exit
14  * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
15  * 				 use nlk_sk, as sk->protinfo is on a diet 8)
16  * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
17  * 				 - inc module use count of module that owns
18  * 				   the kernel socket in case userspace opens
19  * 				   socket of same protocol
20  * 				 - remove all module support, since netlink is
21  * 				   mandatory if CONFIG_NET=y these days
22  */
23 
24 #include <linux/module.h>
25 
26 #include <linux/capability.h>
27 #include <linux/kernel.h>
28 #include <linux/init.h>
29 #include <linux/signal.h>
30 #include <linux/sched.h>
31 #include <linux/errno.h>
32 #include <linux/string.h>
33 #include <linux/stat.h>
34 #include <linux/socket.h>
35 #include <linux/un.h>
36 #include <linux/fcntl.h>
37 #include <linux/termios.h>
38 #include <linux/sockios.h>
39 #include <linux/net.h>
40 #include <linux/fs.h>
41 #include <linux/slab.h>
42 #include <asm/uaccess.h>
43 #include <linux/skbuff.h>
44 #include <linux/netdevice.h>
45 #include <linux/rtnetlink.h>
46 #include <linux/proc_fs.h>
47 #include <linux/seq_file.h>
48 #include <linux/notifier.h>
49 #include <linux/security.h>
50 #include <linux/jhash.h>
51 #include <linux/jiffies.h>
52 #include <linux/random.h>
53 #include <linux/bitops.h>
54 #include <linux/mm.h>
55 #include <linux/types.h>
56 #include <linux/audit.h>
57 #include <linux/mutex.h>
58 
59 #include <net/net_namespace.h>
60 #include <net/sock.h>
61 #include <net/scm.h>
62 #include <net/netlink.h>
63 
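/* NLGRPSZ(x) is the number of bytes needed to hold an x-bit group bitmap,
 * rounded up to a whole number of unsigned longs; NLGRPLONGS(x) is the same
 * quantity expressed in longs.
 */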
64 #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
65 #define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
66 
67 struct netlink_sock {
68 	/* struct sock has to be the first member of netlink_sock */
69 	struct sock		sk;
70 	u32			pid;
71 	u32			dst_pid;
72 	u32			dst_group;
73 	u32			flags;
74 	u32			subscriptions;
75 	u32			ngroups;
76 	unsigned long		*groups;
77 	unsigned long		state;
78 	wait_queue_head_t	wait;
79 	struct netlink_callback	*cb;
80 	struct mutex		*cb_mutex;
81 	struct mutex		cb_def_mutex;
82 	void			(*netlink_rcv)(struct sk_buff *skb);
83 	struct module		*module;
84 };
85 
86 struct listeners {
87 	struct rcu_head		rcu;
88 	unsigned long		masks[0];
89 };
90 
91 #define NETLINK_KERNEL_SOCKET	0x1
92 #define NETLINK_RECV_PKTINFO	0x2
93 #define NETLINK_BROADCAST_SEND_ERROR	0x4
94 #define NETLINK_RECV_NO_ENOBUFS	0x8
95 
96 static inline struct netlink_sock *nlk_sk(struct sock *sk)
97 {
98 	return container_of(sk, struct netlink_sock, sk);
99 }
100 
101 static inline int netlink_is_kernel(struct sock *sk)
102 {
103 	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
104 }
105 
106 struct nl_pid_hash {
107 	struct hlist_head	*table;
108 	unsigned long		rehash_time;
109 
110 	unsigned int		mask;
111 	unsigned int		shift;
112 
113 	unsigned int		entries;
114 	unsigned int		max_shift;
115 
116 	u32			rnd;
117 };
118 
119 struct netlink_table {
120 	struct nl_pid_hash	hash;
121 	struct hlist_head	mc_list;
122 	struct listeners __rcu	*listeners;
123 	unsigned int		nl_nonroot;
124 	unsigned int		groups;
125 	struct mutex		*cb_mutex;
126 	struct module		*module;
127 	int			registered;
128 };
129 
130 static struct netlink_table *nl_table;
131 
132 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
133 
134 static int netlink_dump(struct sock *sk);
135 
136 static DEFINE_RWLOCK(nl_table_lock);
137 static atomic_t nl_table_users = ATOMIC_INIT(0);
138 
139 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
140 
141 static inline u32 netlink_group_mask(u32 group)
142 {
143 	return group ? 1 << (group - 1) : 0;
144 }
145 
146 static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
147 {
148 	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
149 }
150 
151 static void netlink_destroy_callback(struct netlink_callback *cb)
152 {
153 	kfree_skb(cb->skb);
154 	kfree(cb);
155 }
156 
157 static void netlink_consume_callback(struct netlink_callback *cb)
158 {
159 	consume_skb(cb->skb);
160 	kfree(cb);
161 }
162 
163 static void netlink_sock_destruct(struct sock *sk)
164 {
165 	struct netlink_sock *nlk = nlk_sk(sk);
166 
167 	if (nlk->cb) {
168 		if (nlk->cb->done)
169 			nlk->cb->done(nlk->cb);
170 		netlink_destroy_callback(nlk->cb);
171 	}
172 
173 	skb_queue_purge(&sk->sk_receive_queue);
174 
175 	if (!sock_flag(sk, SOCK_DEAD)) {
176 		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
177 		return;
178 	}
179 
180 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
181 	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
182 	WARN_ON(nlk_sk(sk)->groups);
183 }
184 
185 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
186  * SMP. Look, when several writers sleep and a reader wakes them up, all but
187  * one immediately hit the write lock and grab all the cpus. Exclusive sleep
188  * solves this, _but_ remember, it adds useless work on UP machines.
189  */
190 
191 void netlink_table_grab(void)
192 	__acquires(nl_table_lock)
193 {
194 	might_sleep();
195 
196 	write_lock_irq(&nl_table_lock);
197 
198 	if (atomic_read(&nl_table_users)) {
199 		DECLARE_WAITQUEUE(wait, current);
200 
201 		add_wait_queue_exclusive(&nl_table_wait, &wait);
202 		for (;;) {
203 			set_current_state(TASK_UNINTERRUPTIBLE);
204 			if (atomic_read(&nl_table_users) == 0)
205 				break;
206 			write_unlock_irq(&nl_table_lock);
207 			schedule();
208 			write_lock_irq(&nl_table_lock);
209 		}
210 
211 		__set_current_state(TASK_RUNNING);
212 		remove_wait_queue(&nl_table_wait, &wait);
213 	}
214 }
215 
216 void netlink_table_ungrab(void)
217 	__releases(nl_table_lock)
218 {
219 	write_unlock_irq(&nl_table_lock);
220 	wake_up(&nl_table_wait);
221 }
222 
223 static inline void
224 netlink_lock_table(void)
225 {
226 	/* read_lock() synchronizes us to netlink_table_grab */
227 
228 	read_lock(&nl_table_lock);
229 	atomic_inc(&nl_table_users);
230 	read_unlock(&nl_table_lock);
231 }
232 
233 static inline void
234 netlink_unlock_table(void)
235 {
236 	if (atomic_dec_and_test(&nl_table_users))
237 		wake_up(&nl_table_wait);
238 }
239 
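/* Find the socket bound to (net, pid) in the per-protocol hash and take a
 * reference on it.  Returns NULL if no such socket exists.
 */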
240 static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid)
241 {
242 	struct nl_pid_hash *hash = &nl_table[protocol].hash;
243 	struct hlist_head *head;
244 	struct sock *sk;
245 	struct hlist_node *node;
246 
247 	read_lock(&nl_table_lock);
248 	head = nl_pid_hashfn(hash, pid);
249 	sk_for_each(sk, node, head) {
250 		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
251 			sock_hold(sk);
252 			goto found;
253 		}
254 	}
255 	sk = NULL;
256 found:
257 	read_unlock(&nl_table_lock);
258 	return sk;
259 }
260 
261 static struct hlist_head *nl_pid_hash_zalloc(size_t size)
262 {
263 	if (size <= PAGE_SIZE)
264 		return kzalloc(size, GFP_ATOMIC);
265 	else
266 		return (struct hlist_head *)
267 			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
268 					 get_order(size));
269 }
270 
271 static void nl_pid_hash_free(struct hlist_head *table, size_t size)
272 {
273 	if (size <= PAGE_SIZE)
274 		kfree(table);
275 	else
276 		free_pages((unsigned long)table, get_order(size));
277 }
278 
279 static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
280 {
281 	unsigned int omask, mask, shift;
282 	size_t osize, size;
283 	struct hlist_head *otable, *table;
284 	int i;
285 
286 	omask = mask = hash->mask;
287 	osize = size = (mask + 1) * sizeof(*table);
288 	shift = hash->shift;
289 
290 	if (grow) {
291 		if (++shift > hash->max_shift)
292 			return 0;
293 		mask = mask * 2 + 1;
294 		size *= 2;
295 	}
296 
297 	table = nl_pid_hash_zalloc(size);
298 	if (!table)
299 		return 0;
300 
301 	otable = hash->table;
302 	hash->table = table;
303 	hash->mask = mask;
304 	hash->shift = shift;
305 	get_random_bytes(&hash->rnd, sizeof(hash->rnd));
306 
307 	for (i = 0; i <= omask; i++) {
308 		struct sock *sk;
309 		struct hlist_node *node, *tmp;
310 
311 		sk_for_each_safe(sk, node, tmp, &otable[i])
312 			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
313 	}
314 
315 	nl_pid_hash_free(otable, osize);
316 	hash->rehash_time = jiffies + 10 * 60 * HZ;
317 	return 1;
318 }
319 
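/* Re-hashing heuristic used on insert: when the average chain length exceeds
 * one, try to grow the table; otherwise, if the chain that was just walked is
 * longer than average, re-hash with a fresh random seed at most once every
 * ten minutes.
 */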
320 static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
321 {
322 	int avg = hash->entries >> hash->shift;
323 
324 	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
325 		return 1;
326 
327 	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
328 		nl_pid_hash_rehash(hash, 0);
329 		return 1;
330 	}
331 
332 	return 0;
333 }
334 
335 static const struct proto_ops netlink_ops;
336 
337 static void
338 netlink_update_listeners(struct sock *sk)
339 {
340 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
341 	struct hlist_node *node;
342 	unsigned long mask;
343 	unsigned int i;
344 
345 	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
346 		mask = 0;
347 		sk_for_each_bound(sk, node, &tbl->mc_list) {
348 			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
349 				mask |= nlk_sk(sk)->groups[i];
350 		}
351 		tbl->listeners->masks[i] = mask;
352 	}
353 	/* this function is only called with the netlink table "grabbed", which
354 	 * makes sure updates are visible before bind or setsockopt return. */
355 }
356 
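/* Bind @sk to @pid in the per-protocol hash.  Returns -EADDRINUSE if the pid
 * is already in use in this namespace, -EBUSY if the socket is already bound
 * to a pid, and 0 on success.
 */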
357 static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
358 {
359 	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
360 	struct hlist_head *head;
361 	int err = -EADDRINUSE;
362 	struct sock *osk;
363 	struct hlist_node *node;
364 	int len;
365 
366 	netlink_table_grab();
367 	head = nl_pid_hashfn(hash, pid);
368 	len = 0;
369 	sk_for_each(osk, node, head) {
370 		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
371 			break;
372 		len++;
373 	}
374 	if (node)
375 		goto err;
376 
377 	err = -EBUSY;
378 	if (nlk_sk(sk)->pid)
379 		goto err;
380 
381 	err = -ENOMEM;
382 	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
383 		goto err;
384 
385 	if (len && nl_pid_hash_dilute(hash, len))
386 		head = nl_pid_hashfn(hash, pid);
387 	hash->entries++;
388 	nlk_sk(sk)->pid = pid;
389 	sk_add_node(sk, head);
390 	err = 0;
391 
392 err:
393 	netlink_table_ungrab();
394 	return err;
395 }
396 
397 static void netlink_remove(struct sock *sk)
398 {
399 	netlink_table_grab();
400 	if (sk_del_node_init(sk))
401 		nl_table[sk->sk_protocol].hash.entries--;
402 	if (nlk_sk(sk)->subscriptions)
403 		__sk_del_bind_node(sk);
404 	netlink_table_ungrab();
405 }
406 
407 static struct proto netlink_proto = {
408 	.name	  = "NETLINK",
409 	.owner	  = THIS_MODULE,
410 	.obj_size = sizeof(struct netlink_sock),
411 };
412 
413 static int __netlink_create(struct net *net, struct socket *sock,
414 			    struct mutex *cb_mutex, int protocol)
415 {
416 	struct sock *sk;
417 	struct netlink_sock *nlk;
418 
419 	sock->ops = &netlink_ops;
420 
421 	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
422 	if (!sk)
423 		return -ENOMEM;
424 
425 	sock_init_data(sock, sk);
426 
427 	nlk = nlk_sk(sk);
428 	if (cb_mutex) {
429 		nlk->cb_mutex = cb_mutex;
430 	} else {
431 		nlk->cb_mutex = &nlk->cb_def_mutex;
432 		mutex_init(nlk->cb_mutex);
433 	}
434 	init_waitqueue_head(&nlk->wait);
435 
436 	sk->sk_destruct = netlink_sock_destruct;
437 	sk->sk_protocol = protocol;
438 	return 0;
439 }
440 
441 static int netlink_create(struct net *net, struct socket *sock, int protocol,
442 			  int kern)
443 {
444 	struct module *module = NULL;
445 	struct mutex *cb_mutex;
446 	struct netlink_sock *nlk;
447 	int err = 0;
448 
449 	sock->state = SS_UNCONNECTED;
450 
451 	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
452 		return -ESOCKTNOSUPPORT;
453 
454 	if (protocol < 0 || protocol >= MAX_LINKS)
455 		return -EPROTONOSUPPORT;
456 
457 	netlink_lock_table();
458 #ifdef CONFIG_MODULES
459 	if (!nl_table[protocol].registered) {
460 		netlink_unlock_table();
461 		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
462 		netlink_lock_table();
463 	}
464 #endif
465 	if (nl_table[protocol].registered &&
466 	    try_module_get(nl_table[protocol].module))
467 		module = nl_table[protocol].module;
468 	else
469 		err = -EPROTONOSUPPORT;
470 	cb_mutex = nl_table[protocol].cb_mutex;
471 	netlink_unlock_table();
472 
473 	if (err < 0)
474 		goto out;
475 
476 	err = __netlink_create(net, sock, cb_mutex, protocol);
477 	if (err < 0)
478 		goto out_module;
479 
480 	local_bh_disable();
481 	sock_prot_inuse_add(net, &netlink_proto, 1);
482 	local_bh_enable();
483 
484 	nlk = nlk_sk(sock->sk);
485 	nlk->module = module;
486 out:
487 	return err;
488 
489 out_module:
490 	module_put(module);
491 	goto out;
492 }
493 
494 static int netlink_release(struct socket *sock)
495 {
496 	struct sock *sk = sock->sk;
497 	struct netlink_sock *nlk;
498 
499 	if (!sk)
500 		return 0;
501 
502 	netlink_remove(sk);
503 	sock_orphan(sk);
504 	nlk = nlk_sk(sk);
505 
506 	/*
507 	 * OK. Socket is unlinked, any packets that arrive now
508 	 * will be purged.
509 	 */
510 
511 	sock->sk = NULL;
512 	wake_up_interruptible_all(&nlk->wait);
513 
514 	skb_queue_purge(&sk->sk_write_queue);
515 
516 	if (nlk->pid) {
517 		struct netlink_notify n = {
518 						.net = sock_net(sk),
519 						.protocol = sk->sk_protocol,
520 						.pid = nlk->pid,
521 					  };
522 		atomic_notifier_call_chain(&netlink_chain,
523 				NETLINK_URELEASE, &n);
524 	}
525 
526 	module_put(nlk->module);
527 
528 	netlink_table_grab();
529 	if (netlink_is_kernel(sk)) {
530 		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
531 		if (--nl_table[sk->sk_protocol].registered == 0) {
532 			kfree(nl_table[sk->sk_protocol].listeners);
533 			nl_table[sk->sk_protocol].module = NULL;
534 			nl_table[sk->sk_protocol].registered = 0;
535 		}
536 	} else if (nlk->subscriptions) {
537 		netlink_update_listeners(sk);
538 	}
539 	netlink_table_ungrab();
540 
541 	kfree(nlk->groups);
542 	nlk->groups = NULL;
543 
544 	local_bh_disable();
545 	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
546 	local_bh_enable();
547 	sock_put(sk);
548 	return 0;
549 }
550 
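/* Pick a pid for an unbound socket: try the current thread group id first
 * and, on a collision, walk downwards through negative values (starting at
 * -4097) until netlink_insert() succeeds.
 */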
551 static int netlink_autobind(struct socket *sock)
552 {
553 	struct sock *sk = sock->sk;
554 	struct net *net = sock_net(sk);
555 	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
556 	struct hlist_head *head;
557 	struct sock *osk;
558 	struct hlist_node *node;
559 	s32 pid = task_tgid_vnr(current);
560 	int err;
561 	static s32 rover = -4097;
562 
563 retry:
564 	cond_resched();
565 	netlink_table_grab();
566 	head = nl_pid_hashfn(hash, pid);
567 	sk_for_each(osk, node, head) {
568 		if (!net_eq(sock_net(osk), net))
569 			continue;
570 		if (nlk_sk(osk)->pid == pid) {
571 			/* Bind collision, search negative pid values. */
572 			pid = rover--;
573 			if (rover > -4097)
574 				rover = -4097;
575 			netlink_table_ungrab();
576 			goto retry;
577 		}
578 	}
579 	netlink_table_ungrab();
580 
581 	err = netlink_insert(sk, net, pid);
582 	if (err == -EADDRINUSE)
583 		goto retry;
584 
585 	/* If 2 threads race to autobind, that is fine.  */
586 	if (err == -EBUSY)
587 		err = 0;
588 
589 	return err;
590 }
591 
592 static inline int netlink_capable(const struct socket *sock, unsigned int flag)
593 {
594 	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
595 	       capable(CAP_NET_ADMIN);
596 }
597 
598 static void
599 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
600 {
601 	struct netlink_sock *nlk = nlk_sk(sk);
602 
603 	if (nlk->subscriptions && !subscriptions)
604 		__sk_del_bind_node(sk);
605 	else if (!nlk->subscriptions && subscriptions)
606 		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
607 	nlk->subscriptions = subscriptions;
608 }
609 
610 static int netlink_realloc_groups(struct sock *sk)
611 {
612 	struct netlink_sock *nlk = nlk_sk(sk);
613 	unsigned int groups;
614 	unsigned long *new_groups;
615 	int err = 0;
616 
617 	netlink_table_grab();
618 
619 	groups = nl_table[sk->sk_protocol].groups;
620 	if (!nl_table[sk->sk_protocol].registered) {
621 		err = -ENOENT;
622 		goto out_unlock;
623 	}
624 
625 	if (nlk->ngroups >= groups)
626 		goto out_unlock;
627 
628 	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
629 	if (new_groups == NULL) {
630 		err = -ENOMEM;
631 		goto out_unlock;
632 	}
633 	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
634 	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
635 
636 	nlk->groups = new_groups;
637 	nlk->ngroups = groups;
638  out_unlock:
639 	netlink_table_ungrab();
640 	return err;
641 }
642 
643 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
644 			int addr_len)
645 {
646 	struct sock *sk = sock->sk;
647 	struct net *net = sock_net(sk);
648 	struct netlink_sock *nlk = nlk_sk(sk);
649 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
650 	int err;
651 
652 	if (nladdr->nl_family != AF_NETLINK)
653 		return -EINVAL;
654 
655 	/* Only superuser is allowed to listen to multicasts */
656 	if (nladdr->nl_groups) {
657 		if (!netlink_capable(sock, NL_NONROOT_RECV))
658 			return -EPERM;
659 		err = netlink_realloc_groups(sk);
660 		if (err)
661 			return err;
662 	}
663 
664 	if (nlk->pid) {
665 		if (nladdr->nl_pid != nlk->pid)
666 			return -EINVAL;
667 	} else {
668 		err = nladdr->nl_pid ?
669 			netlink_insert(sk, net, nladdr->nl_pid) :
670 			netlink_autobind(sock);
671 		if (err)
672 			return err;
673 	}
674 
675 	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
676 		return 0;
677 
678 	netlink_table_grab();
679 	netlink_update_subscriptions(sk, nlk->subscriptions +
680 					 hweight32(nladdr->nl_groups) -
681 					 hweight32(nlk->groups[0]));
682 	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
683 	netlink_update_listeners(sk);
684 	netlink_table_ungrab();
685 
686 	return 0;
687 }
688 
689 static int netlink_connect(struct socket *sock, struct sockaddr *addr,
690 			   int alen, int flags)
691 {
692 	int err = 0;
693 	struct sock *sk = sock->sk;
694 	struct netlink_sock *nlk = nlk_sk(sk);
695 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
696 
697 	if (alen < sizeof(addr->sa_family))
698 		return -EINVAL;
699 
700 	if (addr->sa_family == AF_UNSPEC) {
701 		sk->sk_state	= NETLINK_UNCONNECTED;
702 		nlk->dst_pid	= 0;
703 		nlk->dst_group  = 0;
704 		return 0;
705 	}
706 	if (addr->sa_family != AF_NETLINK)
707 		return -EINVAL;
708 
709 	/* Only superuser is allowed to send multicasts */
710 	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
711 		return -EPERM;
712 
713 	if (!nlk->pid)
714 		err = netlink_autobind(sock);
715 
716 	if (err == 0) {
717 		sk->sk_state	= NETLINK_CONNECTED;
718 		nlk->dst_pid 	= nladdr->nl_pid;
719 		nlk->dst_group  = ffs(nladdr->nl_groups);
720 	}
721 
722 	return err;
723 }
724 
725 static int netlink_getname(struct socket *sock, struct sockaddr *addr,
726 			   int *addr_len, int peer)
727 {
728 	struct sock *sk = sock->sk;
729 	struct netlink_sock *nlk = nlk_sk(sk);
730 	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
731 
732 	nladdr->nl_family = AF_NETLINK;
733 	nladdr->nl_pad = 0;
734 	*addr_len = sizeof(*nladdr);
735 
736 	if (peer) {
737 		nladdr->nl_pid = nlk->dst_pid;
738 		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
739 	} else {
740 		nladdr->nl_pid = nlk->pid;
741 		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
742 	}
743 	return 0;
744 }
745 
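/* Called when a message could not be delivered to sk: unless the owner has
 * opted out with NETLINK_NO_ENOBUFS, mark the socket congested and report
 * ENOBUFS; the drop is counted either way.
 */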
746 static void netlink_overrun(struct sock *sk)
747 {
748 	struct netlink_sock *nlk = nlk_sk(sk);
749 
750 	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
751 		if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
752 			sk->sk_err = ENOBUFS;
753 			sk->sk_error_report(sk);
754 		}
755 	}
756 	atomic_inc(&sk->sk_drops);
757 }
758 
759 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
760 {
761 	struct sock *sock;
762 	struct netlink_sock *nlk;
763 
764 	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid);
765 	if (!sock)
766 		return ERR_PTR(-ECONNREFUSED);
767 
768 	/* Don't bother queuing skb if kernel socket has no input function */
769 	nlk = nlk_sk(sock);
770 	if (sock->sk_state == NETLINK_CONNECTED &&
771 	    nlk->dst_pid != nlk_sk(ssk)->pid) {
772 		sock_put(sock);
773 		return ERR_PTR(-ECONNREFUSED);
774 	}
775 	return sock;
776 }
777 
778 struct sock *netlink_getsockbyfilp(struct file *filp)
779 {
780 	struct inode *inode = filp->f_path.dentry->d_inode;
781 	struct sock *sock;
782 
783 	if (!S_ISSOCK(inode->i_mode))
784 		return ERR_PTR(-ENOTSOCK);
785 
786 	sock = SOCKET_I(inode)->sk;
787 	if (sock->sk_family != AF_NETLINK)
788 		return ERR_PTR(-EINVAL);
789 
790 	sock_hold(sock);
791 	return sock;
792 }
793 
794 /*
795  * Attach a skb to a netlink socket.
796  * The caller must hold a reference to the destination socket. On error, the
797  * reference is dropped. The skb is not sent to the destination; all
798  * error checks are performed and memory in the queue is reserved.
799  * Return values:
800  * < 0: error. skb freed, reference to sock dropped.
801  * 0: continue
802  * 1: repeat lookup - reference dropped while waiting for socket memory.
803  */
804 int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
805 		      long *timeo, struct sock *ssk)
806 {
807 	struct netlink_sock *nlk;
808 
809 	nlk = nlk_sk(sk);
810 
811 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
812 	    test_bit(0, &nlk->state)) {
813 		DECLARE_WAITQUEUE(wait, current);
814 		if (!*timeo) {
815 			if (!ssk || netlink_is_kernel(ssk))
816 				netlink_overrun(sk);
817 			sock_put(sk);
818 			kfree_skb(skb);
819 			return -EAGAIN;
820 		}
821 
822 		__set_current_state(TASK_INTERRUPTIBLE);
823 		add_wait_queue(&nlk->wait, &wait);
824 
825 		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
826 		     test_bit(0, &nlk->state)) &&
827 		    !sock_flag(sk, SOCK_DEAD))
828 			*timeo = schedule_timeout(*timeo);
829 
830 		__set_current_state(TASK_RUNNING);
831 		remove_wait_queue(&nlk->wait, &wait);
832 		sock_put(sk);
833 
834 		if (signal_pending(current)) {
835 			kfree_skb(skb);
836 			return sock_intr_errno(*timeo);
837 		}
838 		return 1;
839 	}
840 	skb_set_owner_r(skb, sk);
841 	return 0;
842 }
843 
844 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
845 {
846 	int len = skb->len;
847 
848 	skb_queue_tail(&sk->sk_receive_queue, skb);
849 	sk->sk_data_ready(sk, len);
850 	return len;
851 }
852 
853 int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
854 {
855 	int len = __netlink_sendskb(sk, skb);
856 
857 	sock_put(sk);
858 	return len;
859 }
860 
861 void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
862 {
863 	kfree_skb(skb);
864 	sock_put(sk);
865 }
866 
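/* Shrink an over-sized skb before queueing it: if the unused tail room
 * accounts for at least half of skb->truesize, reallocate the head (cloning
 * first when the skb is shared) so that truesize reflects the actual payload.
 */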
867 static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
868 {
869 	int delta;
870 
871 	skb_orphan(skb);
872 
873 	delta = skb->end - skb->tail;
874 	if (delta * 2 < skb->truesize)
875 		return skb;
876 
877 	if (skb_shared(skb)) {
878 		struct sk_buff *nskb = skb_clone(skb, allocation);
879 		if (!nskb)
880 			return skb;
881 		consume_skb(skb);
882 		skb = nskb;
883 	}
884 
885 	if (!pskb_expand_head(skb, 0, -delta, allocation))
886 		skb->truesize -= delta;
887 
888 	return skb;
889 }
890 
891 static void netlink_rcv_wake(struct sock *sk)
892 {
893 	struct netlink_sock *nlk = nlk_sk(sk);
894 
895 	if (skb_queue_empty(&sk->sk_receive_queue))
896 		clear_bit(0, &nlk->state);
897 	if (!test_bit(0, &nlk->state))
898 		wake_up_interruptible(&nlk->wait);
899 }
900 
901 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
902 {
903 	int ret;
904 	struct netlink_sock *nlk = nlk_sk(sk);
905 
906 	ret = -ECONNREFUSED;
907 	if (nlk->netlink_rcv != NULL) {
908 		ret = skb->len;
909 		skb_set_owner_r(skb, sk);
910 		nlk->netlink_rcv(skb);
911 		consume_skb(skb);
912 	} else {
913 		kfree_skb(skb);
914 	}
915 	sock_put(sk);
916 	return ret;
917 }
918 
919 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
920 		    u32 pid, int nonblock)
921 {
922 	struct sock *sk;
923 	int err;
924 	long timeo;
925 
926 	skb = netlink_trim(skb, gfp_any());
927 
928 	timeo = sock_sndtimeo(ssk, nonblock);
929 retry:
930 	sk = netlink_getsockbypid(ssk, pid);
931 	if (IS_ERR(sk)) {
932 		kfree_skb(skb);
933 		return PTR_ERR(sk);
934 	}
935 	if (netlink_is_kernel(sk))
936 		return netlink_unicast_kernel(sk, skb);
937 
938 	if (sk_filter(sk, skb)) {
939 		err = skb->len;
940 		kfree_skb(skb);
941 		sock_put(sk);
942 		return err;
943 	}
944 
945 	err = netlink_attachskb(sk, skb, &timeo, ssk);
946 	if (err == 1)
947 		goto retry;
948 	if (err)
949 		return err;
950 
951 	return netlink_sendskb(sk, skb);
952 }
953 EXPORT_SYMBOL(netlink_unicast);
954 
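/* For a kernel socket only: report whether any bound socket is currently a
 * member of @group for this protocol.  The listeners bitmap is read under
 * RCU rather than the netlink table lock.
 */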
955 int netlink_has_listeners(struct sock *sk, unsigned int group)
956 {
957 	int res = 0;
958 	struct listeners *listeners;
959 
960 	BUG_ON(!netlink_is_kernel(sk));
961 
962 	rcu_read_lock();
963 	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
964 
965 	if (group - 1 < nl_table[sk->sk_protocol].groups)
966 		res = test_bit(group - 1, listeners->masks);
967 
968 	rcu_read_unlock();
969 
970 	return res;
971 }
972 EXPORT_SYMBOL_GPL(netlink_has_listeners);
973 
974 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
975 {
976 	struct netlink_sock *nlk = nlk_sk(sk);
977 
978 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
979 	    !test_bit(0, &nlk->state)) {
980 		skb_set_owner_r(skb, sk);
981 		__netlink_sendskb(sk, skb);
982 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
983 	}
984 	return -1;
985 }
986 
987 struct netlink_broadcast_data {
988 	struct sock *exclude_sk;
989 	struct net *net;
990 	u32 pid;
991 	u32 group;
992 	int failure;
993 	int delivery_failure;
994 	int congested;
995 	int delivered;
996 	gfp_t allocation;
997 	struct sk_buff *skb, *skb2;
998 	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
999 	void *tx_data;
1000 };
1001 
1002 static int do_one_broadcast(struct sock *sk,
1003 				   struct netlink_broadcast_data *p)
1004 {
1005 	struct netlink_sock *nlk = nlk_sk(sk);
1006 	int val;
1007 
1008 	if (p->exclude_sk == sk)
1009 		goto out;
1010 
1011 	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
1012 	    !test_bit(p->group - 1, nlk->groups))
1013 		goto out;
1014 
1015 	if (!net_eq(sock_net(sk), p->net))
1016 		goto out;
1017 
1018 	if (p->failure) {
1019 		netlink_overrun(sk);
1020 		goto out;
1021 	}
1022 
1023 	sock_hold(sk);
1024 	if (p->skb2 == NULL) {
1025 		if (skb_shared(p->skb)) {
1026 			p->skb2 = skb_clone(p->skb, p->allocation);
1027 		} else {
1028 			p->skb2 = skb_get(p->skb);
1029 			/*
1030 			 * skb ownership may have been set when
1031 			 * delivered to a previous socket.
1032 			 */
1033 			skb_orphan(p->skb2);
1034 		}
1035 	}
1036 	if (p->skb2 == NULL) {
1037 		netlink_overrun(sk);
1038 		/* Clone failed. Notify ALL listeners. */
1039 		p->failure = 1;
1040 		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1041 			p->delivery_failure = 1;
1042 	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1043 		kfree_skb(p->skb2);
1044 		p->skb2 = NULL;
1045 	} else if (sk_filter(sk, p->skb2)) {
1046 		kfree_skb(p->skb2);
1047 		p->skb2 = NULL;
1048 	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1049 		netlink_overrun(sk);
1050 		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1051 			p->delivery_failure = 1;
1052 	} else {
1053 		p->congested |= val;
1054 		p->delivered = 1;
1055 		p->skb2 = NULL;
1056 	}
1057 	sock_put(sk);
1058 
1059 out:
1060 	return 0;
1061 }
1062 
1063 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
1064 	u32 group, gfp_t allocation,
1065 	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1066 	void *filter_data)
1067 {
1068 	struct net *net = sock_net(ssk);
1069 	struct netlink_broadcast_data info;
1070 	struct hlist_node *node;
1071 	struct sock *sk;
1072 
1073 	skb = netlink_trim(skb, allocation);
1074 
1075 	info.exclude_sk = ssk;
1076 	info.net = net;
1077 	info.pid = pid;
1078 	info.group = group;
1079 	info.failure = 0;
1080 	info.delivery_failure = 0;
1081 	info.congested = 0;
1082 	info.delivered = 0;
1083 	info.allocation = allocation;
1084 	info.skb = skb;
1085 	info.skb2 = NULL;
1086 	info.tx_filter = filter;
1087 	info.tx_data = filter_data;
1088 
1089 	/* While we sleep in clone, do not allow the socket list to change */
1090 
1091 	netlink_lock_table();
1092 
1093 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1094 		do_one_broadcast(sk, &info);
1095 
1096 	consume_skb(skb);
1097 
1098 	netlink_unlock_table();
1099 
1100 	if (info.delivery_failure) {
1101 		kfree_skb(info.skb2);
1102 		return -ENOBUFS;
1103 	}
1104 	consume_skb(info.skb2);
1105 
1106 	if (info.delivered) {
1107 		if (info.congested && (allocation & __GFP_WAIT))
1108 			yield();
1109 		return 0;
1110 	}
1111 	return -ESRCH;
1112 }
1113 EXPORT_SYMBOL(netlink_broadcast_filtered);
1114 
1115 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
1116 		      u32 group, gfp_t allocation)
1117 {
1118 	return netlink_broadcast_filtered(ssk, skb, pid, group, allocation,
1119 		NULL, NULL);
1120 }
1121 EXPORT_SYMBOL(netlink_broadcast);
1122 
1123 struct netlink_set_err_data {
1124 	struct sock *exclude_sk;
1125 	u32 pid;
1126 	u32 group;
1127 	int code;
1128 };
1129 
1130 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1131 {
1132 	struct netlink_sock *nlk = nlk_sk(sk);
1133 	int ret = 0;
1134 
1135 	if (sk == p->exclude_sk)
1136 		goto out;
1137 
1138 	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
1139 		goto out;
1140 
1141 	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
1142 	    !test_bit(p->group - 1, nlk->groups))
1143 		goto out;
1144 
1145 	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
1146 		ret = 1;
1147 		goto out;
1148 	}
1149 
1150 	sk->sk_err = p->code;
1151 	sk->sk_error_report(sk);
1152 out:
1153 	return ret;
1154 }
1155 
1156 /**
1157  * netlink_set_err - report error to broadcast listeners
1158  * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
1159  * @pid: the PID of a process that we want to skip (if any)
1160  * @group: the broadcast group that will notice the error
1161  * @code: error code, must be negative (as usual in kernelspace)
1162  *
1163  * This function returns the number of broadcast listeners that have set the
1164  * NETLINK_RECV_NO_ENOBUFS socket option.
1165  */
1166 int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1167 {
1168 	struct netlink_set_err_data info;
1169 	struct hlist_node *node;
1170 	struct sock *sk;
1171 	int ret = 0;
1172 
1173 	info.exclude_sk = ssk;
1174 	info.pid = pid;
1175 	info.group = group;
1176 	/* sk->sk_err wants a positive error value */
1177 	info.code = -code;
1178 
1179 	read_lock(&nl_table_lock);
1180 
1181 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1182 		ret += do_one_set_err(sk, &info);
1183 
1184 	read_unlock(&nl_table_lock);
1185 	return ret;
1186 }
1187 EXPORT_SYMBOL(netlink_set_err);
1188 
1189 /* must be called with netlink table grabbed */
1190 static void netlink_update_socket_mc(struct netlink_sock *nlk,
1191 				     unsigned int group,
1192 				     int is_new)
1193 {
1194 	int old, new = !!is_new, subscriptions;
1195 
1196 	old = test_bit(group - 1, nlk->groups);
1197 	subscriptions = nlk->subscriptions - old + new;
1198 	if (new)
1199 		__set_bit(group - 1, nlk->groups);
1200 	else
1201 		__clear_bit(group - 1, nlk->groups);
1202 	netlink_update_subscriptions(&nlk->sk, subscriptions);
1203 	netlink_update_listeners(&nlk->sk);
1204 }
1205 
1206 static int netlink_setsockopt(struct socket *sock, int level, int optname,
1207 			      char __user *optval, unsigned int optlen)
1208 {
1209 	struct sock *sk = sock->sk;
1210 	struct netlink_sock *nlk = nlk_sk(sk);
1211 	unsigned int val = 0;
1212 	int err;
1213 
1214 	if (level != SOL_NETLINK)
1215 		return -ENOPROTOOPT;
1216 
1217 	if (optlen >= sizeof(int) &&
1218 	    get_user(val, (unsigned int __user *)optval))
1219 		return -EFAULT;
1220 
1221 	switch (optname) {
1222 	case NETLINK_PKTINFO:
1223 		if (val)
1224 			nlk->flags |= NETLINK_RECV_PKTINFO;
1225 		else
1226 			nlk->flags &= ~NETLINK_RECV_PKTINFO;
1227 		err = 0;
1228 		break;
1229 	case NETLINK_ADD_MEMBERSHIP:
1230 	case NETLINK_DROP_MEMBERSHIP: {
1231 		if (!netlink_capable(sock, NL_NONROOT_RECV))
1232 			return -EPERM;
1233 		err = netlink_realloc_groups(sk);
1234 		if (err)
1235 			return err;
1236 		if (!val || val - 1 >= nlk->ngroups)
1237 			return -EINVAL;
1238 		netlink_table_grab();
1239 		netlink_update_socket_mc(nlk, val,
1240 					 optname == NETLINK_ADD_MEMBERSHIP);
1241 		netlink_table_ungrab();
1242 		err = 0;
1243 		break;
1244 	}
1245 	case NETLINK_BROADCAST_ERROR:
1246 		if (val)
1247 			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
1248 		else
1249 			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
1250 		err = 0;
1251 		break;
1252 	case NETLINK_NO_ENOBUFS:
1253 		if (val) {
1254 			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
1255 			clear_bit(0, &nlk->state);
1256 			wake_up_interruptible(&nlk->wait);
1257 		} else {
1258 			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
1259 		}
1260 		err = 0;
1261 		break;
1262 	default:
1263 		err = -ENOPROTOOPT;
1264 	}
1265 	return err;
1266 }
1267 
1268 static int netlink_getsockopt(struct socket *sock, int level, int optname,
1269 			      char __user *optval, int __user *optlen)
1270 {
1271 	struct sock *sk = sock->sk;
1272 	struct netlink_sock *nlk = nlk_sk(sk);
1273 	int len, val, err;
1274 
1275 	if (level != SOL_NETLINK)
1276 		return -ENOPROTOOPT;
1277 
1278 	if (get_user(len, optlen))
1279 		return -EFAULT;
1280 	if (len < 0)
1281 		return -EINVAL;
1282 
1283 	switch (optname) {
1284 	case NETLINK_PKTINFO:
1285 		if (len < sizeof(int))
1286 			return -EINVAL;
1287 		len = sizeof(int);
1288 		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
1289 		if (put_user(len, optlen) ||
1290 		    put_user(val, optval))
1291 			return -EFAULT;
1292 		err = 0;
1293 		break;
1294 	case NETLINK_BROADCAST_ERROR:
1295 		if (len < sizeof(int))
1296 			return -EINVAL;
1297 		len = sizeof(int);
1298 		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
1299 		if (put_user(len, optlen) ||
1300 		    put_user(val, optval))
1301 			return -EFAULT;
1302 		err = 0;
1303 		break;
1304 	case NETLINK_NO_ENOBUFS:
1305 		if (len < sizeof(int))
1306 			return -EINVAL;
1307 		len = sizeof(int);
1308 		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
1309 		if (put_user(len, optlen) ||
1310 		    put_user(val, optval))
1311 			return -EFAULT;
1312 		err = 0;
1313 		break;
1314 	default:
1315 		err = -ENOPROTOOPT;
1316 	}
1317 	return err;
1318 }
1319 
1320 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
1321 {
1322 	struct nl_pktinfo info;
1323 
1324 	info.group = NETLINK_CB(skb).dst_group;
1325 	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
1326 }
1327 
1328 static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1329 			   struct msghdr *msg, size_t len)
1330 {
1331 	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1332 	struct sock *sk = sock->sk;
1333 	struct netlink_sock *nlk = nlk_sk(sk);
1334 	struct sockaddr_nl *addr = msg->msg_name;
1335 	u32 dst_pid;
1336 	u32 dst_group;
1337 	struct sk_buff *skb;
1338 	int err;
1339 	struct scm_cookie scm;
1340 
1341 	if (msg->msg_flags&MSG_OOB)
1342 		return -EOPNOTSUPP;
1343 
1344 	if (NULL == siocb->scm)
1345 		siocb->scm = &scm;
1346 
1347 	err = scm_send(sock, msg, siocb->scm);
1348 	if (err < 0)
1349 		return err;
1350 
1351 	if (msg->msg_namelen) {
1352 		err = -EINVAL;
1353 		if (addr->nl_family != AF_NETLINK)
1354 			goto out;
1355 		dst_pid = addr->nl_pid;
1356 		dst_group = ffs(addr->nl_groups);
1357 		err =  -EPERM;
1358 		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
1359 			goto out;
1360 	} else {
1361 		dst_pid = nlk->dst_pid;
1362 		dst_group = nlk->dst_group;
1363 	}
1364 
1365 	if (!nlk->pid) {
1366 		err = netlink_autobind(sock);
1367 		if (err)
1368 			goto out;
1369 	}
1370 
1371 	err = -EMSGSIZE;
1372 	if (len > sk->sk_sndbuf - 32)
1373 		goto out;
1374 	err = -ENOBUFS;
1375 	skb = alloc_skb(len, GFP_KERNEL);
1376 	if (skb == NULL)
1377 		goto out;
1378 
1379 	NETLINK_CB(skb).pid	= nlk->pid;
1380 	NETLINK_CB(skb).dst_group = dst_group;
1381 	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1382 
1383 	err = -EFAULT;
1384 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
1385 		kfree_skb(skb);
1386 		goto out;
1387 	}
1388 
1389 	err = security_netlink_send(sk, skb);
1390 	if (err) {
1391 		kfree_skb(skb);
1392 		goto out;
1393 	}
1394 
1395 	if (dst_group) {
1396 		atomic_inc(&skb->users);
1397 		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
1398 	}
1399 	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
1400 
1401 out:
1402 	scm_destroy(siocb->scm);
1403 	return err;
1404 }
1405 
1406 static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1407 			   struct msghdr *msg, size_t len,
1408 			   int flags)
1409 {
1410 	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1411 	struct scm_cookie scm;
1412 	struct sock *sk = sock->sk;
1413 	struct netlink_sock *nlk = nlk_sk(sk);
1414 	int noblock = flags&MSG_DONTWAIT;
1415 	size_t copied;
1416 	struct sk_buff *skb, *data_skb;
1417 	int err, ret;
1418 
1419 	if (flags&MSG_OOB)
1420 		return -EOPNOTSUPP;
1421 
1422 	copied = 0;
1423 
1424 	skb = skb_recv_datagram(sk, flags, noblock, &err);
1425 	if (skb == NULL)
1426 		goto out;
1427 
1428 	data_skb = skb;
1429 
1430 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
1431 	if (unlikely(skb_shinfo(skb)->frag_list)) {
1432 		/*
1433 		 * If this skb has a frag_list, it means that we will have to
1434 		 * use the frag_list skb's data for compat tasks and the
1435 		 * regular skb's data for normal (non-compat) tasks.
1436 		 *
1437 		 * If we need to send the compat skb, assign it to the
1438 		 * 'data_skb' variable so that it will be used below for data
1439 		 * copying. We keep 'skb' for everything else, including
1440 		 * freeing both later.
1441 		 */
1442 		if (flags & MSG_CMSG_COMPAT)
1443 			data_skb = skb_shinfo(skb)->frag_list;
1444 	}
1445 #endif
1446 
1447 	msg->msg_namelen = 0;
1448 
1449 	copied = data_skb->len;
1450 	if (len < copied) {
1451 		msg->msg_flags |= MSG_TRUNC;
1452 		copied = len;
1453 	}
1454 
1455 	skb_reset_transport_header(data_skb);
1456 	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
1457 
1458 	if (msg->msg_name) {
1459 		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
1460 		addr->nl_family = AF_NETLINK;
1461 		addr->nl_pad    = 0;
1462 		addr->nl_pid	= NETLINK_CB(skb).pid;
1463 		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
1464 		msg->msg_namelen = sizeof(*addr);
1465 	}
1466 
1467 	if (nlk->flags & NETLINK_RECV_PKTINFO)
1468 		netlink_cmsg_recv_pktinfo(msg, skb);
1469 
1470 	if (NULL == siocb->scm) {
1471 		memset(&scm, 0, sizeof(scm));
1472 		siocb->scm = &scm;
1473 	}
1474 	siocb->scm->creds = *NETLINK_CREDS(skb);
1475 	if (flags & MSG_TRUNC)
1476 		copied = data_skb->len;
1477 
1478 	skb_free_datagram(sk, skb);
1479 
1480 	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
1481 		ret = netlink_dump(sk);
1482 		if (ret) {
1483 			sk->sk_err = ret;
1484 			sk->sk_error_report(sk);
1485 		}
1486 	}
1487 
1488 	scm_recv(sock, msg, siocb->scm, flags);
1489 out:
1490 	netlink_rcv_wake(sk);
1491 	return err ? : copied;
1492 }
1493 
1494 static void netlink_data_ready(struct sock *sk, int len)
1495 {
1496 	BUG();
1497 }
1498 
1499 /*
1500  *	We export these functions to other modules. They provide a
1501  *	complete set of kernel non-blocking support for message
1502  *	queueing.
1503  */
1504 
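/*
 * Illustrative sketch only (not part of this file): a subsystem typically
 * creates its kernel-side socket along these lines, where NETLINK_FOO and
 * foo_rcv() are hypothetical placeholders for the protocol number and the
 * input callback:
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups	= 32,
 *		.input	= foo_rcv,
 *	};
 *	struct sock *nls = netlink_kernel_create(&init_net, NETLINK_FOO,
 *						 THIS_MODULE, &cfg);
 */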
1505 struct sock *
1506 netlink_kernel_create(struct net *net, int unit,
1507 		      struct module *module,
1508 		      struct netlink_kernel_cfg *cfg)
1509 {
1510 	struct socket *sock;
1511 	struct sock *sk;
1512 	struct netlink_sock *nlk;
1513 	struct listeners *listeners = NULL;
1514 	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
1515 	unsigned int groups;
1516 
1517 	BUG_ON(!nl_table);
1518 
1519 	if (unit < 0 || unit >= MAX_LINKS)
1520 		return NULL;
1521 
1522 	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
1523 		return NULL;
1524 
1525 	/*
1526 	 * We have to just have a reference on the net from sk, but don't
1527 	 * get_net it. Besides, we cannot get and then put the net here.
1528 	 * So we create one inside init_net and then move it to net.
1529 	 */
1530 
1531 	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
1532 		goto out_sock_release_nosk;
1533 
1534 	sk = sock->sk;
1535 	sk_change_net(sk, net);
1536 
1537 	if (!cfg || cfg->groups < 32)
1538 		groups = 32;
1539 	else
1540 		groups = cfg->groups;
1541 
1542 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
1543 	if (!listeners)
1544 		goto out_sock_release;
1545 
1546 	sk->sk_data_ready = netlink_data_ready;
1547 	if (cfg && cfg->input)
1548 		nlk_sk(sk)->netlink_rcv = cfg->input;
1549 
1550 	if (netlink_insert(sk, net, 0))
1551 		goto out_sock_release;
1552 
1553 	nlk = nlk_sk(sk);
1554 	nlk->flags |= NETLINK_KERNEL_SOCKET;
1555 
1556 	netlink_table_grab();
1557 	if (!nl_table[unit].registered) {
1558 		nl_table[unit].groups = groups;
1559 		rcu_assign_pointer(nl_table[unit].listeners, listeners);
1560 		nl_table[unit].cb_mutex = cb_mutex;
1561 		nl_table[unit].module = module;
1562 		nl_table[unit].registered = 1;
1563 	} else {
1564 		kfree(listeners);
1565 		nl_table[unit].registered++;
1566 	}
1567 	netlink_table_ungrab();
1568 	return sk;
1569 
1570 out_sock_release:
1571 	kfree(listeners);
1572 	netlink_kernel_release(sk);
1573 	return NULL;
1574 
1575 out_sock_release_nosk:
1576 	sock_release(sock);
1577 	return NULL;
1578 }
1579 EXPORT_SYMBOL(netlink_kernel_create);
1580 
1581 
1582 void
1583 netlink_kernel_release(struct sock *sk)
1584 {
1585 	sk_release_kernel(sk);
1586 }
1587 EXPORT_SYMBOL(netlink_kernel_release);
1588 
1589 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
1590 {
1591 	struct listeners *new, *old;
1592 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
1593 
1594 	if (groups < 32)
1595 		groups = 32;
1596 
1597 	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
1598 		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
1599 		if (!new)
1600 			return -ENOMEM;
1601 		old = rcu_dereference_protected(tbl->listeners, 1);
1602 		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
1603 		rcu_assign_pointer(tbl->listeners, new);
1604 
1605 		kfree_rcu(old, rcu);
1606 	}
1607 	tbl->groups = groups;
1608 
1609 	return 0;
1610 }
1611 
1612 /**
1613  * netlink_change_ngroups - change number of multicast groups
1614  *
1615  * This changes the number of multicast groups that are available
1616  * on a certain netlink family. Note that it is not possible to
1617  * change the number of groups to below 32. Also note that it does
1618  * not implicitly call netlink_clear_multicast_users() when the
1619  * number of groups is reduced.
1620  *
1621  * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
1622  * @groups: The new number of groups.
1623  */
1624 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
1625 {
1626 	int err;
1627 
1628 	netlink_table_grab();
1629 	err = __netlink_change_ngroups(sk, groups);
1630 	netlink_table_ungrab();
1631 
1632 	return err;
1633 }
1634 
1635 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
1636 {
1637 	struct sock *sk;
1638 	struct hlist_node *node;
1639 	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
1640 
1641 	sk_for_each_bound(sk, node, &tbl->mc_list)
1642 		netlink_update_socket_mc(nlk_sk(sk), group, 0);
1643 }
1644 
1645 /**
1646  * netlink_clear_multicast_users - remove multicast listeners
1647  *
1648  * This function removes all listeners from the given group.
1649  * @ksk: The kernel netlink socket, as returned by
1650  *	netlink_kernel_create().
1651  * @group: The multicast group to clear.
1652  */
1653 void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
1654 {
1655 	netlink_table_grab();
1656 	__netlink_clear_multicast_users(ksk, group);
1657 	netlink_table_ungrab();
1658 }
1659 
1660 void netlink_set_nonroot(int protocol, unsigned int flags)
1661 {
1662 	if ((unsigned int)protocol < MAX_LINKS)
1663 		nl_table[protocol].nl_nonroot = flags;
1664 }
1665 EXPORT_SYMBOL(netlink_set_nonroot);
1666 
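/* Append a netlink message header plus @len bytes of payload space to @skb,
 * fill in the header fields, and zero any alignment padding after the
 * payload.  The caller is expected to have reserved enough tailroom.
 */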
1667 struct nlmsghdr *
1668 __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
1669 {
1670 	struct nlmsghdr *nlh;
1671 	int size = NLMSG_LENGTH(len);
1672 
1673 	nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
1674 	nlh->nlmsg_type = type;
1675 	nlh->nlmsg_len = size;
1676 	nlh->nlmsg_flags = flags;
1677 	nlh->nlmsg_pid = pid;
1678 	nlh->nlmsg_seq = seq;
1679 	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
1680 		memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
1681 	return nlh;
1682 }
1683 EXPORT_SYMBOL(__nlmsg_put);
1684 
1685 /*
1686  * It looks a bit ugly.
1687  * It would be better to create a kernel thread.
1688  */
1689 
1690 static int netlink_dump(struct sock *sk)
1691 {
1692 	struct netlink_sock *nlk = nlk_sk(sk);
1693 	struct netlink_callback *cb;
1694 	struct sk_buff *skb = NULL;
1695 	struct nlmsghdr *nlh;
1696 	int len, err = -ENOBUFS;
1697 	int alloc_size;
1698 
1699 	mutex_lock(nlk->cb_mutex);
1700 
1701 	cb = nlk->cb;
1702 	if (cb == NULL) {
1703 		err = -EINVAL;
1704 		goto errout_skb;
1705 	}
1706 
1707 	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
1708 
1709 	skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
1710 	if (!skb)
1711 		goto errout_skb;
1712 
1713 	len = cb->dump(skb, cb);
1714 
1715 	if (len > 0) {
1716 		mutex_unlock(nlk->cb_mutex);
1717 
1718 		if (sk_filter(sk, skb))
1719 			kfree_skb(skb);
1720 		else
1721 			__netlink_sendskb(sk, skb);
1722 		return 0;
1723 	}
1724 
1725 	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
1726 	if (!nlh)
1727 		goto errout_skb;
1728 
1729 	nl_dump_check_consistent(cb, nlh);
1730 
1731 	memcpy(nlmsg_data(nlh), &len, sizeof(len));
1732 
1733 	if (sk_filter(sk, skb))
1734 		kfree_skb(skb);
1735 	else
1736 		__netlink_sendskb(sk, skb);
1737 
1738 	if (cb->done)
1739 		cb->done(cb);
1740 	nlk->cb = NULL;
1741 	mutex_unlock(nlk->cb_mutex);
1742 
1743 	netlink_consume_callback(cb);
1744 	return 0;
1745 
1746 errout_skb:
1747 	mutex_unlock(nlk->cb_mutex);
1748 	kfree_skb(skb);
1749 	return err;
1750 }
1751 
1752 int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1753 		       const struct nlmsghdr *nlh,
1754 		       struct netlink_dump_control *control)
1755 {
1756 	struct netlink_callback *cb;
1757 	struct sock *sk;
1758 	struct netlink_sock *nlk;
1759 	int ret;
1760 
1761 	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1762 	if (cb == NULL)
1763 		return -ENOBUFS;
1764 
1765 	cb->dump = control->dump;
1766 	cb->done = control->done;
1767 	cb->nlh = nlh;
1768 	cb->data = control->data;
1769 	cb->min_dump_alloc = control->min_dump_alloc;
1770 	atomic_inc(&skb->users);
1771 	cb->skb = skb;
1772 
1773 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
1774 	if (sk == NULL) {
1775 		netlink_destroy_callback(cb);
1776 		return -ECONNREFUSED;
1777 	}
1778 	nlk = nlk_sk(sk);
1779 	/* A dump is in progress... */
1780 	mutex_lock(nlk->cb_mutex);
1781 	if (nlk->cb) {
1782 		mutex_unlock(nlk->cb_mutex);
1783 		netlink_destroy_callback(cb);
1784 		sock_put(sk);
1785 		return -EBUSY;
1786 	}
1787 	nlk->cb = cb;
1788 	mutex_unlock(nlk->cb_mutex);
1789 
1790 	ret = netlink_dump(sk);
1791 
1792 	sock_put(sk);
1793 
1794 	if (ret)
1795 		return ret;
1796 
1797 	/* We successfully started a dump; by returning -EINTR we
1798 	 * signal not to send an ACK even if it was requested.
1799 	 */
1800 	return -EINTR;
1801 }
1802 EXPORT_SYMBOL(netlink_dump_start);
1803 
1804 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
1805 {
1806 	struct sk_buff *skb;
1807 	struct nlmsghdr *rep;
1808 	struct nlmsgerr *errmsg;
1809 	size_t payload = sizeof(*errmsg);
1810 
1811 	/* error messages get the original request appended */
1812 	if (err)
1813 		payload += nlmsg_len(nlh);
1814 
1815 	skb = nlmsg_new(payload, GFP_KERNEL);
1816 	if (!skb) {
1817 		struct sock *sk;
1818 
1819 		sk = netlink_lookup(sock_net(in_skb->sk),
1820 				    in_skb->sk->sk_protocol,
1821 				    NETLINK_CB(in_skb).pid);
1822 		if (sk) {
1823 			sk->sk_err = ENOBUFS;
1824 			sk->sk_error_report(sk);
1825 			sock_put(sk);
1826 		}
1827 		return;
1828 	}
1829 
1830 	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1831 			  NLMSG_ERROR, payload, 0);
1832 	errmsg = nlmsg_data(rep);
1833 	errmsg->error = err;
1834 	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
1835 	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
1836 }
1837 EXPORT_SYMBOL(netlink_ack);
1838 
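/* Walk every netlink message in @skb and hand requests to @cb.  Messages
 * that are not requests, or that are control messages, are only acked when
 * NLM_F_ACK asks for it; a -EINTR return from @cb (see netlink_dump_start())
 * suppresses the ack for that message.
 */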
1839 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
1840 						     struct nlmsghdr *))
1841 {
1842 	struct nlmsghdr *nlh;
1843 	int err;
1844 
1845 	while (skb->len >= nlmsg_total_size(0)) {
1846 		int msglen;
1847 
1848 		nlh = nlmsg_hdr(skb);
1849 		err = 0;
1850 
1851 		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
1852 			return 0;
1853 
1854 		/* Only requests are handled by the kernel */
1855 		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
1856 			goto ack;
1857 
1858 		/* Skip control messages */
1859 		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
1860 			goto ack;
1861 
1862 		err = cb(skb, nlh);
1863 		if (err == -EINTR)
1864 			goto skip;
1865 
1866 ack:
1867 		if (nlh->nlmsg_flags & NLM_F_ACK || err)
1868 			netlink_ack(skb, nlh, err);
1869 
1870 skip:
1871 		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
1872 		if (msglen > skb->len)
1873 			msglen = skb->len;
1874 		skb_pull(skb, msglen);
1875 	}
1876 
1877 	return 0;
1878 }
1879 EXPORT_SYMBOL(netlink_rcv_skb);
1880 
1881 /**
1882  * nlmsg_notify - send a notification netlink message
1883  * @sk: netlink socket to use
1884  * @skb: notification message
1885  * @pid: destination netlink pid for reports or 0
1886  * @group: destination multicast group or 0
1887  * @report: 1 to report back, 0 to disable
1888  * @flags: allocation flags
1889  */
1890 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
1891 		 unsigned int group, int report, gfp_t flags)
1892 {
1893 	int err = 0;
1894 
1895 	if (group) {
1896 		int exclude_pid = 0;
1897 
1898 		if (report) {
1899 			atomic_inc(&skb->users);
1900 			exclude_pid = pid;
1901 		}
1902 
1903 		/* errors are reported via the destination sk->sk_err, but delivery
1904 		 * errors are propagated if the NETLINK_BROADCAST_ERROR flag is set */
1905 		err = nlmsg_multicast(sk, skb, exclude_pid, group, flags);
1906 	}
1907 
1908 	if (report) {
1909 		int err2;
1910 
1911 		err2 = nlmsg_unicast(sk, skb, pid);
1912 		if (!err || err == -ESRCH)
1913 			err = err2;
1914 	}
1915 
1916 	return err;
1917 }
1918 EXPORT_SYMBOL(nlmsg_notify);
1919 
1920 #ifdef CONFIG_PROC_FS
1921 struct nl_seq_iter {
1922 	struct seq_net_private p;
1923 	int link;
1924 	int hash_idx;
1925 };
1926 
1927 static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
1928 {
1929 	struct nl_seq_iter *iter = seq->private;
1930 	int i, j;
1931 	struct sock *s;
1932 	struct hlist_node *node;
1933 	loff_t off = 0;
1934 
1935 	for (i = 0; i < MAX_LINKS; i++) {
1936 		struct nl_pid_hash *hash = &nl_table[i].hash;
1937 
1938 		for (j = 0; j <= hash->mask; j++) {
1939 			sk_for_each(s, node, &hash->table[j]) {
1940 				if (sock_net(s) != seq_file_net(seq))
1941 					continue;
1942 				if (off == pos) {
1943 					iter->link = i;
1944 					iter->hash_idx = j;
1945 					return s;
1946 				}
1947 				++off;
1948 			}
1949 		}
1950 	}
1951 	return NULL;
1952 }
1953 
1954 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
1955 	__acquires(nl_table_lock)
1956 {
1957 	read_lock(&nl_table_lock);
1958 	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1959 }
1960 
1961 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1962 {
1963 	struct sock *s;
1964 	struct nl_seq_iter *iter;
1965 	int i, j;
1966 
1967 	++*pos;
1968 
1969 	if (v == SEQ_START_TOKEN)
1970 		return netlink_seq_socket_idx(seq, 0);
1971 
1972 	iter = seq->private;
1973 	s = v;
1974 	do {
1975 		s = sk_next(s);
1976 	} while (s && sock_net(s) != seq_file_net(seq));
1977 	if (s)
1978 		return s;
1979 
1980 	i = iter->link;
1981 	j = iter->hash_idx + 1;
1982 
1983 	do {
1984 		struct nl_pid_hash *hash = &nl_table[i].hash;
1985 
1986 		for (; j <= hash->mask; j++) {
1987 			s = sk_head(&hash->table[j]);
1988 			while (s && sock_net(s) != seq_file_net(seq))
1989 				s = sk_next(s);
1990 			if (s) {
1991 				iter->link = i;
1992 				iter->hash_idx = j;
1993 				return s;
1994 			}
1995 		}
1996 
1997 		j = 0;
1998 	} while (++i < MAX_LINKS);
1999 
2000 	return NULL;
2001 }
2002 
2003 static void netlink_seq_stop(struct seq_file *seq, void *v)
2004 	__releases(nl_table_lock)
2005 {
2006 	read_unlock(&nl_table_lock);
2007 }
2008 
2009 
2010 static int netlink_seq_show(struct seq_file *seq, void *v)
2011 {
2012 	if (v == SEQ_START_TOKEN) {
2013 		seq_puts(seq,
2014 			 "sk       Eth Pid    Groups   "
2015 			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
2016 	} else {
2017 		struct sock *s = v;
2018 		struct netlink_sock *nlk = nlk_sk(s);
2019 
2020 		seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
2021 			   s,
2022 			   s->sk_protocol,
2023 			   nlk->pid,
2024 			   nlk->groups ? (u32)nlk->groups[0] : 0,
2025 			   sk_rmem_alloc_get(s),
2026 			   sk_wmem_alloc_get(s),
2027 			   nlk->cb,
2028 			   atomic_read(&s->sk_refcnt),
2029 			   atomic_read(&s->sk_drops),
2030 			   sock_i_ino(s)
2031 			);
2032 
2033 	}
2034 	return 0;
2035 }
2036 
2037 static const struct seq_operations netlink_seq_ops = {
2038 	.start  = netlink_seq_start,
2039 	.next   = netlink_seq_next,
2040 	.stop   = netlink_seq_stop,
2041 	.show   = netlink_seq_show,
2042 };
2043 
2044 
2045 static int netlink_seq_open(struct inode *inode, struct file *file)
2046 {
2047 	return seq_open_net(inode, file, &netlink_seq_ops,
2048 				sizeof(struct nl_seq_iter));
2049 }
2050 
2051 static const struct file_operations netlink_seq_fops = {
2052 	.owner		= THIS_MODULE,
2053 	.open		= netlink_seq_open,
2054 	.read		= seq_read,
2055 	.llseek		= seq_lseek,
2056 	.release	= seq_release_net,
2057 };
2058 
2059 #endif
2060 
2061 int netlink_register_notifier(struct notifier_block *nb)
2062 {
2063 	return atomic_notifier_chain_register(&netlink_chain, nb);
2064 }
2065 EXPORT_SYMBOL(netlink_register_notifier);
2066 
2067 int netlink_unregister_notifier(struct notifier_block *nb)
2068 {
2069 	return atomic_notifier_chain_unregister(&netlink_chain, nb);
2070 }
2071 EXPORT_SYMBOL(netlink_unregister_notifier);
2072 
2073 static const struct proto_ops netlink_ops = {
2074 	.family =	PF_NETLINK,
2075 	.owner =	THIS_MODULE,
2076 	.release =	netlink_release,
2077 	.bind =		netlink_bind,
2078 	.connect =	netlink_connect,
2079 	.socketpair =	sock_no_socketpair,
2080 	.accept =	sock_no_accept,
2081 	.getname =	netlink_getname,
2082 	.poll =		datagram_poll,
2083 	.ioctl =	sock_no_ioctl,
2084 	.listen =	sock_no_listen,
2085 	.shutdown =	sock_no_shutdown,
2086 	.setsockopt =	netlink_setsockopt,
2087 	.getsockopt =	netlink_getsockopt,
2088 	.sendmsg =	netlink_sendmsg,
2089 	.recvmsg =	netlink_recvmsg,
2090 	.mmap =		sock_no_mmap,
2091 	.sendpage =	sock_no_sendpage,
2092 };
2093 
2094 static const struct net_proto_family netlink_family_ops = {
2095 	.family = PF_NETLINK,
2096 	.create = netlink_create,
2097 	.owner	= THIS_MODULE,	/* for consistency 8) */
2098 };
2099 
2100 static int __net_init netlink_net_init(struct net *net)
2101 {
2102 #ifdef CONFIG_PROC_FS
2103 	if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops))
2104 		return -ENOMEM;
2105 #endif
2106 	return 0;
2107 }
2108 
2109 static void __net_exit netlink_net_exit(struct net *net)
2110 {
2111 #ifdef CONFIG_PROC_FS
2112 	proc_net_remove(net, "netlink");
2113 #endif
2114 }
2115 
2116 static void __init netlink_add_usersock_entry(void)
2117 {
2118 	struct listeners *listeners;
2119 	int groups = 32;
2120 
2121 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2122 	if (!listeners)
2123 		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
2124 
2125 	netlink_table_grab();
2126 
2127 	nl_table[NETLINK_USERSOCK].groups = groups;
2128 	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
2129 	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2130 	nl_table[NETLINK_USERSOCK].registered = 1;
2131 
2132 	netlink_table_ungrab();
2133 }
2134 
2135 static struct pernet_operations __net_initdata netlink_net_ops = {
2136 	.init = netlink_net_init,
2137 	.exit = netlink_net_exit,
2138 };
2139 
2140 static int __init netlink_proto_init(void)
2141 {
2142 	struct sk_buff *dummy_skb;
2143 	int i;
2144 	unsigned long limit;
2145 	unsigned int order;
2146 	int err = proto_register(&netlink_proto, 0);
2147 
2148 	if (err != 0)
2149 		goto out;
2150 
2151 	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));
2152 
2153 	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
2154 	if (!nl_table)
2155 		goto panic;
2156 
2157 	if (totalram_pages >= (128 * 1024))
2158 		limit = totalram_pages >> (21 - PAGE_SHIFT);
2159 	else
2160 		limit = totalram_pages >> (23 - PAGE_SHIFT);
2161 
2162 	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
2163 	limit = (1UL << order) / sizeof(struct hlist_head);
2164 	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
2165 
2166 	for (i = 0; i < MAX_LINKS; i++) {
2167 		struct nl_pid_hash *hash = &nl_table[i].hash;
2168 
2169 		hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table));
2170 		if (!hash->table) {
2171 			while (i-- > 0)
2172 				nl_pid_hash_free(nl_table[i].hash.table,
2173 						 1 * sizeof(*hash->table));
2174 			kfree(nl_table);
2175 			goto panic;
2176 		}
2177 		hash->max_shift = order;
2178 		hash->shift = 0;
2179 		hash->mask = 0;
2180 		hash->rehash_time = jiffies;
2181 	}
2182 
2183 	netlink_add_usersock_entry();
2184 
2185 	sock_register(&netlink_family_ops);
2186 	register_pernet_subsys(&netlink_net_ops);
2187 	/* The netlink device handler may be needed early. */
2188 	rtnetlink_init();
2189 out:
2190 	return err;
2191 panic:
2192 	panic("netlink_init: Cannot allocate nl_table\n");
2193 }
2194 
2195 core_initcall(netlink_proto_init);
2196