xref: /openbmc/linux/net/netlink/af_netlink.c (revision ce932d0c5589e9766e089c22c66890dfc48fbd94)
1 /*
2  * NETLINK      Kernel-user communication protocol.
3  *
4  * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6  *
7  *		This program is free software; you can redistribute it and/or
8  *		modify it under the terms of the GNU General Public License
9  *		as published by the Free Software Foundation; either version
10  *		2 of the License, or (at your option) any later version.
11  *
12  * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
13  *                               added netlink_proto_exit
14  * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
15  * 				 use nlk_sk, as sk->protinfo is on a diet 8)
16  * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
17  * 				 - inc module use count of module that owns
18  * 				   the kernel socket in case userspace opens
19  * 				   socket of same protocol
20  * 				 - remove all module support, since netlink is
21  * 				   mandatory if CONFIG_NET=y these days
22  */
23 
24 #include <linux/module.h>
25 
26 #include <linux/capability.h>
27 #include <linux/kernel.h>
28 #include <linux/init.h>
29 #include <linux/signal.h>
30 #include <linux/sched.h>
31 #include <linux/errno.h>
32 #include <linux/string.h>
33 #include <linux/stat.h>
34 #include <linux/socket.h>
35 #include <linux/un.h>
36 #include <linux/fcntl.h>
37 #include <linux/termios.h>
38 #include <linux/sockios.h>
39 #include <linux/net.h>
40 #include <linux/fs.h>
41 #include <linux/slab.h>
42 #include <asm/uaccess.h>
43 #include <linux/skbuff.h>
44 #include <linux/netdevice.h>
45 #include <linux/rtnetlink.h>
46 #include <linux/proc_fs.h>
47 #include <linux/seq_file.h>
48 #include <linux/notifier.h>
49 #include <linux/security.h>
50 #include <linux/jhash.h>
51 #include <linux/jiffies.h>
52 #include <linux/random.h>
53 #include <linux/bitops.h>
54 #include <linux/mm.h>
55 #include <linux/types.h>
56 #include <linux/audit.h>
57 #include <linux/mutex.h>
58 
59 #include <net/net_namespace.h>
60 #include <net/sock.h>
61 #include <net/scm.h>
62 #include <net/netlink.h>
63 
64 #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
65 #define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
66 
67 struct netlink_sock {
68 	/* struct sock has to be the first member of netlink_sock */
69 	struct sock		sk;
70 	u32			pid;
71 	u32			dst_pid;
72 	u32			dst_group;
73 	u32			flags;
74 	u32			subscriptions;
75 	u32			ngroups;
76 	unsigned long		*groups;
77 	unsigned long		state;
78 	wait_queue_head_t	wait;
79 	struct netlink_callback	*cb;
80 	struct mutex		*cb_mutex;
81 	struct mutex		cb_def_mutex;
82 	void			(*netlink_rcv)(struct sk_buff *skb);
83 	struct module		*module;
84 };
85 
86 struct listeners {
87 	struct rcu_head		rcu;
88 	unsigned long		masks[0];
89 };
90 
91 #define NETLINK_KERNEL_SOCKET	0x1
92 #define NETLINK_RECV_PKTINFO	0x2
93 #define NETLINK_BROADCAST_SEND_ERROR	0x4
94 #define NETLINK_RECV_NO_ENOBUFS	0x8
95 
96 static inline struct netlink_sock *nlk_sk(struct sock *sk)
97 {
98 	return container_of(sk, struct netlink_sock, sk);
99 }
100 
101 static inline int netlink_is_kernel(struct sock *sk)
102 {
103 	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
104 }
105 
106 struct nl_pid_hash {
107 	struct hlist_head *table;
108 	unsigned long rehash_time;
109 
110 	unsigned int mask;
111 	unsigned int shift;
112 
113 	unsigned int entries;
114 	unsigned int max_shift;
115 
116 	u32 rnd;
117 };
118 
119 struct netlink_table {
120 	struct nl_pid_hash hash;
121 	struct hlist_head mc_list;
122 	struct listeners __rcu *listeners;
123 	unsigned int nl_nonroot;
124 	unsigned int groups;
125 	struct mutex *cb_mutex;
126 	struct module *module;
127 	int registered;
128 };
129 
130 static struct netlink_table *nl_table;
131 
132 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
133 
134 static int netlink_dump(struct sock *sk);
135 static void netlink_destroy_callback(struct netlink_callback *cb);
136 
137 static DEFINE_RWLOCK(nl_table_lock);
138 static atomic_t nl_table_users = ATOMIC_INIT(0);
139 
140 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
141 
142 static inline u32 netlink_group_mask(u32 group)
143 {
144 	return group ? 1 << (group - 1) : 0;
145 }
146 
147 static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
148 {
149 	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
150 }
151 
152 static void netlink_sock_destruct(struct sock *sk)
153 {
154 	struct netlink_sock *nlk = nlk_sk(sk);
155 
156 	if (nlk->cb) {
157 		if (nlk->cb->done)
158 			nlk->cb->done(nlk->cb);
159 		netlink_destroy_callback(nlk->cb);
160 	}
161 
162 	skb_queue_purge(&sk->sk_receive_queue);
163 
164 	if (!sock_flag(sk, SOCK_DEAD)) {
165 		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
166 		return;
167 	}
168 
169 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
170 	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
171 	WARN_ON(nlk_sk(sk)->groups);
172 }
173 
174 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
175  * SMP. Look, when several writers sleep and a reader wakes them up, all but
176  * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
177  * solves this, _but_ remember, it adds useless work on UP machines.
178  */
179 
180 void netlink_table_grab(void)
181 	__acquires(nl_table_lock)
182 {
183 	might_sleep();
184 
185 	write_lock_irq(&nl_table_lock);
186 
187 	if (atomic_read(&nl_table_users)) {
188 		DECLARE_WAITQUEUE(wait, current);
189 
190 		add_wait_queue_exclusive(&nl_table_wait, &wait);
191 		for (;;) {
192 			set_current_state(TASK_UNINTERRUPTIBLE);
193 			if (atomic_read(&nl_table_users) == 0)
194 				break;
195 			write_unlock_irq(&nl_table_lock);
196 			schedule();
197 			write_lock_irq(&nl_table_lock);
198 		}
199 
200 		__set_current_state(TASK_RUNNING);
201 		remove_wait_queue(&nl_table_wait, &wait);
202 	}
203 }
204 
205 void netlink_table_ungrab(void)
206 	__releases(nl_table_lock)
207 {
208 	write_unlock_irq(&nl_table_lock);
209 	wake_up(&nl_table_wait);
210 }
211 
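/*
 * Reader side of the scheme described above: bump nl_table_users under
 * nl_table_lock so that netlink_table_grab() will wait for us, then drop
 * the rwlock again.  netlink_unlock_table() wakes a pending grabber once
 * the last reader is gone.
 */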
212 static inline void
213 netlink_lock_table(void)
214 {
215 	/* read_lock() synchronizes us with netlink_table_grab() */
216 
217 	read_lock(&nl_table_lock);
218 	atomic_inc(&nl_table_users);
219 	read_unlock(&nl_table_lock);
220 }
221 
222 static inline void
223 netlink_unlock_table(void)
224 {
225 	if (atomic_dec_and_test(&nl_table_users))
226 		wake_up(&nl_table_wait);
227 }
228 
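/*
 * Look up a socket by protocol and pid under the table read lock.
 * Returns the socket with a reference held, or NULL; the caller must
 * drop the reference with sock_put().
 */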
229 static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid)
230 {
231 	struct nl_pid_hash *hash = &nl_table[protocol].hash;
232 	struct hlist_head *head;
233 	struct sock *sk;
234 	struct hlist_node *node;
235 
236 	read_lock(&nl_table_lock);
237 	head = nl_pid_hashfn(hash, pid);
238 	sk_for_each(sk, node, head) {
239 		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
240 			sock_hold(sk);
241 			goto found;
242 		}
243 	}
244 	sk = NULL;
245 found:
246 	read_unlock(&nl_table_lock);
247 	return sk;
248 }
249 
250 static struct hlist_head *nl_pid_hash_zalloc(size_t size)
251 {
252 	if (size <= PAGE_SIZE)
253 		return kzalloc(size, GFP_ATOMIC);
254 	else
255 		return (struct hlist_head *)
256 			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
257 					 get_order(size));
258 }
259 
260 static void nl_pid_hash_free(struct hlist_head *table, size_t size)
261 {
262 	if (size <= PAGE_SIZE)
263 		kfree(table);
264 	else
265 		free_pages((unsigned long)table, get_order(size));
266 }
267 
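/*
 * Rebuild the pid hash with a fresh seed, doubling it first when @grow
 * is set, and re-insert every socket into its new bucket.  Runs with the
 * table grabbed.  Returns 1 if the table was rebuilt, 0 otherwise.
 */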
268 static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
269 {
270 	unsigned int omask, mask, shift;
271 	size_t osize, size;
272 	struct hlist_head *otable, *table;
273 	int i;
274 
275 	omask = mask = hash->mask;
276 	osize = size = (mask + 1) * sizeof(*table);
277 	shift = hash->shift;
278 
279 	if (grow) {
280 		if (++shift > hash->max_shift)
281 			return 0;
282 		mask = mask * 2 + 1;
283 		size *= 2;
284 	}
285 
286 	table = nl_pid_hash_zalloc(size);
287 	if (!table)
288 		return 0;
289 
290 	otable = hash->table;
291 	hash->table = table;
292 	hash->mask = mask;
293 	hash->shift = shift;
294 	get_random_bytes(&hash->rnd, sizeof(hash->rnd));
295 
296 	for (i = 0; i <= omask; i++) {
297 		struct sock *sk;
298 		struct hlist_node *node, *tmp;
299 
300 		sk_for_each_safe(sk, node, tmp, &otable[i])
301 			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
302 	}
303 
304 	nl_pid_hash_free(otable, osize);
305 	hash->rehash_time = jiffies + 10 * 60 * HZ;
306 	return 1;
307 }
308 
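/*
 * Decide whether the hash needs attention before an insertion: grow it
 * once the average chain length exceeds one, or just re-seed it (at most
 * every ten minutes) when the chain being extended is longer than
 * average.
 */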
309 static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
310 {
311 	int avg = hash->entries >> hash->shift;
312 
313 	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
314 		return 1;
315 
316 	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
317 		nl_pid_hash_rehash(hash, 0);
318 		return 1;
319 	}
320 
321 	return 0;
322 }
323 
324 static const struct proto_ops netlink_ops;
325 
326 static void
327 netlink_update_listeners(struct sock *sk)
328 {
329 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
330 	struct hlist_node *node;
331 	unsigned long mask;
332 	unsigned int i;
333 
334 	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
335 		mask = 0;
336 		sk_for_each_bound(sk, node, &tbl->mc_list) {
337 			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
338 				mask |= nlk_sk(sk)->groups[i];
339 		}
340 		tbl->listeners->masks[i] = mask;
341 	}
342 	/* this function is only called with the netlink table "grabbed", which
343 	 * makes sure updates are visible before bind or setsockopt return. */
344 }
345 
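/*
 * Bind a socket to a pid in the per-protocol hash.  Fails with
 * -EADDRINUSE if the pid is already taken in this namespace and with
 * -EBUSY if the socket is bound already.
 */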
346 static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
347 {
348 	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
349 	struct hlist_head *head;
350 	int err = -EADDRINUSE;
351 	struct sock *osk;
352 	struct hlist_node *node;
353 	int len;
354 
355 	netlink_table_grab();
356 	head = nl_pid_hashfn(hash, pid);
357 	len = 0;
358 	sk_for_each(osk, node, head) {
359 		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
360 			break;
361 		len++;
362 	}
363 	if (node)
364 		goto err;
365 
366 	err = -EBUSY;
367 	if (nlk_sk(sk)->pid)
368 		goto err;
369 
370 	err = -ENOMEM;
371 	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
372 		goto err;
373 
374 	if (len && nl_pid_hash_dilute(hash, len))
375 		head = nl_pid_hashfn(hash, pid);
376 	hash->entries++;
377 	nlk_sk(sk)->pid = pid;
378 	sk_add_node(sk, head);
379 	err = 0;
380 
381 err:
382 	netlink_table_ungrab();
383 	return err;
384 }
385 
386 static void netlink_remove(struct sock *sk)
387 {
388 	netlink_table_grab();
389 	if (sk_del_node_init(sk))
390 		nl_table[sk->sk_protocol].hash.entries--;
391 	if (nlk_sk(sk)->subscriptions)
392 		__sk_del_bind_node(sk);
393 	netlink_table_ungrab();
394 }
395 
396 static struct proto netlink_proto = {
397 	.name	  = "NETLINK",
398 	.owner	  = THIS_MODULE,
399 	.obj_size = sizeof(struct netlink_sock),
400 };
401 
402 static int __netlink_create(struct net *net, struct socket *sock,
403 			    struct mutex *cb_mutex, int protocol)
404 {
405 	struct sock *sk;
406 	struct netlink_sock *nlk;
407 
408 	sock->ops = &netlink_ops;
409 
410 	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
411 	if (!sk)
412 		return -ENOMEM;
413 
414 	sock_init_data(sock, sk);
415 
416 	nlk = nlk_sk(sk);
417 	if (cb_mutex)
418 		nlk->cb_mutex = cb_mutex;
419 	else {
420 		nlk->cb_mutex = &nlk->cb_def_mutex;
421 		mutex_init(nlk->cb_mutex);
422 	}
423 	init_waitqueue_head(&nlk->wait);
424 
425 	sk->sk_destruct = netlink_sock_destruct;
426 	sk->sk_protocol = protocol;
427 	return 0;
428 }
429 
430 static int netlink_create(struct net *net, struct socket *sock, int protocol,
431 			  int kern)
432 {
433 	struct module *module = NULL;
434 	struct mutex *cb_mutex;
435 	struct netlink_sock *nlk;
436 	int err = 0;
437 
438 	sock->state = SS_UNCONNECTED;
439 
440 	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
441 		return -ESOCKTNOSUPPORT;
442 
443 	if (protocol < 0 || protocol >= MAX_LINKS)
444 		return -EPROTONOSUPPORT;
445 
446 	netlink_lock_table();
447 #ifdef CONFIG_MODULES
448 	if (!nl_table[protocol].registered) {
449 		netlink_unlock_table();
450 		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
451 		netlink_lock_table();
452 	}
453 #endif
454 	if (nl_table[protocol].registered &&
455 	    try_module_get(nl_table[protocol].module))
456 		module = nl_table[protocol].module;
457 	else
458 		err = -EPROTONOSUPPORT;
459 	cb_mutex = nl_table[protocol].cb_mutex;
460 	netlink_unlock_table();
461 
462 	if (err < 0)
463 		goto out;
464 
465 	err = __netlink_create(net, sock, cb_mutex, protocol);
466 	if (err < 0)
467 		goto out_module;
468 
469 	local_bh_disable();
470 	sock_prot_inuse_add(net, &netlink_proto, 1);
471 	local_bh_enable();
472 
473 	nlk = nlk_sk(sock->sk);
474 	nlk->module = module;
475 out:
476 	return err;
477 
478 out_module:
479 	module_put(module);
480 	goto out;
481 }
482 
483 static int netlink_release(struct socket *sock)
484 {
485 	struct sock *sk = sock->sk;
486 	struct netlink_sock *nlk;
487 
488 	if (!sk)
489 		return 0;
490 
491 	netlink_remove(sk);
492 	sock_orphan(sk);
493 	nlk = nlk_sk(sk);
494 
495 	/*
496 	 * OK. The socket is unlinked; any packets that arrive now
497 	 * will be purged.
498 	 */
499 
500 	sock->sk = NULL;
501 	wake_up_interruptible_all(&nlk->wait);
502 
503 	skb_queue_purge(&sk->sk_write_queue);
504 
505 	if (nlk->pid) {
506 		struct netlink_notify n = {
507 						.net = sock_net(sk),
508 						.protocol = sk->sk_protocol,
509 						.pid = nlk->pid,
510 					  };
511 		atomic_notifier_call_chain(&netlink_chain,
512 				NETLINK_URELEASE, &n);
513 	}
514 
515 	module_put(nlk->module);
516 
517 	netlink_table_grab();
518 	if (netlink_is_kernel(sk)) {
519 		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
520 		if (--nl_table[sk->sk_protocol].registered == 0) {
521 			kfree(nl_table[sk->sk_protocol].listeners);
522 			nl_table[sk->sk_protocol].module = NULL;
523 			nl_table[sk->sk_protocol].registered = 0;
524 		}
525 	} else if (nlk->subscriptions)
526 		netlink_update_listeners(sk);
527 	netlink_table_ungrab();
528 
529 	kfree(nlk->groups);
530 	nlk->groups = NULL;
531 
532 	local_bh_disable();
533 	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
534 	local_bh_enable();
535 	sock_put(sk);
536 	return 0;
537 }
538 
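/*
 * Pick a local pid automatically: try the caller's tgid first, then fall
 * back to a shared, descending pool of negative values on collision.
 */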
539 static int netlink_autobind(struct socket *sock)
540 {
541 	struct sock *sk = sock->sk;
542 	struct net *net = sock_net(sk);
543 	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
544 	struct hlist_head *head;
545 	struct sock *osk;
546 	struct hlist_node *node;
547 	s32 pid = task_tgid_vnr(current);
548 	int err;
549 	static s32 rover = -4097;
550 
551 retry:
552 	cond_resched();
553 	netlink_table_grab();
554 	head = nl_pid_hashfn(hash, pid);
555 	sk_for_each(osk, node, head) {
556 		if (!net_eq(sock_net(osk), net))
557 			continue;
558 		if (nlk_sk(osk)->pid == pid) {
559 			/* Bind collision, search negative pid values. */
560 			pid = rover--;
561 			if (rover > -4097)
562 				rover = -4097;
563 			netlink_table_ungrab();
564 			goto retry;
565 		}
566 	}
567 	netlink_table_ungrab();
568 
569 	err = netlink_insert(sk, net, pid);
570 	if (err == -EADDRINUSE)
571 		goto retry;
572 
573 	/* If 2 threads race to autobind, that is fine.  */
574 	if (err == -EBUSY)
575 		err = 0;
576 
577 	return err;
578 }
579 
580 static inline int netlink_capable(const struct socket *sock, unsigned int flag)
581 {
582 	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
583 	       capable(CAP_NET_ADMIN);
584 }
585 
586 static void
587 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
588 {
589 	struct netlink_sock *nlk = nlk_sk(sk);
590 
591 	if (nlk->subscriptions && !subscriptions)
592 		__sk_del_bind_node(sk);
593 	else if (!nlk->subscriptions && subscriptions)
594 		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
595 	nlk->subscriptions = subscriptions;
596 }
597 
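/*
 * Grow the per-socket group bitmap so that it covers every multicast
 * group registered for this protocol; newly added bits start out clear.
 * Grabs the table lock itself.
 */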
598 static int netlink_realloc_groups(struct sock *sk)
599 {
600 	struct netlink_sock *nlk = nlk_sk(sk);
601 	unsigned int groups;
602 	unsigned long *new_groups;
603 	int err = 0;
604 
605 	netlink_table_grab();
606 
607 	groups = nl_table[sk->sk_protocol].groups;
608 	if (!nl_table[sk->sk_protocol].registered) {
609 		err = -ENOENT;
610 		goto out_unlock;
611 	}
612 
613 	if (nlk->ngroups >= groups)
614 		goto out_unlock;
615 
616 	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
617 	if (new_groups == NULL) {
618 		err = -ENOMEM;
619 		goto out_unlock;
620 	}
621 	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
622 	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
623 
624 	nlk->groups = new_groups;
625 	nlk->ngroups = groups;
626  out_unlock:
627 	netlink_table_ungrab();
628 	return err;
629 }
630 
631 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
632 			int addr_len)
633 {
634 	struct sock *sk = sock->sk;
635 	struct net *net = sock_net(sk);
636 	struct netlink_sock *nlk = nlk_sk(sk);
637 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
638 	int err;
639 
640 	if (nladdr->nl_family != AF_NETLINK)
641 		return -EINVAL;
642 
643 	/* Only the superuser is allowed to listen to multicasts */
644 	if (nladdr->nl_groups) {
645 		if (!netlink_capable(sock, NL_NONROOT_RECV))
646 			return -EPERM;
647 		err = netlink_realloc_groups(sk);
648 		if (err)
649 			return err;
650 	}
651 
652 	if (nlk->pid) {
653 		if (nladdr->nl_pid != nlk->pid)
654 			return -EINVAL;
655 	} else {
656 		err = nladdr->nl_pid ?
657 			netlink_insert(sk, net, nladdr->nl_pid) :
658 			netlink_autobind(sock);
659 		if (err)
660 			return err;
661 	}
662 
663 	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
664 		return 0;
665 
666 	netlink_table_grab();
667 	netlink_update_subscriptions(sk, nlk->subscriptions +
668 					 hweight32(nladdr->nl_groups) -
669 					 hweight32(nlk->groups[0]));
670 	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
671 	netlink_update_listeners(sk);
672 	netlink_table_ungrab();
673 
674 	return 0;
675 }
676 
677 static int netlink_connect(struct socket *sock, struct sockaddr *addr,
678 			   int alen, int flags)
679 {
680 	int err = 0;
681 	struct sock *sk = sock->sk;
682 	struct netlink_sock *nlk = nlk_sk(sk);
683 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
684 
685 	if (alen < sizeof(addr->sa_family))
686 		return -EINVAL;
687 
688 	if (addr->sa_family == AF_UNSPEC) {
689 		sk->sk_state	= NETLINK_UNCONNECTED;
690 		nlk->dst_pid	= 0;
691 		nlk->dst_group  = 0;
692 		return 0;
693 	}
694 	if (addr->sa_family != AF_NETLINK)
695 		return -EINVAL;
696 
697 	/* Only the superuser is allowed to send multicasts */
698 	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
699 		return -EPERM;
700 
701 	if (!nlk->pid)
702 		err = netlink_autobind(sock);
703 
704 	if (err == 0) {
705 		sk->sk_state	= NETLINK_CONNECTED;
706 		nlk->dst_pid 	= nladdr->nl_pid;
707 		nlk->dst_group  = ffs(nladdr->nl_groups);
708 	}
709 
710 	return err;
711 }
712 
713 static int netlink_getname(struct socket *sock, struct sockaddr *addr,
714 			   int *addr_len, int peer)
715 {
716 	struct sock *sk = sock->sk;
717 	struct netlink_sock *nlk = nlk_sk(sk);
718 	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
719 
720 	nladdr->nl_family = AF_NETLINK;
721 	nladdr->nl_pad = 0;
722 	*addr_len = sizeof(*nladdr);
723 
724 	if (peer) {
725 		nladdr->nl_pid = nlk->dst_pid;
726 		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
727 	} else {
728 		nladdr->nl_pid = nlk->pid;
729 		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
730 	}
731 	return 0;
732 }
733 
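/*
 * Mark a receiver as overrun: report ENOBUFS once per overrun episode
 * (unless the socket asked for NETLINK_NO_ENOBUFS) and count the drop.
 */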
734 static void netlink_overrun(struct sock *sk)
735 {
736 	struct netlink_sock *nlk = nlk_sk(sk);
737 
738 	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
739 		if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
740 			sk->sk_err = ENOBUFS;
741 			sk->sk_error_report(sk);
742 		}
743 	}
744 	atomic_inc(&sk->sk_drops);
745 }
746 
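/*
 * Resolve a destination pid to a socket of the sender's protocol and
 * namespace.  Delivery is refused when the target is connected to some
 * other sender.  Returns the socket with a reference held, or an
 * ERR_PTR.
 */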
747 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
748 {
749 	struct sock *sock;
750 	struct netlink_sock *nlk;
751 
752 	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid);
753 	if (!sock)
754 		return ERR_PTR(-ECONNREFUSED);
755 
756 	/* Refuse delivery if the destination is connected to a different sender */
757 	nlk = nlk_sk(sock);
758 	if (sock->sk_state == NETLINK_CONNECTED &&
759 	    nlk->dst_pid != nlk_sk(ssk)->pid) {
760 		sock_put(sock);
761 		return ERR_PTR(-ECONNREFUSED);
762 	}
763 	return sock;
764 }
765 
766 struct sock *netlink_getsockbyfilp(struct file *filp)
767 {
768 	struct inode *inode = filp->f_path.dentry->d_inode;
769 	struct sock *sock;
770 
771 	if (!S_ISSOCK(inode->i_mode))
772 		return ERR_PTR(-ENOTSOCK);
773 
774 	sock = SOCKET_I(inode)->sk;
775 	if (sock->sk_family != AF_NETLINK)
776 		return ERR_PTR(-EINVAL);
777 
778 	sock_hold(sock);
779 	return sock;
780 }
781 
782 /*
783  * Attach a skb to a netlink socket.
784  * The caller must hold a reference to the destination socket. On error, the
785  * reference is dropped. The skb is not sent to the destination; all
786  * error checks are performed and memory in the queue is reserved.
787  * Return values:
788  * < 0: error. skb freed, reference to sock dropped.
789  * 0: continue
790  * 1: repeat lookup - reference dropped while waiting for socket memory.
791  */
792 int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
793 		      long *timeo, struct sock *ssk)
794 {
795 	struct netlink_sock *nlk;
796 
797 	nlk = nlk_sk(sk);
798 
799 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
800 	    test_bit(0, &nlk->state)) {
801 		DECLARE_WAITQUEUE(wait, current);
802 		if (!*timeo) {
803 			if (!ssk || netlink_is_kernel(ssk))
804 				netlink_overrun(sk);
805 			sock_put(sk);
806 			kfree_skb(skb);
807 			return -EAGAIN;
808 		}
809 
810 		__set_current_state(TASK_INTERRUPTIBLE);
811 		add_wait_queue(&nlk->wait, &wait);
812 
813 		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
814 		     test_bit(0, &nlk->state)) &&
815 		    !sock_flag(sk, SOCK_DEAD))
816 			*timeo = schedule_timeout(*timeo);
817 
818 		__set_current_state(TASK_RUNNING);
819 		remove_wait_queue(&nlk->wait, &wait);
820 		sock_put(sk);
821 
822 		if (signal_pending(current)) {
823 			kfree_skb(skb);
824 			return sock_intr_errno(*timeo);
825 		}
826 		return 1;
827 	}
828 	skb_set_owner_r(skb, sk);
829 	return 0;
830 }
831 
832 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
833 {
834 	int len = skb->len;
835 
836 	skb_queue_tail(&sk->sk_receive_queue, skb);
837 	sk->sk_data_ready(sk, len);
838 	return len;
839 }
840 
841 int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
842 {
843 	int len = __netlink_sendskb(sk, skb);
844 
845 	sock_put(sk);
846 	return len;
847 }
848 
849 void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
850 {
851 	kfree_skb(skb);
852 	sock_put(sk);
853 }
854 
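/*
 * Shrink an over-allocated skb before it is queued or broadcast so that
 * receive-buffer accounting matches the data actually carried; the skb
 * is cloned first if it is shared.
 */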
855 static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
856 {
857 	int delta;
858 
859 	skb_orphan(skb);
860 
861 	delta = skb->end - skb->tail;
862 	if (delta * 2 < skb->truesize)
863 		return skb;
864 
865 	if (skb_shared(skb)) {
866 		struct sk_buff *nskb = skb_clone(skb, allocation);
867 		if (!nskb)
868 			return skb;
869 		kfree_skb(skb);
870 		skb = nskb;
871 	}
872 
873 	if (!pskb_expand_head(skb, 0, -delta, allocation))
874 		skb->truesize -= delta;
875 
876 	return skb;
877 }
878 
879 static void netlink_rcv_wake(struct sock *sk)
880 {
881 	struct netlink_sock *nlk = nlk_sk(sk);
882 
883 	if (skb_queue_empty(&sk->sk_receive_queue))
884 		clear_bit(0, &nlk->state);
885 	if (!test_bit(0, &nlk->state))
886 		wake_up_interruptible(&nlk->wait);
887 }
888 
889 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
890 {
891 	int ret;
892 	struct netlink_sock *nlk = nlk_sk(sk);
893 
894 	ret = -ECONNREFUSED;
895 	if (nlk->netlink_rcv != NULL) {
896 		ret = skb->len;
897 		skb_set_owner_r(skb, sk);
898 		nlk->netlink_rcv(skb);
899 	}
900 	kfree_skb(skb);
901 	sock_put(sk);
902 	return ret;
903 }
904 
905 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
906 		    u32 pid, int nonblock)
907 {
908 	struct sock *sk;
909 	int err;
910 	long timeo;
911 
912 	skb = netlink_trim(skb, gfp_any());
913 
914 	timeo = sock_sndtimeo(ssk, nonblock);
915 retry:
916 	sk = netlink_getsockbypid(ssk, pid);
917 	if (IS_ERR(sk)) {
918 		kfree_skb(skb);
919 		return PTR_ERR(sk);
920 	}
921 	if (netlink_is_kernel(sk))
922 		return netlink_unicast_kernel(sk, skb);
923 
924 	if (sk_filter(sk, skb)) {
925 		err = skb->len;
926 		kfree_skb(skb);
927 		sock_put(sk);
928 		return err;
929 	}
930 
931 	err = netlink_attachskb(sk, skb, &timeo, ssk);
932 	if (err == 1)
933 		goto retry;
934 	if (err)
935 		return err;
936 
937 	return netlink_sendskb(sk, skb);
938 }
939 EXPORT_SYMBOL(netlink_unicast);
940 
941 int netlink_has_listeners(struct sock *sk, unsigned int group)
942 {
943 	int res = 0;
944 	struct listeners *listeners;
945 
946 	BUG_ON(!netlink_is_kernel(sk));
947 
948 	rcu_read_lock();
949 	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
950 
951 	if (group - 1 < nl_table[sk->sk_protocol].groups)
952 		res = test_bit(group - 1, listeners->masks);
953 
954 	rcu_read_unlock();
955 
956 	return res;
957 }
958 EXPORT_SYMBOL_GPL(netlink_has_listeners);
959 
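/*
 * Queue one copy of a broadcast on a member socket.  Returns -1 when the
 * receive buffer is full (the member is overrun), otherwise whether the
 * queue is now more than half full, which the caller uses as a
 * congestion hint.
 */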
960 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
961 {
962 	struct netlink_sock *nlk = nlk_sk(sk);
963 
964 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
965 	    !test_bit(0, &nlk->state)) {
966 		skb_set_owner_r(skb, sk);
967 		__netlink_sendskb(sk, skb);
968 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
969 	}
970 	return -1;
971 }
972 
973 struct netlink_broadcast_data {
974 	struct sock *exclude_sk;
975 	struct net *net;
976 	u32 pid;
977 	u32 group;
978 	int failure;
979 	int delivery_failure;
980 	int congested;
981 	int delivered;
982 	gfp_t allocation;
983 	struct sk_buff *skb, *skb2;
984 	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
985 	void *tx_data;
986 };
987 
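/*
 * Deliver the broadcast to a single candidate socket.  The clone of the
 * original skb is made lazily and reused from member to member until one
 * of them actually takes it.
 */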
988 static int do_one_broadcast(struct sock *sk,
989 				   struct netlink_broadcast_data *p)
990 {
991 	struct netlink_sock *nlk = nlk_sk(sk);
992 	int val;
993 
994 	if (p->exclude_sk == sk)
995 		goto out;
996 
997 	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
998 	    !test_bit(p->group - 1, nlk->groups))
999 		goto out;
1000 
1001 	if (!net_eq(sock_net(sk), p->net))
1002 		goto out;
1003 
1004 	if (p->failure) {
1005 		netlink_overrun(sk);
1006 		goto out;
1007 	}
1008 
1009 	sock_hold(sk);
1010 	if (p->skb2 == NULL) {
1011 		if (skb_shared(p->skb)) {
1012 			p->skb2 = skb_clone(p->skb, p->allocation);
1013 		} else {
1014 			p->skb2 = skb_get(p->skb);
1015 			/*
1016 			 * skb ownership may have been set when
1017 			 * delivered to a previous socket.
1018 			 */
1019 			skb_orphan(p->skb2);
1020 		}
1021 	}
1022 	if (p->skb2 == NULL) {
1023 		netlink_overrun(sk);
1024 		/* Clone failed. Notify ALL listeners. */
1025 		p->failure = 1;
1026 		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1027 			p->delivery_failure = 1;
1028 	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1029 		kfree_skb(p->skb2);
1030 		p->skb2 = NULL;
1031 	} else if (sk_filter(sk, p->skb2)) {
1032 		kfree_skb(p->skb2);
1033 		p->skb2 = NULL;
1034 	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1035 		netlink_overrun(sk);
1036 		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1037 			p->delivery_failure = 1;
1038 	} else {
1039 		p->congested |= val;
1040 		p->delivered = 1;
1041 		p->skb2 = NULL;
1042 	}
1043 	sock_put(sk);
1044 
1045 out:
1046 	return 0;
1047 }
1048 
1049 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
1050 	u32 group, gfp_t allocation,
1051 	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1052 	void *filter_data)
1053 {
1054 	struct net *net = sock_net(ssk);
1055 	struct netlink_broadcast_data info;
1056 	struct hlist_node *node;
1057 	struct sock *sk;
1058 
1059 	skb = netlink_trim(skb, allocation);
1060 
1061 	info.exclude_sk = ssk;
1062 	info.net = net;
1063 	info.pid = pid;
1064 	info.group = group;
1065 	info.failure = 0;
1066 	info.delivery_failure = 0;
1067 	info.congested = 0;
1068 	info.delivered = 0;
1069 	info.allocation = allocation;
1070 	info.skb = skb;
1071 	info.skb2 = NULL;
1072 	info.tx_filter = filter;
1073 	info.tx_data = filter_data;
1074 
1075 	/* While we sleep in clone, do not allow the socket list to change */
1076 
1077 	netlink_lock_table();
1078 
1079 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1080 		do_one_broadcast(sk, &info);
1081 
1082 	consume_skb(skb);
1083 
1084 	netlink_unlock_table();
1085 
1086 	if (info.delivery_failure) {
1087 		kfree_skb(info.skb2);
1088 		return -ENOBUFS;
1089 	} else
1090 		consume_skb(info.skb2);
1091 
1092 	if (info.delivered) {
1093 		if (info.congested && (allocation & __GFP_WAIT))
1094 			yield();
1095 		return 0;
1096 	}
1097 	return -ESRCH;
1098 }
1099 EXPORT_SYMBOL(netlink_broadcast_filtered);
1100 
1101 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
1102 		      u32 group, gfp_t allocation)
1103 {
1104 	return netlink_broadcast_filtered(ssk, skb, pid, group, allocation,
1105 		NULL, NULL);
1106 }
1107 EXPORT_SYMBOL(netlink_broadcast);
1108 
1109 struct netlink_set_err_data {
1110 	struct sock *exclude_sk;
1111 	u32 pid;
1112 	u32 group;
1113 	int code;
1114 };
1115 
1116 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1117 {
1118 	struct netlink_sock *nlk = nlk_sk(sk);
1119 	int ret = 0;
1120 
1121 	if (sk == p->exclude_sk)
1122 		goto out;
1123 
1124 	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
1125 		goto out;
1126 
1127 	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
1128 	    !test_bit(p->group - 1, nlk->groups))
1129 		goto out;
1130 
1131 	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
1132 		ret = 1;
1133 		goto out;
1134 	}
1135 
1136 	sk->sk_err = p->code;
1137 	sk->sk_error_report(sk);
1138 out:
1139 	return ret;
1140 }
1141 
1142 /**
1143  * netlink_set_err - report error to broadcast listeners
1144  * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
1145  * @pid: the PID of a process that we want to skip (if any)
1146  * @group: the broadcast group that will notice the error
1147  * @code: error code, must be negative (as usual in kernelspace)
1148  *
1149  * This function returns the number of broadcast listeners that have set the
1150  * NETLINK_RECV_NO_ENOBUFS socket option.
1151  */
1152 int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1153 {
1154 	struct netlink_set_err_data info;
1155 	struct hlist_node *node;
1156 	struct sock *sk;
1157 	int ret = 0;
1158 
1159 	info.exclude_sk = ssk;
1160 	info.pid = pid;
1161 	info.group = group;
1162 	/* sk->sk_err wants a positive error value */
1163 	info.code = -code;
1164 
1165 	read_lock(&nl_table_lock);
1166 
1167 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1168 		ret += do_one_set_err(sk, &info);
1169 
1170 	read_unlock(&nl_table_lock);
1171 	return ret;
1172 }
1173 EXPORT_SYMBOL(netlink_set_err);
1174 
1175 /* must be called with netlink table grabbed */
1176 static void netlink_update_socket_mc(struct netlink_sock *nlk,
1177 				     unsigned int group,
1178 				     int is_new)
1179 {
1180 	int old, new = !!is_new, subscriptions;
1181 
1182 	old = test_bit(group - 1, nlk->groups);
1183 	subscriptions = nlk->subscriptions - old + new;
1184 	if (new)
1185 		__set_bit(group - 1, nlk->groups);
1186 	else
1187 		__clear_bit(group - 1, nlk->groups);
1188 	netlink_update_subscriptions(&nlk->sk, subscriptions);
1189 	netlink_update_listeners(&nlk->sk);
1190 }
1191 
1192 static int netlink_setsockopt(struct socket *sock, int level, int optname,
1193 			      char __user *optval, unsigned int optlen)
1194 {
1195 	struct sock *sk = sock->sk;
1196 	struct netlink_sock *nlk = nlk_sk(sk);
1197 	unsigned int val = 0;
1198 	int err;
1199 
1200 	if (level != SOL_NETLINK)
1201 		return -ENOPROTOOPT;
1202 
1203 	if (optlen >= sizeof(int) &&
1204 	    get_user(val, (unsigned int __user *)optval))
1205 		return -EFAULT;
1206 
1207 	switch (optname) {
1208 	case NETLINK_PKTINFO:
1209 		if (val)
1210 			nlk->flags |= NETLINK_RECV_PKTINFO;
1211 		else
1212 			nlk->flags &= ~NETLINK_RECV_PKTINFO;
1213 		err = 0;
1214 		break;
1215 	case NETLINK_ADD_MEMBERSHIP:
1216 	case NETLINK_DROP_MEMBERSHIP: {
1217 		if (!netlink_capable(sock, NL_NONROOT_RECV))
1218 			return -EPERM;
1219 		err = netlink_realloc_groups(sk);
1220 		if (err)
1221 			return err;
1222 		if (!val || val - 1 >= nlk->ngroups)
1223 			return -EINVAL;
1224 		netlink_table_grab();
1225 		netlink_update_socket_mc(nlk, val,
1226 					 optname == NETLINK_ADD_MEMBERSHIP);
1227 		netlink_table_ungrab();
1228 		err = 0;
1229 		break;
1230 	}
1231 	case NETLINK_BROADCAST_ERROR:
1232 		if (val)
1233 			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
1234 		else
1235 			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
1236 		err = 0;
1237 		break;
1238 	case NETLINK_NO_ENOBUFS:
1239 		if (val) {
1240 			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
1241 			clear_bit(0, &nlk->state);
1242 			wake_up_interruptible(&nlk->wait);
1243 		} else
1244 			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
1245 		err = 0;
1246 		break;
1247 	default:
1248 		err = -ENOPROTOOPT;
1249 	}
1250 	return err;
1251 }
1252 
1253 static int netlink_getsockopt(struct socket *sock, int level, int optname,
1254 			      char __user *optval, int __user *optlen)
1255 {
1256 	struct sock *sk = sock->sk;
1257 	struct netlink_sock *nlk = nlk_sk(sk);
1258 	int len, val, err;
1259 
1260 	if (level != SOL_NETLINK)
1261 		return -ENOPROTOOPT;
1262 
1263 	if (get_user(len, optlen))
1264 		return -EFAULT;
1265 	if (len < 0)
1266 		return -EINVAL;
1267 
1268 	switch (optname) {
1269 	case NETLINK_PKTINFO:
1270 		if (len < sizeof(int))
1271 			return -EINVAL;
1272 		len = sizeof(int);
1273 		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
1274 		if (put_user(len, optlen) ||
1275 		    put_user(val, optval))
1276 			return -EFAULT;
1277 		err = 0;
1278 		break;
1279 	case NETLINK_BROADCAST_ERROR:
1280 		if (len < sizeof(int))
1281 			return -EINVAL;
1282 		len = sizeof(int);
1283 		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
1284 		if (put_user(len, optlen) ||
1285 		    put_user(val, optval))
1286 			return -EFAULT;
1287 		err = 0;
1288 		break;
1289 	case NETLINK_NO_ENOBUFS:
1290 		if (len < sizeof(int))
1291 			return -EINVAL;
1292 		len = sizeof(int);
1293 		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
1294 		if (put_user(len, optlen) ||
1295 		    put_user(val, optval))
1296 			return -EFAULT;
1297 		err = 0;
1298 		break;
1299 	default:
1300 		err = -ENOPROTOOPT;
1301 	}
1302 	return err;
1303 }
1304 
1305 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
1306 {
1307 	struct nl_pktinfo info;
1308 
1309 	info.group = NETLINK_CB(skb).dst_group;
1310 	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
1311 }
1312 
1313 static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1314 			   struct msghdr *msg, size_t len)
1315 {
1316 	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1317 	struct sock *sk = sock->sk;
1318 	struct netlink_sock *nlk = nlk_sk(sk);
1319 	struct sockaddr_nl *addr = msg->msg_name;
1320 	u32 dst_pid;
1321 	u32 dst_group;
1322 	struct sk_buff *skb;
1323 	int err;
1324 	struct scm_cookie scm;
1325 
1326 	if (msg->msg_flags&MSG_OOB)
1327 		return -EOPNOTSUPP;
1328 
1329 	if (NULL == siocb->scm)
1330 		siocb->scm = &scm;
1331 
1332 	err = scm_send(sock, msg, siocb->scm);
1333 	if (err < 0)
1334 		return err;
1335 
1336 	if (msg->msg_namelen) {
1337 		err = -EINVAL;
1338 		if (addr->nl_family != AF_NETLINK)
1339 			goto out;
1340 		dst_pid = addr->nl_pid;
1341 		dst_group = ffs(addr->nl_groups);
1342 		err =  -EPERM;
1343 		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
1344 			goto out;
1345 	} else {
1346 		dst_pid = nlk->dst_pid;
1347 		dst_group = nlk->dst_group;
1348 	}
1349 
1350 	if (!nlk->pid) {
1351 		err = netlink_autobind(sock);
1352 		if (err)
1353 			goto out;
1354 	}
1355 
1356 	err = -EMSGSIZE;
1357 	if (len > sk->sk_sndbuf - 32)
1358 		goto out;
1359 	err = -ENOBUFS;
1360 	skb = alloc_skb(len, GFP_KERNEL);
1361 	if (skb == NULL)
1362 		goto out;
1363 
1364 	NETLINK_CB(skb).pid	= nlk->pid;
1365 	NETLINK_CB(skb).dst_group = dst_group;
1366 	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1367 
1368 	err = -EFAULT;
1369 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
1370 		kfree_skb(skb);
1371 		goto out;
1372 	}
1373 
1374 	err = security_netlink_send(sk, skb);
1375 	if (err) {
1376 		kfree_skb(skb);
1377 		goto out;
1378 	}
1379 
1380 	if (dst_group) {
1381 		atomic_inc(&skb->users);
1382 		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
1383 	}
1384 	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
1385 
1386 out:
1387 	scm_destroy(siocb->scm);
1388 	return err;
1389 }
1390 
1391 static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1392 			   struct msghdr *msg, size_t len,
1393 			   int flags)
1394 {
1395 	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1396 	struct scm_cookie scm;
1397 	struct sock *sk = sock->sk;
1398 	struct netlink_sock *nlk = nlk_sk(sk);
1399 	int noblock = flags&MSG_DONTWAIT;
1400 	size_t copied;
1401 	struct sk_buff *skb, *data_skb;
1402 	int err, ret;
1403 
1404 	if (flags&MSG_OOB)
1405 		return -EOPNOTSUPP;
1406 
1407 	copied = 0;
1408 
1409 	skb = skb_recv_datagram(sk, flags, noblock, &err);
1410 	if (skb == NULL)
1411 		goto out;
1412 
1413 	data_skb = skb;
1414 
1415 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
1416 	if (unlikely(skb_shinfo(skb)->frag_list)) {
1417 		/*
1418 		 * If this skb has a frag_list, that means that we
1419 		 * will have to use the frag_list skb's data for compat tasks
1420 		 * and the regular skb's data for normal (non-compat) tasks.
1421 		 *
1422 		 * If we need to send the compat skb, assign it to the
1423 		 * 'data_skb' variable so that it will be used below for data
1424 		 * copying. We keep 'skb' for everything else, including
1425 		 * freeing both later.
1426 		 */
1427 		if (flags & MSG_CMSG_COMPAT)
1428 			data_skb = skb_shinfo(skb)->frag_list;
1429 	}
1430 #endif
1431 
1432 	msg->msg_namelen = 0;
1433 
1434 	copied = data_skb->len;
1435 	if (len < copied) {
1436 		msg->msg_flags |= MSG_TRUNC;
1437 		copied = len;
1438 	}
1439 
1440 	skb_reset_transport_header(data_skb);
1441 	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
1442 
1443 	if (msg->msg_name) {
1444 		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
1445 		addr->nl_family = AF_NETLINK;
1446 		addr->nl_pad    = 0;
1447 		addr->nl_pid	= NETLINK_CB(skb).pid;
1448 		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
1449 		msg->msg_namelen = sizeof(*addr);
1450 	}
1451 
1452 	if (nlk->flags & NETLINK_RECV_PKTINFO)
1453 		netlink_cmsg_recv_pktinfo(msg, skb);
1454 
1455 	if (NULL == siocb->scm) {
1456 		memset(&scm, 0, sizeof(scm));
1457 		siocb->scm = &scm;
1458 	}
1459 	siocb->scm->creds = *NETLINK_CREDS(skb);
1460 	if (flags & MSG_TRUNC)
1461 		copied = data_skb->len;
1462 
1463 	skb_free_datagram(sk, skb);
1464 
1465 	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
1466 		ret = netlink_dump(sk);
1467 		if (ret) {
1468 			sk->sk_err = ret;
1469 			sk->sk_error_report(sk);
1470 		}
1471 	}
1472 
1473 	scm_recv(sock, msg, siocb->scm, flags);
1474 out:
1475 	netlink_rcv_wake(sk);
1476 	return err ? : copied;
1477 }
1478 
1479 static void netlink_data_ready(struct sock *sk, int len)
1480 {
1481 	BUG();
1482 }
1483 
1484 /*
1485  *	We export these functions to other modules. They provide a
1486  *	complete set of kernel non-blocking support for message
1487  *	queueing.
1488  */
1489 
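/*
 * Minimal usage sketch (MY_UNIT and my_rcv are illustrative names, not
 * part of this file):
 *
 *	sk = netlink_kernel_create(&init_net, MY_UNIT, 0, my_rcv,
 *				   NULL, THIS_MODULE);
 *	if (sk == NULL)
 *		return -ENOMEM;
 */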
1490 struct sock *
1491 netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1492 		      void (*input)(struct sk_buff *skb),
1493 		      struct mutex *cb_mutex, struct module *module)
1494 {
1495 	struct socket *sock;
1496 	struct sock *sk;
1497 	struct netlink_sock *nlk;
1498 	struct listeners *listeners = NULL;
1499 
1500 	BUG_ON(!nl_table);
1501 
1502 	if (unit < 0 || unit >= MAX_LINKS)
1503 		return NULL;
1504 
1505 	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
1506 		return NULL;
1507 
1508 	/*
1509 	 * We just have to hold a reference on the net from sk, but must not
1510 	 * get_net() it. Besides, we cannot get and then put the net here.
1511 	 * So we create the socket inside init_net and then move it to net.
1512 	 */
1513 
1514 	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
1515 		goto out_sock_release_nosk;
1516 
1517 	sk = sock->sk;
1518 	sk_change_net(sk, net);
1519 
1520 	if (groups < 32)
1521 		groups = 32;
1522 
1523 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
1524 	if (!listeners)
1525 		goto out_sock_release;
1526 
1527 	sk->sk_data_ready = netlink_data_ready;
1528 	if (input)
1529 		nlk_sk(sk)->netlink_rcv = input;
1530 
1531 	if (netlink_insert(sk, net, 0))
1532 		goto out_sock_release;
1533 
1534 	nlk = nlk_sk(sk);
1535 	nlk->flags |= NETLINK_KERNEL_SOCKET;
1536 
1537 	netlink_table_grab();
1538 	if (!nl_table[unit].registered) {
1539 		nl_table[unit].groups = groups;
1540 		rcu_assign_pointer(nl_table[unit].listeners, listeners);
1541 		nl_table[unit].cb_mutex = cb_mutex;
1542 		nl_table[unit].module = module;
1543 		nl_table[unit].registered = 1;
1544 	} else {
1545 		kfree(listeners);
1546 		nl_table[unit].registered++;
1547 	}
1548 	netlink_table_ungrab();
1549 	return sk;
1550 
1551 out_sock_release:
1552 	kfree(listeners);
1553 	netlink_kernel_release(sk);
1554 	return NULL;
1555 
1556 out_sock_release_nosk:
1557 	sock_release(sock);
1558 	return NULL;
1559 }
1560 EXPORT_SYMBOL(netlink_kernel_create);
1561 
1562 
1563 void
1564 netlink_kernel_release(struct sock *sk)
1565 {
1566 	sk_release_kernel(sk);
1567 }
1568 EXPORT_SYMBOL(netlink_kernel_release);
1569 
1570 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
1571 {
1572 	struct listeners *new, *old;
1573 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
1574 
1575 	if (groups < 32)
1576 		groups = 32;
1577 
1578 	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
1579 		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
1580 		if (!new)
1581 			return -ENOMEM;
1582 		old = rcu_dereference_protected(tbl->listeners, 1);
1583 		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
1584 		rcu_assign_pointer(tbl->listeners, new);
1585 
1586 		kfree_rcu(old, rcu);
1587 	}
1588 	tbl->groups = groups;
1589 
1590 	return 0;
1591 }
1592 
1593 /**
1594  * netlink_change_ngroups - change number of multicast groups
1595  *
1596  * This changes the number of multicast groups that are available
1597  * on a certain netlink family. Note that it is not possible to
1598  * change the number of groups to below 32. Also note that it does
1599  * not implicitly call netlink_clear_multicast_users() when the
1600  * number of groups is reduced.
1601  *
1602  * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
1603  * @groups: The new number of groups.
1604  */
1605 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
1606 {
1607 	int err;
1608 
1609 	netlink_table_grab();
1610 	err = __netlink_change_ngroups(sk, groups);
1611 	netlink_table_ungrab();
1612 
1613 	return err;
1614 }
1615 
1616 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
1617 {
1618 	struct sock *sk;
1619 	struct hlist_node *node;
1620 	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
1621 
1622 	sk_for_each_bound(sk, node, &tbl->mc_list)
1623 		netlink_update_socket_mc(nlk_sk(sk), group, 0);
1624 }
1625 
1626 /**
1627  * netlink_clear_multicast_users - kick all listeners off a multicast group
1628  *
1629  * This function removes all listeners from the given group.
1630  * @ksk: The kernel netlink socket, as returned by
1631  *	netlink_kernel_create().
1632  * @group: The multicast group to clear.
1633  */
1634 void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
1635 {
1636 	netlink_table_grab();
1637 	__netlink_clear_multicast_users(ksk, group);
1638 	netlink_table_ungrab();
1639 }
1640 
1641 void netlink_set_nonroot(int protocol, unsigned int flags)
1642 {
1643 	if ((unsigned int)protocol < MAX_LINKS)
1644 		nl_table[protocol].nl_nonroot = flags;
1645 }
1646 EXPORT_SYMBOL(netlink_set_nonroot);
1647 
1648 static void netlink_destroy_callback(struct netlink_callback *cb)
1649 {
1650 	kfree_skb(cb->skb);
1651 	kfree(cb);
1652 }
1653 
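/*
 * Reserve room for a netlink message header in skb and fill it in; any
 * alignment padding after the payload is zeroed.
 */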
1654 struct nlmsghdr *
1655 __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
1656 {
1657 	struct nlmsghdr *nlh;
1658 	int size = NLMSG_LENGTH(len);
1659 
1660 	nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
1661 	nlh->nlmsg_type = type;
1662 	nlh->nlmsg_len = size;
1663 	nlh->nlmsg_flags = flags;
1664 	nlh->nlmsg_pid = pid;
1665 	nlh->nlmsg_seq = seq;
1666 	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
1667 		memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
1668 	return nlh;
1669 }
1670 EXPORT_SYMBOL(__nlmsg_put);
1671 
1672 /*
1673  * It looks a bit ugly.
1674  * It would be better to create a kernel thread.
1675  */
1676 
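/*
 * Deliver the next chunk of a pending dump: allocate a receive-charged
 * skb, let cb->dump() fill it and queue it on the requesting socket.
 * Once the dump callback reports completion (returns <= 0), a NLMSG_DONE
 * message carrying that value is queued and the callback is torn down.
 */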
1677 static int netlink_dump(struct sock *sk)
1678 {
1679 	struct netlink_sock *nlk = nlk_sk(sk);
1680 	struct netlink_callback *cb;
1681 	struct sk_buff *skb = NULL;
1682 	struct nlmsghdr *nlh;
1683 	int len, err = -ENOBUFS;
1684 	int alloc_size;
1685 
1686 	mutex_lock(nlk->cb_mutex);
1687 
1688 	cb = nlk->cb;
1689 	if (cb == NULL) {
1690 		err = -EINVAL;
1691 		goto errout_skb;
1692 	}
1693 
1694 	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
1695 
1696 	skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
1697 	if (!skb)
1698 		goto errout_skb;
1699 
1700 	len = cb->dump(skb, cb);
1701 
1702 	if (len > 0) {
1703 		mutex_unlock(nlk->cb_mutex);
1704 
1705 		if (sk_filter(sk, skb))
1706 			kfree_skb(skb);
1707 		else
1708 			__netlink_sendskb(sk, skb);
1709 		return 0;
1710 	}
1711 
1712 	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
1713 	if (!nlh)
1714 		goto errout_skb;
1715 
1716 	nl_dump_check_consistent(cb, nlh);
1717 
1718 	memcpy(nlmsg_data(nlh), &len, sizeof(len));
1719 
1720 	if (sk_filter(sk, skb))
1721 		kfree_skb(skb);
1722 	else
1723 		__netlink_sendskb(sk, skb);
1724 
1725 	if (cb->done)
1726 		cb->done(cb);
1727 	nlk->cb = NULL;
1728 	mutex_unlock(nlk->cb_mutex);
1729 
1730 	netlink_destroy_callback(cb);
1731 	return 0;
1732 
1733 errout_skb:
1734 	mutex_unlock(nlk->cb_mutex);
1735 	kfree_skb(skb);
1736 	return err;
1737 }
1738 
1739 int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1740 		       const struct nlmsghdr *nlh,
1741 		       struct netlink_dump_control *control)
1742 {
1743 	struct netlink_callback *cb;
1744 	struct sock *sk;
1745 	struct netlink_sock *nlk;
1746 	int ret;
1747 
1748 	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1749 	if (cb == NULL)
1750 		return -ENOBUFS;
1751 
1752 	cb->dump = control->dump;
1753 	cb->done = control->done;
1754 	cb->nlh = nlh;
1755 	cb->data = control->data;
1756 	cb->min_dump_alloc = control->min_dump_alloc;
1757 	atomic_inc(&skb->users);
1758 	cb->skb = skb;
1759 
1760 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
1761 	if (sk == NULL) {
1762 		netlink_destroy_callback(cb);
1763 		return -ECONNREFUSED;
1764 	}
1765 	nlk = nlk_sk(sk);
1766 	/* A dump is in progress... */
1767 	mutex_lock(nlk->cb_mutex);
1768 	if (nlk->cb) {
1769 		mutex_unlock(nlk->cb_mutex);
1770 		netlink_destroy_callback(cb);
1771 		sock_put(sk);
1772 		return -EBUSY;
1773 	}
1774 	nlk->cb = cb;
1775 	mutex_unlock(nlk->cb_mutex);
1776 
1777 	ret = netlink_dump(sk);
1778 
1779 	sock_put(sk);
1780 
1781 	if (ret)
1782 		return ret;
1783 
1784 	/* We successfully started a dump; by returning -EINTR we
1785 	 * signal the caller not to send an ACK even if one was requested.
1786 	 */
1787 	return -EINTR;
1788 }
1789 EXPORT_SYMBOL(netlink_dump_start);
1790 
1791 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
1792 {
1793 	struct sk_buff *skb;
1794 	struct nlmsghdr *rep;
1795 	struct nlmsgerr *errmsg;
1796 	size_t payload = sizeof(*errmsg);
1797 
1798 	/* error messages get the original request appended */
1799 	if (err)
1800 		payload += nlmsg_len(nlh);
1801 
1802 	skb = nlmsg_new(payload, GFP_KERNEL);
1803 	if (!skb) {
1804 		struct sock *sk;
1805 
1806 		sk = netlink_lookup(sock_net(in_skb->sk),
1807 				    in_skb->sk->sk_protocol,
1808 				    NETLINK_CB(in_skb).pid);
1809 		if (sk) {
1810 			sk->sk_err = ENOBUFS;
1811 			sk->sk_error_report(sk);
1812 			sock_put(sk);
1813 		}
1814 		return;
1815 	}
1816 
1817 	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1818 			  NLMSG_ERROR, payload, 0);
1819 	errmsg = nlmsg_data(rep);
1820 	errmsg->error = err;
1821 	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
1822 	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
1823 }
1824 EXPORT_SYMBOL(netlink_ack);
1825 
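/*
 * Walk every netlink message in an incoming skb and hand each request to
 * cb().  Control messages and non-requests are not passed to cb(); an
 * ACK (or error reply) is sent when the message asked for one or when
 * cb() failed, except that -EINTR from cb() suppresses the ACK because a
 * dump has been started and will answer later.
 */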
1826 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
1827 						     struct nlmsghdr *))
1828 {
1829 	struct nlmsghdr *nlh;
1830 	int err;
1831 
1832 	while (skb->len >= nlmsg_total_size(0)) {
1833 		int msglen;
1834 
1835 		nlh = nlmsg_hdr(skb);
1836 		err = 0;
1837 
1838 		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
1839 			return 0;
1840 
1841 		/* Only requests are handled by the kernel */
1842 		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
1843 			goto ack;
1844 
1845 		/* Skip control messages */
1846 		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
1847 			goto ack;
1848 
1849 		err = cb(skb, nlh);
1850 		if (err == -EINTR)
1851 			goto skip;
1852 
1853 ack:
1854 		if (nlh->nlmsg_flags & NLM_F_ACK || err)
1855 			netlink_ack(skb, nlh, err);
1856 
1857 skip:
1858 		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
1859 		if (msglen > skb->len)
1860 			msglen = skb->len;
1861 		skb_pull(skb, msglen);
1862 	}
1863 
1864 	return 0;
1865 }
1866 EXPORT_SYMBOL(netlink_rcv_skb);
1867 
1868 /**
1869  * nlmsg_notify - send a notification netlink message
1870  * @sk: netlink socket to use
1871  * @skb: notification message
1872  * @pid: destination netlink pid for reports or 0
1873  * @group: destination multicast group or 0
1874  * @report: 1 to report back, 0 to disable
1875  * @flags: allocation flags
1876  */
1877 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
1878 		 unsigned int group, int report, gfp_t flags)
1879 {
1880 	int err = 0;
1881 
1882 	if (group) {
1883 		int exclude_pid = 0;
1884 
1885 		if (report) {
1886 			atomic_inc(&skb->users);
1887 			exclude_pid = pid;
1888 		}
1889 
1890 		/* errors are reported via the destination's sk->sk_err, but
1891 		 * delivery errors propagate if NETLINK_BROADCAST_ERROR is set */
1892 		err = nlmsg_multicast(sk, skb, exclude_pid, group, flags);
1893 	}
1894 
1895 	if (report) {
1896 		int err2;
1897 
1898 		err2 = nlmsg_unicast(sk, skb, pid);
1899 		if (!err || err == -ESRCH)
1900 			err = err2;
1901 	}
1902 
1903 	return err;
1904 }
1905 EXPORT_SYMBOL(nlmsg_notify);
1906 
1907 #ifdef CONFIG_PROC_FS
1908 struct nl_seq_iter {
1909 	struct seq_net_private p;
1910 	int link;
1911 	int hash_idx;
1912 };
1913 
1914 static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
1915 {
1916 	struct nl_seq_iter *iter = seq->private;
1917 	int i, j;
1918 	struct sock *s;
1919 	struct hlist_node *node;
1920 	loff_t off = 0;
1921 
1922 	for (i = 0; i < MAX_LINKS; i++) {
1923 		struct nl_pid_hash *hash = &nl_table[i].hash;
1924 
1925 		for (j = 0; j <= hash->mask; j++) {
1926 			sk_for_each(s, node, &hash->table[j]) {
1927 				if (sock_net(s) != seq_file_net(seq))
1928 					continue;
1929 				if (off == pos) {
1930 					iter->link = i;
1931 					iter->hash_idx = j;
1932 					return s;
1933 				}
1934 				++off;
1935 			}
1936 		}
1937 	}
1938 	return NULL;
1939 }
1940 
1941 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
1942 	__acquires(nl_table_lock)
1943 {
1944 	read_lock(&nl_table_lock);
1945 	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1946 }
1947 
1948 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1949 {
1950 	struct sock *s;
1951 	struct nl_seq_iter *iter;
1952 	int i, j;
1953 
1954 	++*pos;
1955 
1956 	if (v == SEQ_START_TOKEN)
1957 		return netlink_seq_socket_idx(seq, 0);
1958 
1959 	iter = seq->private;
1960 	s = v;
1961 	do {
1962 		s = sk_next(s);
1963 	} while (s && sock_net(s) != seq_file_net(seq));
1964 	if (s)
1965 		return s;
1966 
1967 	i = iter->link;
1968 	j = iter->hash_idx + 1;
1969 
1970 	do {
1971 		struct nl_pid_hash *hash = &nl_table[i].hash;
1972 
1973 		for (; j <= hash->mask; j++) {
1974 			s = sk_head(&hash->table[j]);
1975 			while (s && sock_net(s) != seq_file_net(seq))
1976 				s = sk_next(s);
1977 			if (s) {
1978 				iter->link = i;
1979 				iter->hash_idx = j;
1980 				return s;
1981 			}
1982 		}
1983 
1984 		j = 0;
1985 	} while (++i < MAX_LINKS);
1986 
1987 	return NULL;
1988 }
1989 
1990 static void netlink_seq_stop(struct seq_file *seq, void *v)
1991 	__releases(nl_table_lock)
1992 {
1993 	read_unlock(&nl_table_lock);
1994 }
1995 
1996 
1997 static int netlink_seq_show(struct seq_file *seq, void *v)
1998 {
1999 	if (v == SEQ_START_TOKEN)
2000 		seq_puts(seq,
2001 			 "sk       Eth Pid    Groups   "
2002 			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
2003 	else {
2004 		struct sock *s = v;
2005 		struct netlink_sock *nlk = nlk_sk(s);
2006 
2007 		seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
2008 			   s,
2009 			   s->sk_protocol,
2010 			   nlk->pid,
2011 			   nlk->groups ? (u32)nlk->groups[0] : 0,
2012 			   sk_rmem_alloc_get(s),
2013 			   sk_wmem_alloc_get(s),
2014 			   nlk->cb,
2015 			   atomic_read(&s->sk_refcnt),
2016 			   atomic_read(&s->sk_drops),
2017 			   sock_i_ino(s)
2018 			);
2019 
2020 	}
2021 	return 0;
2022 }
2023 
2024 static const struct seq_operations netlink_seq_ops = {
2025 	.start  = netlink_seq_start,
2026 	.next   = netlink_seq_next,
2027 	.stop   = netlink_seq_stop,
2028 	.show   = netlink_seq_show,
2029 };
2030 
2031 
2032 static int netlink_seq_open(struct inode *inode, struct file *file)
2033 {
2034 	return seq_open_net(inode, file, &netlink_seq_ops,
2035 				sizeof(struct nl_seq_iter));
2036 }
2037 
2038 static const struct file_operations netlink_seq_fops = {
2039 	.owner		= THIS_MODULE,
2040 	.open		= netlink_seq_open,
2041 	.read		= seq_read,
2042 	.llseek		= seq_lseek,
2043 	.release	= seq_release_net,
2044 };
2045 
2046 #endif
2047 
2048 int netlink_register_notifier(struct notifier_block *nb)
2049 {
2050 	return atomic_notifier_chain_register(&netlink_chain, nb);
2051 }
2052 EXPORT_SYMBOL(netlink_register_notifier);
2053 
2054 int netlink_unregister_notifier(struct notifier_block *nb)
2055 {
2056 	return atomic_notifier_chain_unregister(&netlink_chain, nb);
2057 }
2058 EXPORT_SYMBOL(netlink_unregister_notifier);
2059 
2060 static const struct proto_ops netlink_ops = {
2061 	.family =	PF_NETLINK,
2062 	.owner =	THIS_MODULE,
2063 	.release =	netlink_release,
2064 	.bind =		netlink_bind,
2065 	.connect =	netlink_connect,
2066 	.socketpair =	sock_no_socketpair,
2067 	.accept =	sock_no_accept,
2068 	.getname =	netlink_getname,
2069 	.poll =		datagram_poll,
2070 	.ioctl =	sock_no_ioctl,
2071 	.listen =	sock_no_listen,
2072 	.shutdown =	sock_no_shutdown,
2073 	.setsockopt =	netlink_setsockopt,
2074 	.getsockopt =	netlink_getsockopt,
2075 	.sendmsg =	netlink_sendmsg,
2076 	.recvmsg =	netlink_recvmsg,
2077 	.mmap =		sock_no_mmap,
2078 	.sendpage =	sock_no_sendpage,
2079 };
2080 
2081 static const struct net_proto_family netlink_family_ops = {
2082 	.family = PF_NETLINK,
2083 	.create = netlink_create,
2084 	.owner	= THIS_MODULE,	/* for consistency 8) */
2085 };
2086 
2087 static int __net_init netlink_net_init(struct net *net)
2088 {
2089 #ifdef CONFIG_PROC_FS
2090 	if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops))
2091 		return -ENOMEM;
2092 #endif
2093 	return 0;
2094 }
2095 
2096 static void __net_exit netlink_net_exit(struct net *net)
2097 {
2098 #ifdef CONFIG_PROC_FS
2099 	proc_net_remove(net, "netlink");
2100 #endif
2101 }
2102 
2103 static void __init netlink_add_usersock_entry(void)
2104 {
2105 	struct listeners *listeners;
2106 	int groups = 32;
2107 
2108 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2109 	if (!listeners)
2110 		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
2111 
2112 	netlink_table_grab();
2113 
2114 	nl_table[NETLINK_USERSOCK].groups = groups;
2115 	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
2116 	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2117 	nl_table[NETLINK_USERSOCK].registered = 1;
2118 
2119 	netlink_table_ungrab();
2120 }
2121 
2122 static struct pernet_operations __net_initdata netlink_net_ops = {
2123 	.init = netlink_net_init,
2124 	.exit = netlink_net_exit,
2125 };
2126 
2127 static int __init netlink_proto_init(void)
2128 {
2129 	struct sk_buff *dummy_skb;
2130 	int i;
2131 	unsigned long limit;
2132 	unsigned int order;
2133 	int err = proto_register(&netlink_proto, 0);
2134 
2135 	if (err != 0)
2136 		goto out;
2137 
2138 	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));
2139 
2140 	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
2141 	if (!nl_table)
2142 		goto panic;
2143 
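	/*
	 * Size the pid hash: budget roughly one bucket per 2MB of RAM on
	 * machines with 128K pages or more (512MB with 4K pages), one per
	 * 8MB below that, and derive max_shift so a fully grown table
	 * stays within that budget.
	 */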
2144 	if (totalram_pages >= (128 * 1024))
2145 		limit = totalram_pages >> (21 - PAGE_SHIFT);
2146 	else
2147 		limit = totalram_pages >> (23 - PAGE_SHIFT);
2148 
2149 	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
2150 	limit = (1UL << order) / sizeof(struct hlist_head);
2151 	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
2152 
2153 	for (i = 0; i < MAX_LINKS; i++) {
2154 		struct nl_pid_hash *hash = &nl_table[i].hash;
2155 
2156 		hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table));
2157 		if (!hash->table) {
2158 			while (i-- > 0)
2159 				nl_pid_hash_free(nl_table[i].hash.table,
2160 						 1 * sizeof(*hash->table));
2161 			kfree(nl_table);
2162 			goto panic;
2163 		}
2164 		hash->max_shift = order;
2165 		hash->shift = 0;
2166 		hash->mask = 0;
2167 		hash->rehash_time = jiffies;
2168 	}
2169 
2170 	netlink_add_usersock_entry();
2171 
2172 	sock_register(&netlink_family_ops);
2173 	register_pernet_subsys(&netlink_net_ops);
2174 	/* The netlink device handler may be needed early. */
2175 	rtnetlink_init();
2176 out:
2177 	return err;
2178 panic:
2179 	panic("netlink_init: Cannot allocate nl_table\n");
2180 }
2181 
2182 core_initcall(netlink_proto_init);
2183