xref: /openbmc/linux/net/netlink/af_netlink.c (revision 8a10bc9d)
1 /*
2  * NETLINK      Kernel-user communication protocol.
3  *
4  * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6  * 				Patrick McHardy <kaber@trash.net>
7  *
8  *		This program is free software; you can redistribute it and/or
9  *		modify it under the terms of the GNU General Public License
10  *		as published by the Free Software Foundation; either version
11  *		2 of the License, or (at your option) any later version.
12  *
13  * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
14  *                               added netlink_proto_exit
15  * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
16  * 				 use nlk_sk, as sk->protinfo is on a diet 8)
17  * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
18  * 				 - inc module use count of module that owns
19  * 				   the kernel socket in case userspace opens
20  * 				   socket of same protocol
21  * 				 - remove all module support, since netlink is
22  * 				   mandatory if CONFIG_NET=y these days
23  */
24 
25 #include <linux/module.h>
26 
27 #include <linux/capability.h>
28 #include <linux/kernel.h>
29 #include <linux/init.h>
30 #include <linux/signal.h>
31 #include <linux/sched.h>
32 #include <linux/errno.h>
33 #include <linux/string.h>
34 #include <linux/stat.h>
35 #include <linux/socket.h>
36 #include <linux/un.h>
37 #include <linux/fcntl.h>
38 #include <linux/termios.h>
39 #include <linux/sockios.h>
40 #include <linux/net.h>
41 #include <linux/fs.h>
42 #include <linux/slab.h>
43 #include <asm/uaccess.h>
44 #include <linux/skbuff.h>
45 #include <linux/netdevice.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/proc_fs.h>
48 #include <linux/seq_file.h>
49 #include <linux/notifier.h>
50 #include <linux/security.h>
51 #include <linux/jhash.h>
52 #include <linux/jiffies.h>
53 #include <linux/random.h>
54 #include <linux/bitops.h>
55 #include <linux/mm.h>
56 #include <linux/types.h>
57 #include <linux/audit.h>
58 #include <linux/mutex.h>
59 #include <linux/vmalloc.h>
60 #include <linux/if_arp.h>
61 #include <asm/cacheflush.h>
62 
63 #include <net/net_namespace.h>
64 #include <net/sock.h>
65 #include <net/scm.h>
66 #include <net/netlink.h>
67 
68 #include "af_netlink.h"
69 
70 struct listeners {
71 	struct rcu_head		rcu;
72 	unsigned long		masks[0];
73 };
74 
75 /* state bits */
76 #define NETLINK_CONGESTED	0x0
77 
78 /* flags */
79 #define NETLINK_KERNEL_SOCKET	0x1
80 #define NETLINK_RECV_PKTINFO	0x2
81 #define NETLINK_BROADCAST_SEND_ERROR	0x4
82 #define NETLINK_RECV_NO_ENOBUFS	0x8
83 
84 static inline int netlink_is_kernel(struct sock *sk)
85 {
86 	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
87 }
88 
89 struct netlink_table *nl_table;
90 EXPORT_SYMBOL_GPL(nl_table);
91 
92 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
93 
94 static int netlink_dump(struct sock *sk);
95 static void netlink_skb_destructor(struct sk_buff *skb);
96 
97 DEFINE_RWLOCK(nl_table_lock);
98 EXPORT_SYMBOL_GPL(nl_table_lock);
99 static atomic_t nl_table_users = ATOMIC_INIT(0);
100 
101 #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))
102 
103 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
104 
105 static DEFINE_SPINLOCK(netlink_tap_lock);
106 static struct list_head netlink_tap_all __read_mostly;
107 
108 static inline u32 netlink_group_mask(u32 group)
109 {
110 	return group ? 1 << (group - 1) : 0;
111 }
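
/* Worked example: netlink_group_mask(5) == 0x10 (bit 4), while group 0
 * means "no group" and maps to an empty mask. Group numbers are 1-based
 * on the wire; the resulting bitmask is 0-based.
 */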
112 
113 static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
114 {
115 	return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
116 }
117 
118 int netlink_add_tap(struct netlink_tap *nt)
119 {
120 	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
121 		return -EINVAL;
122 
123 	spin_lock(&netlink_tap_lock);
124 	list_add_rcu(&nt->list, &netlink_tap_all);
125 	spin_unlock(&netlink_tap_lock);
126 
127 	if (nt->module)
128 		__module_get(nt->module);
129 
130 	return 0;
131 }
132 EXPORT_SYMBOL_GPL(netlink_add_tap);
133 
134 static int __netlink_remove_tap(struct netlink_tap *nt)
135 {
136 	bool found = false;
137 	struct netlink_tap *tmp;
138 
139 	spin_lock(&netlink_tap_lock);
140 
141 	list_for_each_entry(tmp, &netlink_tap_all, list) {
142 		if (nt == tmp) {
143 			list_del_rcu(&nt->list);
144 			found = true;
145 			goto out;
146 		}
147 	}
148 
149 	pr_warn("__netlink_remove_tap: %p not found\n", nt);
150 out:
151 	spin_unlock(&netlink_tap_lock);
152 
153 	if (found && nt->module)
154 		module_put(nt->module);
155 
156 	return found ? 0 : -ENODEV;
157 }
158 
159 int netlink_remove_tap(struct netlink_tap *nt)
160 {
161 	int ret;
162 
163 	ret = __netlink_remove_tap(nt);
164 	synchronize_net();
165 
166 	return ret;
167 }
168 EXPORT_SYMBOL_GPL(netlink_remove_tap);
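
/* Taps feed copies of netlink traffic to packet monitors such as the
 * nlmon driver. A minimal registration sketch (assuming "dev" is a
 * net_device of type ARPHRD_NETLINK owned by the caller):
 *
 *	static struct netlink_tap tap;
 *
 *	tap.dev    = dev;
 *	tap.module = THIS_MODULE;
 *	err = netlink_add_tap(&tap);
 *	...
 *	netlink_remove_tap(&tap);
 */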
169 
170 static bool netlink_filter_tap(const struct sk_buff *skb)
171 {
172 	struct sock *sk = skb->sk;
173 	bool pass = false;
174 
175 	/* We take the more conservative approach and
176 	 * whitelist socket protocols that may pass.
177 	 */
178 	switch (sk->sk_protocol) {
179 	case NETLINK_ROUTE:
180 	case NETLINK_USERSOCK:
181 	case NETLINK_SOCK_DIAG:
182 	case NETLINK_NFLOG:
183 	case NETLINK_XFRM:
184 	case NETLINK_FIB_LOOKUP:
185 	case NETLINK_NETFILTER:
186 	case NETLINK_GENERIC:
187 		pass = true;
188 		break;
189 	}
190 
191 	return pass;
192 }
193 
194 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
195 				     struct net_device *dev)
196 {
197 	struct sk_buff *nskb;
198 	struct sock *sk = skb->sk;
199 	int ret = -ENOMEM;
200 
201 	dev_hold(dev);
202 	nskb = skb_clone(skb, GFP_ATOMIC);
203 	if (nskb) {
204 		nskb->dev = dev;
205 		nskb->protocol = htons((u16) sk->sk_protocol);
206 		nskb->pkt_type = netlink_is_kernel(sk) ?
207 				 PACKET_KERNEL : PACKET_USER;
208 
209 		ret = dev_queue_xmit(nskb);
210 		if (unlikely(ret > 0))
211 			ret = net_xmit_errno(ret);
212 	}
213 
214 	dev_put(dev);
215 	return ret;
216 }
217 
218 static void __netlink_deliver_tap(struct sk_buff *skb)
219 {
220 	int ret;
221 	struct netlink_tap *tmp;
222 
223 	if (!netlink_filter_tap(skb))
224 		return;
225 
226 	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
227 		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
228 		if (unlikely(ret))
229 			break;
230 	}
231 }
232 
233 static void netlink_deliver_tap(struct sk_buff *skb)
234 {
235 	rcu_read_lock();
236 
237 	if (unlikely(!list_empty(&netlink_tap_all)))
238 		__netlink_deliver_tap(skb);
239 
240 	rcu_read_unlock();
241 }
242 
243 static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
244 				       struct sk_buff *skb)
245 {
246 	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
247 		netlink_deliver_tap(skb);
248 }
249 
250 static void netlink_overrun(struct sock *sk)
251 {
252 	struct netlink_sock *nlk = nlk_sk(sk);
253 
254 	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
255 		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
256 			sk->sk_err = ENOBUFS;
257 			sk->sk_error_report(sk);
258 		}
259 	}
260 	atomic_inc(&sk->sk_drops);
261 }
262 
263 static void netlink_rcv_wake(struct sock *sk)
264 {
265 	struct netlink_sock *nlk = nlk_sk(sk);
266 
267 	if (skb_queue_empty(&sk->sk_receive_queue))
268 		clear_bit(NETLINK_CONGESTED, &nlk->state);
269 	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
270 		wake_up_interruptible(&nlk->wait);
271 }
272 
273 #ifdef CONFIG_NETLINK_MMAP
274 static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
275 {
276 	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
277 }
278 
279 static bool netlink_rx_is_mmaped(struct sock *sk)
280 {
281 	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
282 }
283 
284 static bool netlink_tx_is_mmaped(struct sock *sk)
285 {
286 	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
287 }
288 
289 static __pure struct page *pgvec_to_page(const void *addr)
290 {
291 	if (is_vmalloc_addr(addr))
292 		return vmalloc_to_page(addr);
293 	else
294 		return virt_to_page(addr);
295 }
296 
297 static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
298 {
299 	unsigned int i;
300 
301 	for (i = 0; i < len; i++) {
302 		if (pg_vec[i] != NULL) {
303 			if (is_vmalloc_addr(pg_vec[i]))
304 				vfree(pg_vec[i]);
305 			else
306 				free_pages((unsigned long)pg_vec[i], order);
307 		}
308 	}
309 	kfree(pg_vec);
310 }
311 
312 static void *alloc_one_pg_vec_page(unsigned long order)
313 {
314 	void *buffer;
315 	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
316 			  __GFP_NOWARN | __GFP_NORETRY;
317 
318 	buffer = (void *)__get_free_pages(gfp_flags, order);
319 	if (buffer != NULL)
320 		return buffer;
321 
322 	buffer = vzalloc((1 << order) * PAGE_SIZE);
323 	if (buffer != NULL)
324 		return buffer;
325 
326 	gfp_flags &= ~__GFP_NORETRY;
327 	return (void *)__get_free_pages(gfp_flags, order);
328 }
329 
330 static void **alloc_pg_vec(struct netlink_sock *nlk,
331 			   struct nl_mmap_req *req, unsigned int order)
332 {
333 	unsigned int block_nr = req->nm_block_nr;
334 	unsigned int i;
335 	void **pg_vec;
336 
337 	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
338 	if (pg_vec == NULL)
339 		return NULL;
340 
341 	for (i = 0; i < block_nr; i++) {
342 		pg_vec[i] = alloc_one_pg_vec_page(order);
343 		if (pg_vec[i] == NULL)
344 			goto err1;
345 	}
346 
347 	return pg_vec;
348 err1:
349 	free_pg_vec(pg_vec, order, block_nr);
350 	return NULL;
351 }
352 
353 static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
354 			    bool closing, bool tx_ring)
355 {
356 	struct netlink_sock *nlk = nlk_sk(sk);
357 	struct netlink_ring *ring;
358 	struct sk_buff_head *queue;
359 	void **pg_vec = NULL;
360 	unsigned int order = 0;
361 	int err;
362 
363 	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
364 	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
365 
366 	if (!closing) {
367 		if (atomic_read(&nlk->mapped))
368 			return -EBUSY;
369 		if (atomic_read(&ring->pending))
370 			return -EBUSY;
371 	}
372 
373 	if (req->nm_block_nr) {
374 		if (ring->pg_vec != NULL)
375 			return -EBUSY;
376 
377 		if ((int)req->nm_block_size <= 0)
378 			return -EINVAL;
379 		if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
380 			return -EINVAL;
381 		if (req->nm_frame_size < NL_MMAP_HDRLEN)
382 			return -EINVAL;
383 		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
384 			return -EINVAL;
385 
386 		ring->frames_per_block = req->nm_block_size /
387 					 req->nm_frame_size;
388 		if (ring->frames_per_block == 0)
389 			return -EINVAL;
390 		if (ring->frames_per_block * req->nm_block_nr !=
391 		    req->nm_frame_nr)
392 			return -EINVAL;
393 
394 		order = get_order(req->nm_block_size);
395 		pg_vec = alloc_pg_vec(nlk, req, order);
396 		if (pg_vec == NULL)
397 			return -ENOMEM;
398 	} else {
399 		if (req->nm_frame_nr)
400 			return -EINVAL;
401 	}
402 
403 	err = -EBUSY;
404 	mutex_lock(&nlk->pg_vec_lock);
405 	if (closing || atomic_read(&nlk->mapped) == 0) {
406 		err = 0;
407 		spin_lock_bh(&queue->lock);
408 
409 		ring->frame_max		= req->nm_frame_nr - 1;
410 		ring->head		= 0;
411 		ring->frame_size	= req->nm_frame_size;
412 		ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;
413 
414 		swap(ring->pg_vec_len, req->nm_block_nr);
415 		swap(ring->pg_vec_order, order);
416 		swap(ring->pg_vec, pg_vec);
417 
418 		__skb_queue_purge(queue);
419 		spin_unlock_bh(&queue->lock);
420 
421 		WARN_ON(atomic_read(&nlk->mapped));
422 	}
423 	mutex_unlock(&nlk->pg_vec_lock);
424 
425 	if (pg_vec)
426 		free_pg_vec(pg_vec, order, req->nm_block_nr);
427 	return err;
428 }
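
/* Illustrative userspace sketch for configuring a receive ring and
 * mapping it (assuming 4 KiB pages and that this kernel exposes
 * NETLINK_RX_RING and struct nl_mmap_req via <linux/netlink.h>); the
 * values satisfy the alignment and frame-count checks above:
 *
 *	struct nl_mmap_req req = {
 *		.nm_block_size	= 4096,		// multiple of PAGE_SIZE
 *		.nm_block_nr	= 64,
 *		.nm_frame_size	= 2048,		// >= NL_MMAP_HDRLEN, aligned
 *		.nm_frame_nr	= 64 * (4096 / 2048),
 *	};
 *	void *ring;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, 64 * 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 */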
429 
430 static void netlink_mm_open(struct vm_area_struct *vma)
431 {
432 	struct file *file = vma->vm_file;
433 	struct socket *sock = file->private_data;
434 	struct sock *sk = sock->sk;
435 
436 	if (sk)
437 		atomic_inc(&nlk_sk(sk)->mapped);
438 }
439 
440 static void netlink_mm_close(struct vm_area_struct *vma)
441 {
442 	struct file *file = vma->vm_file;
443 	struct socket *sock = file->private_data;
444 	struct sock *sk = sock->sk;
445 
446 	if (sk)
447 		atomic_dec(&nlk_sk(sk)->mapped);
448 }
449 
450 static const struct vm_operations_struct netlink_mmap_ops = {
451 	.open	= netlink_mm_open,
452 	.close	= netlink_mm_close,
453 };
454 
455 static int netlink_mmap(struct file *file, struct socket *sock,
456 			struct vm_area_struct *vma)
457 {
458 	struct sock *sk = sock->sk;
459 	struct netlink_sock *nlk = nlk_sk(sk);
460 	struct netlink_ring *ring;
461 	unsigned long start, size, expected;
462 	unsigned int i;
463 	int err = -EINVAL;
464 
465 	if (vma->vm_pgoff)
466 		return -EINVAL;
467 
468 	mutex_lock(&nlk->pg_vec_lock);
469 
470 	expected = 0;
471 	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
472 		if (ring->pg_vec == NULL)
473 			continue;
474 		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
475 	}
476 
477 	if (expected == 0)
478 		goto out;
479 
480 	size = vma->vm_end - vma->vm_start;
481 	if (size != expected)
482 		goto out;
483 
484 	start = vma->vm_start;
485 	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
486 		if (ring->pg_vec == NULL)
487 			continue;
488 
489 		for (i = 0; i < ring->pg_vec_len; i++) {
490 			struct page *page;
491 			void *kaddr = ring->pg_vec[i];
492 			unsigned int pg_num;
493 
494 			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
495 				page = pgvec_to_page(kaddr);
496 				err = vm_insert_page(vma, start, page);
497 				if (err < 0)
498 					goto out;
499 				start += PAGE_SIZE;
500 				kaddr += PAGE_SIZE;
501 			}
502 		}
503 	}
504 
505 	atomic_inc(&nlk->mapped);
506 	vma->vm_ops = &netlink_mmap_ops;
507 	err = 0;
508 out:
509 	mutex_unlock(&nlk->pg_vec_lock);
510 	return err;
511 }
512 
513 static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
514 {
515 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
516 	struct page *p_start, *p_end;
517 
518 	/* First page is flushed through netlink_{get,set}_status */
519 	p_start = pgvec_to_page((void *)hdr + PAGE_SIZE); /* byte offset, not hdr-sized steps */
520 	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
521 	while (p_start <= p_end) {
522 		flush_dcache_page(p_start);
523 		p_start++;
524 	}
525 #endif
526 }
527 
528 static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
529 {
530 	smp_rmb();
531 	flush_dcache_page(pgvec_to_page(hdr));
532 	return hdr->nm_status;
533 }
534 
535 static void netlink_set_status(struct nl_mmap_hdr *hdr,
536 			       enum nl_mmap_status status)
537 {
538 	hdr->nm_status = status;
539 	flush_dcache_page(pgvec_to_page(hdr));
540 	smp_wmb();
541 }
542 
543 static struct nl_mmap_hdr *
544 __netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
545 {
546 	unsigned int pg_vec_pos, frame_off;
547 
548 	pg_vec_pos = pos / ring->frames_per_block;
549 	frame_off  = pos % ring->frames_per_block;
550 
551 	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
552 }
553 
554 static struct nl_mmap_hdr *
555 netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
556 		     enum nl_mmap_status status)
557 {
558 	struct nl_mmap_hdr *hdr;
559 
560 	hdr = __netlink_lookup_frame(ring, pos);
561 	if (netlink_get_status(hdr) != status)
562 		return NULL;
563 
564 	return hdr;
565 }
566 
567 static struct nl_mmap_hdr *
568 netlink_current_frame(const struct netlink_ring *ring,
569 		      enum nl_mmap_status status)
570 {
571 	return netlink_lookup_frame(ring, ring->head, status);
572 }
573 
574 static struct nl_mmap_hdr *
575 netlink_previous_frame(const struct netlink_ring *ring,
576 		       enum nl_mmap_status status)
577 {
578 	unsigned int prev;
579 
580 	prev = ring->head ? ring->head - 1 : ring->frame_max;
581 	return netlink_lookup_frame(ring, prev, status);
582 }
583 
584 static void netlink_increment_head(struct netlink_ring *ring)
585 {
586 	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
587 }
588 
589 static void netlink_forward_ring(struct netlink_ring *ring)
590 {
591 	unsigned int head = ring->head, pos = head;
592 	const struct nl_mmap_hdr *hdr;
593 
594 	do {
595 		hdr = __netlink_lookup_frame(ring, pos);
596 		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
597 			break;
598 		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
599 			break;
600 		netlink_increment_head(ring);
601 	} while (ring->head != head);
602 }
603 
604 static bool netlink_dump_space(struct netlink_sock *nlk)
605 {
606 	struct netlink_ring *ring = &nlk->rx_ring;
607 	struct nl_mmap_hdr *hdr;
608 	unsigned int n;
609 
610 	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
611 	if (hdr == NULL)
612 		return false;
613 
614 	n = ring->head + ring->frame_max / 2;
615 	if (n > ring->frame_max)
616 		n -= ring->frame_max;
617 
618 	hdr = __netlink_lookup_frame(ring, n);
619 
620 	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
621 }
622 
623 static unsigned int netlink_poll(struct file *file, struct socket *sock,
624 				 poll_table *wait)
625 {
626 	struct sock *sk = sock->sk;
627 	struct netlink_sock *nlk = nlk_sk(sk);
628 	unsigned int mask;
629 	int err;
630 
631 	if (nlk->rx_ring.pg_vec != NULL) {
632 		/* Memory-mapped sockets don't call recvmsg(), so flow control
633 		 * for dumps is performed here. A dump is allowed to continue
634 		 * if at least half the ring is unused.
635 		 */
636 		while (nlk->cb_running && netlink_dump_space(nlk)) {
637 			err = netlink_dump(sk);
638 			if (err < 0) {
639 				sk->sk_err = err;
640 				sk->sk_error_report(sk);
641 				break;
642 			}
643 		}
644 		netlink_rcv_wake(sk);
645 	}
646 
647 	mask = datagram_poll(file, sock, wait);
648 
649 	spin_lock_bh(&sk->sk_receive_queue.lock);
650 	if (nlk->rx_ring.pg_vec) {
651 		netlink_forward_ring(&nlk->rx_ring);
652 		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
653 			mask |= POLLIN | POLLRDNORM;
654 	}
655 	spin_unlock_bh(&sk->sk_receive_queue.lock);
656 
657 	spin_lock_bh(&sk->sk_write_queue.lock);
658 	if (nlk->tx_ring.pg_vec) {
659 		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
660 			mask |= POLLOUT | POLLWRNORM;
661 	}
662 	spin_unlock_bh(&sk->sk_write_queue.lock);
663 
664 	return mask;
665 }
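
/* Matching userspace receive path (a sketch, assuming 4 KiB pages and the
 * ring configured above; process() and advance() are placeholders, and
 * "hdr" points at the current RX frame):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[8192];
 *
 *	poll(&pfd, 1, -1);
 *	while (hdr->nm_status == NL_MMAP_STATUS_VALID ||
 *	       hdr->nm_status == NL_MMAP_STATUS_COPY) {
 *		if (hdr->nm_status == NL_MMAP_STATUS_COPY)
 *			recv(fd, buf, sizeof(buf), 0);	// frame didn't fit
 *		else
 *			process((char *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
 *		hdr->nm_status = NL_MMAP_STATUS_UNUSED;	// hand frame back
 *		hdr = advance(hdr);			// next frame, wraps
 *	}
 */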
666 
667 static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
668 {
669 	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
670 }
671 
672 static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
673 				   struct netlink_ring *ring,
674 				   struct nl_mmap_hdr *hdr)
675 {
676 	unsigned int size;
677 	void *data;
678 
679 	size = ring->frame_size - NL_MMAP_HDRLEN;
680 	data = (void *)hdr + NL_MMAP_HDRLEN;
681 
682 	skb->head	= data;
683 	skb->data	= data;
684 	skb_reset_tail_pointer(skb);
685 	skb->end	= skb->tail + size;
686 	skb->len	= 0;
687 
688 	skb->destructor	= netlink_skb_destructor;
689 	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
690 	NETLINK_CB(skb).sk = sk;
691 }
692 
693 static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
694 				u32 dst_portid, u32 dst_group,
695 				struct sock_iocb *siocb)
696 {
697 	struct netlink_sock *nlk = nlk_sk(sk);
698 	struct netlink_ring *ring;
699 	struct nl_mmap_hdr *hdr;
700 	struct sk_buff *skb;
701 	unsigned int maxlen;
702 	bool excl = true;
703 	int err = 0, len = 0;
704 
705 	/* Netlink messages are validated by the receiver before processing.
706 	 * In order to avoid userspace changing the contents of the message
707 	 * after validation, the socket and the ring may only be used by a
708 	 * single process, otherwise we fall back to copying.
709 	 */
710 	if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
711 	    atomic_read(&nlk->mapped) > 1)
712 		excl = false;
713 
714 	mutex_lock(&nlk->pg_vec_lock);
715 
716 	ring   = &nlk->tx_ring;
717 	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
718 
719 	do {
720 		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
721 		if (hdr == NULL) {
722 			if (!(msg->msg_flags & MSG_DONTWAIT) &&
723 			    atomic_read(&nlk->tx_ring.pending))
724 				schedule();
725 			continue;
726 		}
727 		if (hdr->nm_len > maxlen) {
728 			err = -EINVAL;
729 			goto out;
730 		}
731 
732 		netlink_frame_flush_dcache(hdr);
733 
734 		if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
735 			skb = alloc_skb_head(GFP_KERNEL);
736 			if (skb == NULL) {
737 				err = -ENOBUFS;
738 				goto out;
739 			}
740 			sock_hold(sk);
741 			netlink_ring_setup_skb(skb, sk, ring, hdr);
742 			NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
743 			__skb_put(skb, hdr->nm_len);
744 			netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
745 			atomic_inc(&ring->pending);
746 		} else {
747 			skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
748 			if (skb == NULL) {
749 				err = -ENOBUFS;
750 				goto out;
751 			}
752 			__skb_put(skb, hdr->nm_len);
753 			memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
754 			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
755 		}
756 
757 		netlink_increment_head(ring);
758 
759 		NETLINK_CB(skb).portid	  = nlk->portid;
760 		NETLINK_CB(skb).dst_group = dst_group;
761 		NETLINK_CB(skb).creds	  = siocb->scm->creds;
762 
763 		err = security_netlink_send(sk, skb);
764 		if (err) {
765 			kfree_skb(skb);
766 			goto out;
767 		}
768 
769 		if (unlikely(dst_group)) {
770 			atomic_inc(&skb->users);
771 			netlink_broadcast(sk, skb, dst_portid, dst_group,
772 					  GFP_KERNEL);
773 		}
774 		err = netlink_unicast(sk, skb, dst_portid,
775 				      msg->msg_flags & MSG_DONTWAIT);
776 		if (err < 0)
777 			goto out;
778 		len += err;
779 
780 	} while (hdr != NULL ||
781 		 (!(msg->msg_flags & MSG_DONTWAIT) &&
782 		  atomic_read(&nlk->tx_ring.pending)));
783 
784 	if (len > 0)
785 		err = len;
786 out:
787 	mutex_unlock(&nlk->pg_vec_lock);
788 	return err;
789 }
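
/* Corresponding transmit-side sketch: userspace fills a frame, marks it
 * valid and kicks the kernel with a zero-length sendto(). Here "hdr"
 * points at an NL_MMAP_STATUS_UNUSED frame in the TX ring and
 * build_request() is a placeholder for the caller:
 *
 *	struct nlmsghdr *nlh = (void *)((char *)hdr + NL_MMAP_HDRLEN);
 *
 *	build_request(nlh);
 *	hdr->nm_len    = nlh->nlmsg_len;
 *	hdr->nm_status = NL_MMAP_STATUS_VALID;
 *	sendto(fd, NULL, 0, 0, NULL, 0);
 */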
790 
791 static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
792 {
793 	struct nl_mmap_hdr *hdr;
794 
795 	hdr = netlink_mmap_hdr(skb);
796 	hdr->nm_len	= skb->len;
797 	hdr->nm_group	= NETLINK_CB(skb).dst_group;
798 	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
799 	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
800 	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
801 	netlink_frame_flush_dcache(hdr);
802 	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
803 
804 	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
805 	kfree_skb(skb);
806 }
807 
808 static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
809 {
810 	struct netlink_sock *nlk = nlk_sk(sk);
811 	struct netlink_ring *ring = &nlk->rx_ring;
812 	struct nl_mmap_hdr *hdr;
813 
814 	spin_lock_bh(&sk->sk_receive_queue.lock);
815 	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
816 	if (hdr == NULL) {
817 		spin_unlock_bh(&sk->sk_receive_queue.lock);
818 		kfree_skb(skb);
819 		netlink_overrun(sk);
820 		return;
821 	}
822 	netlink_increment_head(ring);
823 	__skb_queue_tail(&sk->sk_receive_queue, skb);
824 	spin_unlock_bh(&sk->sk_receive_queue.lock);
825 
826 	hdr->nm_len	= skb->len;
827 	hdr->nm_group	= NETLINK_CB(skb).dst_group;
828 	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
829 	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
830 	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
831 	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
832 }
833 
834 #else /* CONFIG_NETLINK_MMAP */
835 #define netlink_skb_is_mmaped(skb)	false
836 #define netlink_rx_is_mmaped(sk)	false
837 #define netlink_tx_is_mmaped(sk)	false
838 #define netlink_mmap			sock_no_mmap
839 #define netlink_poll			datagram_poll
840 #define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)	0
841 #endif /* CONFIG_NETLINK_MMAP */
842 
843 static void netlink_skb_destructor(struct sk_buff *skb)
844 {
845 #ifdef CONFIG_NETLINK_MMAP
846 	struct nl_mmap_hdr *hdr;
847 	struct netlink_ring *ring;
848 	struct sock *sk;
849 
850 	/* If a packet from the kernel to userspace was freed because of an
851 	 * error without being delivered to userspace, the kernel must reset
852 	 * the status. For packets from userspace to the kernel, the status
853 	 * is always reset here after the packet has been processed and freed.
854 	 */
855 	if (netlink_skb_is_mmaped(skb)) {
856 		hdr = netlink_mmap_hdr(skb);
857 		sk = NETLINK_CB(skb).sk;
858 
859 		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
860 			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
861 			ring = &nlk_sk(sk)->tx_ring;
862 		} else {
863 			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
864 				hdr->nm_len = 0;
865 				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
866 			}
867 			ring = &nlk_sk(sk)->rx_ring;
868 		}
869 
870 		WARN_ON(atomic_read(&ring->pending) == 0);
871 		atomic_dec(&ring->pending);
872 		sock_put(sk);
873 
874 		skb->head = NULL;
875 	}
876 #endif
877 	if (is_vmalloc_addr(skb->head)) {
878 		if (!skb->cloned ||
879 		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
880 			vfree(skb->head);
881 
882 		skb->head = NULL;
883 	}
884 	if (skb->sk != NULL)
885 		sock_rfree(skb);
886 }
887 
888 static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
889 {
890 	WARN_ON(skb->sk != NULL);
891 	skb->sk = sk;
892 	skb->destructor = netlink_skb_destructor;
893 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
894 	sk_mem_charge(sk, skb->truesize);
895 }
896 
897 static void netlink_sock_destruct(struct sock *sk)
898 {
899 	struct netlink_sock *nlk = nlk_sk(sk);
900 
901 	if (nlk->cb_running) {
902 		if (nlk->cb.done)
903 			nlk->cb.done(&nlk->cb);
904 
905 		module_put(nlk->cb.module);
906 		kfree_skb(nlk->cb.skb);
907 	}
908 
909 	skb_queue_purge(&sk->sk_receive_queue);
910 #ifdef CONFIG_NETLINK_MMAP
911 	if (1) {
912 		struct nl_mmap_req req;
913 
914 		memset(&req, 0, sizeof(req));
915 		if (nlk->rx_ring.pg_vec)
916 			netlink_set_ring(sk, &req, true, false);
917 		memset(&req, 0, sizeof(req));
918 		if (nlk->tx_ring.pg_vec)
919 			netlink_set_ring(sk, &req, true, true);
920 	}
921 #endif /* CONFIG_NETLINK_MMAP */
922 
923 	if (!sock_flag(sk, SOCK_DEAD)) {
924 		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
925 		return;
926 	}
927 
928 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
929 	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
930 	WARN_ON(nlk_sk(sk)->groups);
931 }
932 
933 /* Taking this lock without WQ_FLAG_EXCLUSIVE is fine on UP but _very_ bad on
934  * SMP: when several writers sleep and a reader wakes them up, all but one
935  * immediately hit the write lock and grab all the CPUs. Exclusive sleep
936  * solves this, _but_ remember, it adds useless work on UP machines.
937  */
938 
939 void netlink_table_grab(void)
940 	__acquires(nl_table_lock)
941 {
942 	might_sleep();
943 
944 	write_lock_irq(&nl_table_lock);
945 
946 	if (atomic_read(&nl_table_users)) {
947 		DECLARE_WAITQUEUE(wait, current);
948 
949 		add_wait_queue_exclusive(&nl_table_wait, &wait);
950 		for (;;) {
951 			set_current_state(TASK_UNINTERRUPTIBLE);
952 			if (atomic_read(&nl_table_users) == 0)
953 				break;
954 			write_unlock_irq(&nl_table_lock);
955 			schedule();
956 			write_lock_irq(&nl_table_lock);
957 		}
958 
959 		__set_current_state(TASK_RUNNING);
960 		remove_wait_queue(&nl_table_wait, &wait);
961 	}
962 }
963 
964 void netlink_table_ungrab(void)
965 	__releases(nl_table_lock)
966 {
967 	write_unlock_irq(&nl_table_lock);
968 	wake_up(&nl_table_wait);
969 }
970 
971 static inline void
972 netlink_lock_table(void)
973 {
974 	/* read_lock() synchronizes us to netlink_table_grab */
975 
976 	read_lock(&nl_table_lock);
977 	atomic_inc(&nl_table_users);
978 	read_unlock(&nl_table_lock);
979 }
980 
981 static inline void
982 netlink_unlock_table(void)
983 {
984 	if (atomic_dec_and_test(&nl_table_users))
985 		wake_up(&nl_table_wait);
986 }
987 
988 static bool netlink_compare(struct net *net, struct sock *sk)
989 {
990 	return net_eq(sock_net(sk), net);
991 }
992 
993 static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
994 {
995 	struct netlink_table *table = &nl_table[protocol];
996 	struct nl_portid_hash *hash = &table->hash;
997 	struct hlist_head *head;
998 	struct sock *sk;
999 
1000 	read_lock(&nl_table_lock);
1001 	head = nl_portid_hashfn(hash, portid);
1002 	sk_for_each(sk, head) {
1003 		if (table->compare(net, sk) &&
1004 		    (nlk_sk(sk)->portid == portid)) {
1005 			sock_hold(sk);
1006 			goto found;
1007 		}
1008 	}
1009 	sk = NULL;
1010 found:
1011 	read_unlock(&nl_table_lock);
1012 	return sk;
1013 }
1014 
1015 static struct hlist_head *nl_portid_hash_zalloc(size_t size)
1016 {
1017 	if (size <= PAGE_SIZE)
1018 		return kzalloc(size, GFP_ATOMIC);
1019 	else
1020 		return (struct hlist_head *)
1021 			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
1022 					 get_order(size));
1023 }
1024 
1025 static void nl_portid_hash_free(struct hlist_head *table, size_t size)
1026 {
1027 	if (size <= PAGE_SIZE)
1028 		kfree(table);
1029 	else
1030 		free_pages((unsigned long)table, get_order(size));
1031 }
1032 
1033 static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
1034 {
1035 	unsigned int omask, mask, shift;
1036 	size_t osize, size;
1037 	struct hlist_head *otable, *table;
1038 	int i;
1039 
1040 	omask = mask = hash->mask;
1041 	osize = size = (mask + 1) * sizeof(*table);
1042 	shift = hash->shift;
1043 
1044 	if (grow) {
1045 		if (++shift > hash->max_shift)
1046 			return 0;
1047 		mask = mask * 2 + 1;
1048 		size *= 2;
1049 	}
1050 
1051 	table = nl_portid_hash_zalloc(size);
1052 	if (!table)
1053 		return 0;
1054 
1055 	otable = hash->table;
1056 	hash->table = table;
1057 	hash->mask = mask;
1058 	hash->shift = shift;
1059 	get_random_bytes(&hash->rnd, sizeof(hash->rnd));
1060 
1061 	for (i = 0; i <= omask; i++) {
1062 		struct sock *sk;
1063 		struct hlist_node *tmp;
1064 
1065 		sk_for_each_safe(sk, tmp, &otable[i])
1066 			__sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
1067 	}
1068 
1069 	nl_portid_hash_free(otable, osize);
1070 	hash->rehash_time = jiffies + 10 * 60 * HZ;
1071 	return 1;
1072 }
1073 
1074 static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
1075 {
1076 	int avg = hash->entries >> hash->shift;
1077 
1078 	if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
1079 		return 1;
1080 
1081 	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
1082 		nl_portid_hash_rehash(hash, 0);
1083 		return 1;
1084 	}
1085 
1086 	return 0;
1087 }
1088 
1089 static const struct proto_ops netlink_ops;
1090 
1091 static void
1092 netlink_update_listeners(struct sock *sk)
1093 {
1094 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
1095 	unsigned long mask;
1096 	unsigned int i;
1097 	struct listeners *listeners;
1098 
1099 	listeners = nl_deref_protected(tbl->listeners);
1100 	if (!listeners)
1101 		return;
1102 
1103 	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
1104 		mask = 0;
1105 		sk_for_each_bound(sk, &tbl->mc_list) {
1106 			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
1107 				mask |= nlk_sk(sk)->groups[i];
1108 		}
1109 		listeners->masks[i] = mask;
1110 	}
1111 	/* this function is only called with the netlink table "grabbed", which
1112 	 * makes sure updates are visible before bind or setsockopt return. */
1113 }
1114 
1115 static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
1116 {
1117 	struct netlink_table *table = &nl_table[sk->sk_protocol];
1118 	struct nl_portid_hash *hash = &table->hash;
1119 	struct hlist_head *head;
1120 	int err = -EADDRINUSE;
1121 	struct sock *osk;
1122 	int len;
1123 
1124 	netlink_table_grab();
1125 	head = nl_portid_hashfn(hash, portid);
1126 	len = 0;
1127 	sk_for_each(osk, head) {
1128 		if (table->compare(net, osk) &&
1129 		    (nlk_sk(osk)->portid == portid))
1130 			break;
1131 		len++;
1132 	}
1133 	if (osk)
1134 		goto err;
1135 
1136 	err = -EBUSY;
1137 	if (nlk_sk(sk)->portid)
1138 		goto err;
1139 
1140 	err = -ENOMEM;
1141 	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
1142 		goto err;
1143 
1144 	if (len && nl_portid_hash_dilute(hash, len))
1145 		head = nl_portid_hashfn(hash, portid);
1146 	hash->entries++;
1147 	nlk_sk(sk)->portid = portid;
1148 	sk_add_node(sk, head);
1149 	err = 0;
1150 
1151 err:
1152 	netlink_table_ungrab();
1153 	return err;
1154 }
1155 
1156 static void netlink_remove(struct sock *sk)
1157 {
1158 	netlink_table_grab();
1159 	if (sk_del_node_init(sk))
1160 		nl_table[sk->sk_protocol].hash.entries--;
1161 	if (nlk_sk(sk)->subscriptions)
1162 		__sk_del_bind_node(sk);
1163 	netlink_table_ungrab();
1164 }
1165 
1166 static struct proto netlink_proto = {
1167 	.name	  = "NETLINK",
1168 	.owner	  = THIS_MODULE,
1169 	.obj_size = sizeof(struct netlink_sock),
1170 };
1171 
1172 static int __netlink_create(struct net *net, struct socket *sock,
1173 			    struct mutex *cb_mutex, int protocol)
1174 {
1175 	struct sock *sk;
1176 	struct netlink_sock *nlk;
1177 
1178 	sock->ops = &netlink_ops;
1179 
1180 	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
1181 	if (!sk)
1182 		return -ENOMEM;
1183 
1184 	sock_init_data(sock, sk);
1185 
1186 	nlk = nlk_sk(sk);
1187 	if (cb_mutex) {
1188 		nlk->cb_mutex = cb_mutex;
1189 	} else {
1190 		nlk->cb_mutex = &nlk->cb_def_mutex;
1191 		mutex_init(nlk->cb_mutex);
1192 	}
1193 	init_waitqueue_head(&nlk->wait);
1194 #ifdef CONFIG_NETLINK_MMAP
1195 	mutex_init(&nlk->pg_vec_lock);
1196 #endif
1197 
1198 	sk->sk_destruct = netlink_sock_destruct;
1199 	sk->sk_protocol = protocol;
1200 	return 0;
1201 }
1202 
1203 static int netlink_create(struct net *net, struct socket *sock, int protocol,
1204 			  int kern)
1205 {
1206 	struct module *module = NULL;
1207 	struct mutex *cb_mutex;
1208 	struct netlink_sock *nlk;
1209 	void (*bind)(int group);
1210 	int err = 0;
1211 
1212 	sock->state = SS_UNCONNECTED;
1213 
1214 	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
1215 		return -ESOCKTNOSUPPORT;
1216 
1217 	if (protocol < 0 || protocol >= MAX_LINKS)
1218 		return -EPROTONOSUPPORT;
1219 
1220 	netlink_lock_table();
1221 #ifdef CONFIG_MODULES
1222 	if (!nl_table[protocol].registered) {
1223 		netlink_unlock_table();
1224 		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
1225 		netlink_lock_table();
1226 	}
1227 #endif
1228 	if (nl_table[protocol].registered &&
1229 	    try_module_get(nl_table[protocol].module))
1230 		module = nl_table[protocol].module;
1231 	else
1232 		err = -EPROTONOSUPPORT;
1233 	cb_mutex = nl_table[protocol].cb_mutex;
1234 	bind = nl_table[protocol].bind;
1235 	netlink_unlock_table();
1236 
1237 	if (err < 0)
1238 		goto out;
1239 
1240 	err = __netlink_create(net, sock, cb_mutex, protocol);
1241 	if (err < 0)
1242 		goto out_module;
1243 
1244 	local_bh_disable();
1245 	sock_prot_inuse_add(net, &netlink_proto, 1);
1246 	local_bh_enable();
1247 
1248 	nlk = nlk_sk(sock->sk);
1249 	nlk->module = module;
1250 	nlk->netlink_bind = bind;
1251 out:
1252 	return err;
1253 
1254 out_module:
1255 	module_put(module);
1256 	goto out;
1257 }
1258 
1259 static int netlink_release(struct socket *sock)
1260 {
1261 	struct sock *sk = sock->sk;
1262 	struct netlink_sock *nlk;
1263 
1264 	if (!sk)
1265 		return 0;
1266 
1267 	netlink_remove(sk);
1268 	sock_orphan(sk);
1269 	nlk = nlk_sk(sk);
1270 
1271 	/*
1272 	 * OK. Socket is unlinked, any packets that arrive now
1273 	 * will be purged.
1274 	 */
1275 
1276 	sock->sk = NULL;
1277 	wake_up_interruptible_all(&nlk->wait);
1278 
1279 	skb_queue_purge(&sk->sk_write_queue);
1280 
1281 	if (nlk->portid) {
1282 		struct netlink_notify n = {
1283 						.net = sock_net(sk),
1284 						.protocol = sk->sk_protocol,
1285 						.portid = nlk->portid,
1286 					  };
1287 		atomic_notifier_call_chain(&netlink_chain,
1288 				NETLINK_URELEASE, &n);
1289 	}
1290 
1291 	module_put(nlk->module);
1292 
1293 	netlink_table_grab();
1294 	if (netlink_is_kernel(sk)) {
1295 		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
1296 		if (--nl_table[sk->sk_protocol].registered == 0) {
1297 			struct listeners *old;
1298 
1299 			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
1300 			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
1301 			kfree_rcu(old, rcu);
1302 			nl_table[sk->sk_protocol].module = NULL;
1303 			nl_table[sk->sk_protocol].bind = NULL;
1304 			nl_table[sk->sk_protocol].flags = 0;
1305 			nl_table[sk->sk_protocol].registered = 0;
1306 		}
1307 	} else if (nlk->subscriptions) {
1308 		netlink_update_listeners(sk);
1309 	}
1310 	netlink_table_ungrab();
1311 
1312 	kfree(nlk->groups);
1313 	nlk->groups = NULL;
1314 
1315 	local_bh_disable();
1316 	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
1317 	local_bh_enable();
1318 	sock_put(sk);
1319 	return 0;
1320 }
1321 
1322 static int netlink_autobind(struct socket *sock)
1323 {
1324 	struct sock *sk = sock->sk;
1325 	struct net *net = sock_net(sk);
1326 	struct netlink_table *table = &nl_table[sk->sk_protocol];
1327 	struct nl_portid_hash *hash = &table->hash;
1328 	struct hlist_head *head;
1329 	struct sock *osk;
1330 	s32 portid = task_tgid_vnr(current);
1331 	int err;
1332 	static s32 rover = -4097;
1333 
1334 retry:
1335 	cond_resched();
1336 	netlink_table_grab();
1337 	head = nl_portid_hashfn(hash, portid);
1338 	sk_for_each(osk, head) {
1339 		if (!table->compare(net, osk))
1340 			continue;
1341 		if (nlk_sk(osk)->portid == portid) {
1342 			/* Bind collision, search negative portid values. */
1343 			portid = rover--;
1344 			if (rover > -4097)
1345 				rover = -4097;
1346 			netlink_table_ungrab();
1347 			goto retry;
1348 		}
1349 	}
1350 	netlink_table_ungrab();
1351 
1352 	err = netlink_insert(sk, net, portid);
1353 	if (err == -EADDRINUSE)
1354 		goto retry;
1355 
1356 	/* If 2 threads race to autobind, that is fine.  */
1357 	if (err == -EBUSY)
1358 		err = 0;
1359 
1360 	return err;
1361 }
1362 
1363 static inline int netlink_capable(const struct socket *sock, unsigned int flag)
1364 {
1365 	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
1366 		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
1367 }
1368 
1369 static void
1370 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1371 {
1372 	struct netlink_sock *nlk = nlk_sk(sk);
1373 
1374 	if (nlk->subscriptions && !subscriptions)
1375 		__sk_del_bind_node(sk);
1376 	else if (!nlk->subscriptions && subscriptions)
1377 		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1378 	nlk->subscriptions = subscriptions;
1379 }
1380 
1381 static int netlink_realloc_groups(struct sock *sk)
1382 {
1383 	struct netlink_sock *nlk = nlk_sk(sk);
1384 	unsigned int groups;
1385 	unsigned long *new_groups;
1386 	int err = 0;
1387 
1388 	netlink_table_grab();
1389 
1390 	groups = nl_table[sk->sk_protocol].groups;
1391 	if (!nl_table[sk->sk_protocol].registered) {
1392 		err = -ENOENT;
1393 		goto out_unlock;
1394 	}
1395 
1396 	if (nlk->ngroups >= groups)
1397 		goto out_unlock;
1398 
1399 	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1400 	if (new_groups == NULL) {
1401 		err = -ENOMEM;
1402 		goto out_unlock;
1403 	}
1404 	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
1405 	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1406 
1407 	nlk->groups = new_groups;
1408 	nlk->ngroups = groups;
1409  out_unlock:
1410 	netlink_table_ungrab();
1411 	return err;
1412 }
1413 
1414 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1415 			int addr_len)
1416 {
1417 	struct sock *sk = sock->sk;
1418 	struct net *net = sock_net(sk);
1419 	struct netlink_sock *nlk = nlk_sk(sk);
1420 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1421 	int err;
1422 
1423 	if (addr_len < sizeof(struct sockaddr_nl))
1424 		return -EINVAL;
1425 
1426 	if (nladdr->nl_family != AF_NETLINK)
1427 		return -EINVAL;
1428 
1429 	/* Only superuser is allowed to listen to multicasts */
1430 	if (nladdr->nl_groups) {
1431 		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
1432 			return -EPERM;
1433 		err = netlink_realloc_groups(sk);
1434 		if (err)
1435 			return err;
1436 	}
1437 
1438 	if (nlk->portid) {
1439 		if (nladdr->nl_pid != nlk->portid)
1440 			return -EINVAL;
1441 	} else {
1442 		err = nladdr->nl_pid ?
1443 			netlink_insert(sk, net, nladdr->nl_pid) :
1444 			netlink_autobind(sock);
1445 		if (err)
1446 			return err;
1447 	}
1448 
1449 	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1450 		return 0;
1451 
1452 	netlink_table_grab();
1453 	netlink_update_subscriptions(sk, nlk->subscriptions +
1454 					 hweight32(nladdr->nl_groups) -
1455 					 hweight32(nlk->groups[0]));
1456 	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
1457 	netlink_update_listeners(sk);
1458 	netlink_table_ungrab();
1459 
1460 	if (nlk->netlink_bind && nlk->groups[0]) {
1461 		int i;
1462 
1463 		for (i = 0; i < nlk->ngroups; i++) {
1464 			if (test_bit(i, nlk->groups))
1465 				nlk->netlink_bind(i);
1466 		}
1467 	}
1468 
1469 	return 0;
1470 }
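
/* Illustrative userspace bind (a sketch): nl_pid == 0 requests autobind,
 * and nl_groups uses the 1-based group numbering from
 * netlink_group_mask() above:
 *
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,			// kernel picks a port id
 *		.nl_groups = 1 << (group - 1),	// subscribe to "group"
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 */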
1471 
1472 static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1473 			   int alen, int flags)
1474 {
1475 	int err = 0;
1476 	struct sock *sk = sock->sk;
1477 	struct netlink_sock *nlk = nlk_sk(sk);
1478 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1479 
1480 	if (alen < sizeof(addr->sa_family))
1481 		return -EINVAL;
1482 
1483 	if (addr->sa_family == AF_UNSPEC) {
1484 		sk->sk_state	= NETLINK_UNCONNECTED;
1485 		nlk->dst_portid	= 0;
1486 		nlk->dst_group  = 0;
1487 		return 0;
1488 	}
1489 	if (addr->sa_family != AF_NETLINK)
1490 		return -EINVAL;
1491 
1492 	/* Only superuser is allowed to send multicasts */
1493 	if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
1494 		return -EPERM;
1495 
1496 	if (!nlk->portid)
1497 		err = netlink_autobind(sock);
1498 
1499 	if (err == 0) {
1500 		sk->sk_state	= NETLINK_CONNECTED;
1501 		nlk->dst_portid = nladdr->nl_pid;
1502 		nlk->dst_group  = ffs(nladdr->nl_groups);
1503 	}
1504 
1505 	return err;
1506 }
1507 
1508 static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1509 			   int *addr_len, int peer)
1510 {
1511 	struct sock *sk = sock->sk;
1512 	struct netlink_sock *nlk = nlk_sk(sk);
1513 	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
1514 
1515 	nladdr->nl_family = AF_NETLINK;
1516 	nladdr->nl_pad = 0;
1517 	*addr_len = sizeof(*nladdr);
1518 
1519 	if (peer) {
1520 		nladdr->nl_pid = nlk->dst_portid;
1521 		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
1522 	} else {
1523 		nladdr->nl_pid = nlk->portid;
1524 		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
1525 	}
1526 	return 0;
1527 }
1528 
1529 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
1530 {
1531 	struct sock *sock;
1532 	struct netlink_sock *nlk;
1533 
1534 	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
1535 	if (!sock)
1536 		return ERR_PTR(-ECONNREFUSED);
1537 
1538 	/* Don't deliver if the destination is connected to a different peer */
1539 	nlk = nlk_sk(sock);
1540 	if (sock->sk_state == NETLINK_CONNECTED &&
1541 	    nlk->dst_portid != nlk_sk(ssk)->portid) {
1542 		sock_put(sock);
1543 		return ERR_PTR(-ECONNREFUSED);
1544 	}
1545 	return sock;
1546 }
1547 
1548 struct sock *netlink_getsockbyfilp(struct file *filp)
1549 {
1550 	struct inode *inode = file_inode(filp);
1551 	struct sock *sock;
1552 
1553 	if (!S_ISSOCK(inode->i_mode))
1554 		return ERR_PTR(-ENOTSOCK);
1555 
1556 	sock = SOCKET_I(inode)->sk;
1557 	if (sock->sk_family != AF_NETLINK)
1558 		return ERR_PTR(-EINVAL);
1559 
1560 	sock_hold(sock);
1561 	return sock;
1562 }
1563 
1564 static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1565 					       int broadcast)
1566 {
1567 	struct sk_buff *skb;
1568 	void *data;
1569 
1570 	if (size <= NLMSG_GOODSIZE || broadcast)
1571 		return alloc_skb(size, GFP_KERNEL);
1572 
1573 	size = SKB_DATA_ALIGN(size) +
1574 	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1575 
1576 	data = vmalloc(size);
1577 	if (data == NULL)
1578 		return NULL;
1579 
1580 	skb = build_skb(data, size);
1581 	if (skb == NULL)
1582 		vfree(data);
1583 	else {
1584 		skb->head_frag = 0;
1585 		skb->destructor = netlink_skb_destructor;
1586 	}
1587 
1588 	return skb;
1589 }
1590 
1591 /*
1592  * Attach an skb to a netlink socket.
1593  * The caller must hold a reference to the destination socket. On error, the
1594  * reference is dropped. The skb is not sent to the destination; only the
1595  * error checks are performed and memory in the queue is reserved.
1596  * Return values:
1597  * < 0: error. skb freed, reference to sock dropped.
1598  * 0: continue
1599  * 1: repeat lookup - reference dropped while waiting for socket memory.
1600  */
1601 int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
1602 		      long *timeo, struct sock *ssk)
1603 {
1604 	struct netlink_sock *nlk;
1605 
1606 	nlk = nlk_sk(sk);
1607 
1608 	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1609 	     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1610 	    !netlink_skb_is_mmaped(skb)) {
1611 		DECLARE_WAITQUEUE(wait, current);
1612 		if (!*timeo) {
1613 			if (!ssk || netlink_is_kernel(ssk))
1614 				netlink_overrun(sk);
1615 			sock_put(sk);
1616 			kfree_skb(skb);
1617 			return -EAGAIN;
1618 		}
1619 
1620 		__set_current_state(TASK_INTERRUPTIBLE);
1621 		add_wait_queue(&nlk->wait, &wait);
1622 
1623 		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1624 		     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1625 		    !sock_flag(sk, SOCK_DEAD))
1626 			*timeo = schedule_timeout(*timeo);
1627 
1628 		__set_current_state(TASK_RUNNING);
1629 		remove_wait_queue(&nlk->wait, &wait);
1630 		sock_put(sk);
1631 
1632 		if (signal_pending(current)) {
1633 			kfree_skb(skb);
1634 			return sock_intr_errno(*timeo);
1635 		}
1636 		return 1;
1637 	}
1638 	netlink_skb_set_owner_r(skb, sk);
1639 	return 0;
1640 }
1641 
1642 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1643 {
1644 	int len = skb->len;
1645 
1646 	netlink_deliver_tap(skb);
1647 
1648 #ifdef CONFIG_NETLINK_MMAP
1649 	if (netlink_skb_is_mmaped(skb))
1650 		netlink_queue_mmaped_skb(sk, skb);
1651 	else if (netlink_rx_is_mmaped(sk))
1652 		netlink_ring_set_copied(sk, skb);
1653 	else
1654 #endif /* CONFIG_NETLINK_MMAP */
1655 		skb_queue_tail(&sk->sk_receive_queue, skb);
1656 	sk->sk_data_ready(sk, len);
1657 	return len;
1658 }
1659 
1660 int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1661 {
1662 	int len = __netlink_sendskb(sk, skb);
1663 
1664 	sock_put(sk);
1665 	return len;
1666 }
1667 
1668 void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1669 {
1670 	kfree_skb(skb);
1671 	sock_put(sk);
1672 }
1673 
1674 static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1675 {
1676 	int delta;
1677 
1678 	WARN_ON(skb->sk != NULL);
1679 	if (netlink_skb_is_mmaped(skb))
1680 		return skb;
1681 
1682 	delta = skb->end - skb->tail;
1683 	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
1684 		return skb;
1685 
1686 	if (skb_shared(skb)) {
1687 		struct sk_buff *nskb = skb_clone(skb, allocation);
1688 		if (!nskb)
1689 			return skb;
1690 		consume_skb(skb);
1691 		skb = nskb;
1692 	}
1693 
1694 	if (!pskb_expand_head(skb, 0, -delta, allocation))
1695 		skb->truesize -= delta;
1696 
1697 	return skb;
1698 }
1699 
1700 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1701 				  struct sock *ssk)
1702 {
1703 	int ret;
1704 	struct netlink_sock *nlk = nlk_sk(sk);
1705 
1706 	ret = -ECONNREFUSED;
1707 	if (nlk->netlink_rcv != NULL) {
1708 		ret = skb->len;
1709 		netlink_skb_set_owner_r(skb, sk);
1710 		NETLINK_CB(skb).sk = ssk;
1711 		netlink_deliver_tap_kernel(sk, ssk, skb);
1712 		nlk->netlink_rcv(skb);
1713 		consume_skb(skb);
1714 	} else {
1715 		kfree_skb(skb);
1716 	}
1717 	sock_put(sk);
1718 	return ret;
1719 }
1720 
1721 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
1722 		    u32 portid, int nonblock)
1723 {
1724 	struct sock *sk;
1725 	int err;
1726 	long timeo;
1727 
1728 	skb = netlink_trim(skb, gfp_any());
1729 
1730 	timeo = sock_sndtimeo(ssk, nonblock);
1731 retry:
1732 	sk = netlink_getsockbyportid(ssk, portid);
1733 	if (IS_ERR(sk)) {
1734 		kfree_skb(skb);
1735 		return PTR_ERR(sk);
1736 	}
1737 	if (netlink_is_kernel(sk))
1738 		return netlink_unicast_kernel(sk, skb, ssk);
1739 
1740 	if (sk_filter(sk, skb)) {
1741 		err = skb->len;
1742 		kfree_skb(skb);
1743 		sock_put(sk);
1744 		return err;
1745 	}
1746 
1747 	err = netlink_attachskb(sk, skb, &timeo, ssk);
1748 	if (err == 1)
1749 		goto retry;
1750 	if (err)
1751 		return err;
1752 
1753 	return netlink_sendskb(sk, skb);
1754 }
1755 EXPORT_SYMBOL(netlink_unicast);
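
/* Kernel-side callers usually go through the nlmsg_unicast() wrapper in
 * <net/netlink.h>, which is roughly:
 *
 *	err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT);
 *	return err > 0 ? 0 : err;
 */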
1756 
1757 struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1758 				  u32 dst_portid, gfp_t gfp_mask)
1759 {
1760 #ifdef CONFIG_NETLINK_MMAP
1761 	struct sock *sk = NULL;
1762 	struct sk_buff *skb;
1763 	struct netlink_ring *ring;
1764 	struct nl_mmap_hdr *hdr;
1765 	unsigned int maxlen;
1766 
1767 	sk = netlink_getsockbyportid(ssk, dst_portid);
1768 	if (IS_ERR(sk))
1769 		goto out;
1770 
1771 	ring = &nlk_sk(sk)->rx_ring;
1772 	/* fast-path without atomic ops for common case: non-mmaped receiver */
1773 	if (ring->pg_vec == NULL)
1774 		goto out_put;
1775 
1776 	if (ring->frame_size - NL_MMAP_HDRLEN < size)
1777 		goto out_put;
1778 
1779 	skb = alloc_skb_head(gfp_mask);
1780 	if (skb == NULL)
1781 		goto err1;
1782 
1783 	spin_lock_bh(&sk->sk_receive_queue.lock);
1784 	/* check again under lock */
1785 	if (ring->pg_vec == NULL)
1786 		goto out_free;
1787 
1788 	/* check again under lock */
1789 	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1790 	if (maxlen < size)
1791 		goto out_free;
1792 
1793 	netlink_forward_ring(ring);
1794 	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1795 	if (hdr == NULL)
1796 		goto err2;
1797 	netlink_ring_setup_skb(skb, sk, ring, hdr);
1798 	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1799 	atomic_inc(&ring->pending);
1800 	netlink_increment_head(ring);
1801 
1802 	spin_unlock_bh(&sk->sk_receive_queue.lock);
1803 	return skb;
1804 
1805 err2:
1806 	kfree_skb(skb);
1807 	spin_unlock_bh(&sk->sk_receive_queue.lock);
1808 	netlink_overrun(sk);
1809 err1:
1810 	sock_put(sk);
1811 	return NULL;
1812 
1813 out_free:
1814 	kfree_skb(skb);
1815 	spin_unlock_bh(&sk->sk_receive_queue.lock);
1816 out_put:
1817 	sock_put(sk);
1818 out:
1819 #endif
1820 	return alloc_skb(size, gfp_mask);
1821 }
1822 EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1823 
1824 int netlink_has_listeners(struct sock *sk, unsigned int group)
1825 {
1826 	int res = 0;
1827 	struct listeners *listeners;
1828 
1829 	BUG_ON(!netlink_is_kernel(sk));
1830 
1831 	rcu_read_lock();
1832 	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1833 
1834 	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
1835 		res = test_bit(group - 1, listeners->masks);
1836 
1837 	rcu_read_unlock();
1838 
1839 	return res;
1840 }
1841 EXPORT_SYMBOL_GPL(netlink_has_listeners);
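
/* Typical producer pattern: skip building an expensive notification when
 * nobody listens, e.g. (a sketch, with "grp" a valid 1-based group and
 * build_notification() a placeholder):
 *
 *	if (netlink_has_listeners(sk, grp)) {
 *		skb = build_notification();
 *		netlink_broadcast(sk, skb, 0, grp, GFP_KERNEL);
 *	}
 */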
1842 
1843 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1844 {
1845 	struct netlink_sock *nlk = nlk_sk(sk);
1846 
1847 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
1848 	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
1849 		netlink_skb_set_owner_r(skb, sk);
1850 		__netlink_sendskb(sk, skb);
1851 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1852 	}
1853 	return -1;
1854 }
1855 
1856 struct netlink_broadcast_data {
1857 	struct sock *exclude_sk;
1858 	struct net *net;
1859 	u32 portid;
1860 	u32 group;
1861 	int failure;
1862 	int delivery_failure;
1863 	int congested;
1864 	int delivered;
1865 	gfp_t allocation;
1866 	struct sk_buff *skb, *skb2;
1867 	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1868 	void *tx_data;
1869 };
1870 
1871 static int do_one_broadcast(struct sock *sk,
1872 				   struct netlink_broadcast_data *p)
1873 {
1874 	struct netlink_sock *nlk = nlk_sk(sk);
1875 	int val;
1876 
1877 	if (p->exclude_sk == sk)
1878 		goto out;
1879 
1880 	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1881 	    !test_bit(p->group - 1, nlk->groups))
1882 		goto out;
1883 
1884 	if (!net_eq(sock_net(sk), p->net))
1885 		goto out;
1886 
1887 	if (p->failure) {
1888 		netlink_overrun(sk);
1889 		goto out;
1890 	}
1891 
1892 	sock_hold(sk);
1893 	if (p->skb2 == NULL) {
1894 		if (skb_shared(p->skb)) {
1895 			p->skb2 = skb_clone(p->skb, p->allocation);
1896 		} else {
1897 			p->skb2 = skb_get(p->skb);
1898 			/*
1899 			 * skb ownership may have been set when
1900 			 * delivered to a previous socket.
1901 			 */
1902 			skb_orphan(p->skb2);
1903 		}
1904 	}
1905 	if (p->skb2 == NULL) {
1906 		netlink_overrun(sk);
1907 		/* Clone failed. Notify ALL listeners. */
1908 		p->failure = 1;
1909 		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1910 			p->delivery_failure = 1;
1911 	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1912 		kfree_skb(p->skb2);
1913 		p->skb2 = NULL;
1914 	} else if (sk_filter(sk, p->skb2)) {
1915 		kfree_skb(p->skb2);
1916 		p->skb2 = NULL;
1917 	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1918 		netlink_overrun(sk);
1919 		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1920 			p->delivery_failure = 1;
1921 	} else {
1922 		p->congested |= val;
1923 		p->delivered = 1;
1924 		p->skb2 = NULL;
1925 	}
1926 	sock_put(sk);
1927 
1928 out:
1929 	return 0;
1930 }
1931 
1932 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
1933 	u32 group, gfp_t allocation,
1934 	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1935 	void *filter_data)
1936 {
1937 	struct net *net = sock_net(ssk);
1938 	struct netlink_broadcast_data info;
1939 	struct sock *sk;
1940 
1941 	skb = netlink_trim(skb, allocation);
1942 
1943 	info.exclude_sk = ssk;
1944 	info.net = net;
1945 	info.portid = portid;
1946 	info.group = group;
1947 	info.failure = 0;
1948 	info.delivery_failure = 0;
1949 	info.congested = 0;
1950 	info.delivered = 0;
1951 	info.allocation = allocation;
1952 	info.skb = skb;
1953 	info.skb2 = NULL;
1954 	info.tx_filter = filter;
1955 	info.tx_data = filter_data;
1956 
1957 	/* While we sleep in clone, do not allow the socket list to change */
1958 
1959 	netlink_lock_table();
1960 
1961 	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1962 		do_one_broadcast(sk, &info);
1963 
1964 	consume_skb(skb);
1965 
1966 	netlink_unlock_table();
1967 
1968 	if (info.delivery_failure) {
1969 		kfree_skb(info.skb2);
1970 		return -ENOBUFS;
1971 	}
1972 	consume_skb(info.skb2);
1973 
1974 	if (info.delivered) {
1975 		if (info.congested && (allocation & __GFP_WAIT))
1976 			yield();
1977 		return 0;
1978 	}
1979 	return -ESRCH;
1980 }
1981 EXPORT_SYMBOL(netlink_broadcast_filtered);
1982 
1983 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
1984 		      u32 group, gfp_t allocation)
1985 {
1986 	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
1987 		NULL, NULL);
1988 }
1989 EXPORT_SYMBOL(netlink_broadcast);
1990 
1991 struct netlink_set_err_data {
1992 	struct sock *exclude_sk;
1993 	u32 portid;
1994 	u32 group;
1995 	int code;
1996 };
1997 
1998 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1999 {
2000 	struct netlink_sock *nlk = nlk_sk(sk);
2001 	int ret = 0;
2002 
2003 	if (sk == p->exclude_sk)
2004 		goto out;
2005 
2006 	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
2007 		goto out;
2008 
2009 	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
2010 	    !test_bit(p->group - 1, nlk->groups))
2011 		goto out;
2012 
2013 	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
2014 		ret = 1;
2015 		goto out;
2016 	}
2017 
2018 	sk->sk_err = p->code;
2019 	sk->sk_error_report(sk);
2020 out:
2021 	return ret;
2022 }
2023 
2024 /**
2025  * netlink_set_err - report error to broadcast listeners
2026  * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
2027  * @portid: the PORTID of a process that we want to skip (if any)
2028  * @group: the broadcast group that will notice the error
2029  * @code: error code, must be negative (as usual in kernelspace)
2030  *
2031  * This function returns the number of broadcast listeners that have set the
2032  * NETLINK_RECV_NO_ENOBUFS socket option.
2033  */
2034 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
2035 {
2036 	struct netlink_set_err_data info;
2037 	struct sock *sk;
2038 	int ret = 0;
2039 
2040 	info.exclude_sk = ssk;
2041 	info.portid = portid;
2042 	info.group = group;
2043 	/* sk->sk_err wants a positive error value */
2044 	info.code = -code;
2045 
2046 	read_lock(&nl_table_lock);
2047 
2048 	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
2049 		ret += do_one_set_err(sk, &info);
2050 
2051 	read_unlock(&nl_table_lock);
2052 	return ret;
2053 }
2054 EXPORT_SYMBOL(netlink_set_err);
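
/*
 * Illustrative sketch, not part of this file: a subsystem whose
 * netlink_broadcast() failed with -ENOBUFS can push the error to the
 * remaining listeners of the group.  "my_sock" and MY_GROUP are
 * hypothetical; the return value counts listeners that opted out via
 * NETLINK_RECV_NO_ENOBUFS.
 *
 *	err = netlink_broadcast(my_sock, skb, 0, MY_GROUP, GFP_KERNEL);
 *	if (err == -ENOBUFS)
 *		netlink_set_err(my_sock, 0, MY_GROUP, -ENOBUFS);
 */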
2055 
2056 /* must be called with netlink table grabbed */
2057 static void netlink_update_socket_mc(struct netlink_sock *nlk,
2058 				     unsigned int group,
2059 				     int is_new)
2060 {
2061 	int old, new = !!is_new, subscriptions;
2062 
2063 	old = test_bit(group - 1, nlk->groups);
2064 	subscriptions = nlk->subscriptions - old + new;
2065 	if (new)
2066 		__set_bit(group - 1, nlk->groups);
2067 	else
2068 		__clear_bit(group - 1, nlk->groups);
2069 	netlink_update_subscriptions(&nlk->sk, subscriptions);
2070 	netlink_update_listeners(&nlk->sk);
2071 }
2072 
2073 static int netlink_setsockopt(struct socket *sock, int level, int optname,
2074 			      char __user *optval, unsigned int optlen)
2075 {
2076 	struct sock *sk = sock->sk;
2077 	struct netlink_sock *nlk = nlk_sk(sk);
2078 	unsigned int val = 0;
2079 	int err;
2080 
2081 	if (level != SOL_NETLINK)
2082 		return -ENOPROTOOPT;
2083 
2084 	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2085 	    optlen >= sizeof(int) &&
2086 	    get_user(val, (unsigned int __user *)optval))
2087 		return -EFAULT;
2088 
2089 	switch (optname) {
2090 	case NETLINK_PKTINFO:
2091 		if (val)
2092 			nlk->flags |= NETLINK_RECV_PKTINFO;
2093 		else
2094 			nlk->flags &= ~NETLINK_RECV_PKTINFO;
2095 		err = 0;
2096 		break;
2097 	case NETLINK_ADD_MEMBERSHIP:
2098 	case NETLINK_DROP_MEMBERSHIP: {
2099 		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
2100 			return -EPERM;
2101 		err = netlink_realloc_groups(sk);
2102 		if (err)
2103 			return err;
2104 		if (!val || val - 1 >= nlk->ngroups)
2105 			return -EINVAL;
2106 		netlink_table_grab();
2107 		netlink_update_socket_mc(nlk, val,
2108 					 optname == NETLINK_ADD_MEMBERSHIP);
2109 		netlink_table_ungrab();
2110 
2111 		if (nlk->netlink_bind)
2112 			nlk->netlink_bind(val);
2113 
2114 		err = 0;
2115 		break;
2116 	}
2117 	case NETLINK_BROADCAST_ERROR:
2118 		if (val)
2119 			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
2120 		else
2121 			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
2122 		err = 0;
2123 		break;
2124 	case NETLINK_NO_ENOBUFS:
2125 		if (val) {
2126 			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
2127 			clear_bit(NETLINK_CONGESTED, &nlk->state);
2128 			wake_up_interruptible(&nlk->wait);
2129 		} else {
2130 			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
2131 		}
2132 		err = 0;
2133 		break;
2134 #ifdef CONFIG_NETLINK_MMAP
2135 	case NETLINK_RX_RING:
2136 	case NETLINK_TX_RING: {
2137 		struct nl_mmap_req req;
2138 
2139 		/* Rings might consume more memory than the socket queue
2140 		 * limits allow, so require CAP_NET_ADMIN.
2141 		 */
2142 		if (!capable(CAP_NET_ADMIN))
2143 			return -EPERM;
2144 		if (optlen < sizeof(req))
2145 			return -EINVAL;
2146 		if (copy_from_user(&req, optval, sizeof(req)))
2147 			return -EFAULT;
2148 		err = netlink_set_ring(sk, &req, false,
2149 				       optname == NETLINK_TX_RING);
2150 		break;
2151 	}
2152 #endif /* CONFIG_NETLINK_MMAP */
2153 	default:
2154 		err = -ENOPROTOOPT;
2155 	}
2156 	return err;
2157 }
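
/*
 * Illustrative userspace sketch, not part of this file: exercising the
 * membership options handled above.  MY_GROUP is hypothetical and
 * error checking is omitted.
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	unsigned int grp = MY_GROUP;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 *	...
 *	setsockopt(fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 */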
2158 
2159 static int netlink_getsockopt(struct socket *sock, int level, int optname,
2160 			      char __user *optval, int __user *optlen)
2161 {
2162 	struct sock *sk = sock->sk;
2163 	struct netlink_sock *nlk = nlk_sk(sk);
2164 	int len, val, err;
2165 
2166 	if (level != SOL_NETLINK)
2167 		return -ENOPROTOOPT;
2168 
2169 	if (get_user(len, optlen))
2170 		return -EFAULT;
2171 	if (len < 0)
2172 		return -EINVAL;
2173 
2174 	switch (optname) {
2175 	case NETLINK_PKTINFO:
2176 		if (len < sizeof(int))
2177 			return -EINVAL;
2178 		len = sizeof(int);
2179 		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
2180 		if (put_user(len, optlen) ||
2181 		    put_user(val, optval))
2182 			return -EFAULT;
2183 		err = 0;
2184 		break;
2185 	case NETLINK_BROADCAST_ERROR:
2186 		if (len < sizeof(int))
2187 			return -EINVAL;
2188 		len = sizeof(int);
2189 		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
2190 		if (put_user(len, optlen) ||
2191 		    put_user(val, optval))
2192 			return -EFAULT;
2193 		err = 0;
2194 		break;
2195 	case NETLINK_NO_ENOBUFS:
2196 		if (len < sizeof(int))
2197 			return -EINVAL;
2198 		len = sizeof(int);
2199 		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
2200 		if (put_user(len, optlen) ||
2201 		    put_user(val, optval))
2202 			return -EFAULT;
2203 		err = 0;
2204 		break;
2205 	default:
2206 		err = -ENOPROTOOPT;
2207 	}
2208 	return err;
2209 }
2210 
2211 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2212 {
2213 	struct nl_pktinfo info;
2214 
2215 	info.group = NETLINK_CB(skb).dst_group;
2216 	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2217 }
2218 
2219 static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
2220 			   struct msghdr *msg, size_t len)
2221 {
2222 	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2223 	struct sock *sk = sock->sk;
2224 	struct netlink_sock *nlk = nlk_sk(sk);
2225 	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
2226 	u32 dst_portid;
2227 	u32 dst_group;
2228 	struct sk_buff *skb;
2229 	int err;
2230 	struct scm_cookie scm;
2231 
2232 	if (msg->msg_flags&MSG_OOB)
2233 		return -EOPNOTSUPP;
2234 
2235 	if (!siocb->scm)
2236 		siocb->scm = &scm;
2237 
2238 	err = scm_send(sock, msg, siocb->scm, true);
2239 	if (err < 0)
2240 		return err;
2241 
2242 	if (msg->msg_namelen) {
2243 		err = -EINVAL;
2244 		if (addr->nl_family != AF_NETLINK)
2245 			goto out;
2246 		dst_portid = addr->nl_pid;
2247 		dst_group = ffs(addr->nl_groups);
2248 		err = -EPERM;
2249 		if ((dst_group || dst_portid) &&
2250 		    !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
2251 			goto out;
2252 	} else {
2253 		dst_portid = nlk->dst_portid;
2254 		dst_group = nlk->dst_group;
2255 	}
2256 
2257 	if (!nlk->portid) {
2258 		err = netlink_autobind(sock);
2259 		if (err)
2260 			goto out;
2261 	}
2262 
2263 	if (netlink_tx_is_mmaped(sk) &&
2264 	    msg->msg_iov->iov_base == NULL) {
2265 		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
2266 					   siocb);
2267 		goto out;
2268 	}
2269 
2270 	err = -EMSGSIZE;
2271 	if (len > sk->sk_sndbuf - 32)
2272 		goto out;
2273 	err = -ENOBUFS;
2274 	skb = netlink_alloc_large_skb(len, dst_group);
2275 	if (skb == NULL)
2276 		goto out;
2277 
2278 	NETLINK_CB(skb).portid	= nlk->portid;
2279 	NETLINK_CB(skb).dst_group = dst_group;
2280 	NETLINK_CB(skb).creds	= siocb->scm->creds;
2281 
2282 	err = -EFAULT;
2283 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
2284 		kfree_skb(skb);
2285 		goto out;
2286 	}
2287 
2288 	err = security_netlink_send(sk, skb);
2289 	if (err) {
2290 		kfree_skb(skb);
2291 		goto out;
2292 	}
2293 
2294 	if (dst_group) {
2295 		atomic_inc(&skb->users);
2296 		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
2297 	}
2298 	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
2299 
2300 out:
2301 	scm_destroy(siocb->scm);
2302 	return err;
2303 }
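
/*
 * Illustrative userspace sketch, not part of this file: the msg_name
 * layout netlink_sendmsg() expects.  nl_pid selects the unicast
 * destination (0 addresses the kernel) and nl_groups is a multicast
 * group mask; "buf", "len" and "fd" are hypothetical.
 *
 *	struct sockaddr_nl dst = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,
 *	};
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct msghdr msg = {
 *		.msg_name    = &dst,
 *		.msg_namelen = sizeof(dst),
 *		.msg_iov     = &iov,
 *		.msg_iovlen  = 1,
 *	};
 *
 *	sendmsg(fd, &msg, 0);
 */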
2304 
2305 static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2306 			   struct msghdr *msg, size_t len,
2307 			   int flags)
2308 {
2309 	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2310 	struct scm_cookie scm;
2311 	struct sock *sk = sock->sk;
2312 	struct netlink_sock *nlk = nlk_sk(sk);
2313 	int noblock = flags&MSG_DONTWAIT;
2314 	size_t copied;
2315 	struct sk_buff *skb, *data_skb;
2316 	int err, ret;
2317 
2318 	if (flags&MSG_OOB)
2319 		return -EOPNOTSUPP;
2320 
2321 	copied = 0;
2322 
2323 	skb = skb_recv_datagram(sk, flags, noblock, &err);
2324 	if (skb == NULL)
2325 		goto out;
2326 
2327 	data_skb = skb;
2328 
2329 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2330 	if (unlikely(skb_shinfo(skb)->frag_list)) {
2331 		/*
2332 		 * If this skb has a frag_list, then it means that we
2333 		 * will have to use the frag_list skb's data for compat tasks
2334 		 * and the regular skb's data for normal (non-compat) tasks.
2335 		 *
2336 		 * If we need to send the compat skb, assign it to the
2337 		 * 'data_skb' variable so that it will be used below for data
2338 		 * copying. We keep 'skb' for everything else, including
2339 		 * freeing both later.
2340 		 */
2341 		if (flags & MSG_CMSG_COMPAT)
2342 			data_skb = skb_shinfo(skb)->frag_list;
2343 	}
2344 #endif
2345 
2346 	copied = data_skb->len;
2347 	if (len < copied) {
2348 		msg->msg_flags |= MSG_TRUNC;
2349 		copied = len;
2350 	}
2351 
2352 	skb_reset_transport_header(data_skb);
2353 	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
2354 
2355 	if (msg->msg_name) {
2356 		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
2357 		addr->nl_family = AF_NETLINK;
2358 		addr->nl_pad    = 0;
2359 		addr->nl_pid	= NETLINK_CB(skb).portid;
2360 		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
2361 		msg->msg_namelen = sizeof(*addr);
2362 	}
2363 
2364 	if (nlk->flags & NETLINK_RECV_PKTINFO)
2365 		netlink_cmsg_recv_pktinfo(msg, skb);
2366 
2367 	if (!siocb->scm) {
2368 		memset(&scm, 0, sizeof(scm));
2369 		siocb->scm = &scm;
2370 	}
2371 	siocb->scm->creds = *NETLINK_CREDS(skb);
2372 	if (flags & MSG_TRUNC)
2373 		copied = data_skb->len;
2374 
2375 	skb_free_datagram(sk, skb);
2376 
2377 	if (nlk->cb_running &&
2378 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2379 		ret = netlink_dump(sk);
2380 		if (ret) {
2381 			sk->sk_err = ret;
2382 			sk->sk_error_report(sk);
2383 		}
2384 	}
2385 
2386 	scm_recv(sock, msg, siocb->scm, flags);
2387 out:
2388 	netlink_rcv_wake(sk);
2389 	return err ? : copied;
2390 }
2391 
2392 static void netlink_data_ready(struct sock *sk, int len)
2393 {
2394 	BUG();
2395 }
2396 
2397 /*
2398  *	We export these functions to other modules. They provide a
2399  *	complete set of non-blocking message queueing support for
2400  *	kernel users.
2401  */
2402 
2403 struct sock *
2404 __netlink_kernel_create(struct net *net, int unit, struct module *module,
2405 			struct netlink_kernel_cfg *cfg)
2406 {
2407 	struct socket *sock;
2408 	struct sock *sk;
2409 	struct netlink_sock *nlk;
2410 	struct listeners *listeners = NULL;
2411 	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2412 	unsigned int groups;
2413 
2414 	BUG_ON(!nl_table);
2415 
2416 	if (unit < 0 || unit >= MAX_LINKS)
2417 		return NULL;
2418 
2419 	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2420 		return NULL;
2421 
2422 	/*
2423 	 * We just have to hold a reference on the net from sk, but must not
2424 	 * get_net() it. Besides, we cannot get and then put the net here.
2425 	 * So we create the socket inside init_net and then move it to net.
2426 	 */
2427 
2428 	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2429 		goto out_sock_release_nosk;
2430 
2431 	sk = sock->sk;
2432 	sk_change_net(sk, net);
2433 
2434 	if (!cfg || cfg->groups < 32)
2435 		groups = 32;
2436 	else
2437 		groups = cfg->groups;
2438 
2439 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2440 	if (!listeners)
2441 		goto out_sock_release;
2442 
2443 	sk->sk_data_ready = netlink_data_ready;
2444 	if (cfg && cfg->input)
2445 		nlk_sk(sk)->netlink_rcv = cfg->input;
2446 
2447 	if (netlink_insert(sk, net, 0))
2448 		goto out_sock_release;
2449 
2450 	nlk = nlk_sk(sk);
2451 	nlk->flags |= NETLINK_KERNEL_SOCKET;
2452 
2453 	netlink_table_grab();
2454 	if (!nl_table[unit].registered) {
2455 		nl_table[unit].groups = groups;
2456 		rcu_assign_pointer(nl_table[unit].listeners, listeners);
2457 		nl_table[unit].cb_mutex = cb_mutex;
2458 		nl_table[unit].module = module;
2459 		if (cfg) {
2460 			nl_table[unit].bind = cfg->bind;
2461 			nl_table[unit].flags = cfg->flags;
2462 			if (cfg->compare)
2463 				nl_table[unit].compare = cfg->compare;
2464 		}
2465 		nl_table[unit].registered = 1;
2466 	} else {
2467 		kfree(listeners);
2468 		nl_table[unit].registered++;
2469 	}
2470 	netlink_table_ungrab();
2471 	return sk;
2472 
2473 out_sock_release:
2474 	kfree(listeners);
2475 	netlink_kernel_release(sk);
2476 	return NULL;
2477 
2478 out_sock_release_nosk:
2479 	sock_release(sock);
2480 	return NULL;
2481 }
2482 EXPORT_SYMBOL(__netlink_kernel_create);
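
/*
 * Illustrative sketch, not part of this file: most callers go through
 * the netlink_kernel_create() wrapper, which supplies THIS_MODULE.
 * NETLINK_MYPROTO and my_input() are hypothetical.
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		... handle one skb queued to the kernel socket ...
 *	}
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups = 32,
 *		.input  = my_input,
 *	};
 *	struct sock *sk = netlink_kernel_create(&init_net,
 *						NETLINK_MYPROTO, &cfg);
 */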
2483 
2484 void
2485 netlink_kernel_release(struct sock *sk)
2486 {
2487 	sk_release_kernel(sk);
2488 }
2489 EXPORT_SYMBOL(netlink_kernel_release);
2490 
2491 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
2492 {
2493 	struct listeners *new, *old;
2494 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
2495 
2496 	if (groups < 32)
2497 		groups = 32;
2498 
2499 	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
2500 		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2501 		if (!new)
2502 			return -ENOMEM;
2503 		old = nl_deref_protected(tbl->listeners);
2504 		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2505 		rcu_assign_pointer(tbl->listeners, new);
2506 
2507 		kfree_rcu(old, rcu);
2508 	}
2509 	tbl->groups = groups;
2510 
2511 	return 0;
2512 }
2513 
2514 /**
2515  * netlink_change_ngroups - change number of multicast groups
2516  *
2517  * This changes the number of multicast groups that are available
2518  * on a certain netlink family. Note that it is not possible to
2519  * change the number of groups to below 32. Also note that it does
2520  * not implicitly call netlink_clear_multicast_users() when the
2521  * number of groups is reduced.
2522  *
2523  * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2524  * @groups: The new number of groups.
2525  */
2526 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2527 {
2528 	int err;
2529 
2530 	netlink_table_grab();
2531 	err = __netlink_change_ngroups(sk, groups);
2532 	netlink_table_ungrab();
2533 
2534 	return err;
2535 }
2536 
2537 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2538 {
2539 	struct sock *sk;
2540 	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2541 
2542 	sk_for_each_bound(sk, &tbl->mc_list)
2543 		netlink_update_socket_mc(nlk_sk(sk), group, 0);
2544 }
2545 
2546 struct nlmsghdr *
2547 __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2548 {
2549 	struct nlmsghdr *nlh;
2550 	int size = nlmsg_msg_size(len);
2551 
2552 	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
2553 	nlh->nlmsg_type = type;
2554 	nlh->nlmsg_len = size;
2555 	nlh->nlmsg_flags = flags;
2556 	nlh->nlmsg_pid = portid;
2557 	nlh->nlmsg_seq = seq;
2558 	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
2559 		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2560 	return nlh;
2561 }
2562 EXPORT_SYMBOL(__nlmsg_put);
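
/*
 * Illustrative sketch, not part of this file: messages are normally
 * composed via the nlmsg_new()/nlmsg_put() helpers built on top of
 * __nlmsg_put().  MY_MSG_TYPE and "payload" are hypothetical, and
 * error unwinding is omitted.
 *
 *	struct sk_buff *skb = nlmsg_new(sizeof(payload), GFP_KERNEL);
 *	struct nlmsghdr *nlh;
 *
 *	if (skb) {
 *		nlh = nlmsg_put(skb, 0, 0, MY_MSG_TYPE,
 *				sizeof(payload), 0);
 *		if (nlh)
 *			memcpy(nlmsg_data(nlh), &payload, sizeof(payload));
 *	}
 */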
2563 
2564 /*
2565  * It looks a bit ugly.
2566  * It would be better to create a kernel thread.
2567  */
2568 
2569 static int netlink_dump(struct sock *sk)
2570 {
2571 	struct netlink_sock *nlk = nlk_sk(sk);
2572 	struct netlink_callback *cb;
2573 	struct sk_buff *skb = NULL;
2574 	struct nlmsghdr *nlh;
2575 	int len, err = -ENOBUFS;
2576 	int alloc_size;
2577 
2578 	mutex_lock(nlk->cb_mutex);
2579 	if (!nlk->cb_running) {
2580 		err = -EINVAL;
2581 		goto errout_skb;
2582 	}
2583 
2584 	cb = &nlk->cb;
2585 	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2586 
2587 	if (!netlink_rx_is_mmaped(sk) &&
2588 	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2589 		goto errout_skb;
2590 	skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
2591 	if (!skb)
2592 		goto errout_skb;
2593 	netlink_skb_set_owner_r(skb, sk);
2594 
2595 	len = cb->dump(skb, cb);
2596 
2597 	if (len > 0) {
2598 		mutex_unlock(nlk->cb_mutex);
2599 
2600 		if (sk_filter(sk, skb))
2601 			kfree_skb(skb);
2602 		else
2603 			__netlink_sendskb(sk, skb);
2604 		return 0;
2605 	}
2606 
2607 	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2608 	if (!nlh)
2609 		goto errout_skb;
2610 
2611 	nl_dump_check_consistent(cb, nlh);
2612 
2613 	memcpy(nlmsg_data(nlh), &len, sizeof(len));
2614 
2615 	if (sk_filter(sk, skb))
2616 		kfree_skb(skb);
2617 	else
2618 		__netlink_sendskb(sk, skb);
2619 
2620 	if (cb->done)
2621 		cb->done(cb);
2622 
2623 	nlk->cb_running = false;
2624 	mutex_unlock(nlk->cb_mutex);
2625 	module_put(cb->module);
2626 	consume_skb(cb->skb);
2627 	return 0;
2628 
2629 errout_skb:
2630 	mutex_unlock(nlk->cb_mutex);
2631 	kfree_skb(skb);
2632 	return err;
2633 }
2634 
2635 int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2636 			 const struct nlmsghdr *nlh,
2637 			 struct netlink_dump_control *control)
2638 {
2639 	struct netlink_callback *cb;
2640 	struct sock *sk;
2641 	struct netlink_sock *nlk;
2642 	int ret;
2643 
2644 	/* Memory mapped dump requests need to be copied to avoid looping
2645 	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2646 	 * a reference to the skb.
2647 	 */
2648 	if (netlink_skb_is_mmaped(skb)) {
2649 		skb = skb_copy(skb, GFP_KERNEL);
2650 		if (skb == NULL)
2651 			return -ENOBUFS;
2652 	} else
2653 		atomic_inc(&skb->users);
2654 
2655 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2656 	if (sk == NULL) {
2657 		ret = -ECONNREFUSED;
2658 		goto error_free;
2659 	}
2660 
2661 	nlk = nlk_sk(sk);
2662 	mutex_lock(nlk->cb_mutex);
2663 	/* A dump is in progress... */
2664 	if (nlk->cb_running) {
2665 		ret = -EBUSY;
2666 		goto error_unlock;
2667 	}
2668 	/* take a reference on the module that cb->dump belongs to */
2669 	if (!try_module_get(control->module)) {
2670 		ret = -EPROTONOSUPPORT;
2671 		goto error_unlock;
2672 	}
2673 
2674 	cb = &nlk->cb;
2675 	memset(cb, 0, sizeof(*cb));
2676 	cb->dump = control->dump;
2677 	cb->done = control->done;
2678 	cb->nlh = nlh;
2679 	cb->data = control->data;
2680 	cb->module = control->module;
2681 	cb->min_dump_alloc = control->min_dump_alloc;
2682 	cb->skb = skb;
2683 
2684 	nlk->cb_running = true;
2685 
2686 	mutex_unlock(nlk->cb_mutex);
2687 
2688 	ret = netlink_dump(sk);
2689 	sock_put(sk);
2690 
2691 	if (ret)
2692 		return ret;
2693 
2694 	/* We successfully started a dump; by returning -EINTR we
2695 	 * signal that no ACK should be sent even if one was requested.
2696 	 */
2697 	return -EINTR;
2698 
2699 error_unlock:
2700 	sock_put(sk);
2701 	mutex_unlock(nlk->cb_mutex);
2702 error_free:
2703 	kfree_skb(skb);
2704 	return ret;
2705 }
2706 EXPORT_SYMBOL(__netlink_dump_start);
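
/*
 * Illustrative sketch, not part of this file: a request handler
 * normally enters this machinery through the netlink_dump_start()
 * wrapper.  "my_sock", my_dump() and my_done() are hypothetical.
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = my_dump,
 *			.done = my_done,
 *		};
 *		return netlink_dump_start(my_sock, skb, nlh, &c);
 *	}
 */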
2707 
2708 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2709 {
2710 	struct sk_buff *skb;
2711 	struct nlmsghdr *rep;
2712 	struct nlmsgerr *errmsg;
2713 	size_t payload = sizeof(*errmsg);
2714 
2715 	/* error messages get the original request appended */
2716 	if (err)
2717 		payload += nlmsg_len(nlh);
2718 
2719 	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2720 				NETLINK_CB(in_skb).portid, GFP_KERNEL);
2721 	if (!skb) {
2722 		struct sock *sk;
2723 
2724 		sk = netlink_lookup(sock_net(in_skb->sk),
2725 				    in_skb->sk->sk_protocol,
2726 				    NETLINK_CB(in_skb).portid);
2727 		if (sk) {
2728 			sk->sk_err = ENOBUFS;
2729 			sk->sk_error_report(sk);
2730 			sock_put(sk);
2731 		}
2732 		return;
2733 	}
2734 
2735 	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2736 			  NLMSG_ERROR, payload, 0);
2737 	errmsg = nlmsg_data(rep);
2738 	errmsg->error = err;
2739 	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
2740 	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
2741 }
2742 EXPORT_SYMBOL(netlink_ack);
2743 
2744 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2745 						     struct nlmsghdr *))
2746 {
2747 	struct nlmsghdr *nlh;
2748 	int err;
2749 
2750 	while (skb->len >= nlmsg_total_size(0)) {
2751 		int msglen;
2752 
2753 		nlh = nlmsg_hdr(skb);
2754 		err = 0;
2755 
2756 		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
2757 			return 0;
2758 
2759 		/* Only requests are handled by the kernel */
2760 		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
2761 			goto ack;
2762 
2763 		/* Skip control messages */
2764 		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
2765 			goto ack;
2766 
2767 		err = cb(skb, nlh);
2768 		if (err == -EINTR)
2769 			goto skip;
2770 
2771 ack:
2772 		if (nlh->nlmsg_flags & NLM_F_ACK || err)
2773 			netlink_ack(skb, nlh, err);
2774 
2775 skip:
2776 		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
2777 		if (msglen > skb->len)
2778 			msglen = skb->len;
2779 		skb_pull(skb, msglen);
2780 	}
2781 
2782 	return 0;
2783 }
2784 EXPORT_SYMBOL(netlink_rcv_skb);
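
/*
 * Illustrative sketch, not part of this file: the conventional input
 * callback feeds every queued skb through the loop above, so that
 * my_doit() (hypothetical) sees one validated request at a time and
 * returns 0 or a negative errno.
 *
 *	static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		... dispatch on nlh->nlmsg_type ...
 *		return 0;
 *	}
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_doit);
 *	}
 */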
2785 
2786 /**
2787  * nlmsg_notify - send a notification netlink message
2788  * @sk: netlink socket to use
2789  * @skb: notification message
2790  * @portid: destination netlink portid for reports or 0
2791  * @group: destination multicast group or 0
2792  * @report: 1 to report back, 0 to disable
2793  * @flags: allocation flags
2794  */
2795 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
2796 		 unsigned int group, int report, gfp_t flags)
2797 {
2798 	int err = 0;
2799 
2800 	if (group) {
2801 		int exclude_portid = 0;
2802 
2803 		if (report) {
2804 			atomic_inc(&skb->users);
2805 			exclude_portid = portid;
2806 		}
2807 
2808 		/* errors are reported via the destination sk->sk_err, but
2809 		 * delivery errors propagate if NETLINK_BROADCAST_ERROR is set */
2810 		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
2811 	}
2812 
2813 	if (report) {
2814 		int err2;
2815 
2816 		err2 = nlmsg_unicast(sk, skb, portid);
2817 		if (!err || err == -ESRCH)
2818 			err = err2;
2819 	}
2820 
2821 	return err;
2822 }
2823 EXPORT_SYMBOL(nlmsg_notify);
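
/*
 * Illustrative sketch, not part of this file: a notification sent to a
 * group and echoed back to the requester when NLM_F_ECHO was set.
 * MY_GROUP, "req_nlh" and "req_portid" are hypothetical.
 *
 *	int report = req_nlh->nlmsg_flags & NLM_F_ECHO;
 *
 *	err = nlmsg_notify(sk, skb, req_portid, MY_GROUP,
 *			   report, GFP_KERNEL);
 */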
2824 
2825 #ifdef CONFIG_PROC_FS
2826 struct nl_seq_iter {
2827 	struct seq_net_private p;
2828 	int link;
2829 	int hash_idx;
2830 };
2831 
2832 static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
2833 {
2834 	struct nl_seq_iter *iter = seq->private;
2835 	int i, j;
2836 	struct sock *s;
2837 	loff_t off = 0;
2838 
2839 	for (i = 0; i < MAX_LINKS; i++) {
2840 		struct nl_portid_hash *hash = &nl_table[i].hash;
2841 
2842 		for (j = 0; j <= hash->mask; j++) {
2843 			sk_for_each(s, &hash->table[j]) {
2844 				if (sock_net(s) != seq_file_net(seq))
2845 					continue;
2846 				if (off == pos) {
2847 					iter->link = i;
2848 					iter->hash_idx = j;
2849 					return s;
2850 				}
2851 				++off;
2852 			}
2853 		}
2854 	}
2855 	return NULL;
2856 }
2857 
2858 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
2859 	__acquires(nl_table_lock)
2860 {
2861 	read_lock(&nl_table_lock);
2862 	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2863 }
2864 
2865 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2866 {
2867 	struct sock *s;
2868 	struct nl_seq_iter *iter;
2869 	struct net *net;
2870 	int i, j;
2871 
2872 	++*pos;
2873 
2874 	if (v == SEQ_START_TOKEN)
2875 		return netlink_seq_socket_idx(seq, 0);
2876 
2877 	net = seq_file_net(seq);
2878 	iter = seq->private;
2879 	s = v;
2880 	do {
2881 		s = sk_next(s);
2882 	} while (s && !nl_table[s->sk_protocol].compare(net, s));
2883 	if (s)
2884 		return s;
2885 
2886 	i = iter->link;
2887 	j = iter->hash_idx + 1;
2888 
2889 	do {
2890 		struct nl_portid_hash *hash = &nl_table[i].hash;
2891 
2892 		for (; j <= hash->mask; j++) {
2893 			s = sk_head(&hash->table[j]);
2894 
2895 			while (s && !nl_table[s->sk_protocol].compare(net, s))
2896 				s = sk_next(s);
2897 			if (s) {
2898 				iter->link = i;
2899 				iter->hash_idx = j;
2900 				return s;
2901 			}
2902 		}
2903 
2904 		j = 0;
2905 	} while (++i < MAX_LINKS);
2906 
2907 	return NULL;
2908 }
2909 
2910 static void netlink_seq_stop(struct seq_file *seq, void *v)
2911 	__releases(nl_table_lock)
2912 {
2913 	read_unlock(&nl_table_lock);
2914 }
2915 
2916 
2917 static int netlink_seq_show(struct seq_file *seq, void *v)
2918 {
2919 	if (v == SEQ_START_TOKEN) {
2920 		seq_puts(seq,
2921 			 "sk       Eth Pid    Groups   "
2922 			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
2923 	} else {
2924 		struct sock *s = v;
2925 		struct netlink_sock *nlk = nlk_sk(s);
2926 
2927 		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
2928 			   s,
2929 			   s->sk_protocol,
2930 			   nlk->portid,
2931 			   nlk->groups ? (u32)nlk->groups[0] : 0,
2932 			   sk_rmem_alloc_get(s),
2933 			   sk_wmem_alloc_get(s),
2934 			   nlk->cb_running,
2935 			   atomic_read(&s->sk_refcnt),
2936 			   atomic_read(&s->sk_drops),
2937 			   sock_i_ino(s)
2938 			);
2939 
2940 	}
2941 	return 0;
2942 }
2943 
2944 static const struct seq_operations netlink_seq_ops = {
2945 	.start  = netlink_seq_start,
2946 	.next   = netlink_seq_next,
2947 	.stop   = netlink_seq_stop,
2948 	.show   = netlink_seq_show,
2949 };
2950 
2951 
2952 static int netlink_seq_open(struct inode *inode, struct file *file)
2953 {
2954 	return seq_open_net(inode, file, &netlink_seq_ops,
2955 				sizeof(struct nl_seq_iter));
2956 }
2957 
2958 static const struct file_operations netlink_seq_fops = {
2959 	.owner		= THIS_MODULE,
2960 	.open		= netlink_seq_open,
2961 	.read		= seq_read,
2962 	.llseek		= seq_lseek,
2963 	.release	= seq_release_net,
2964 };
2965 
2966 #endif
2967 
2968 int netlink_register_notifier(struct notifier_block *nb)
2969 {
2970 	return atomic_notifier_chain_register(&netlink_chain, nb);
2971 }
2972 EXPORT_SYMBOL(netlink_register_notifier);
2973 
2974 int netlink_unregister_notifier(struct notifier_block *nb)
2975 {
2976 	return atomic_notifier_chain_unregister(&netlink_chain, nb);
2977 }
2978 EXPORT_SYMBOL(netlink_unregister_notifier);
2979 
2980 static const struct proto_ops netlink_ops = {
2981 	.family =	PF_NETLINK,
2982 	.owner =	THIS_MODULE,
2983 	.release =	netlink_release,
2984 	.bind =		netlink_bind,
2985 	.connect =	netlink_connect,
2986 	.socketpair =	sock_no_socketpair,
2987 	.accept =	sock_no_accept,
2988 	.getname =	netlink_getname,
2989 	.poll =		netlink_poll,
2990 	.ioctl =	sock_no_ioctl,
2991 	.listen =	sock_no_listen,
2992 	.shutdown =	sock_no_shutdown,
2993 	.setsockopt =	netlink_setsockopt,
2994 	.getsockopt =	netlink_getsockopt,
2995 	.sendmsg =	netlink_sendmsg,
2996 	.recvmsg =	netlink_recvmsg,
2997 	.mmap =		netlink_mmap,
2998 	.sendpage =	sock_no_sendpage,
2999 };
3000 
3001 static const struct net_proto_family netlink_family_ops = {
3002 	.family = PF_NETLINK,
3003 	.create = netlink_create,
3004 	.owner	= THIS_MODULE,	/* for consistency 8) */
3005 };
3006 
3007 static int __net_init netlink_net_init(struct net *net)
3008 {
3009 #ifdef CONFIG_PROC_FS
3010 	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
3011 		return -ENOMEM;
3012 #endif
3013 	return 0;
3014 }
3015 
3016 static void __net_exit netlink_net_exit(struct net *net)
3017 {
3018 #ifdef CONFIG_PROC_FS
3019 	remove_proc_entry("netlink", net->proc_net);
3020 #endif
3021 }
3022 
3023 static void __init netlink_add_usersock_entry(void)
3024 {
3025 	struct listeners *listeners;
3026 	int groups = 32;
3027 
3028 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
3029 	if (!listeners)
3030 		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
3031 
3032 	netlink_table_grab();
3033 
3034 	nl_table[NETLINK_USERSOCK].groups = groups;
3035 	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
3036 	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3037 	nl_table[NETLINK_USERSOCK].registered = 1;
3038 	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
3039 
3040 	netlink_table_ungrab();
3041 }
3042 
3043 static struct pernet_operations __net_initdata netlink_net_ops = {
3044 	.init = netlink_net_init,
3045 	.exit = netlink_net_exit,
3046 };
3047 
3048 static int __init netlink_proto_init(void)
3049 {
3050 	int i;
3051 	unsigned long limit;
3052 	unsigned int order;
3053 	int err = proto_register(&netlink_proto, 0);
3054 
3055 	if (err != 0)
3056 		goto out;
3057 
3058 	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
3059 
3060 	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
3061 	if (!nl_table)
3062 		goto panic;
3063 
3064 	if (totalram_pages >= (128 * 1024))
3065 		limit = totalram_pages >> (21 - PAGE_SHIFT);
3066 	else
3067 		limit = totalram_pages >> (23 - PAGE_SHIFT);
3068 
3069 	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
3070 	limit = (1UL << order) / sizeof(struct hlist_head);
3071 	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
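	/*
	 * Worked example (illustrative; assumes 4KB pages and 8-byte
	 * hlist_heads): with 1GB of RAM, totalram_pages = 256K >= 128K,
	 * so limit = 256K >> 9 = 512 pages.  Then order =
	 * fls(512) - 1 + 12 = 21 (2MB), limit = 2MB / 8 = 256K buckets,
	 * and the final order = fls(256K) - 1 = 18, so each per-protocol
	 * hash may grow to at most 2^18 buckets.
	 */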
3072 
3073 	for (i = 0; i < MAX_LINKS; i++) {
3074 		struct nl_portid_hash *hash = &nl_table[i].hash;
3075 
3076 		hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
3077 		if (!hash->table) {
3078 			while (i-- > 0)
3079 				nl_portid_hash_free(nl_table[i].hash.table,
3080 						 1 * sizeof(*hash->table));
3081 			kfree(nl_table);
3082 			goto panic;
3083 		}
3084 		hash->max_shift = order;
3085 		hash->shift = 0;
3086 		hash->mask = 0;
3087 		hash->rehash_time = jiffies;
3088 
3089 		nl_table[i].compare = netlink_compare;
3090 	}
3091 
3092 	INIT_LIST_HEAD(&netlink_tap_all);
3093 
3094 	netlink_add_usersock_entry();
3095 
3096 	sock_register(&netlink_family_ops);
3097 	register_pernet_subsys(&netlink_net_ops);
3098 	/* The netlink device handler may be needed early. */
3099 	rtnetlink_init();
3100 out:
3101 	return err;
3102 panic:
3103 	panic("netlink_init: Cannot allocate nl_table\n");
3104 }
3105 
3106 core_initcall(netlink_proto_init);
3107