1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		PACKET - implements raw packet sockets.
7  *
8  * Authors:	Ross Biro
9  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
11  *
12  * Fixes:
13  *		Alan Cox	:	verify_area() now used correctly
14  *		Alan Cox	:	new skbuff lists, look ma no backlogs!
15  *		Alan Cox	:	tidied skbuff lists.
16  *		Alan Cox	:	Now uses generic datagram routines I
17  *					added. Also fixed the peek/read crash
18  *					from all old Linux datagram code.
19  *		Alan Cox	:	Uses the improved datagram code.
20  *		Alan Cox	:	Added NULL's for socket options.
21  *		Alan Cox	:	Re-commented the code.
22  *		Alan Cox	:	Use new kernel side addressing
23  *		Rob Janssen	:	Correct MTU usage.
24  *		Dave Platt	:	Counter leaks caused by incorrect
25  *					interrupt locking and some slightly
26  *					dubious gcc output. Can you read
27  *					compiler: it said _VOLATILE_
28  *	Richard Kooijman	:	Timestamp fixes.
29  *		Alan Cox	:	New buffers. Use sk->mac.raw.
30  *		Alan Cox	:	sendmsg/recvmsg support.
31  *		Alan Cox	:	Protocol setting support
32  *	Alexey Kuznetsov	:	Untied from IPv4 stack.
33  *	Cyrus Durgin		:	Fixed kerneld for kmod.
34  *	Michal Ostrowski        :       Module initialization cleanup.
35  *         Ulises Alonso        :       Frame number limit removal and
36  *                                      packet_set_ring memory leak.
37  *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
38  *					The convention is that longer addresses
39  *					will simply extend the hardware address
40  *					byte arrays at the end of sockaddr_ll
41  *					and packet_mreq.
42  *		Johann Baudy	:	Added TX RING.
43  *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
44  *					layer.
45  *					Copyright (C) 2011, <lokec@ccs.neu.edu>
46  *
47  *
48  *		This program is free software; you can redistribute it and/or
49  *		modify it under the terms of the GNU General Public License
50  *		as published by the Free Software Foundation; either version
51  *		2 of the License, or (at your option) any later version.
52  *
53  */
54 
55 #include <linux/types.h>
56 #include <linux/mm.h>
57 #include <linux/capability.h>
58 #include <linux/fcntl.h>
59 #include <linux/socket.h>
60 #include <linux/in.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/if_packet.h>
64 #include <linux/wireless.h>
65 #include <linux/kernel.h>
66 #include <linux/kmod.h>
67 #include <linux/slab.h>
68 #include <linux/vmalloc.h>
69 #include <net/net_namespace.h>
70 #include <net/ip.h>
71 #include <net/protocol.h>
72 #include <linux/skbuff.h>
73 #include <net/sock.h>
74 #include <linux/errno.h>
75 #include <linux/timer.h>
76 #include <asm/uaccess.h>
77 #include <asm/ioctls.h>
78 #include <asm/page.h>
79 #include <asm/cacheflush.h>
80 #include <asm/io.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/poll.h>
84 #include <linux/module.h>
85 #include <linux/init.h>
86 #include <linux/mutex.h>
87 #include <linux/if_vlan.h>
88 #include <linux/virtio_net.h>
89 #include <linux/errqueue.h>
90 #include <linux/net_tstamp.h>
91 #include <linux/percpu.h>
92 #ifdef CONFIG_INET
93 #include <net/inet_common.h>
94 #endif
95 
96 #include "internal.h"
97 
98 /*
99    Assumptions:
100    - if a device has no dev->hard_header routine, it adds and removes the ll
101      header inside itself. In this case the ll header is invisible outside
102      of the device, but higher levels should still reserve dev->hard_header_len.
103      Some devices are clever enough to reallocate the skb when the header
104      does not fit into the reserved space (tunnels); others simply cannot
105      (PPP).
106    - a packet socket receives packets with the ll header already pulled,
107      so SOCK_RAW must push it back.
108 
109 On receive:
110 -----------
111 
112 Incoming, dev->hard_header!=NULL
113    mac_header -> ll header
114    data       -> data
115 
116 Outgoing, dev->hard_header!=NULL
117    mac_header -> ll header
118    data       -> ll header
119 
120 Incoming, dev->hard_header==NULL
121    mac_header -> UNKNOWN position. It very likely points to the ll
122 		 header.  PPP does this, which is wrong because it introduces
123 		 asymmetry between the rx and tx paths.
124    data       -> data
125 
126 Outgoing, dev->hard_header==NULL
127    mac_header -> data. ll header is still not built!
128    data       -> data
129 
130 Summary
131   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
132 
133 
134 On transmit:
135 ------------
136 
137 dev->hard_header != NULL
138    mac_header -> ll header
139    data       -> ll header
140 
141 dev->hard_header == NULL (ll header is added by device, we cannot control it)
142    mac_header -> data
143    data       -> data
144 
145    We should set nh.raw on output to the correct position;
146    the packet classifier depends on it.
147  */
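
/*
 * For illustration only (assumed user-space sketch, not part of this
 * file): the rx geometry above is why SOCK_RAW hands the caller the
 * full frame including the ll header, while SOCK_DGRAM strips it and
 * reports the ll info via sockaddr_ll instead:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	unsigned char buf[2048];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	// SOCK_RAW: buf[0..13] is the ethernet header;
 *	// SOCK_DGRAM: the same recv() would start at the network header.
 */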
148 
149 /* Private packet socket structures. */
150 
151 /* identical to struct packet_mreq except it has
152  * a longer address field.
153  */
154 struct packet_mreq_max {
155 	int		mr_ifindex;
156 	unsigned short	mr_type;
157 	unsigned short	mr_alen;
158 	unsigned char	mr_address[MAX_ADDR_LEN];
159 };
160 
161 union tpacket_uhdr {
162 	struct tpacket_hdr  *h1;
163 	struct tpacket2_hdr *h2;
164 	struct tpacket3_hdr *h3;
165 	void *raw;
166 };
167 
168 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
169 		int closing, int tx_ring);
170 
171 #define V3_ALIGNMENT	(8)
172 
173 #define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
174 
175 #define BLK_PLUS_PRIV(sz_of_priv) \
176 	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
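
/* Worked example (illustrative): with tp_sizeof_priv == 13,
 * ALIGN(13, V3_ALIGNMENT) == 16, so the first frame of a block lands
 * BLK_HDR_LEN + 16 bytes past the block start (see prb_open_block()).
 */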
177 
178 #define PGV_FROM_VMALLOC 1
179 
180 #define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
181 #define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
182 #define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
183 #define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
184 #define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
185 #define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
186 #define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
187 
188 struct packet_sock;
189 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
190 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
191 		       struct packet_type *pt, struct net_device *orig_dev);
192 
193 static void *packet_previous_frame(struct packet_sock *po,
194 		struct packet_ring_buffer *rb,
195 		int status);
196 static void packet_increment_head(struct packet_ring_buffer *buff);
197 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
198 			struct tpacket_block_desc *);
199 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
200 			struct packet_sock *);
201 static void prb_retire_current_block(struct tpacket_kbdq_core *,
202 		struct packet_sock *, unsigned int status);
203 static int prb_queue_frozen(struct tpacket_kbdq_core *);
204 static void prb_open_block(struct tpacket_kbdq_core *,
205 		struct tpacket_block_desc *);
206 static void prb_retire_rx_blk_timer_expired(unsigned long);
207 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
208 static void prb_init_blk_timer(struct packet_sock *,
209 		struct tpacket_kbdq_core *,
210 		void (*func) (unsigned long));
211 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
212 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
213 		struct tpacket3_hdr *);
214 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
215 		struct tpacket3_hdr *);
216 static void packet_flush_mclist(struct sock *sk);
217 
218 struct packet_skb_cb {
219 	union {
220 		struct sockaddr_pkt pkt;
221 		union {
222 			/* Trick: alias the skb's original length with
223 			 * ll.sll_family and ll.sll_protocol in order
224 			 * to save room.
225 			 */
226 			unsigned int origlen;
227 			struct sockaddr_ll ll;
228 		};
229 	} sa;
230 };
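
/* The alias above works because sll_family and sll_protocol are the first
 * two fields of sockaddr_ll and together span exactly sizeof(origlen)
 * bytes; packet_recvmsg() rewrites both before copying to user-space.
 */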
231 
232 #define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
233 
234 #define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
235 #define GET_PBLOCK_DESC(x, bid)	\
236 	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
237 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
238 	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
239 #define GET_NEXT_PRB_BLK_NUM(x) \
240 	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
241 	((x)->kactive_blk_num+1) : 0)
242 
243 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
244 static void __fanout_link(struct sock *sk, struct packet_sock *po);
245 
246 static int packet_direct_xmit(struct sk_buff *skb)
247 {
248 	struct net_device *dev = skb->dev;
249 	netdev_features_t features;
250 	struct netdev_queue *txq;
251 	int ret = NETDEV_TX_BUSY;
252 
253 	if (unlikely(!netif_running(dev) ||
254 		     !netif_carrier_ok(dev)))
255 		goto drop;
256 
257 	features = netif_skb_features(skb);
258 	if (skb_needs_linearize(skb, features) &&
259 	    __skb_linearize(skb))
260 		goto drop;
261 
262 	txq = skb_get_tx_queue(dev, skb);
263 
264 	local_bh_disable();
265 
266 	HARD_TX_LOCK(dev, txq, smp_processor_id());
267 	if (!netif_xmit_frozen_or_drv_stopped(txq))
268 		ret = netdev_start_xmit(skb, dev, txq, false);
269 	HARD_TX_UNLOCK(dev, txq);
270 
271 	local_bh_enable();
272 
273 	if (!dev_xmit_complete(ret))
274 		kfree_skb(skb);
275 
276 	return ret;
277 drop:
278 	atomic_long_inc(&dev->tx_dropped);
279 	kfree_skb(skb);
280 	return NET_XMIT_DROP;
281 }
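
/*
 * For context (assumed user-space sketch): this path is used only when
 * the socket opted out of the qdisc layer, e.g.
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 *
 * which makes po->xmit point at packet_direct_xmit() instead of
 * dev_queue_xmit(); see packet_use_direct_xmit() below.
 */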
282 
283 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
284 {
285 	struct net_device *dev;
286 
287 	rcu_read_lock();
288 	dev = rcu_dereference(po->cached_dev);
289 	if (likely(dev))
290 		dev_hold(dev);
291 	rcu_read_unlock();
292 
293 	return dev;
294 }
295 
296 static void packet_cached_dev_assign(struct packet_sock *po,
297 				     struct net_device *dev)
298 {
299 	rcu_assign_pointer(po->cached_dev, dev);
300 }
301 
302 static void packet_cached_dev_reset(struct packet_sock *po)
303 {
304 	RCU_INIT_POINTER(po->cached_dev, NULL);
305 }
306 
307 static bool packet_use_direct_xmit(const struct packet_sock *po)
308 {
309 	return po->xmit == packet_direct_xmit;
310 }
311 
312 static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
313 {
314 	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
315 }
316 
317 static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
318 {
319 	const struct net_device_ops *ops = dev->netdev_ops;
320 	u16 queue_index;
321 
322 	if (ops->ndo_select_queue) {
323 		queue_index = ops->ndo_select_queue(dev, skb, NULL,
324 						    __packet_pick_tx_queue);
325 		queue_index = netdev_cap_txqueue(dev, queue_index);
326 	} else {
327 		queue_index = __packet_pick_tx_queue(dev, skb);
328 	}
329 
330 	skb_set_queue_mapping(skb, queue_index);
331 }
332 
333 /* register_prot_hook must be invoked with the po->bind_lock held,
334  * or from a context in which asynchronous accesses to the packet
335  * socket is not possible (packet_create()).
336  */
337 static void register_prot_hook(struct sock *sk)
338 {
339 	struct packet_sock *po = pkt_sk(sk);
340 
341 	if (!po->running) {
342 		if (po->fanout)
343 			__fanout_link(sk, po);
344 		else
345 			dev_add_pack(&po->prot_hook);
346 
347 		sock_hold(sk);
348 		po->running = 1;
349 	}
350 }
351 
352 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
353  * held.   If the sync parameter is true, we will temporarily drop
354  * the po->bind_lock and do a synchronize_net to make sure no
355  * asynchronous packet processing paths still refer to the elements
356  * of po->prot_hook.  If the sync parameter is false, it is the
357  * caller's responsibility to take care of this.
358  */
359 static void __unregister_prot_hook(struct sock *sk, bool sync)
360 {
361 	struct packet_sock *po = pkt_sk(sk);
362 
363 	po->running = 0;
364 
365 	if (po->fanout)
366 		__fanout_unlink(sk, po);
367 	else
368 		__dev_remove_pack(&po->prot_hook);
369 
370 	__sock_put(sk);
371 
372 	if (sync) {
373 		spin_unlock(&po->bind_lock);
374 		synchronize_net();
375 		spin_lock(&po->bind_lock);
376 	}
377 }
378 
379 static void unregister_prot_hook(struct sock *sk, bool sync)
380 {
381 	struct packet_sock *po = pkt_sk(sk);
382 
383 	if (po->running)
384 		__unregister_prot_hook(sk, sync);
385 }
386 
387 static inline struct page * __pure pgv_to_page(void *addr)
388 {
389 	if (is_vmalloc_addr(addr))
390 		return vmalloc_to_page(addr);
391 	return virt_to_page(addr);
392 }
393 
394 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
395 {
396 	union tpacket_uhdr h;
397 
398 	h.raw = frame;
399 	switch (po->tp_version) {
400 	case TPACKET_V1:
401 		h.h1->tp_status = status;
402 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
403 		break;
404 	case TPACKET_V2:
405 		h.h2->tp_status = status;
406 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
407 		break;
408 	case TPACKET_V3:
409 	default:
410 		WARN(1, "TPACKET version not supported.\n");
411 		BUG();
412 	}
413 
414 	smp_wmb();
415 }
416 
417 static int __packet_get_status(struct packet_sock *po, void *frame)
418 {
419 	union tpacket_uhdr h;
420 
421 	smp_rmb();
422 
423 	h.raw = frame;
424 	switch (po->tp_version) {
425 	case TPACKET_V1:
426 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
427 		return h.h1->tp_status;
428 	case TPACKET_V2:
429 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
430 		return h.h2->tp_status;
431 	case TPACKET_V3:
432 	default:
433 		WARN(1, "TPACKET version not supported.\n");
434 		BUG();
435 		return 0;
436 	}
437 }
438 
439 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
440 				   unsigned int flags)
441 {
442 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
443 
444 	if (shhwtstamps &&
445 	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
446 	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
447 		return TP_STATUS_TS_RAW_HARDWARE;
448 
449 	if (ktime_to_timespec_cond(skb->tstamp, ts))
450 		return TP_STATUS_TS_SOFTWARE;
451 
452 	return 0;
453 }
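
/*
 * The flags tested above come from the PACKET_TIMESTAMP socket option;
 * a user-space sketch (assumed code) requesting hardware stamps:
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 *
 * Without it we fall back to the software timestamp in skb->tstamp.
 */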
454 
455 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
456 				    struct sk_buff *skb)
457 {
458 	union tpacket_uhdr h;
459 	struct timespec ts;
460 	__u32 ts_status;
461 
462 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
463 		return 0;
464 
465 	h.raw = frame;
466 	switch (po->tp_version) {
467 	case TPACKET_V1:
468 		h.h1->tp_sec = ts.tv_sec;
469 		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
470 		break;
471 	case TPACKET_V2:
472 		h.h2->tp_sec = ts.tv_sec;
473 		h.h2->tp_nsec = ts.tv_nsec;
474 		break;
475 	case TPACKET_V3:
476 	default:
477 		WARN(1, "TPACKET version not supported.\n");
478 		BUG();
479 	}
480 
481 	/* one flush is safe, as both fields always lie on the same cacheline */
482 	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
483 	smp_wmb();
484 
485 	return ts_status;
486 }
487 
488 static void *packet_lookup_frame(struct packet_sock *po,
489 		struct packet_ring_buffer *rb,
490 		unsigned int position,
491 		int status)
492 {
493 	unsigned int pg_vec_pos, frame_offset;
494 	union tpacket_uhdr h;
495 
496 	pg_vec_pos = position / rb->frames_per_block;
497 	frame_offset = position % rb->frames_per_block;
498 
499 	h.raw = rb->pg_vec[pg_vec_pos].buffer +
500 		(frame_offset * rb->frame_size);
501 
502 	if (status != __packet_get_status(po, h.raw))
503 		return NULL;
504 
505 	return h.raw;
506 }
507 
508 static void *packet_current_frame(struct packet_sock *po,
509 		struct packet_ring_buffer *rb,
510 		int status)
511 {
512 	return packet_lookup_frame(po, rb, rb->head, status);
513 }
514 
515 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
516 {
517 	del_timer_sync(&pkc->retire_blk_timer);
518 }
519 
520 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
521 		int tx_ring,
522 		struct sk_buff_head *rb_queue)
523 {
524 	struct tpacket_kbdq_core *pkc;
525 
526 	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
527 			GET_PBDQC_FROM_RB(&po->rx_ring);
528 
529 	spin_lock_bh(&rb_queue->lock);
530 	pkc->delete_blk_timer = 1;
531 	spin_unlock_bh(&rb_queue->lock);
532 
533 	prb_del_retire_blk_timer(pkc);
534 }
535 
536 static void prb_init_blk_timer(struct packet_sock *po,
537 		struct tpacket_kbdq_core *pkc,
538 		void (*func) (unsigned long))
539 {
540 	init_timer(&pkc->retire_blk_timer);
541 	pkc->retire_blk_timer.data = (long)po;
542 	pkc->retire_blk_timer.function = func;
543 	pkc->retire_blk_timer.expires = jiffies;
544 }
545 
546 static void prb_setup_retire_blk_timer(struct packet_sock *po)
547 {
548 	struct tpacket_kbdq_core *pkc;
549 
550 	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
551 	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
552 }
553 
554 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
555 				int blk_size_in_bytes)
556 {
557 	struct net_device *dev;
558 	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
559 	struct ethtool_cmd ecmd;
560 	int err;
561 	u32 speed;
562 
563 	rtnl_lock();
564 	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
565 	if (unlikely(!dev)) {
566 		rtnl_unlock();
567 		return DEFAULT_PRB_RETIRE_TOV;
568 	}
569 	err = __ethtool_get_settings(dev, &ecmd);
570 	speed = ethtool_cmd_speed(&ecmd);
571 	rtnl_unlock();
572 	if (!err) {
573 		/*
574 		 * If the link speed is that slow, you don't really
575 		 * need to worry about perf anyway.
576 		 */
577 		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
578 			return DEFAULT_PRB_RETIRE_TOV;
579 		} else {
580 			msec = 1;
581 			div = speed / 1000;
582 		}
583 	}
584 
585 	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
586 
587 	if (div)
588 		mbits /= div;
589 
590 	tmo = mbits * msec;
591 
592 	if (div)
593 		return tmo+1;
594 	return tmo;
595 }
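
/*
 * Worked example (illustrative numbers only): a 4MB block on a 10Gb/s
 * link gives msec = 1 and div = 10000/1000 = 10; mbits = (4MB * 8) /
 * (1024 * 1024) = 32, then 32 / 10 = 3 in integer math, so tmo = 3 and
 * we return 3 + 1 = 4 ms. The same block on a 1Gb/s link returns 33 ms.
 */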
596 
597 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
598 			union tpacket_req_u *req_u)
599 {
600 	p1->feature_req_word = req_u->req3.tp_feature_req_word;
601 }
602 
603 static void init_prb_bdqc(struct packet_sock *po,
604 			struct packet_ring_buffer *rb,
605 			struct pgv *pg_vec,
606 			union tpacket_req_u *req_u)
607 {
608 	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
609 	struct tpacket_block_desc *pbd;
610 
611 	memset(p1, 0x0, sizeof(*p1));
612 
613 	p1->knxt_seq_num = 1;
614 	p1->pkbdq = pg_vec;
615 	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
616 	p1->pkblk_start	= pg_vec[0].buffer;
617 	p1->kblk_size = req_u->req3.tp_block_size;
618 	p1->knum_blocks	= req_u->req3.tp_block_nr;
619 	p1->hdrlen = po->tp_hdrlen;
620 	p1->version = po->tp_version;
621 	p1->last_kactive_blk_num = 0;
622 	po->stats.stats3.tp_freeze_q_cnt = 0;
623 	if (req_u->req3.tp_retire_blk_tov)
624 		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
625 	else
626 		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
627 						req_u->req3.tp_block_size);
628 	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
629 	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
630 
631 	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
632 	prb_init_ft_ops(p1, req_u);
633 	prb_setup_retire_blk_timer(po);
634 	prb_open_block(p1, pbd);
635 }
636 
637 /*  Do NOT update the last_blk_num first.
638  *  Assumes sk_buff_head lock is held.
639  */
640 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
641 {
642 	mod_timer(&pkc->retire_blk_timer,
643 			jiffies + pkc->tov_in_jiffies);
644 	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
645 }
646 
647 /*
648  * Timer logic:
649  * 1) We refresh the timer only when we open a block.
650  *    By doing this we don't waste cycles refreshing the timer
651  *    on a packet-by-packet basis.
652  *
653  * With a 1MB block-size, on a 1Gbps line, it will take
654  * i) ~8 ms to fill a block + ii) memcpy etc.
655  * In this cut we are not accounting for the memcpy time.
656  *
657  * So, if the user sets the 'tmo' to 10ms then the timer
658  * will never fire while the block is still getting filled
659  * (which is what we want). However, the user could choose
660  * to close a block early and that's fine.
661  *
662  * But when the timer does fire, we check whether or not to refresh it.
663  * Since the tmo granularity is in msecs, it is not too expensive
664  * to refresh the timer, let's say every '8' msecs.
665  * Either the user can set the 'tmo' or we can derive it based on
666  * a) line-speed and b) block-size.
667  * prb_calc_retire_blk_tmo() calculates the tmo.
668  *
669  */
670 static void prb_retire_rx_blk_timer_expired(unsigned long data)
671 {
672 	struct packet_sock *po = (struct packet_sock *)data;
673 	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
674 	unsigned int frozen;
675 	struct tpacket_block_desc *pbd;
676 
677 	spin_lock(&po->sk.sk_receive_queue.lock);
678 
679 	frozen = prb_queue_frozen(pkc);
680 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
681 
682 	if (unlikely(pkc->delete_blk_timer))
683 		goto out;
684 
685 	/* We only need to plug the race when the block is partially filled.
686 	 * tpacket_rcv:
687 	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
688 	 *		copy_bits() is in progress ...
689 	 *		timer fires on other cpu:
690 	 *		we can't retire the current block because copy_bits
691 	 *		is in progress.
692 	 *
693 	 */
694 	if (BLOCK_NUM_PKTS(pbd)) {
695 		while (atomic_read(&pkc->blk_fill_in_prog)) {
696 			/* Waiting for skb_copy_bits to finish... */
697 			cpu_relax();
698 		}
699 	}
700 
701 	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
702 		if (!frozen) {
703 			if (!BLOCK_NUM_PKTS(pbd)) {
704 				/* An empty block. Just refresh the timer. */
705 				goto refresh_timer;
706 			}
707 			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
708 			if (!prb_dispatch_next_block(pkc, po))
709 				goto refresh_timer;
710 			else
711 				goto out;
712 		} else {
713 			/* Case 1. Queue was frozen because user-space was
714 			 *	   lagging behind.
715 			 */
716 			if (prb_curr_blk_in_use(pkc, pbd)) {
717 				/*
718 				 * Ok, user-space is still behind.
719 				 * So just refresh the timer.
720 				 */
721 				goto refresh_timer;
722 			} else {
723 			       /* Case 2. The queue was frozen, user-space
724 				* caught up, and now the link went idle && the
725 				* timer fired. We don't have a block to close,
726 				* so we open this block and restart the timer.
727 				* Opening a block thaws the queue and restarts
728 				* the timer; thawing/timer-refresh is a side effect.
729 				*/
730 				prb_open_block(pkc, pbd);
731 				goto out;
732 			}
733 		}
734 	}
735 
736 refresh_timer:
737 	_prb_refresh_rx_retire_blk_timer(pkc);
738 
739 out:
740 	spin_unlock(&po->sk.sk_receive_queue.lock);
741 }
742 
743 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
744 		struct tpacket_block_desc *pbd1, __u32 status)
745 {
746 	/* Flush everything minus the block header */
747 
748 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
749 	u8 *start, *end;
750 
751 	start = (u8 *)pbd1;
752 
753 	/* Skip the block header (we know the header WILL fit in 4K) */
754 	start += PAGE_SIZE;
755 
756 	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
757 	for (; start < end; start += PAGE_SIZE)
758 		flush_dcache_page(pgv_to_page(start));
759 
760 	smp_wmb();
761 #endif
762 
763 	/* Now update the block status. */
764 
765 	BLOCK_STATUS(pbd1) = status;
766 
767 	/* Flush the block header */
768 
769 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
770 	start = (u8 *)pbd1;
771 	flush_dcache_page(pgv_to_page(start));
772 
773 	smp_wmb();
774 #endif
775 }
776 
777 /*
778  * Side effects:
779  *
780  * 1) Flush the block.
781  * 2) Increment active_blk_num.
782  *
783  * Note: we DON'T refresh the timer on purpose,
784  *	because almost always the next block will be opened.
785  */
786 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
787 		struct tpacket_block_desc *pbd1,
788 		struct packet_sock *po, unsigned int stat)
789 {
790 	__u32 status = TP_STATUS_USER | stat;
791 
792 	struct tpacket3_hdr *last_pkt;
793 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
794 	struct sock *sk = &po->sk;
795 
796 	if (po->stats.stats3.tp_drops)
797 		status |= TP_STATUS_LOSING;
798 
799 	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
800 	last_pkt->tp_next_offset = 0;
801 
802 	/* Get the ts of the last pkt */
803 	if (BLOCK_NUM_PKTS(pbd1)) {
804 		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
805 		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
806 	} else {
807 		/* Ok, we tmo'd - so get the current time.
808 		 *
809 		 * It shouldn't really happen as we don't close empty
810 		 * blocks. See prb_retire_rx_blk_timer_expired().
811 		 */
812 		struct timespec ts;
813 		getnstimeofday(&ts);
814 		h1->ts_last_pkt.ts_sec = ts.tv_sec;
815 		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
816 	}
817 
818 	smp_wmb();
819 
820 	/* Flush the block */
821 	prb_flush_block(pkc1, pbd1, status);
822 
823 	sk->sk_data_ready(sk);
824 
825 	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
826 }
827 
828 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
829 {
830 	pkc->reset_pending_on_curr_blk = 0;
831 }
832 
833 /*
834  * Side effect of opening a block:
835  *
836  * 1) prb_queue is thawed.
837  * 2) retire_blk_timer is refreshed.
838  *
839  */
840 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
841 	struct tpacket_block_desc *pbd1)
842 {
843 	struct timespec ts;
844 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
845 
846 	smp_rmb();
847 
848 	/* We could have just memset this, but we would lose the
849 	 * flexibility of making the priv area sticky.
850 	 */
851 
852 	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
853 	BLOCK_NUM_PKTS(pbd1) = 0;
854 	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
855 
856 	getnstimeofday(&ts);
857 
858 	h1->ts_first_pkt.ts_sec = ts.tv_sec;
859 	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
860 
861 	pkc1->pkblk_start = (char *)pbd1;
862 	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
863 
864 	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
865 	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
866 
867 	pbd1->version = pkc1->version;
868 	pkc1->prev = pkc1->nxt_offset;
869 	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
870 
871 	prb_thaw_queue(pkc1);
872 	_prb_refresh_rx_retire_blk_timer(pkc1);
873 
874 	smp_wmb();
875 }
876 
877 /*
878  * Queue freeze logic:
879  * 1) Assume tp_block_nr = 8 blocks.
880  * 2) At time 't0', user opens Rx ring.
881  * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
882  * 4) user-space is either sleeping or processing block '0'.
883  * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
884  *    it will close block-7, loop around, and try to fill block '0'.
885  *    call-flow:
886  *    __packet_lookup_frame_in_block
887  *      prb_retire_current_block()
888  *      prb_dispatch_next_block()
889  *        |->(BLOCK_STATUS == USER) evaluates to true
890  *    5.1) Since block-0 is currently in-use, we just freeze the queue.
891  * 6) Now there are two cases:
892  *    6.1) Link goes idle right after the queue is frozen.
893  *         But remember, the last open_block() refreshed the timer.
894  *         When this timer expires, it will refresh itself so that we can
895  *         re-open block-0 in the near future.
896  *    6.2) Link is busy and keeps on receiving packets. This is a simple
897  *         case and __packet_lookup_frame_in_block will check if block-0
898  *         is free and can now be re-used.
899  */
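
/*
 * For reference, a minimal user-space consumer sketch (assumed code;
 * ring, req, pfd and walk_block() stand in for the usual
 * PACKET_RX_RING + mmap() setup): handing block_status back to
 * TP_STATUS_KERNEL is what lets a frozen queue thaw in prb_open_block().
 *
 *	for (i = 0; ; i = (i + 1) % req.tp_block_nr) {
 *		struct tpacket_block_desc *pbd = (void *)
 *			(ring + i * req.tp_block_size);
 *
 *		while (!(pbd->hdr.bh1.block_status & TP_STATUS_USER))
 *			poll(&pfd, 1, -1);
 *		walk_block(pbd);		// consume num_pkts packets
 *		__sync_synchronize();		// order reads before release
 *		pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;
 *	}
 */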
900 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
901 				  struct packet_sock *po)
902 {
903 	pkc->reset_pending_on_curr_blk = 1;
904 	po->stats.stats3.tp_freeze_q_cnt++;
905 }
906 
907 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
908 
909 /*
910  * If the next block is free then we will dispatch it
911  * and return a good offset.
912  * Else, we will freeze the queue and return NULL.
913  * So, the caller must check the return value.
914  */
915 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
916 		struct packet_sock *po)
917 {
918 	struct tpacket_block_desc *pbd;
919 
920 	smp_rmb();
921 
922 	/* 1. Get current block num */
923 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
924 
925 	/* 2. If this block is currently in_use then freeze the queue */
926 	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
927 		prb_freeze_queue(pkc, po);
928 		return NULL;
929 	}
930 
931 	/*
932 	 * 3.
933 	 * open this block and return the offset where the first packet
934 	 * needs to get stored.
935 	 */
936 	prb_open_block(pkc, pbd);
937 	return (void *)pkc->nxt_offset;
938 }
939 
940 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
941 		struct packet_sock *po, unsigned int status)
942 {
943 	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
944 
945 	/* retire/close the current block */
946 	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
947 		/*
948 		 * Plug the case where copy_bits() is in progress on
949 		 * cpu-0, tpacket_rcv() got invoked on cpu-1, didn't
950 		 * have space to copy the pkt in the current block and
951 		 * called prb_retire_current_block().
952 		 *
953 		 * We don't need to worry about the TMO case because
954 		 * the timer-handler has already handled it.
955 		 */
956 		if (!(status & TP_STATUS_BLK_TMO)) {
957 			while (atomic_read(&pkc->blk_fill_in_prog)) {
958 				/* Waiting for skb_copy_bits to finish... */
959 				cpu_relax();
960 			}
961 		}
962 		prb_close_block(pkc, pbd, po, status);
963 		return;
964 	}
965 }
966 
967 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
968 				      struct tpacket_block_desc *pbd)
969 {
970 	return TP_STATUS_USER & BLOCK_STATUS(pbd);
971 }
972 
973 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
974 {
975 	return pkc->reset_pending_on_curr_blk;
976 }
977 
978 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
979 {
980 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
981 	atomic_dec(&pkc->blk_fill_in_prog);
982 }
983 
984 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
985 			struct tpacket3_hdr *ppd)
986 {
987 	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
988 }
989 
990 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
991 			struct tpacket3_hdr *ppd)
992 {
993 	ppd->hv1.tp_rxhash = 0;
994 }
995 
996 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
997 			struct tpacket3_hdr *ppd)
998 {
999 	if (skb_vlan_tag_present(pkc->skb)) {
1000 		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1001 		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1002 		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1003 	} else {
1004 		ppd->hv1.tp_vlan_tci = 0;
1005 		ppd->hv1.tp_vlan_tpid = 0;
1006 		ppd->tp_status = TP_STATUS_AVAILABLE;
1007 	}
1008 }
1009 
1010 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1011 			struct tpacket3_hdr *ppd)
1012 {
1013 	ppd->hv1.tp_padding = 0;
1014 	prb_fill_vlan_info(pkc, ppd);
1015 
1016 	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1017 		prb_fill_rxhash(pkc, ppd);
1018 	else
1019 		prb_clear_rxhash(pkc, ppd);
1020 }
1021 
1022 static void prb_fill_curr_block(char *curr,
1023 				struct tpacket_kbdq_core *pkc,
1024 				struct tpacket_block_desc *pbd,
1025 				unsigned int len)
1026 {
1027 	struct tpacket3_hdr *ppd;
1028 
1029 	ppd  = (struct tpacket3_hdr *)curr;
1030 	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1031 	pkc->prev = curr;
1032 	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1033 	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1034 	BLOCK_NUM_PKTS(pbd) += 1;
1035 	atomic_inc(&pkc->blk_fill_in_prog);
1036 	prb_run_all_ft_ops(pkc, ppd);
1037 }
1038 
1039 /* Assumes the caller holds sk->sk_receive_queue.lock */
1040 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1041 					    struct sk_buff *skb,
1042 						int status,
1043 					    unsigned int len
1044 					    )
1045 {
1046 	struct tpacket_kbdq_core *pkc;
1047 	struct tpacket_block_desc *pbd;
1048 	char *curr, *end;
1049 
1050 	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1051 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1052 
1053 	/* Queue is frozen when user space is lagging behind */
1054 	if (prb_queue_frozen(pkc)) {
1055 		/*
1056 		 * Check if the last block, which caused the queue to freeze,
1057 		 * is still in_use by user-space.
1058 		 */
1059 		if (prb_curr_blk_in_use(pkc, pbd)) {
1060 			/* Can't record this packet */
1061 			return NULL;
1062 		} else {
1063 			/*
1064 			 * Ok, the block was released by user-space.
1065 			 * Now let's open that block.
1066 			 * opening a block also thaws the queue.
1067 			 * Thawing is a side effect.
1068 			 */
1069 			prb_open_block(pkc, pbd);
1070 		}
1071 	}
1072 
1073 	smp_mb();
1074 	curr = pkc->nxt_offset;
1075 	pkc->skb = skb;
1076 	end = (char *)pbd + pkc->kblk_size;
1077 
1078 	/* first try the current block */
1079 	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1080 		prb_fill_curr_block(curr, pkc, pbd, len);
1081 		return (void *)curr;
1082 	}
1083 
1084 	/* Ok, close the current block */
1085 	prb_retire_current_block(pkc, po, 0);
1086 
1087 	/* Now, try to dispatch the next block */
1088 	curr = (char *)prb_dispatch_next_block(pkc, po);
1089 	if (curr) {
1090 		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1091 		prb_fill_curr_block(curr, pkc, pbd, len);
1092 		return (void *)curr;
1093 	}
1094 
1095 	/*
1096 	 * No free blocks are available; user-space hasn't caught up yet.
1097 	 * The queue was just frozen and now this packet will get dropped.
1098 	 */
1099 	return NULL;
1100 }
1101 
1102 static void *packet_current_rx_frame(struct packet_sock *po,
1103 					    struct sk_buff *skb,
1104 					    int status, unsigned int len)
1105 {
1106 	char *curr = NULL;
1107 	switch (po->tp_version) {
1108 	case TPACKET_V1:
1109 	case TPACKET_V2:
1110 		curr = packet_lookup_frame(po, &po->rx_ring,
1111 					po->rx_ring.head, status);
1112 		return curr;
1113 	case TPACKET_V3:
1114 		return __packet_lookup_frame_in_block(po, skb, status, len);
1115 	default:
1116 		WARN(1, "TPACKET version not supported\n");
1117 		BUG();
1118 		return NULL;
1119 	}
1120 }
1121 
1122 static void *prb_lookup_block(struct packet_sock *po,
1123 				     struct packet_ring_buffer *rb,
1124 				     unsigned int idx,
1125 				     int status)
1126 {
1127 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1128 	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1129 
1130 	if (status != BLOCK_STATUS(pbd))
1131 		return NULL;
1132 	return pbd;
1133 }
1134 
1135 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1136 {
1137 	unsigned int prev;
1138 	if (rb->prb_bdqc.kactive_blk_num)
1139 		prev = rb->prb_bdqc.kactive_blk_num-1;
1140 	else
1141 		prev = rb->prb_bdqc.knum_blocks-1;
1142 	return prev;
1143 }
1144 
1145 /* Assumes the caller holds the rx_queue.lock */
1146 static void *__prb_previous_block(struct packet_sock *po,
1147 					 struct packet_ring_buffer *rb,
1148 					 int status)
1149 {
1150 	unsigned int previous = prb_previous_blk_num(rb);
1151 	return prb_lookup_block(po, rb, previous, status);
1152 }
1153 
1154 static void *packet_previous_rx_frame(struct packet_sock *po,
1155 					     struct packet_ring_buffer *rb,
1156 					     int status)
1157 {
1158 	if (po->tp_version <= TPACKET_V2)
1159 		return packet_previous_frame(po, rb, status);
1160 
1161 	return __prb_previous_block(po, rb, status);
1162 }
1163 
1164 static void packet_increment_rx_head(struct packet_sock *po,
1165 					    struct packet_ring_buffer *rb)
1166 {
1167 	switch (po->tp_version) {
1168 	case TPACKET_V1:
1169 	case TPACKET_V2:
1170 		return packet_increment_head(rb);
1171 	case TPACKET_V3:
1172 	default:
1173 		WARN(1, "TPACKET version not supported.\n");
1174 		BUG();
1175 		return;
1176 	}
1177 }
1178 
1179 static void *packet_previous_frame(struct packet_sock *po,
1180 		struct packet_ring_buffer *rb,
1181 		int status)
1182 {
1183 	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1184 	return packet_lookup_frame(po, rb, previous, status);
1185 }
1186 
1187 static void packet_increment_head(struct packet_ring_buffer *buff)
1188 {
1189 	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1190 }
1191 
1192 static void packet_inc_pending(struct packet_ring_buffer *rb)
1193 {
1194 	this_cpu_inc(*rb->pending_refcnt);
1195 }
1196 
1197 static void packet_dec_pending(struct packet_ring_buffer *rb)
1198 {
1199 	this_cpu_dec(*rb->pending_refcnt);
1200 }
1201 
1202 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1203 {
1204 	unsigned int refcnt = 0;
1205 	int cpu;
1206 
1207 	/* We don't use pending refcount in rx_ring. */
1208 	if (rb->pending_refcnt == NULL)
1209 		return 0;
1210 
1211 	for_each_possible_cpu(cpu)
1212 		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1213 
1214 	return refcnt;
1215 }
1216 
1217 static int packet_alloc_pending(struct packet_sock *po)
1218 {
1219 	po->rx_ring.pending_refcnt = NULL;
1220 
1221 	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1222 	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1223 		return -ENOBUFS;
1224 
1225 	return 0;
1226 }
1227 
1228 static void packet_free_pending(struct packet_sock *po)
1229 {
1230 	free_percpu(po->tx_ring.pending_refcnt);
1231 }
1232 
1233 #define ROOM_POW_OFF	2
1234 #define ROOM_NONE	0x0
1235 #define ROOM_LOW	0x1
1236 #define ROOM_NORMAL	0x2
1237 
1238 static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
1239 {
1240 	int idx, len;
1241 
1242 	len = po->rx_ring.frame_max + 1;
1243 	idx = po->rx_ring.head;
1244 	if (pow_off)
1245 		idx += len >> pow_off;
1246 	if (idx >= len)
1247 		idx -= len;
1248 	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1249 }
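
/* Illustration: with ROOM_POW_OFF == 2 and, say, a 64-frame ring, the
 * probe above lands 64 >> 2 == 16 slots ahead of head; if that frame is
 * still TP_STATUS_KERNEL, at least a quarter of the ring is free.
 */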
1250 
1251 static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
1252 {
1253 	int idx, len;
1254 
1255 	len = po->rx_ring.prb_bdqc.knum_blocks;
1256 	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
1257 	if (pow_off)
1258 		idx += len >> pow_off;
1259 	if (idx >= len)
1260 		idx -= len;
1261 	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1262 }
1263 
1264 static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1265 {
1266 	struct sock *sk = &po->sk;
1267 	int ret = ROOM_NONE;
1268 
1269 	if (po->prot_hook.func != tpacket_rcv) {
1270 		int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1271 					  - (skb ? skb->truesize : 0);
1272 		if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
1273 			return ROOM_NORMAL;
1274 		else if (avail > 0)
1275 			return ROOM_LOW;
1276 		else
1277 			return ROOM_NONE;
1278 	}
1279 
1280 	if (po->tp_version == TPACKET_V3) {
1281 		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1282 			ret = ROOM_NORMAL;
1283 		else if (__tpacket_v3_has_room(po, 0))
1284 			ret = ROOM_LOW;
1285 	} else {
1286 		if (__tpacket_has_room(po, ROOM_POW_OFF))
1287 			ret = ROOM_NORMAL;
1288 		else if (__tpacket_has_room(po, 0))
1289 			ret = ROOM_LOW;
1290 	}
1291 
1292 	return ret;
1293 }
1294 
1295 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1296 {
1297 	int ret;
1298 	bool has_room;
1299 
1300 	spin_lock_bh(&po->sk.sk_receive_queue.lock);
1301 	ret = __packet_rcv_has_room(po, skb);
1302 	has_room = ret == ROOM_NORMAL;
1303 	if (po->pressure == has_room)
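	/* Update po->pressure only on transitions (set when we drop below
	 * ROOM_NORMAL, cleared once room recovers) so we don't dirty the
	 * cacheline on every packet.
	 */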
1304 		po->pressure = !has_room;
1305 	spin_unlock_bh(&po->sk.sk_receive_queue.lock);
1306 
1307 	return ret;
1308 }
1309 
1310 static void packet_sock_destruct(struct sock *sk)
1311 {
1312 	skb_queue_purge(&sk->sk_error_queue);
1313 
1314 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1315 	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1316 
1317 	if (!sock_flag(sk, SOCK_DEAD)) {
1318 		pr_err("Attempt to release alive packet socket: %p\n", sk);
1319 		return;
1320 	}
1321 
1322 	sk_refcnt_debug_dec(sk);
1323 }
1324 
1325 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1326 {
1327 	u32 rxhash;
1328 	int i, count = 0;
1329 
1330 	rxhash = skb_get_hash(skb);
1331 	for (i = 0; i < ROLLOVER_HLEN; i++)
1332 		if (po->rollover->history[i] == rxhash)
1333 			count++;
1334 
1335 	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
1336 	return count > (ROLLOVER_HLEN >> 1);
1337 }
1338 
1339 static unsigned int fanout_demux_hash(struct packet_fanout *f,
1340 				      struct sk_buff *skb,
1341 				      unsigned int num)
1342 {
1343 	return reciprocal_scale(skb_get_hash(skb), num);
1344 }
1345 
1346 static unsigned int fanout_demux_lb(struct packet_fanout *f,
1347 				    struct sk_buff *skb,
1348 				    unsigned int num)
1349 {
1350 	unsigned int val = atomic_inc_return(&f->rr_cur);
1351 
1352 	return val % num;
1353 }
1354 
1355 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1356 				     struct sk_buff *skb,
1357 				     unsigned int num)
1358 {
1359 	return smp_processor_id() % num;
1360 }
1361 
1362 static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1363 				     struct sk_buff *skb,
1364 				     unsigned int num)
1365 {
1366 	return prandom_u32_max(num);
1367 }
1368 
1369 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1370 					  struct sk_buff *skb,
1371 					  unsigned int idx, bool try_self,
1372 					  unsigned int num)
1373 {
1374 	struct packet_sock *po, *po_next, *po_skip = NULL;
1375 	unsigned int i, j, room = ROOM_NONE;
1376 
1377 	po = pkt_sk(f->arr[idx]);
1378 
1379 	if (try_self) {
1380 		room = packet_rcv_has_room(po, skb);
1381 		if (room == ROOM_NORMAL ||
1382 		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1383 			return idx;
1384 		po_skip = po;
1385 	}
1386 
1387 	i = j = min_t(int, po->rollover->sock, num - 1);
1388 	do {
1389 		po_next = pkt_sk(f->arr[i]);
1390 		if (po_next != po_skip && !po_next->pressure &&
1391 		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1392 			if (i != j)
1393 				po->rollover->sock = i;
1394 			atomic_long_inc(&po->rollover->num);
1395 			if (room == ROOM_LOW)
1396 				atomic_long_inc(&po->rollover->num_huge);
1397 			return i;
1398 		}
1399 
1400 		if (++i == num)
1401 			i = 0;
1402 	} while (i != j);
1403 
1404 	atomic_long_inc(&po->rollover->num_failed);
1405 	return idx;
1406 }
1407 
1408 static unsigned int fanout_demux_qm(struct packet_fanout *f,
1409 				    struct sk_buff *skb,
1410 				    unsigned int num)
1411 {
1412 	return skb_get_queue_mapping(skb) % num;
1413 }
1414 
1415 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1416 {
1417 	return f->flags & (flag >> 8);
1418 }
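
/* The PACKET_FANOUT_FLAG_* values live in the high byte of the u16
 * type_flags word; fanout_add() stores them shifted down, so e.g.
 * PACKET_FANOUT_FLAG_DEFRAG (0x8000) is kept in f->flags as 0x80.
 */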
1419 
1420 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1421 			     struct packet_type *pt, struct net_device *orig_dev)
1422 {
1423 	struct packet_fanout *f = pt->af_packet_priv;
1424 	unsigned int num = READ_ONCE(f->num_members);
1425 	struct packet_sock *po;
1426 	unsigned int idx;
1427 
1428 	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
1429 	    !num) {
1430 		kfree_skb(skb);
1431 		return 0;
1432 	}
1433 
1434 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1435 		skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1436 		if (!skb)
1437 			return 0;
1438 	}
1439 	switch (f->type) {
1440 	case PACKET_FANOUT_HASH:
1441 	default:
1442 		idx = fanout_demux_hash(f, skb, num);
1443 		break;
1444 	case PACKET_FANOUT_LB:
1445 		idx = fanout_demux_lb(f, skb, num);
1446 		break;
1447 	case PACKET_FANOUT_CPU:
1448 		idx = fanout_demux_cpu(f, skb, num);
1449 		break;
1450 	case PACKET_FANOUT_RND:
1451 		idx = fanout_demux_rnd(f, skb, num);
1452 		break;
1453 	case PACKET_FANOUT_QM:
1454 		idx = fanout_demux_qm(f, skb, num);
1455 		break;
1456 	case PACKET_FANOUT_ROLLOVER:
1457 		idx = fanout_demux_rollover(f, skb, 0, false, num);
1458 		break;
1459 	}
1460 
1461 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1462 		idx = fanout_demux_rollover(f, skb, idx, true, num);
1463 
1464 	po = pkt_sk(f->arr[idx]);
1465 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1466 }
1467 
1468 DEFINE_MUTEX(fanout_mutex);
1469 EXPORT_SYMBOL_GPL(fanout_mutex);
1470 static LIST_HEAD(fanout_list);
1471 
1472 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1473 {
1474 	struct packet_fanout *f = po->fanout;
1475 
1476 	spin_lock(&f->lock);
1477 	f->arr[f->num_members] = sk;
1478 	smp_wmb();
1479 	f->num_members++;
1480 	spin_unlock(&f->lock);
1481 }
1482 
1483 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1484 {
1485 	struct packet_fanout *f = po->fanout;
1486 	int i;
1487 
1488 	spin_lock(&f->lock);
1489 	for (i = 0; i < f->num_members; i++) {
1490 		if (f->arr[i] == sk)
1491 			break;
1492 	}
1493 	BUG_ON(i >= f->num_members);
1494 	f->arr[i] = f->arr[f->num_members - 1];
1495 	f->num_members--;
1496 	spin_unlock(&f->lock);
1497 }
1498 
1499 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1500 {
1501 	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
1502 		return true;
1503 
1504 	return false;
1505 }
1506 
1507 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1508 {
1509 	struct packet_sock *po = pkt_sk(sk);
1510 	struct packet_fanout *f, *match;
1511 	u8 type = type_flags & 0xff;
1512 	u8 flags = type_flags >> 8;
1513 	int err;
1514 
1515 	switch (type) {
1516 	case PACKET_FANOUT_ROLLOVER:
1517 		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1518 			return -EINVAL;
1519 	case PACKET_FANOUT_HASH:
1520 	case PACKET_FANOUT_LB:
1521 	case PACKET_FANOUT_CPU:
1522 	case PACKET_FANOUT_RND:
1523 	case PACKET_FANOUT_QM:
1524 		break;
1525 	default:
1526 		return -EINVAL;
1527 	}
1528 
1529 	if (!po->running)
1530 		return -EINVAL;
1531 
1532 	if (po->fanout)
1533 		return -EALREADY;
1534 
1535 	if (type == PACKET_FANOUT_ROLLOVER ||
1536 	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1537 		po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
1538 		if (!po->rollover)
1539 			return -ENOMEM;
1540 		atomic_long_set(&po->rollover->num, 0);
1541 		atomic_long_set(&po->rollover->num_huge, 0);
1542 		atomic_long_set(&po->rollover->num_failed, 0);
1543 	}
1544 
1545 	mutex_lock(&fanout_mutex);
1546 	match = NULL;
1547 	list_for_each_entry(f, &fanout_list, list) {
1548 		if (f->id == id &&
1549 		    read_pnet(&f->net) == sock_net(sk)) {
1550 			match = f;
1551 			break;
1552 		}
1553 	}
1554 	err = -EINVAL;
1555 	if (match && match->flags != flags)
1556 		goto out;
1557 	if (!match) {
1558 		err = -ENOMEM;
1559 		match = kzalloc(sizeof(*match), GFP_KERNEL);
1560 		if (!match)
1561 			goto out;
1562 		write_pnet(&match->net, sock_net(sk));
1563 		match->id = id;
1564 		match->type = type;
1565 		match->flags = flags;
1566 		atomic_set(&match->rr_cur, 0);
1567 		INIT_LIST_HEAD(&match->list);
1568 		spin_lock_init(&match->lock);
1569 		atomic_set(&match->sk_ref, 0);
1570 		match->prot_hook.type = po->prot_hook.type;
1571 		match->prot_hook.dev = po->prot_hook.dev;
1572 		match->prot_hook.func = packet_rcv_fanout;
1573 		match->prot_hook.af_packet_priv = match;
1574 		match->prot_hook.id_match = match_fanout_group;
1575 		dev_add_pack(&match->prot_hook);
1576 		list_add(&match->list, &fanout_list);
1577 	}
1578 	err = -EINVAL;
1579 	if (match->type == type &&
1580 	    match->prot_hook.type == po->prot_hook.type &&
1581 	    match->prot_hook.dev == po->prot_hook.dev) {
1582 		err = -ENOSPC;
1583 		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1584 			__dev_remove_pack(&po->prot_hook);
1585 			po->fanout = match;
1586 			atomic_inc(&match->sk_ref);
1587 			__fanout_link(sk, po);
1588 			err = 0;
1589 		}
1590 	}
1591 out:
1592 	mutex_unlock(&fanout_mutex);
1593 	if (err) {
1594 		kfree(po->rollover);
1595 		po->rollover = NULL;
1596 	}
1597 	return err;
1598 }
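
/*
 * User-space reaches fanout_add() via the PACKET_FANOUT socket option;
 * in this sketch (assumed code) the 16-bit group id sits in the low half
 * and the type/flags in the high half of the argument:
 *
 *	int arg = group_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */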
1599 
1600 static void fanout_release(struct sock *sk)
1601 {
1602 	struct packet_sock *po = pkt_sk(sk);
1603 	struct packet_fanout *f;
1604 
1605 	f = po->fanout;
1606 	if (!f)
1607 		return;
1608 
1609 	mutex_lock(&fanout_mutex);
1610 	po->fanout = NULL;
1611 
1612 	if (atomic_dec_and_test(&f->sk_ref)) {
1613 		list_del(&f->list);
1614 		dev_remove_pack(&f->prot_hook);
1615 		kfree(f);
1616 	}
1617 	mutex_unlock(&fanout_mutex);
1618 
1619 	if (po->rollover)
1620 		kfree_rcu(po->rollover, rcu);
1621 }
1622 
1623 static const struct proto_ops packet_ops;
1624 
1625 static const struct proto_ops packet_ops_spkt;
1626 
1627 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1628 			   struct packet_type *pt, struct net_device *orig_dev)
1629 {
1630 	struct sock *sk;
1631 	struct sockaddr_pkt *spkt;
1632 
1633 	/*
1634 	 *	When we registered the protocol we saved the socket in the data
1635 	 *	field for just this event.
1636 	 */
1637 
1638 	sk = pt->af_packet_priv;
1639 
1640 	/*
1641 	 *	Yank back the headers [hope the device set this
1642 	 *	right or kerboom...]
1643 	 *
1644 	 *	Incoming packets have the ll header pulled;
1645 	 *	push it back.
1646 	 *
1647 	 *	For outgoing ones skb->data == skb_mac_header(skb),
1648 	 *	so this procedure is a no-op.
1649 	 */
1650 
1651 	if (skb->pkt_type == PACKET_LOOPBACK)
1652 		goto out;
1653 
1654 	if (!net_eq(dev_net(dev), sock_net(sk)))
1655 		goto out;
1656 
1657 	skb = skb_share_check(skb, GFP_ATOMIC);
1658 	if (skb == NULL)
1659 		goto oom;
1660 
1661 	/* drop any routing info */
1662 	skb_dst_drop(skb);
1663 
1664 	/* drop conntrack reference */
1665 	nf_reset(skb);
1666 
1667 	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1668 
1669 	skb_push(skb, skb->data - skb_mac_header(skb));
1670 
1671 	/*
1672 	 *	The SOCK_PACKET socket receives _all_ frames.
1673 	 */
1674 
1675 	spkt->spkt_family = dev->type;
1676 	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1677 	spkt->spkt_protocol = skb->protocol;
1678 
1679 	/*
1680 	 *	Charge the memory to the socket. This is done specifically
1681 	 *	to prevent sockets from using up all the memory.
1682 	 */
1683 
1684 	if (sock_queue_rcv_skb(sk, skb) == 0)
1685 		return 0;
1686 
1687 out:
1688 	kfree_skb(skb);
1689 oom:
1690 	return 0;
1691 }
1692 
1693 
1694 /*
1695  *	Output a raw packet to the device layer. This bypasses all the other
1696  *	protocol layers; you must therefore supply it with a complete frame.
1697  */
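
/*
 * A hedged user-space sketch of that interface (assumed code;
 * build_frame() and "eth0" are placeholders): the caller names the
 * device in spkt_device and supplies the frame, ll header included:
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *	unsigned char frame[ETH_FRAME_LEN];
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	int len;
 *
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	len = build_frame(frame, sizeof(frame));
 *	sendto(fd, frame, len, 0, (struct sockaddr *)&spkt, sizeof(spkt));
 */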
1698 
1699 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1700 			       size_t len)
1701 {
1702 	struct sock *sk = sock->sk;
1703 	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1704 	struct sk_buff *skb = NULL;
1705 	struct net_device *dev;
1706 	__be16 proto = 0;
1707 	int err;
1708 	int extra_len = 0;
1709 
1710 	/*
1711 	 *	Get and verify the address.
1712 	 */
1713 
1714 	if (saddr) {
1715 		if (msg->msg_namelen < sizeof(struct sockaddr))
1716 			return -EINVAL;
1717 		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1718 			proto = saddr->spkt_protocol;
1719 	} else
1720 		return -ENOTCONN;	/* SOCK_PACKET must be sent with an address */
1721 
1722 	/*
1723 	 *	Find the device first, so we can size-check against it.
1724 	 */
1725 
1726 	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1727 retry:
1728 	rcu_read_lock();
1729 	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1730 	err = -ENODEV;
1731 	if (dev == NULL)
1732 		goto out_unlock;
1733 
1734 	err = -ENETDOWN;
1735 	if (!(dev->flags & IFF_UP))
1736 		goto out_unlock;
1737 
1738 	/*
1739 	 * You may not queue a frame bigger than the mtu. This is the lowest level
1740 	 * raw protocol and you must do your own fragmentation at this level.
1741 	 */
1742 
1743 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1744 		if (!netif_supports_nofcs(dev)) {
1745 			err = -EPROTONOSUPPORT;
1746 			goto out_unlock;
1747 		}
1748 		extra_len = 4; /* We're doing our own CRC */
1749 	}
1750 
1751 	err = -EMSGSIZE;
1752 	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1753 		goto out_unlock;
1754 
1755 	if (!skb) {
1756 		size_t reserved = LL_RESERVED_SPACE(dev);
1757 		int tlen = dev->needed_tailroom;
1758 		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1759 
1760 		rcu_read_unlock();
1761 		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1762 		if (skb == NULL)
1763 			return -ENOBUFS;
1764 		/* FIXME: Save some space for broken drivers that write a hard
1765 		 * header at transmission time by themselves. PPP is the notable
1766 		 * one here. This should really be fixed at the driver level.
1767 		 */
1768 		skb_reserve(skb, reserved);
1769 		skb_reset_network_header(skb);
1770 
1771 		/* Try to align data part correctly */
1772 		if (hhlen) {
1773 			skb->data -= hhlen;
1774 			skb->tail -= hhlen;
1775 			if (len < hhlen)
1776 				skb_reset_network_header(skb);
1777 		}
1778 		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1779 		if (err)
1780 			goto out_free;
1781 		goto retry;
1782 	}
1783 
1784 	if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
1785 		/* Earlier code assumed this would be a VLAN pkt,
1786 		 * double-check this now that we have the actual
1787 		 * packet in hand.
1788 		 */
1789 		struct ethhdr *ehdr;
1790 		skb_reset_mac_header(skb);
1791 		ehdr = eth_hdr(skb);
1792 		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1793 			err = -EMSGSIZE;
1794 			goto out_unlock;
1795 		}
1796 	}
1797 
1798 	skb->protocol = proto;
1799 	skb->dev = dev;
1800 	skb->priority = sk->sk_priority;
1801 	skb->mark = sk->sk_mark;
1802 
1803 	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1804 
1805 	if (unlikely(extra_len == 4))
1806 		skb->no_fcs = 1;
1807 
1808 	skb_probe_transport_header(skb, 0);
1809 
1810 	dev_queue_xmit(skb);
1811 	rcu_read_unlock();
1812 	return len;
1813 
1814 out_unlock:
1815 	rcu_read_unlock();
1816 out_free:
1817 	kfree_skb(skb);
1818 	return err;
1819 }
1820 
1821 static unsigned int run_filter(const struct sk_buff *skb,
1822 				      const struct sock *sk,
1823 				      unsigned int res)
1824 {
1825 	struct sk_filter *filter;
1826 
1827 	rcu_read_lock();
1828 	filter = rcu_dereference(sk->sk_filter);
1829 	if (filter != NULL)
1830 		res = SK_RUN_FILTER(filter, skb);
1831 	rcu_read_unlock();
1832 
1833 	return res;
1834 }
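
/*
 * The filter consulted above is a BPF socket filter attached from
 * user-space, e.g. (assumed sketch):
 *
 *	struct sock_fprog prog = { .len = ninsns, .filter = insns };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * A result of 0 drops the packet; otherwise the result caps snaplen.
 */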
1835 
1836 /*
1837  * This function does lazy skb cloning in the hope that most packets
1838  * are discarded by BPF.
1839  *
1840  * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
1841  * and skb->cb are mangled. It works because (and as long as) packets
1842  * falling here are owned by the current CPU. Output packets are cloned
1843  * by dev_queue_xmit_nit(), input packets are processed by net_bh
1844  * sequentially, so if we return the skb to its original state on exit,
1845  * we will not harm anyone.
1846  */
1847 
1848 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1849 		      struct packet_type *pt, struct net_device *orig_dev)
1850 {
1851 	struct sock *sk;
1852 	struct sockaddr_ll *sll;
1853 	struct packet_sock *po;
1854 	u8 *skb_head = skb->data;
1855 	int skb_len = skb->len;
1856 	unsigned int snaplen, res;
1857 
1858 	if (skb->pkt_type == PACKET_LOOPBACK)
1859 		goto drop;
1860 
1861 	sk = pt->af_packet_priv;
1862 	po = pkt_sk(sk);
1863 
1864 	if (!net_eq(dev_net(dev), sock_net(sk)))
1865 		goto drop;
1866 
1867 	skb->dev = dev;
1868 
1869 	if (dev->header_ops) {
1870 		/* The device has an explicit notion of ll header,
1871 		 * exported to higher levels.
1872 		 *
1873 		 * Otherwise, the device hides details of its frame
1874 		 * structure, so that the corresponding packet head is
1875 		 * never delivered to the user.
1876 		 */
1877 		if (sk->sk_type != SOCK_DGRAM)
1878 			skb_push(skb, skb->data - skb_mac_header(skb));
1879 		else if (skb->pkt_type == PACKET_OUTGOING) {
1880 			/* Special case: outgoing packets have ll header at head */
1881 			skb_pull(skb, skb_network_offset(skb));
1882 		}
1883 	}
1884 
1885 	snaplen = skb->len;
1886 
1887 	res = run_filter(skb, sk, snaplen);
1888 	if (!res)
1889 		goto drop_n_restore;
1890 	if (snaplen > res)
1891 		snaplen = res;
1892 
1893 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1894 		goto drop_n_acct;
1895 
1896 	if (skb_shared(skb)) {
1897 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1898 		if (nskb == NULL)
1899 			goto drop_n_acct;
1900 
1901 		if (skb_head != skb->data) {
1902 			skb->data = skb_head;
1903 			skb->len = skb_len;
1904 		}
1905 		consume_skb(skb);
1906 		skb = nskb;
1907 	}
1908 
1909 	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
1910 
1911 	sll = &PACKET_SKB_CB(skb)->sa.ll;
1912 	sll->sll_hatype = dev->type;
1913 	sll->sll_pkttype = skb->pkt_type;
1914 	if (unlikely(po->origdev))
1915 		sll->sll_ifindex = orig_dev->ifindex;
1916 	else
1917 		sll->sll_ifindex = dev->ifindex;
1918 
1919 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1920 
1921 	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
1922 	 * Use their space for storing the original skb length.
1923 	 */
1924 	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
1925 
1926 	if (pskb_trim(skb, snaplen))
1927 		goto drop_n_acct;
1928 
1929 	skb_set_owner_r(skb, sk);
1930 	skb->dev = NULL;
1931 	skb_dst_drop(skb);
1932 
1933 	/* drop conntrack reference */
1934 	nf_reset(skb);
1935 
1936 	spin_lock(&sk->sk_receive_queue.lock);
1937 	po->stats.stats1.tp_packets++;
1938 	sock_skb_set_dropcount(sk, skb);
1939 	__skb_queue_tail(&sk->sk_receive_queue, skb);
1940 	spin_unlock(&sk->sk_receive_queue.lock);
1941 	sk->sk_data_ready(sk);
1942 	return 0;
1943 
1944 drop_n_acct:
1945 	spin_lock(&sk->sk_receive_queue.lock);
1946 	po->stats.stats1.tp_drops++;
1947 	atomic_inc(&sk->sk_drops);
1948 	spin_unlock(&sk->sk_receive_queue.lock);
1949 
1950 drop_n_restore:
1951 	if (skb_head != skb->data && skb_shared(skb)) {
1952 		skb->data = skb_head;
1953 		skb->len = skb_len;
1954 	}
1955 drop:
1956 	consume_skb(skb);
1957 	return 0;
1958 }
1959 
1960 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1961 		       struct packet_type *pt, struct net_device *orig_dev)
1962 {
1963 	struct sock *sk;
1964 	struct packet_sock *po;
1965 	struct sockaddr_ll *sll;
1966 	union tpacket_uhdr h;
1967 	u8 *skb_head = skb->data;
1968 	int skb_len = skb->len;
1969 	unsigned int snaplen, res;
1970 	unsigned long status = TP_STATUS_USER;
1971 	unsigned short macoff, netoff, hdrlen;
1972 	struct sk_buff *copy_skb = NULL;
1973 	struct timespec ts;
1974 	__u32 ts_status;
1975 
1976 	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
1977 	 * We may add members to them up to the current aligned size without forcing
1978 	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
1979 	 */
1980 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
1981 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
1982 
1983 	if (skb->pkt_type == PACKET_LOOPBACK)
1984 		goto drop;
1985 
1986 	sk = pt->af_packet_priv;
1987 	po = pkt_sk(sk);
1988 
1989 	if (!net_eq(dev_net(dev), sock_net(sk)))
1990 		goto drop;
1991 
1992 	if (dev->header_ops) {
1993 		if (sk->sk_type != SOCK_DGRAM)
1994 			skb_push(skb, skb->data - skb_mac_header(skb));
1995 		else if (skb->pkt_type == PACKET_OUTGOING) {
1996 			/* Special case: outgoing packets have ll header at head */
1997 			skb_pull(skb, skb_network_offset(skb));
1998 		}
1999 	}
2000 
2001 	snaplen = skb->len;
2002 
2003 	res = run_filter(skb, sk, snaplen);
2004 	if (!res)
2005 		goto drop_n_restore;
2006 
2007 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2008 		status |= TP_STATUS_CSUMNOTREADY;
2009 	else if (skb->pkt_type != PACKET_OUTGOING &&
2010 		 (skb->ip_summed == CHECKSUM_COMPLETE ||
2011 		  skb_csum_unnecessary(skb)))
2012 		status |= TP_STATUS_CSUM_VALID;
2013 
2014 	if (snaplen > res)
2015 		snaplen = res;
2016 
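	/* Compute where the MAC and network headers will land inside the
	 * ring frame. SOCK_DGRAM never sees the link-layer header, so both
	 * offsets coincide; SOCK_RAW reserves room (at least 16 bytes) for
	 * the link-layer header in front of the network header.
	 */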
2017 	if (sk->sk_type == SOCK_DGRAM) {
2018 		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2019 				  po->tp_reserve;
2020 	} else {
2021 		unsigned int maclen = skb_network_offset(skb);
2022 		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2023 				       (maclen < 16 ? 16 : maclen)) +
2024 			po->tp_reserve;
2025 		macoff = netoff - maclen;
2026 	}
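	/* If the snapshot won't fit in a ring frame, optionally stash a full
	 * copy on the regular receive queue (copy_thresh) and clamp snaplen
	 * to the space the frame or block actually has.
	 */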
2027 	if (po->tp_version <= TPACKET_V2) {
2028 		if (macoff + snaplen > po->rx_ring.frame_size) {
2029 			if (po->copy_thresh &&
2030 			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2031 				if (skb_shared(skb)) {
2032 					copy_skb = skb_clone(skb, GFP_ATOMIC);
2033 				} else {
2034 					copy_skb = skb_get(skb);
2035 					skb_head = skb->data;
2036 				}
2037 				if (copy_skb)
2038 					skb_set_owner_r(copy_skb, sk);
2039 			}
2040 			snaplen = po->rx_ring.frame_size - macoff;
2041 			if ((int)snaplen < 0)
2042 				snaplen = 0;
2043 		}
2044 	} else if (unlikely(macoff + snaplen >
2045 			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2046 		u32 nval;
2047 
2048 		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2049 		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2050 			    snaplen, nval, macoff);
2051 		snaplen = nval;
2052 		if (unlikely((int)snaplen < 0)) {
2053 			snaplen = 0;
2054 			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2055 		}
2056 	}
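	/* Claim the next kernel-owned frame in the ring; a NULL return means
	 * the ring is full and the packet is accounted as dropped.
	 */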
2057 	spin_lock(&sk->sk_receive_queue.lock);
2058 	h.raw = packet_current_rx_frame(po, skb,
2059 					TP_STATUS_KERNEL, (macoff+snaplen));
2060 	if (!h.raw)
2061 		goto ring_is_full;
2062 	if (po->tp_version <= TPACKET_V2) {
2063 		packet_increment_rx_head(po, &po->rx_ring);
2064 	/*
2065 	 * LOSING will be reported until you read the stats,
2066 	 * because it's COR - Clear On Read.
2067 	 * Anyway, this applies to V1/V2 only, as V3 doesn't need it
2068 	 * at the packet level.
2069 	 */
2070 		if (po->stats.stats1.tp_drops)
2071 			status |= TP_STATUS_LOSING;
2072 	}
2073 	po->stats.stats1.tp_packets++;
2074 	if (copy_skb) {
2075 		status |= TP_STATUS_COPY;
2076 		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2077 	}
2078 	spin_unlock(&sk->sk_receive_queue.lock);
2079 
2080 	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2081 
2082 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2083 		getnstimeofday(&ts);
2084 
2085 	status |= ts_status;
2086 
2087 	switch (po->tp_version) {
2088 	case TPACKET_V1:
2089 		h.h1->tp_len = skb->len;
2090 		h.h1->tp_snaplen = snaplen;
2091 		h.h1->tp_mac = macoff;
2092 		h.h1->tp_net = netoff;
2093 		h.h1->tp_sec = ts.tv_sec;
2094 		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2095 		hdrlen = sizeof(*h.h1);
2096 		break;
2097 	case TPACKET_V2:
2098 		h.h2->tp_len = skb->len;
2099 		h.h2->tp_snaplen = snaplen;
2100 		h.h2->tp_mac = macoff;
2101 		h.h2->tp_net = netoff;
2102 		h.h2->tp_sec = ts.tv_sec;
2103 		h.h2->tp_nsec = ts.tv_nsec;
2104 		if (skb_vlan_tag_present(skb)) {
2105 			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2106 			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2107 			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2108 		} else {
2109 			h.h2->tp_vlan_tci = 0;
2110 			h.h2->tp_vlan_tpid = 0;
2111 		}
2112 		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2113 		hdrlen = sizeof(*h.h2);
2114 		break;
2115 	case TPACKET_V3:
2116 		/* tp_nxt_offset and the vlan fields are already populated above,
2117 		 * so DON'T clear those fields here.
2118 		 */
2119 		h.h3->tp_status |= status;
2120 		h.h3->tp_len = skb->len;
2121 		h.h3->tp_snaplen = snaplen;
2122 		h.h3->tp_mac = macoff;
2123 		h.h3->tp_net = netoff;
2124 		h.h3->tp_sec  = ts.tv_sec;
2125 		h.h3->tp_nsec = ts.tv_nsec;
2126 		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2127 		hdrlen = sizeof(*h.h3);
2128 		break;
2129 	default:
2130 		BUG();
2131 	}
2132 
2133 	sll = h.raw + TPACKET_ALIGN(hdrlen);
2134 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2135 	sll->sll_family = AF_PACKET;
2136 	sll->sll_hatype = dev->type;
2137 	sll->sll_protocol = skb->protocol;
2138 	sll->sll_pkttype = skb->pkt_type;
2139 	if (unlikely(po->origdev))
2140 		sll->sll_ifindex = orig_dev->ifindex;
2141 	else
2142 		sll->sll_ifindex = dev->ifindex;
2143 
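	/* Make sure all frame and header stores above are visible before
	 * the frame's status word is handed to userspace below.
	 */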
2144 	smp_mb();
2145 
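/* On architectures with aliasing D-caches, flush every ring page we wrote
 * so the userspace mapping of the frame observes the new contents.
 */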
2146 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2147 	if (po->tp_version <= TPACKET_V2) {
2148 		u8 *start, *end;
2149 
2150 		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2151 					macoff + snaplen);
2152 
2153 		for (start = h.raw; start < end; start += PAGE_SIZE)
2154 			flush_dcache_page(pgv_to_page(start));
2155 	}
2156 	smp_wmb();
2157 #endif
2158 
2159 	if (po->tp_version <= TPACKET_V2) {
2160 		__packet_set_status(po, h.raw, status);
2161 		sk->sk_data_ready(sk);
2162 	} else {
2163 		prb_clear_blk_fill_status(&po->rx_ring);
2164 	}
2165 
2166 drop_n_restore:
2167 	if (skb_head != skb->data && skb_shared(skb)) {
2168 		skb->data = skb_head;
2169 		skb->len = skb_len;
2170 	}
2171 drop:
2172 	kfree_skb(skb);
2173 	return 0;
2174 
2175 ring_is_full:
2176 	po->stats.stats1.tp_drops++;
2177 	spin_unlock(&sk->sk_receive_queue.lock);
2178 
2179 	sk->sk_data_ready(sk);
2180 	kfree_skb(copy_skb);
2181 	goto drop_n_restore;
2182 }
2183 
2184 static void tpacket_destruct_skb(struct sk_buff *skb)
2185 {
2186 	struct packet_sock *po = pkt_sk(skb->sk);
2187 
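	/* TX completion: hand the ring frame back to userspace by marking
	 * it TP_STATUS_AVAILABLE (plus any timestamp status bits) and drop
	 * our pending-frame count.
	 */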
2188 	if (likely(po->tx_ring.pg_vec)) {
2189 		void *ph;
2190 		__u32 ts;
2191 
2192 		ph = skb_shinfo(skb)->destructor_arg;
2193 		packet_dec_pending(&po->tx_ring);
2194 
2195 		ts = __packet_set_timestamp(po, ph, skb);
2196 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2197 	}
2198 
2199 	sock_wfree(skb);
2200 }
2201 
2202 static bool ll_header_truncated(const struct net_device *dev, int len)
2203 {
2204 	/* net device doesn't like empty head */
2205 	if (unlikely(len <= dev->hard_header_len)) {
2206 		net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
2207 				     current->comm, len, dev->hard_header_len);
2208 		return true;
2209 	}
2210 
2211 	return false;
2212 }
2213 
2214 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2215 		void *frame, struct net_device *dev, int size_max,
2216 		__be16 proto, unsigned char *addr, int hlen)
2217 {
2218 	union tpacket_uhdr ph;
2219 	int to_write, offset, len, tp_len, nr_frags, len_max;
2220 	struct socket *sock = po->sk.sk_socket;
2221 	struct page *page;
2222 	void *data;
2223 	int err;
2224 
2225 	ph.raw = frame;
2226 
2227 	skb->protocol = proto;
2228 	skb->dev = dev;
2229 	skb->priority = po->sk.sk_priority;
2230 	skb->mark = po->sk.sk_mark;
2231 	sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
2232 	skb_shinfo(skb)->destructor_arg = ph.raw;
2233 
2234 	switch (po->tp_version) {
2235 	case TPACKET_V2:
2236 		tp_len = ph.h2->tp_len;
2237 		break;
2238 	default:
2239 		tp_len = ph.h1->tp_len;
2240 		break;
2241 	}
2242 	if (unlikely(tp_len > size_max)) {
2243 		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2244 		return -EMSGSIZE;
2245 	}
2246 
2247 	skb_reserve(skb, hlen);
2248 	skb_reset_network_header(skb);
2249 
2250 	if (!packet_use_direct_xmit(po))
2251 		skb_probe_transport_header(skb, 0);
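	/* With PACKET_TX_HAS_OFF, userspace supplies the start of the packet
	 * data via tp_net (SOCK_DGRAM) or tp_mac (SOCK_RAW); validate that
	 * offset against the frame boundaries.
	 */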
2252 	if (unlikely(po->tp_tx_has_off)) {
2253 		int off_min, off_max, off;
2254 		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2255 		off_max = po->tx_ring.frame_size - tp_len;
2256 		if (sock->type == SOCK_DGRAM) {
2257 			switch (po->tp_version) {
2258 			case TPACKET_V2:
2259 				off = ph.h2->tp_net;
2260 				break;
2261 			default:
2262 				off = ph.h1->tp_net;
2263 				break;
2264 			}
2265 		} else {
2266 			switch (po->tp_version) {
2267 			case TPACKET_V2:
2268 				off = ph.h2->tp_mac;
2269 				break;
2270 			default:
2271 				off = ph.h1->tp_mac;
2272 				break;
2273 			}
2274 		}
2275 		if (unlikely((off < off_min) || (off_max < off)))
2276 			return -EINVAL;
2277 		data = ph.raw + off;
2278 	} else {
2279 		data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
2280 	}
2281 	to_write = tp_len;
2282 
2283 	if (sock->type == SOCK_DGRAM) {
2284 		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2285 				NULL, tp_len);
2286 		if (unlikely(err < 0))
2287 			return -EINVAL;
2288 	} else if (dev->hard_header_len) {
2289 		if (ll_header_truncated(dev, tp_len))
2290 			return -EINVAL;
2291 
2292 		skb_push(skb, dev->hard_header_len);
2293 		err = skb_store_bits(skb, 0, data,
2294 				dev->hard_header_len);
2295 		if (unlikely(err))
2296 			return err;
2297 
2298 		data += dev->hard_header_len;
2299 		to_write -= dev->hard_header_len;
2300 	}
2301 
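	/* Map the remaining frame data zero-copy: attach the ring pages
	 * themselves as paged fragments instead of copying the payload.
	 */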
2302 	offset = offset_in_page(data);
2303 	len_max = PAGE_SIZE - offset;
2304 	len = ((to_write > len_max) ? len_max : to_write);
2305 
2306 	skb->data_len = to_write;
2307 	skb->len += to_write;
2308 	skb->truesize += to_write;
2309 	atomic_add(to_write, &po->sk.sk_wmem_alloc);
2310 
2311 	while (likely(to_write)) {
2312 		nr_frags = skb_shinfo(skb)->nr_frags;
2313 
2314 		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2315 			pr_err("Packet exceed the number of skb frags(%lu)\n",
2316 			       MAX_SKB_FRAGS);
2317 			return -EFAULT;
2318 		}
2319 
2320 		page = pgv_to_page(data);
2321 		data += len;
2322 		flush_dcache_page(page);
2323 		get_page(page);
2324 		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2325 		to_write -= len;
2326 		offset = 0;
2327 		len_max = PAGE_SIZE;
2328 		len = ((to_write > len_max) ? len_max : to_write);
2329 	}
2330 
2331 	return tp_len;
2332 }
2333 
2334 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2335 {
2336 	struct sk_buff *skb;
2337 	struct net_device *dev;
2338 	__be16 proto;
2339 	int err, reserve = 0;
2340 	void *ph;
2341 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2342 	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2343 	int tp_len, size_max;
2344 	unsigned char *addr;
2345 	int len_sum = 0;
2346 	int status = TP_STATUS_AVAILABLE;
2347 	int hlen, tlen;
2348 
2349 	mutex_lock(&po->pg_vec_lock);
2350 
2351 	if (likely(saddr == NULL)) {
2352 		dev	= packet_cached_dev_get(po);
2353 		proto	= po->num;
2354 		addr	= NULL;
2355 	} else {
2356 		err = -EINVAL;
2357 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2358 			goto out;
2359 		if (msg->msg_namelen < (saddr->sll_halen
2360 					+ offsetof(struct sockaddr_ll,
2361 						sll_addr)))
2362 			goto out;
2363 		proto	= saddr->sll_protocol;
2364 		addr	= saddr->sll_addr;
2365 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2366 	}
2367 
2368 	err = -ENXIO;
2369 	if (unlikely(dev == NULL))
2370 		goto out;
2371 	err = -ENETDOWN;
2372 	if (unlikely(!(dev->flags & IFF_UP)))
2373 		goto out_put;
2374 
2375 	reserve = dev->hard_header_len + VLAN_HLEN;
2376 	size_max = po->tx_ring.frame_size
2377 		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2378 
2379 	if (size_max > dev->mtu + reserve)
2380 		size_max = dev->mtu + reserve;
2381 
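	/* Walk the TX ring: claim each frame userspace marked
	 * TP_STATUS_SEND_REQUEST, wrap it in an skb and pass it to the
	 * device, until no more frames are pending.
	 */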
2382 	do {
2383 		ph = packet_current_frame(po, &po->tx_ring,
2384 					  TP_STATUS_SEND_REQUEST);
2385 		if (unlikely(ph == NULL)) {
2386 			if (need_wait && need_resched())
2387 				schedule();
2388 			continue;
2389 		}
2390 
2391 		status = TP_STATUS_SEND_REQUEST;
2392 		hlen = LL_RESERVED_SPACE(dev);
2393 		tlen = dev->needed_tailroom;
2394 		skb = sock_alloc_send_skb(&po->sk,
2395 				hlen + tlen + sizeof(struct sockaddr_ll),
2396 				!need_wait, &err);
2397 
2398 		if (unlikely(skb == NULL)) {
2399 			/* we assume the socket was initially writable ... */
2400 			if (likely(len_sum > 0))
2401 				err = len_sum;
2402 			goto out_status;
2403 		}
2404 		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2405 					  addr, hlen);
2406 		if (tp_len > dev->mtu + dev->hard_header_len) {
2407 			struct ethhdr *ehdr;
2408 			/* Earlier code assumed this would be a VLAN pkt,
2409 			 * double-check this now that we have the actual
2410 			 * packet in hand.
2411 			 */
2412 
2413 			skb_reset_mac_header(skb);
2414 			ehdr = eth_hdr(skb);
2415 			if (ehdr->h_proto != htons(ETH_P_8021Q))
2416 				tp_len = -EMSGSIZE;
2417 		}
2418 		if (unlikely(tp_len < 0)) {
2419 			if (po->tp_loss) {
2420 				__packet_set_status(po, ph,
2421 						TP_STATUS_AVAILABLE);
2422 				packet_increment_head(&po->tx_ring);
2423 				kfree_skb(skb);
2424 				continue;
2425 			} else {
2426 				status = TP_STATUS_WRONG_FORMAT;
2427 				err = tp_len;
2428 				goto out_status;
2429 			}
2430 		}
2431 
2432 		packet_pick_tx_queue(dev, skb);
2433 
2434 		skb->destructor = tpacket_destruct_skb;
2435 		__packet_set_status(po, ph, TP_STATUS_SENDING);
2436 		packet_inc_pending(&po->tx_ring);
2437 
2438 		status = TP_STATUS_SEND_REQUEST;
2439 		err = po->xmit(skb);
2440 		if (unlikely(err > 0)) {
2441 			err = net_xmit_errno(err);
2442 			if (err && __packet_get_status(po, ph) ==
2443 				   TP_STATUS_AVAILABLE) {
2444 				/* skb was destructed already */
2445 				skb = NULL;
2446 				goto out_status;
2447 			}
2448 			/*
2449 			 * skb was dropped but not destructed yet;
2450 			 * let's treat it like congestion or err < 0
2451 			 */
2452 			err = 0;
2453 		}
2454 		packet_increment_head(&po->tx_ring);
2455 		len_sum += tp_len;
2456 	} while (likely((ph != NULL) ||
2457 		/* Note: packet_read_pending() might be slow if we have
2458 		 * to call it, as it is a per-cpu variable, but in the fast path
2459 		 * we already short-circuit the loop with the first
2460 		 * condition, and luckily don't have to go down that path
2461 		 * anyway.
2462 		 */
2463 		 (need_wait && packet_read_pending(&po->tx_ring))));
2464 
2465 	err = len_sum;
2466 	goto out_put;
2467 
2468 out_status:
2469 	__packet_set_status(po, ph, status);
2470 	kfree_skb(skb);
2471 out_put:
2472 	dev_put(dev);
2473 out:
2474 	mutex_unlock(&po->pg_vec_lock);
2475 	return err;
2476 }
2477 
2478 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2479 				        size_t reserve, size_t len,
2480 				        size_t linear, int noblock,
2481 				        int *err)
2482 {
2483 	struct sk_buff *skb;
2484 
2485 	/* Under a page?  Don't bother with paged skb. */
2486 	if (prepad + len < PAGE_SIZE || !linear)
2487 		linear = len;
2488 
2489 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2490 				   err, 0);
2491 	if (!skb)
2492 		return NULL;
2493 
2494 	skb_reserve(skb, reserve);
2495 	skb_put(skb, linear);
2496 	skb->data_len = len - linear;
2497 	skb->len += len - linear;
2498 
2499 	return skb;
2500 }
2501 
2502 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2503 {
2504 	struct sock *sk = sock->sk;
2505 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2506 	struct sk_buff *skb;
2507 	struct net_device *dev;
2508 	__be16 proto;
2509 	unsigned char *addr;
2510 	int err, reserve = 0;
2511 	struct virtio_net_hdr vnet_hdr = { 0 };
2512 	int offset = 0;
2513 	int vnet_hdr_len;
2514 	struct packet_sock *po = pkt_sk(sk);
2515 	unsigned short gso_type = 0;
2516 	int hlen, tlen;
2517 	int extra_len = 0;
2518 	ssize_t n;
2519 
2520 	/*
2521 	 *	Get and verify the address.
2522 	 */
2523 
2524 	if (likely(saddr == NULL)) {
2525 		dev	= packet_cached_dev_get(po);
2526 		proto	= po->num;
2527 		addr	= NULL;
2528 	} else {
2529 		err = -EINVAL;
2530 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2531 			goto out;
2532 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2533 			goto out;
2534 		proto	= saddr->sll_protocol;
2535 		addr	= saddr->sll_addr;
2536 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2537 	}
2538 
2539 	err = -ENXIO;
2540 	if (unlikely(dev == NULL))
2541 		goto out_unlock;
2542 	err = -ENETDOWN;
2543 	if (unlikely(!(dev->flags & IFF_UP)))
2544 		goto out_unlock;
2545 
2546 	if (sock->type == SOCK_RAW)
2547 		reserve = dev->hard_header_len;
2548 	if (po->has_vnet_hdr) {
2549 		vnet_hdr_len = sizeof(vnet_hdr);
2550 
2551 		err = -EINVAL;
2552 		if (len < vnet_hdr_len)
2553 			goto out_unlock;
2554 
2555 		len -= vnet_hdr_len;
2556 
2557 		err = -EFAULT;
2558 		n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
2559 		if (n != vnet_hdr_len)
2560 			goto out_unlock;
2561 
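		/* Sanity-check the virtio header: if the checksum range
		 * extends past the declared header length, grow hdr_len
		 * to cover it.
		 */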
2562 		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2563 		    (__virtio16_to_cpu(false, vnet_hdr.csum_start) +
2564 		     __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 >
2565 		      __virtio16_to_cpu(false, vnet_hdr.hdr_len)))
2566 			vnet_hdr.hdr_len = __cpu_to_virtio16(false,
2567 				 __virtio16_to_cpu(false, vnet_hdr.csum_start) +
2568 				__virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2);
2569 
2570 		err = -EINVAL;
2571 		if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len)
2572 			goto out_unlock;
2573 
2574 		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2575 			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2576 			case VIRTIO_NET_HDR_GSO_TCPV4:
2577 				gso_type = SKB_GSO_TCPV4;
2578 				break;
2579 			case VIRTIO_NET_HDR_GSO_TCPV6:
2580 				gso_type = SKB_GSO_TCPV6;
2581 				break;
2582 			case VIRTIO_NET_HDR_GSO_UDP:
2583 				gso_type = SKB_GSO_UDP;
2584 				break;
2585 			default:
2586 				goto out_unlock;
2587 			}
2588 
2589 			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2590 				gso_type |= SKB_GSO_TCP_ECN;
2591 
2592 			if (vnet_hdr.gso_size == 0)
2593 				goto out_unlock;
2594 
2595 		}
2596 	}
2597 
2598 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2599 		if (!netif_supports_nofcs(dev)) {
2600 			err = -EPROTONOSUPPORT;
2601 			goto out_unlock;
2602 		}
2603 		extra_len = 4; /* We're doing our own CRC */
2604 	}
2605 
2606 	err = -EMSGSIZE;
2607 	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2608 		goto out_unlock;
2609 
2610 	err = -ENOBUFS;
2611 	hlen = LL_RESERVED_SPACE(dev);
2612 	tlen = dev->needed_tailroom;
2613 	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
2614 			       __virtio16_to_cpu(false, vnet_hdr.hdr_len),
2615 			       msg->msg_flags & MSG_DONTWAIT, &err);
2616 	if (skb == NULL)
2617 		goto out_unlock;
2618 
2619 	skb_set_network_header(skb, reserve);
2620 
2621 	err = -EINVAL;
2622 	if (sock->type == SOCK_DGRAM) {
2623 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2624 		if (unlikely(offset < 0))
2625 			goto out_free;
2626 	} else {
2627 		if (ll_header_truncated(dev, len))
2628 			goto out_free;
2629 	}
2630 
2631 	/* Returns -EFAULT on error */
2632 	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2633 	if (err)
2634 		goto out_free;
2635 
2636 	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2637 
2638 	if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
2639 		/* Earlier code assumed this would be a VLAN pkt,
2640 		 * double-check this now that we have the actual
2641 		 * packet in hand.
2642 		 */
2643 		struct ethhdr *ehdr;
2644 		skb_reset_mac_header(skb);
2645 		ehdr = eth_hdr(skb);
2646 		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2647 			err = -EMSGSIZE;
2648 			goto out_free;
2649 		}
2650 	}
2651 
2652 	skb->protocol = proto;
2653 	skb->dev = dev;
2654 	skb->priority = sk->sk_priority;
2655 	skb->mark = sk->sk_mark;
2656 
2657 	packet_pick_tx_queue(dev, skb);
2658 
2659 	if (po->has_vnet_hdr) {
2660 		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2661 			u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start);
2662 			u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset);
2663 			if (!skb_partial_csum_set(skb, s, o)) {
2664 				err = -EINVAL;
2665 				goto out_free;
2666 			}
2667 		}
2668 
2669 		skb_shinfo(skb)->gso_size =
2670 			__virtio16_to_cpu(false, vnet_hdr.gso_size);
2671 		skb_shinfo(skb)->gso_type = gso_type;
2672 
2673 		/* Header must be checked, and gso_segs computed. */
2674 		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2675 		skb_shinfo(skb)->gso_segs = 0;
2676 
2677 		len += vnet_hdr_len;
2678 	}
2679 
2680 	if (!packet_use_direct_xmit(po))
2681 		skb_probe_transport_header(skb, reserve);
2682 	if (unlikely(extra_len == 4))
2683 		skb->no_fcs = 1;
2684 
2685 	err = po->xmit(skb);
2686 	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2687 		goto out_unlock;
2688 
2689 	dev_put(dev);
2690 
2691 	return len;
2692 
2693 out_free:
2694 	kfree_skb(skb);
2695 out_unlock:
2696 	if (dev)
2697 		dev_put(dev);
2698 out:
2699 	return err;
2700 }
2701 
2702 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2703 {
2704 	struct sock *sk = sock->sk;
2705 	struct packet_sock *po = pkt_sk(sk);
2706 
2707 	if (po->tx_ring.pg_vec)
2708 		return tpacket_snd(po, msg);
2709 	else
2710 		return packet_snd(sock, msg, len);
2711 }
2712 
2713 /*
2714  *	Close a PACKET socket. This is fairly simple. We immediately go
2715  *	to 'closed' state and remove our protocol entry in the device list.
2716  */
2717 
2718 static int packet_release(struct socket *sock)
2719 {
2720 	struct sock *sk = sock->sk;
2721 	struct packet_sock *po;
2722 	struct net *net;
2723 	union tpacket_req_u req_u;
2724 
2725 	if (!sk)
2726 		return 0;
2727 
2728 	net = sock_net(sk);
2729 	po = pkt_sk(sk);
2730 
2731 	mutex_lock(&net->packet.sklist_lock);
2732 	sk_del_node_init_rcu(sk);
2733 	mutex_unlock(&net->packet.sklist_lock);
2734 
2735 	preempt_disable();
2736 	sock_prot_inuse_add(net, sk->sk_prot, -1);
2737 	preempt_enable();
2738 
2739 	spin_lock(&po->bind_lock);
2740 	unregister_prot_hook(sk, false);
2741 	packet_cached_dev_reset(po);
2742 
2743 	if (po->prot_hook.dev) {
2744 		dev_put(po->prot_hook.dev);
2745 		po->prot_hook.dev = NULL;
2746 	}
2747 	spin_unlock(&po->bind_lock);
2748 
2749 	packet_flush_mclist(sk);
2750 
2751 	if (po->rx_ring.pg_vec) {
2752 		memset(&req_u, 0, sizeof(req_u));
2753 		packet_set_ring(sk, &req_u, 1, 0);
2754 	}
2755 
2756 	if (po->tx_ring.pg_vec) {
2757 		memset(&req_u, 0, sizeof(req_u));
2758 		packet_set_ring(sk, &req_u, 1, 1);
2759 	}
2760 
2761 	fanout_release(sk);
2762 
2763 	synchronize_net();
2764 	/*
2765 	 *	Now the socket is dead. No more input will appear.
2766 	 */
2767 	sock_orphan(sk);
2768 	sock->sk = NULL;
2769 
2770 	/* Purge queues */
2771 
2772 	skb_queue_purge(&sk->sk_receive_queue);
2773 	packet_free_pending(po);
2774 	sk_refcnt_debug_release(sk);
2775 
2776 	sock_put(sk);
2777 	return 0;
2778 }
2779 
2780 /*
2781  *	Attach a packet hook.
2782  */
2783 
2784 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
2785 {
2786 	struct packet_sock *po = pkt_sk(sk);
2787 	const struct net_device *dev_curr;
2788 	__be16 proto_curr;
2789 	bool need_rehook;
2790 
2791 	if (po->fanout) {
2792 		if (dev)
2793 			dev_put(dev);
2794 
2795 		return -EINVAL;
2796 	}
2797 
2798 	lock_sock(sk);
2799 	spin_lock(&po->bind_lock);
2800 
2801 	proto_curr = po->prot_hook.type;
2802 	dev_curr = po->prot_hook.dev;
2803 
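	/* Only detach and re-register the protocol hook if the protocol
	 * or the bound device actually changes.
	 */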
2804 	need_rehook = proto_curr != proto || dev_curr != dev;
2805 
2806 	if (need_rehook) {
2807 		unregister_prot_hook(sk, true);
2808 
2809 		po->num = proto;
2810 		po->prot_hook.type = proto;
2811 
2812 		if (po->prot_hook.dev)
2813 			dev_put(po->prot_hook.dev);
2814 
2815 		po->prot_hook.dev = dev;
2816 
2817 		po->ifindex = dev ? dev->ifindex : 0;
2818 		packet_cached_dev_assign(po, dev);
2819 	}
2820 
2821 	if (proto == 0 || !need_rehook)
2822 		goto out_unlock;
2823 
2824 	if (!dev || (dev->flags & IFF_UP)) {
2825 		register_prot_hook(sk);
2826 	} else {
2827 		sk->sk_err = ENETDOWN;
2828 		if (!sock_flag(sk, SOCK_DEAD))
2829 			sk->sk_error_report(sk);
2830 	}
2831 
2832 out_unlock:
2833 	spin_unlock(&po->bind_lock);
2834 	release_sock(sk);
2835 	return 0;
2836 }
2837 
2838 /*
2839  *	Bind a packet socket to a device
2840  */
2841 
2842 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2843 			    int addr_len)
2844 {
2845 	struct sock *sk = sock->sk;
2846 	char name[15];
2847 	struct net_device *dev;
2848 	int err = -ENODEV;
2849 
2850 	/*
2851 	 *	Check legality
2852 	 */
2853 
2854 	if (addr_len != sizeof(struct sockaddr))
2855 		return -EINVAL;
2856 	strlcpy(name, uaddr->sa_data, sizeof(name));
2857 
2858 	dev = dev_get_by_name(sock_net(sk), name);
2859 	if (dev)
2860 		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
2861 	return err;
2862 }
2863 
2864 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2865 {
2866 	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2867 	struct sock *sk = sock->sk;
2868 	struct net_device *dev = NULL;
2869 	int err;
2870 
2871 
2872 	/*
2873 	 *	Check legality
2874 	 */
2875 
2876 	if (addr_len < sizeof(struct sockaddr_ll))
2877 		return -EINVAL;
2878 	if (sll->sll_family != AF_PACKET)
2879 		return -EINVAL;
2880 
2881 	if (sll->sll_ifindex) {
2882 		err = -ENODEV;
2883 		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
2884 		if (dev == NULL)
2885 			goto out;
2886 	}
2887 	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
2888 
2889 out:
2890 	return err;
2891 }
2892 
2893 static struct proto packet_proto = {
2894 	.name	  = "PACKET",
2895 	.owner	  = THIS_MODULE,
2896 	.obj_size = sizeof(struct packet_sock),
2897 };
2898 
2899 /*
2900  *	Create a packet socket (SOCK_RAW, SOCK_DGRAM or SOCK_PACKET).
2901  */
2902 
2903 static int packet_create(struct net *net, struct socket *sock, int protocol,
2904 			 int kern)
2905 {
2906 	struct sock *sk;
2907 	struct packet_sock *po;
2908 	__be16 proto = (__force __be16)protocol; /* weird, but documented */
2909 	int err;
2910 
2911 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
2912 		return -EPERM;
2913 	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2914 	    sock->type != SOCK_PACKET)
2915 		return -ESOCKTNOSUPPORT;
2916 
2917 	sock->state = SS_UNCONNECTED;
2918 
2919 	err = -ENOBUFS;
2920 	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
2921 	if (sk == NULL)
2922 		goto out;
2923 
2924 	sock->ops = &packet_ops;
2925 	if (sock->type == SOCK_PACKET)
2926 		sock->ops = &packet_ops_spkt;
2927 
2928 	sock_init_data(sock, sk);
2929 
2930 	po = pkt_sk(sk);
2931 	sk->sk_family = PF_PACKET;
2932 	po->num = proto;
2933 	po->xmit = dev_queue_xmit;
2934 
2935 	err = packet_alloc_pending(po);
2936 	if (err)
2937 		goto out2;
2938 
2939 	packet_cached_dev_reset(po);
2940 
2941 	sk->sk_destruct = packet_sock_destruct;
2942 	sk_refcnt_debug_inc(sk);
2943 
2944 	/*
2945 	 *	Attach a protocol block
2946 	 */
2947 
2948 	spin_lock_init(&po->bind_lock);
2949 	mutex_init(&po->pg_vec_lock);
2950 	po->rollover = NULL;
2951 	po->prot_hook.func = packet_rcv;
2952 
2953 	if (sock->type == SOCK_PACKET)
2954 		po->prot_hook.func = packet_rcv_spkt;
2955 
2956 	po->prot_hook.af_packet_priv = sk;
2957 
2958 	if (proto) {
2959 		po->prot_hook.type = proto;
2960 		register_prot_hook(sk);
2961 	}
2962 
2963 	mutex_lock(&net->packet.sklist_lock);
2964 	sk_add_node_rcu(sk, &net->packet.sklist);
2965 	mutex_unlock(&net->packet.sklist_lock);
2966 
2967 	preempt_disable();
2968 	sock_prot_inuse_add(net, &packet_proto, 1);
2969 	preempt_enable();
2970 
2971 	return 0;
2972 out2:
2973 	sk_free(sk);
2974 out:
2975 	return err;
2976 }
2977 
2978 /*
2979  *	Pull a packet from our receive queue and hand it to the user.
2980  *	If necessary we block.
2981  */
2982 
2983 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2984 			  int flags)
2985 {
2986 	struct sock *sk = sock->sk;
2987 	struct sk_buff *skb;
2988 	int copied, err;
2989 	int vnet_hdr_len = 0;
2990 	unsigned int origlen = 0;
2991 
2992 	err = -EINVAL;
2993 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
2994 		goto out;
2995 
2996 #if 0
2997 	/* What error should we return now? EUNATTACH? */
2998 	if (pkt_sk(sk)->ifindex < 0)
2999 		return -ENODEV;
3000 #endif
3001 
3002 	if (flags & MSG_ERRQUEUE) {
3003 		err = sock_recv_errqueue(sk, msg, len,
3004 					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3005 		goto out;
3006 	}
3007 
3008 	/*
3009 	 *	Call the generic datagram receiver. This handles all sorts
3010 	 *	of horrible races and re-entrancy so we can forget about it
3011 	 *	in the protocol layers.
3012 	 *
3013 	 *	Now it will return ENETDOWN, if the device has just gone down,
3014 	 *	but then it will block.
3015 	 */
3016 
3017 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3018 
3019 	/*
3020 	 *	An error occurred, so return it. Because skb_recv_datagram()
3021 	 *	handles the blocking, we don't have to see or worry about
3022 	 *	blocking retries.
3023 	 */
3024 
3025 	if (skb == NULL)
3026 		goto out;
3027 
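	/* A packet has been consumed; re-check receive room so the
	 * rollover pressure flag can be cleared if space opened up.
	 */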
3028 	if (pkt_sk(sk)->pressure)
3029 		packet_rcv_has_room(pkt_sk(sk), NULL);
3030 
3031 	if (pkt_sk(sk)->has_vnet_hdr) {
3032 		struct virtio_net_hdr vnet_hdr = { 0 };
3033 
3034 		err = -EINVAL;
3035 		vnet_hdr_len = sizeof(vnet_hdr);
3036 		if (len < vnet_hdr_len)
3037 			goto out_free;
3038 
3039 		len -= vnet_hdr_len;
3040 
3041 		if (skb_is_gso(skb)) {
3042 			struct skb_shared_info *sinfo = skb_shinfo(skb);
3043 
3044 			/* This is a hint as to how much should be linear. */
3045 			vnet_hdr.hdr_len =
3046 				__cpu_to_virtio16(false, skb_headlen(skb));
3047 			vnet_hdr.gso_size =
3048 				__cpu_to_virtio16(false, sinfo->gso_size);
3049 			if (sinfo->gso_type & SKB_GSO_TCPV4)
3050 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
3051 			else if (sinfo->gso_type & SKB_GSO_TCPV6)
3052 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
3053 			else if (sinfo->gso_type & SKB_GSO_UDP)
3054 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
3055 			else if (sinfo->gso_type & SKB_GSO_FCOE)
3056 				goto out_free;
3057 			else
3058 				BUG();
3059 			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
3060 				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
3061 		} else
3062 			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
3063 
3064 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
3065 			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
3066 			vnet_hdr.csum_start = __cpu_to_virtio16(false,
3067 					  skb_checksum_start_offset(skb));
3068 			vnet_hdr.csum_offset = __cpu_to_virtio16(false,
3069 							 skb->csum_offset);
3070 		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3071 			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
3072 		} /* else everything is zero */
3073 
3074 		err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
3075 		if (err < 0)
3076 			goto out_free;
3077 	}
3078 
3079 	/* You lose any data beyond the buffer you gave. If this worries
3080 	 * a user program, it can ask the device for its MTU
3081 	 * anyway.
3082 	 */
3083 	copied = skb->len;
3084 	if (copied > len) {
3085 		copied = len;
3086 		msg->msg_flags |= MSG_TRUNC;
3087 	}
3088 
3089 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3090 	if (err)
3091 		goto out_free;
3092 
3093 	if (sock->type != SOCK_PACKET) {
3094 		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3095 
3096 		/* Original length was stored in sockaddr_ll fields */
3097 		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3098 		sll->sll_family = AF_PACKET;
3099 		sll->sll_protocol = skb->protocol;
3100 	}
3101 
3102 	sock_recv_ts_and_drops(msg, sk, skb);
3103 
3104 	if (msg->msg_name) {
3105 		/* If the address length field is there to be filled
3106 		 * in, we fill it in now.
3107 		 */
3108 		if (sock->type == SOCK_PACKET) {
3109 			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3110 			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3111 		} else {
3112 			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3113 
3114 			msg->msg_namelen = sll->sll_halen +
3115 				offsetof(struct sockaddr_ll, sll_addr);
3116 		}
3117 		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3118 		       msg->msg_namelen);
3119 	}
3120 
3121 	if (pkt_sk(sk)->auxdata) {
3122 		struct tpacket_auxdata aux;
3123 
3124 		aux.tp_status = TP_STATUS_USER;
3125 		if (skb->ip_summed == CHECKSUM_PARTIAL)
3126 			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3127 		else if (skb->pkt_type != PACKET_OUTGOING &&
3128 			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3129 			  skb_csum_unnecessary(skb)))
3130 			aux.tp_status |= TP_STATUS_CSUM_VALID;
3131 
3132 		aux.tp_len = origlen;
3133 		aux.tp_snaplen = skb->len;
3134 		aux.tp_mac = 0;
3135 		aux.tp_net = skb_network_offset(skb);
3136 		if (skb_vlan_tag_present(skb)) {
3137 			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3138 			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3139 			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3140 		} else {
3141 			aux.tp_vlan_tci = 0;
3142 			aux.tp_vlan_tpid = 0;
3143 		}
3144 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3145 	}
3146 
3147 	/*
3148 	 *	Free or return the buffer as appropriate. Again this
3149 	 *	hides all the races and re-entrancy issues from us.
3150 	 */
3151 	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3152 
3153 out_free:
3154 	skb_free_datagram(sk, skb);
3155 out:
3156 	return err;
3157 }
3158 
3159 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3160 			       int *uaddr_len, int peer)
3161 {
3162 	struct net_device *dev;
3163 	struct sock *sk	= sock->sk;
3164 
3165 	if (peer)
3166 		return -EOPNOTSUPP;
3167 
3168 	uaddr->sa_family = AF_PACKET;
3169 	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3170 	rcu_read_lock();
3171 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3172 	if (dev)
3173 		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3174 	rcu_read_unlock();
3175 	*uaddr_len = sizeof(*uaddr);
3176 
3177 	return 0;
3178 }
3179 
3180 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3181 			  int *uaddr_len, int peer)
3182 {
3183 	struct net_device *dev;
3184 	struct sock *sk = sock->sk;
3185 	struct packet_sock *po = pkt_sk(sk);
3186 	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3187 
3188 	if (peer)
3189 		return -EOPNOTSUPP;
3190 
3191 	sll->sll_family = AF_PACKET;
3192 	sll->sll_ifindex = po->ifindex;
3193 	sll->sll_protocol = po->num;
3194 	sll->sll_pkttype = 0;
3195 	rcu_read_lock();
3196 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3197 	if (dev) {
3198 		sll->sll_hatype = dev->type;
3199 		sll->sll_halen = dev->addr_len;
3200 		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3201 	} else {
3202 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3203 		sll->sll_halen = 0;
3204 	}
3205 	rcu_read_unlock();
3206 	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3207 
3208 	return 0;
3209 }
3210 
3211 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3212 			 int what)
3213 {
3214 	switch (i->type) {
3215 	case PACKET_MR_MULTICAST:
3216 		if (i->alen != dev->addr_len)
3217 			return -EINVAL;
3218 		if (what > 0)
3219 			return dev_mc_add(dev, i->addr);
3220 		else
3221 			return dev_mc_del(dev, i->addr);
3222 		break;
3223 	case PACKET_MR_PROMISC:
3224 		return dev_set_promiscuity(dev, what);
3225 	case PACKET_MR_ALLMULTI:
3226 		return dev_set_allmulti(dev, what);
3227 	case PACKET_MR_UNICAST:
3228 		if (i->alen != dev->addr_len)
3229 			return -EINVAL;
3230 		if (what > 0)
3231 			return dev_uc_add(dev, i->addr);
3232 		else
3233 			return dev_uc_del(dev, i->addr);
3234 		break;
3235 	default:
3236 		break;
3237 	}
3238 	return 0;
3239 }
3240 
3241 static void packet_dev_mclist_delete(struct net_device *dev,
3242 				     struct packet_mclist **mlp)
3243 {
3244 	struct packet_mclist *ml;
3245 
3246 	while ((ml = *mlp) != NULL) {
3247 		if (ml->ifindex == dev->ifindex) {
3248 			packet_dev_mc(dev, ml, -1);
3249 			*mlp = ml->next;
3250 			kfree(ml);
3251 		} else
3252 			mlp = &ml->next;
3253 	}
3254 }
3255 
3256 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3257 {
3258 	struct packet_sock *po = pkt_sk(sk);
3259 	struct packet_mclist *ml, *i;
3260 	struct net_device *dev;
3261 	int err;
3262 
3263 	rtnl_lock();
3264 
3265 	err = -ENODEV;
3266 	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3267 	if (!dev)
3268 		goto done;
3269 
3270 	err = -EINVAL;
3271 	if (mreq->mr_alen > dev->addr_len)
3272 		goto done;
3273 
3274 	err = -ENOBUFS;
3275 	i = kmalloc(sizeof(*i), GFP_KERNEL);
3276 	if (i == NULL)
3277 		goto done;
3278 
3279 	err = 0;
3280 	for (ml = po->mclist; ml; ml = ml->next) {
3281 		if (ml->ifindex == mreq->mr_ifindex &&
3282 		    ml->type == mreq->mr_type &&
3283 		    ml->alen == mreq->mr_alen &&
3284 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3285 			ml->count++;
3286 			/* Free the new element ... */
3287 			kfree(i);
3288 			goto done;
3289 		}
3290 	}
3291 
3292 	i->type = mreq->mr_type;
3293 	i->ifindex = mreq->mr_ifindex;
3294 	i->alen = mreq->mr_alen;
3295 	memcpy(i->addr, mreq->mr_address, i->alen);
3296 	i->count = 1;
3297 	i->next = po->mclist;
3298 	po->mclist = i;
3299 	err = packet_dev_mc(dev, i, 1);
3300 	if (err) {
3301 		po->mclist = i->next;
3302 		kfree(i);
3303 	}
3304 
3305 done:
3306 	rtnl_unlock();
3307 	return err;
3308 }
3309 
3310 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3311 {
3312 	struct packet_mclist *ml, **mlp;
3313 
3314 	rtnl_lock();
3315 
3316 	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3317 		if (ml->ifindex == mreq->mr_ifindex &&
3318 		    ml->type == mreq->mr_type &&
3319 		    ml->alen == mreq->mr_alen &&
3320 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3321 			if (--ml->count == 0) {
3322 				struct net_device *dev;
3323 				*mlp = ml->next;
3324 				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3325 				if (dev)
3326 					packet_dev_mc(dev, ml, -1);
3327 				kfree(ml);
3328 			}
3329 			break;
3330 		}
3331 	}
3332 	rtnl_unlock();
3333 	return 0;
3334 }
3335 
3336 static void packet_flush_mclist(struct sock *sk)
3337 {
3338 	struct packet_sock *po = pkt_sk(sk);
3339 	struct packet_mclist *ml;
3340 
3341 	if (!po->mclist)
3342 		return;
3343 
3344 	rtnl_lock();
3345 	while ((ml = po->mclist) != NULL) {
3346 		struct net_device *dev;
3347 
3348 		po->mclist = ml->next;
3349 		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3350 		if (dev != NULL)
3351 			packet_dev_mc(dev, ml, -1);
3352 		kfree(ml);
3353 	}
3354 	rtnl_unlock();
3355 }
3356 
3357 static int
3358 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3359 {
3360 	struct sock *sk = sock->sk;
3361 	struct packet_sock *po = pkt_sk(sk);
3362 	int ret;
3363 
3364 	if (level != SOL_PACKET)
3365 		return -ENOPROTOOPT;
3366 
3367 	switch (optname) {
3368 	case PACKET_ADD_MEMBERSHIP:
3369 	case PACKET_DROP_MEMBERSHIP:
3370 	{
3371 		struct packet_mreq_max mreq;
3372 		int len = optlen;
3373 		memset(&mreq, 0, sizeof(mreq));
3374 		if (len < sizeof(struct packet_mreq))
3375 			return -EINVAL;
3376 		if (len > sizeof(mreq))
3377 			len = sizeof(mreq);
3378 		if (copy_from_user(&mreq, optval, len))
3379 			return -EFAULT;
3380 		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3381 			return -EINVAL;
3382 		if (optname == PACKET_ADD_MEMBERSHIP)
3383 			ret = packet_mc_add(sk, &mreq);
3384 		else
3385 			ret = packet_mc_drop(sk, &mreq);
3386 		return ret;
3387 	}
3388 
3389 	case PACKET_RX_RING:
3390 	case PACKET_TX_RING:
3391 	{
3392 		union tpacket_req_u req_u;
3393 		int len;
3394 
3395 		switch (po->tp_version) {
3396 		case TPACKET_V1:
3397 		case TPACKET_V2:
3398 			len = sizeof(req_u.req);
3399 			break;
3400 		case TPACKET_V3:
3401 		default:
3402 			len = sizeof(req_u.req3);
3403 			break;
3404 		}
3405 		if (optlen < len)
3406 			return -EINVAL;
3407 		if (pkt_sk(sk)->has_vnet_hdr)
3408 			return -EINVAL;
3409 		if (copy_from_user(&req_u.req, optval, len))
3410 			return -EFAULT;
3411 		return packet_set_ring(sk, &req_u, 0,
3412 			optname == PACKET_TX_RING);
3413 	}
3414 	case PACKET_COPY_THRESH:
3415 	{
3416 		int val;
3417 
3418 		if (optlen != sizeof(val))
3419 			return -EINVAL;
3420 		if (copy_from_user(&val, optval, sizeof(val)))
3421 			return -EFAULT;
3422 
3423 		pkt_sk(sk)->copy_thresh = val;
3424 		return 0;
3425 	}
3426 	case PACKET_VERSION:
3427 	{
3428 		int val;
3429 
3430 		if (optlen != sizeof(val))
3431 			return -EINVAL;
3432 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3433 			return -EBUSY;
3434 		if (copy_from_user(&val, optval, sizeof(val)))
3435 			return -EFAULT;
3436 		switch (val) {
3437 		case TPACKET_V1:
3438 		case TPACKET_V2:
3439 		case TPACKET_V3:
3440 			po->tp_version = val;
3441 			return 0;
3442 		default:
3443 			return -EINVAL;
3444 		}
3445 	}
3446 	case PACKET_RESERVE:
3447 	{
3448 		unsigned int val;
3449 
3450 		if (optlen != sizeof(val))
3451 			return -EINVAL;
3452 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3453 			return -EBUSY;
3454 		if (copy_from_user(&val, optval, sizeof(val)))
3455 			return -EFAULT;
3456 		po->tp_reserve = val;
3457 		return 0;
3458 	}
3459 	case PACKET_LOSS:
3460 	{
3461 		unsigned int val;
3462 
3463 		if (optlen != sizeof(val))
3464 			return -EINVAL;
3465 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3466 			return -EBUSY;
3467 		if (copy_from_user(&val, optval, sizeof(val)))
3468 			return -EFAULT;
3469 		po->tp_loss = !!val;
3470 		return 0;
3471 	}
3472 	case PACKET_AUXDATA:
3473 	{
3474 		int val;
3475 
3476 		if (optlen < sizeof(val))
3477 			return -EINVAL;
3478 		if (copy_from_user(&val, optval, sizeof(val)))
3479 			return -EFAULT;
3480 
3481 		po->auxdata = !!val;
3482 		return 0;
3483 	}
3484 	case PACKET_ORIGDEV:
3485 	{
3486 		int val;
3487 
3488 		if (optlen < sizeof(val))
3489 			return -EINVAL;
3490 		if (copy_from_user(&val, optval, sizeof(val)))
3491 			return -EFAULT;
3492 
3493 		po->origdev = !!val;
3494 		return 0;
3495 	}
3496 	case PACKET_VNET_HDR:
3497 	{
3498 		int val;
3499 
3500 		if (sock->type != SOCK_RAW)
3501 			return -EINVAL;
3502 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3503 			return -EBUSY;
3504 		if (optlen < sizeof(val))
3505 			return -EINVAL;
3506 		if (copy_from_user(&val, optval, sizeof(val)))
3507 			return -EFAULT;
3508 
3509 		po->has_vnet_hdr = !!val;
3510 		return 0;
3511 	}
3512 	case PACKET_TIMESTAMP:
3513 	{
3514 		int val;
3515 
3516 		if (optlen != sizeof(val))
3517 			return -EINVAL;
3518 		if (copy_from_user(&val, optval, sizeof(val)))
3519 			return -EFAULT;
3520 
3521 		po->tp_tstamp = val;
3522 		return 0;
3523 	}
3524 	case PACKET_FANOUT:
3525 	{
3526 		int val;
3527 
3528 		if (optlen != sizeof(val))
3529 			return -EINVAL;
3530 		if (copy_from_user(&val, optval, sizeof(val)))
3531 			return -EFAULT;
3532 
3533 		return fanout_add(sk, val & 0xffff, val >> 16);
3534 	}
3535 	case PACKET_TX_HAS_OFF:
3536 	{
3537 		unsigned int val;
3538 
3539 		if (optlen != sizeof(val))
3540 			return -EINVAL;
3541 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3542 			return -EBUSY;
3543 		if (copy_from_user(&val, optval, sizeof(val)))
3544 			return -EFAULT;
3545 		po->tp_tx_has_off = !!val;
3546 		return 0;
3547 	}
3548 	case PACKET_QDISC_BYPASS:
3549 	{
3550 		int val;
3551 
3552 		if (optlen != sizeof(val))
3553 			return -EINVAL;
3554 		if (copy_from_user(&val, optval, sizeof(val)))
3555 			return -EFAULT;
3556 
3557 		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3558 		return 0;
3559 	}
3560 	default:
3561 		return -ENOPROTOOPT;
3562 	}
3563 }
3564 
3565 static int packet_getsockopt(struct socket *sock, int level, int optname,
3566 			     char __user *optval, int __user *optlen)
3567 {
3568 	int len;
3569 	int val, lv = sizeof(val);
3570 	struct sock *sk = sock->sk;
3571 	struct packet_sock *po = pkt_sk(sk);
3572 	void *data = &val;
3573 	union tpacket_stats_u st;
3574 	struct tpacket_rollover_stats rstats;
3575 
3576 	if (level != SOL_PACKET)
3577 		return -ENOPROTOOPT;
3578 
3579 	if (get_user(len, optlen))
3580 		return -EFAULT;
3581 
3582 	if (len < 0)
3583 		return -EINVAL;
3584 
3585 	switch (optname) {
3586 	case PACKET_STATISTICS:
3587 		spin_lock_bh(&sk->sk_receive_queue.lock);
3588 		memcpy(&st, &po->stats, sizeof(st));
3589 		memset(&po->stats, 0, sizeof(po->stats));
3590 		spin_unlock_bh(&sk->sk_receive_queue.lock);
3591 
3592 		if (po->tp_version == TPACKET_V3) {
3593 			lv = sizeof(struct tpacket_stats_v3);
3594 			st.stats3.tp_packets += st.stats3.tp_drops;
3595 			data = &st.stats3;
3596 		} else {
3597 			lv = sizeof(struct tpacket_stats);
3598 			st.stats1.tp_packets += st.stats1.tp_drops;
3599 			data = &st.stats1;
3600 		}
3601 
3602 		break;
3603 	case PACKET_AUXDATA:
3604 		val = po->auxdata;
3605 		break;
3606 	case PACKET_ORIGDEV:
3607 		val = po->origdev;
3608 		break;
3609 	case PACKET_VNET_HDR:
3610 		val = po->has_vnet_hdr;
3611 		break;
3612 	case PACKET_VERSION:
3613 		val = po->tp_version;
3614 		break;
3615 	case PACKET_HDRLEN:
3616 		if (len > sizeof(int))
3617 			len = sizeof(int);
3618 		if (copy_from_user(&val, optval, len))
3619 			return -EFAULT;
3620 		switch (val) {
3621 		case TPACKET_V1:
3622 			val = sizeof(struct tpacket_hdr);
3623 			break;
3624 		case TPACKET_V2:
3625 			val = sizeof(struct tpacket2_hdr);
3626 			break;
3627 		case TPACKET_V3:
3628 			val = sizeof(struct tpacket3_hdr);
3629 			break;
3630 		default:
3631 			return -EINVAL;
3632 		}
3633 		break;
3634 	case PACKET_RESERVE:
3635 		val = po->tp_reserve;
3636 		break;
3637 	case PACKET_LOSS:
3638 		val = po->tp_loss;
3639 		break;
3640 	case PACKET_TIMESTAMP:
3641 		val = po->tp_tstamp;
3642 		break;
3643 	case PACKET_FANOUT:
3644 		val = (po->fanout ?
3645 		       ((u32)po->fanout->id |
3646 			((u32)po->fanout->type << 16) |
3647 			((u32)po->fanout->flags << 24)) :
3648 		       0);
3649 		break;
3650 	case PACKET_ROLLOVER_STATS:
3651 		if (!po->rollover)
3652 			return -EINVAL;
3653 		rstats.tp_all = atomic_long_read(&po->rollover->num);
3654 		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3655 		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3656 		data = &rstats;
3657 		lv = sizeof(rstats);
3658 		break;
3659 	case PACKET_TX_HAS_OFF:
3660 		val = po->tp_tx_has_off;
3661 		break;
3662 	case PACKET_QDISC_BYPASS:
3663 		val = packet_use_direct_xmit(po);
3664 		break;
3665 	default:
3666 		return -ENOPROTOOPT;
3667 	}
3668 
3669 	if (len > lv)
3670 		len = lv;
3671 	if (put_user(len, optlen))
3672 		return -EFAULT;
3673 	if (copy_to_user(optval, data, len))
3674 		return -EFAULT;
3675 	return 0;
3676 }
3677 
3678 
3679 static int packet_notifier(struct notifier_block *this,
3680 			   unsigned long msg, void *ptr)
3681 {
3682 	struct sock *sk;
3683 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3684 	struct net *net = dev_net(dev);
3685 
3686 	rcu_read_lock();
3687 	sk_for_each_rcu(sk, &net->packet.sklist) {
3688 		struct packet_sock *po = pkt_sk(sk);
3689 
3690 		switch (msg) {
3691 		case NETDEV_UNREGISTER:
3692 			if (po->mclist)
3693 				packet_dev_mclist_delete(dev, &po->mclist);
3694 			/* fallthrough */
3695 
3696 		case NETDEV_DOWN:
3697 			if (dev->ifindex == po->ifindex) {
3698 				spin_lock(&po->bind_lock);
3699 				if (po->running) {
3700 					__unregister_prot_hook(sk, false);
3701 					sk->sk_err = ENETDOWN;
3702 					if (!sock_flag(sk, SOCK_DEAD))
3703 						sk->sk_error_report(sk);
3704 				}
3705 				if (msg == NETDEV_UNREGISTER) {
3706 					packet_cached_dev_reset(po);
3707 					po->ifindex = -1;
3708 					if (po->prot_hook.dev)
3709 						dev_put(po->prot_hook.dev);
3710 					po->prot_hook.dev = NULL;
3711 				}
3712 				spin_unlock(&po->bind_lock);
3713 			}
3714 			break;
3715 		case NETDEV_UP:
3716 			if (dev->ifindex == po->ifindex) {
3717 				spin_lock(&po->bind_lock);
3718 				if (po->num)
3719 					register_prot_hook(sk);
3720 				spin_unlock(&po->bind_lock);
3721 			}
3722 			break;
3723 		}
3724 	}
3725 	rcu_read_unlock();
3726 	return NOTIFY_DONE;
3727 }
3728 
3729 
3730 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3731 			unsigned long arg)
3732 {
3733 	struct sock *sk = sock->sk;
3734 
3735 	switch (cmd) {
3736 	case SIOCOUTQ:
3737 	{
3738 		int amount = sk_wmem_alloc_get(sk);
3739 
3740 		return put_user(amount, (int __user *)arg);
3741 	}
3742 	case SIOCINQ:
3743 	{
3744 		struct sk_buff *skb;
3745 		int amount = 0;
3746 
3747 		spin_lock_bh(&sk->sk_receive_queue.lock);
3748 		skb = skb_peek(&sk->sk_receive_queue);
3749 		if (skb)
3750 			amount = skb->len;
3751 		spin_unlock_bh(&sk->sk_receive_queue.lock);
3752 		return put_user(amount, (int __user *)arg);
3753 	}
3754 	case SIOCGSTAMP:
3755 		return sock_get_timestamp(sk, (struct timeval __user *)arg);
3756 	case SIOCGSTAMPNS:
3757 		return sock_get_timestampns(sk, (struct timespec __user *)arg);
3758 
3759 #ifdef CONFIG_INET
3760 	case SIOCADDRT:
3761 	case SIOCDELRT:
3762 	case SIOCDARP:
3763 	case SIOCGARP:
3764 	case SIOCSARP:
3765 	case SIOCGIFADDR:
3766 	case SIOCSIFADDR:
3767 	case SIOCGIFBRDADDR:
3768 	case SIOCSIFBRDADDR:
3769 	case SIOCGIFNETMASK:
3770 	case SIOCSIFNETMASK:
3771 	case SIOCGIFDSTADDR:
3772 	case SIOCSIFDSTADDR:
3773 	case SIOCSIFFLAGS:
3774 		return inet_dgram_ops.ioctl(sock, cmd, arg);
3775 #endif
3776 
3777 	default:
3778 		return -ENOIOCTLCMD;
3779 	}
3780 	return 0;
3781 }
3782 
3783 static unsigned int packet_poll(struct file *file, struct socket *sock,
3784 				poll_table *wait)
3785 {
3786 	struct sock *sk = sock->sk;
3787 	struct packet_sock *po = pkt_sk(sk);
3788 	unsigned int mask = datagram_poll(file, sock, wait);
3789 
3790 	spin_lock_bh(&sk->sk_receive_queue.lock);
3791 	if (po->rx_ring.pg_vec) {
3792 		if (!packet_previous_rx_frame(po, &po->rx_ring,
3793 			TP_STATUS_KERNEL))
3794 			mask |= POLLIN | POLLRDNORM;
3795 	}
3796 	if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
3797 		po->pressure = 0;
3798 	spin_unlock_bh(&sk->sk_receive_queue.lock);
3799 	spin_lock_bh(&sk->sk_write_queue.lock);
3800 	if (po->tx_ring.pg_vec) {
3801 		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3802 			mask |= POLLOUT | POLLWRNORM;
3803 	}
3804 	spin_unlock_bh(&sk->sk_write_queue.lock);
3805 	return mask;
3806 }
3807 
3808 
3809 /* Dirty? Well, I still have not found a better way to account
3810  * for user mmaps.
3811  */
3812 
3813 static void packet_mm_open(struct vm_area_struct *vma)
3814 {
3815 	struct file *file = vma->vm_file;
3816 	struct socket *sock = file->private_data;
3817 	struct sock *sk = sock->sk;
3818 
3819 	if (sk)
3820 		atomic_inc(&pkt_sk(sk)->mapped);
3821 }
3822 
3823 static void packet_mm_close(struct vm_area_struct *vma)
3824 {
3825 	struct file *file = vma->vm_file;
3826 	struct socket *sock = file->private_data;
3827 	struct sock *sk = sock->sk;
3828 
3829 	if (sk)
3830 		atomic_dec(&pkt_sk(sk)->mapped);
3831 }
3832 
3833 static const struct vm_operations_struct packet_mmap_ops = {
3834 	.open	=	packet_mm_open,
3835 	.close	=	packet_mm_close,
3836 };
3837 
3838 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3839 			unsigned int len)
3840 {
3841 	int i;
3842 
3843 	for (i = 0; i < len; i++) {
3844 		if (likely(pg_vec[i].buffer)) {
3845 			if (is_vmalloc_addr(pg_vec[i].buffer))
3846 				vfree(pg_vec[i].buffer);
3847 			else
3848 				free_pages((unsigned long)pg_vec[i].buffer,
3849 					   order);
3850 			pg_vec[i].buffer = NULL;
3851 		}
3852 	}
3853 	kfree(pg_vec);
3854 }
3855 
3856 static char *alloc_one_pg_vec_page(unsigned long order)
3857 {
3858 	char *buffer;
3859 	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3860 			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3861 
3862 	buffer = (char *) __get_free_pages(gfp_flags, order);
3863 	if (buffer)
3864 		return buffer;
3865 
3866 	/* __get_free_pages failed, fall back to vmalloc */
3867 	buffer = vzalloc((1 << order) * PAGE_SIZE);
3868 	if (buffer)
3869 		return buffer;
3870 
3871 	/* vmalloc failed, let's dig into swap here */
3872 	gfp_flags &= ~__GFP_NORETRY;
3873 	buffer = (char *) __get_free_pages(gfp_flags, order);
3874 	if (buffer)
3875 		return buffer;
3876 
3877 	/* complete and utter failure */
3878 	return NULL;
3879 }
3880 
3881 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3882 {
3883 	unsigned int block_nr = req->tp_block_nr;
3884 	struct pgv *pg_vec;
3885 	int i;
3886 
3887 	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3888 	if (unlikely(!pg_vec))
3889 		goto out;
3890 
3891 	for (i = 0; i < block_nr; i++) {
3892 		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
3893 		if (unlikely(!pg_vec[i].buffer))
3894 			goto out_free_pgvec;
3895 	}
3896 
3897 out:
3898 	return pg_vec;
3899 
3900 out_free_pgvec:
3901 	free_pg_vec(pg_vec, order, block_nr);
3902 	pg_vec = NULL;
3903 	goto out;
3904 }
3905 
3906 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3907 		int closing, int tx_ring)
3908 {
3909 	struct pgv *pg_vec = NULL;
3910 	struct packet_sock *po = pkt_sk(sk);
3911 	int was_running, order = 0;
3912 	struct packet_ring_buffer *rb;
3913 	struct sk_buff_head *rb_queue;
3914 	__be16 num;
3915 	int err = -EINVAL;
3916 	/* Added to minimize code churn */
3917 	struct tpacket_req *req = &req_u->req;
3918 
3919 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3920 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3921 		WARN(1, "Tx-ring is not supported.\n");
3922 		goto out;
3923 	}
3924 
3925 	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3926 	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3927 
3928 	err = -EBUSY;
3929 	if (!closing) {
3930 		if (atomic_read(&po->mapped))
3931 			goto out;
3932 		if (packet_read_pending(rb))
3933 			goto out;
3934 	}
3935 
3936 	if (req->tp_block_nr) {
3937 		/* Sanity tests and some calculations */
3938 		err = -EBUSY;
3939 		if (unlikely(rb->pg_vec))
3940 			goto out;
3941 
3942 		switch (po->tp_version) {
3943 		case TPACKET_V1:
3944 			po->tp_hdrlen = TPACKET_HDRLEN;
3945 			break;
3946 		case TPACKET_V2:
3947 			po->tp_hdrlen = TPACKET2_HDRLEN;
3948 			break;
3949 		case TPACKET_V3:
3950 			po->tp_hdrlen = TPACKET3_HDRLEN;
3951 			break;
3952 		}
3953 
3954 		err = -EINVAL;
3955 		if (unlikely((int)req->tp_block_size <= 0))
3956 			goto out;
3957 		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
3958 			goto out;
3959 		if (po->tp_version >= TPACKET_V3 &&
3960 		    (int)(req->tp_block_size -
3961 			  BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
3962 			goto out;
3963 		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
3964 					po->tp_reserve))
3965 			goto out;
3966 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
3967 			goto out;
3968 
3969 		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
3970 		if (unlikely(rb->frames_per_block == 0))
3971 			goto out;
3972 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3973 					req->tp_frame_nr))
3974 			goto out;
3975 
3976 		err = -ENOMEM;
3977 		order = get_order(req->tp_block_size);
3978 		pg_vec = alloc_pg_vec(req, order);
3979 		if (unlikely(!pg_vec))
3980 			goto out;
3981 		switch (po->tp_version) {
3982 		case TPACKET_V3:
3983 			/* The transmit path is not supported.  We
3984 			 * checked this above, but stay paranoid.
3985 			 */
3986 			if (!tx_ring)
3987 				init_prb_bdqc(po, rb, pg_vec, req_u);
3988 			break;
3989 		default:
3990 			break;
3991 		}
3992 	} else {
3993 		/* tp_block_nr == 0 requests teardown; a non-zero
3994 		 * tp_frame_nr makes no sense in that case. */
3995 		err = -EINVAL;
3996 		if (unlikely(req->tp_frame_nr))
3997 			goto out;
3998 	}
3999 
4000 	lock_sock(sk);
4001 
4002 	/* Detach socket from network */
4003 	spin_lock(&po->bind_lock);
4004 	was_running = po->running;
4005 	num = po->num;
4006 	if (was_running) {
4007 		po->num = 0;
4008 		__unregister_prot_hook(sk, false);
4009 	}
4010 	spin_unlock(&po->bind_lock);
4011 
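	/* Wait for any receive path still running under RCU to finish
	 * before the ring buffers are swapped out underneath it.
	 */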
4012 	synchronize_net();
4013 
4014 	err = -EBUSY;
4015 	mutex_lock(&po->pg_vec_lock);
4016 	if (closing || atomic_read(&po->mapped) == 0) {
4017 		err = 0;
4018 		spin_lock_bh(&rb_queue->lock);
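		/* Publish the new ring: the old block vector lands in
		 * pg_vec and is freed once the locks are dropped.
		 */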
4019 		swap(rb->pg_vec, pg_vec);
4020 		rb->frame_max = (req->tp_frame_nr - 1);
4021 		rb->head = 0;
4022 		rb->frame_size = req->tp_frame_size;
4023 		spin_unlock_bh(&rb_queue->lock);
4024 
4025 		swap(rb->pg_vec_order, order);
4026 		swap(rb->pg_vec_len, req->tp_block_nr);
4027 
4028 		rb->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
4029 		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4030 						tpacket_rcv : packet_rcv;
4031 		skb_queue_purge(rb_queue);
4032 		if (atomic_read(&po->mapped))
4033 			pr_err("packet_mmap: vma is busy: %d\n",
4034 			       atomic_read(&po->mapped));
4035 	}
4036 	mutex_unlock(&po->pg_vec_lock);
4037 
4038 	spin_lock(&po->bind_lock);
4039 	if (was_running) {
4040 		po->num = num;
4041 		register_prot_hook(sk);
4042 	}
4043 	spin_unlock(&po->bind_lock);
4044 	if (closing && (po->tp_version > TPACKET_V2)) {
4045 		/* Only V3 rx rings have a retire-blocks timer to shut down */
4046 		if (!tx_ring)
4047 			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
4048 	}
4049 	release_sock(sk);
4050 
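	/* pg_vec now holds either the replaced ring or, on error, the
	 * never-installed new one; free it outside the socket lock.
	 */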
4051 	if (pg_vec)
4052 		free_pg_vec(pg_vec, order, req->tp_block_nr);
4053 out:
4054 	return err;
4055 }
4056 
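/*
 * Map the rx ring followed by the tx ring (whichever exist) as one
 * contiguous area.  The caller must map the exact combined size at
 * offset zero; every page is inserted up front via vm_insert_page().
 */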
4057 static int packet_mmap(struct file *file, struct socket *sock,
4058 		struct vm_area_struct *vma)
4059 {
4060 	struct sock *sk = sock->sk;
4061 	struct packet_sock *po = pkt_sk(sk);
4062 	unsigned long size, expected_size;
4063 	struct packet_ring_buffer *rb;
4064 	unsigned long start;
4065 	int err = -EINVAL;
4066 	int i;
4067 
4068 	if (vma->vm_pgoff)
4069 		return -EINVAL;
4070 
4071 	mutex_lock(&po->pg_vec_lock);
4072 
4073 	expected_size = 0;
4074 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4075 		if (rb->pg_vec) {
4076 			expected_size += rb->pg_vec_len
4077 						* rb->pg_vec_pages
4078 						* PAGE_SIZE;
4079 		}
4080 	}
4081 
4082 	if (expected_size == 0)
4083 		goto out;
4084 
4085 	size = vma->vm_end - vma->vm_start;
4086 	if (size != expected_size)
4087 		goto out;
4088 
4089 	start = vma->vm_start;
4090 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4091 		if (rb->pg_vec == NULL)
4092 			continue;
4093 
4094 		for (i = 0; i < rb->pg_vec_len; i++) {
4095 			struct page *page;
4096 			void *kaddr = rb->pg_vec[i].buffer;
4097 			int pg_num;
4098 
4099 			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4100 				page = pgv_to_page(kaddr);
4101 				err = vm_insert_page(vma, start, page);
4102 				if (unlikely(err))
4103 					goto out;
4104 				start += PAGE_SIZE;
4105 				kaddr += PAGE_SIZE;
4106 			}
4107 		}
4108 	}
4109 
4110 	atomic_inc(&po->mapped);
4111 	vma->vm_ops = &packet_mmap_ops;
4112 	err = 0;
4113 
4114 out:
4115 	mutex_unlock(&po->pg_vec_lock);
4116 	return err;
4117 }
4118 
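/*
 * Ops for legacy SOCK_PACKET sockets: no socket options and no ring
 * mmap support, unlike packet_ops below.
 */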
4119 static const struct proto_ops packet_ops_spkt = {
4120 	.family =	PF_PACKET,
4121 	.owner =	THIS_MODULE,
4122 	.release =	packet_release,
4123 	.bind =		packet_bind_spkt,
4124 	.connect =	sock_no_connect,
4125 	.socketpair =	sock_no_socketpair,
4126 	.accept =	sock_no_accept,
4127 	.getname =	packet_getname_spkt,
4128 	.poll =		datagram_poll,
4129 	.ioctl =	packet_ioctl,
4130 	.listen =	sock_no_listen,
4131 	.shutdown =	sock_no_shutdown,
4132 	.setsockopt =	sock_no_setsockopt,
4133 	.getsockopt =	sock_no_getsockopt,
4134 	.sendmsg =	packet_sendmsg_spkt,
4135 	.recvmsg =	packet_recvmsg,
4136 	.mmap =		sock_no_mmap,
4137 	.sendpage =	sock_no_sendpage,
4138 };
4139 
4140 static const struct proto_ops packet_ops = {
4141 	.family =	PF_PACKET,
4142 	.owner =	THIS_MODULE,
4143 	.release =	packet_release,
4144 	.bind =		packet_bind,
4145 	.connect =	sock_no_connect,
4146 	.socketpair =	sock_no_socketpair,
4147 	.accept =	sock_no_accept,
4148 	.getname =	packet_getname,
4149 	.poll =		packet_poll,
4150 	.ioctl =	packet_ioctl,
4151 	.listen =	sock_no_listen,
4152 	.shutdown =	sock_no_shutdown,
4153 	.setsockopt =	packet_setsockopt,
4154 	.getsockopt =	packet_getsockopt,
4155 	.sendmsg =	packet_sendmsg,
4156 	.recvmsg =	packet_recvmsg,
4157 	.mmap =		packet_mmap,
4158 	.sendpage =	sock_no_sendpage,
4159 };
4160 
4161 static const struct net_proto_family packet_family_ops = {
4162 	.family =	PF_PACKET,
4163 	.create =	packet_create,
4164 	.owner	=	THIS_MODULE,
4165 };
4166 
4167 static struct notifier_block packet_netdev_notifier = {
4168 	.notifier_call =	packet_notifier,
4169 };
4170 
4171 #ifdef CONFIG_PROC_FS
4172 
4173 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4174 	__acquires(RCU)
4175 {
4176 	struct net *net = seq_file_net(seq);
4177 
4178 	rcu_read_lock();
4179 	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4180 }
4181 
4182 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4183 {
4184 	struct net *net = seq_file_net(seq);
4185 	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4186 }
4187 
4188 static void packet_seq_stop(struct seq_file *seq, void *v)
4189 	__releases(RCU)
4190 {
4191 	rcu_read_unlock();
4192 }
4193 
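/*
 * Emit one line of /proc/net/packet per socket.  An illustrative line,
 * matching the header printed below (values are made up):
 *
 *	ffff8800b8a32c00 3      3    0003   2     1 0      1000   16034
 */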
4194 static int packet_seq_show(struct seq_file *seq, void *v)
4195 {
4196 	if (v == SEQ_START_TOKEN)
4197 		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4198 	else {
4199 		struct sock *s = sk_entry(v);
4200 		const struct packet_sock *po = pkt_sk(s);
4201 
4202 		seq_printf(seq,
4203 			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4204 			   s,
4205 			   atomic_read(&s->sk_refcnt),
4206 			   s->sk_type,
4207 			   ntohs(po->num),
4208 			   po->ifindex,
4209 			   po->running,
4210 			   atomic_read(&s->sk_rmem_alloc),
4211 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4212 			   sock_i_ino(s));
4213 	}
4214 
4215 	return 0;
4216 }
4217 
4218 static const struct seq_operations packet_seq_ops = {
4219 	.start	= packet_seq_start,
4220 	.next	= packet_seq_next,
4221 	.stop	= packet_seq_stop,
4222 	.show	= packet_seq_show,
4223 };
4224 
4225 static int packet_seq_open(struct inode *inode, struct file *file)
4226 {
4227 	return seq_open_net(inode, file, &packet_seq_ops,
4228 			    sizeof(struct seq_net_private));
4229 }
4230 
4231 static const struct file_operations packet_seq_fops = {
4232 	.owner		= THIS_MODULE,
4233 	.open		= packet_seq_open,
4234 	.read		= seq_read,
4235 	.llseek		= seq_lseek,
4236 	.release	= seq_release_net,
4237 };
4238 
4239 #endif
4240 
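/* Per-namespace init: the socket list plus its /proc/net/packet view. */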
4241 static int __net_init packet_net_init(struct net *net)
4242 {
4243 	mutex_init(&net->packet.sklist_lock);
4244 	INIT_HLIST_HEAD(&net->packet.sklist);
4245 
4246 	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4247 		return -ENOMEM;
4248 
4249 	return 0;
4250 }
4251 
4252 static void __net_exit packet_net_exit(struct net *net)
4253 {
4254 	remove_proc_entry("packet", net->proc_net);
4255 }
4256 
4257 static struct pernet_operations packet_net_ops = {
4258 	.init = packet_net_init,
4259 	.exit = packet_net_exit,
4260 };
4261 
4263 static void __exit packet_exit(void)
4264 {
4265 	unregister_netdevice_notifier(&packet_netdev_notifier);
4266 	unregister_pernet_subsys(&packet_net_ops);
4267 	sock_unregister(PF_PACKET);
4268 	proto_unregister(&packet_proto);
4269 }
4270 
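/*
 * Note that only proto_register() has its return value checked here;
 * the remaining registrations are assumed not to fail during init.
 */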
4271 static int __init packet_init(void)
4272 {
4273 	int rc = proto_register(&packet_proto, 0);
4274 
4275 	if (rc != 0)
4276 		goto out;
4277 
4278 	sock_register(&packet_family_ops);
4279 	register_pernet_subsys(&packet_net_ops);
4280 	register_netdevice_notifier(&packet_netdev_notifier);
4281 out:
4282 	return rc;
4283 }
4284 
4285 module_init(packet_init);
4286 module_exit(packet_exit);
4287 MODULE_LICENSE("GPL");
4288 MODULE_ALIAS_NETPROTO(PF_PACKET);
4289