xref: /openbmc/linux/net/packet/af_packet.c (revision d7a3d85e)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		PACKET - implements raw packet sockets.
7  *
8  * Authors:	Ross Biro
9  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
11  *
12  * Fixes:
13  *		Alan Cox	:	verify_area() now used correctly
14  *		Alan Cox	:	new skbuff lists, look ma no backlogs!
15  *		Alan Cox	:	tidied skbuff lists.
16  *		Alan Cox	:	Now uses generic datagram routines I
17  *					added. Also fixed the peek/read crash
18  *					from all old Linux datagram code.
19  *		Alan Cox	:	Uses the improved datagram code.
20  *		Alan Cox	:	Added NULL's for socket options.
21  *		Alan Cox	:	Re-commented the code.
22  *		Alan Cox	:	Use new kernel side addressing
23  *		Rob Janssen	:	Correct MTU usage.
24  *		Dave Platt	:	Counter leaks caused by incorrect
25  *					interrupt locking and some slightly
26  *					dubious gcc output. Can you read
27  *					compiler: it said _VOLATILE_
28  *	Richard Kooijman	:	Timestamp fixes.
29  *		Alan Cox	:	New buffers. Use sk->mac.raw.
30  *		Alan Cox	:	sendmsg/recvmsg support.
31  *		Alan Cox	:	Protocol setting support
32  *	Alexey Kuznetsov	:	Untied from IPv4 stack.
33  *	Cyrus Durgin		:	Fixed kerneld for kmod.
34  *	Michal Ostrowski        :       Module initialization cleanup.
35  *         Ulises Alonso        :       Frame number limit removal and
36  *                                      packet_set_ring memory leak.
37  *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
38  *					The convention is that longer addresses
39  *					will simply extend the hardware address
40  *					byte arrays at the end of sockaddr_ll
41  *					and packet_mreq.
42  *		Johann Baudy	:	Added TX RING.
43  *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
44  *					layer.
45  *					Copyright (C) 2011, <lokec@ccs.neu.edu>
46  *
47  *
48  *		This program is free software; you can redistribute it and/or
49  *		modify it under the terms of the GNU General Public License
50  *		as published by the Free Software Foundation; either version
51  *		2 of the License, or (at your option) any later version.
52  *
53  */
54 
55 #include <linux/types.h>
56 #include <linux/mm.h>
57 #include <linux/capability.h>
58 #include <linux/fcntl.h>
59 #include <linux/socket.h>
60 #include <linux/in.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/if_packet.h>
64 #include <linux/wireless.h>
65 #include <linux/kernel.h>
66 #include <linux/kmod.h>
67 #include <linux/slab.h>
68 #include <linux/vmalloc.h>
69 #include <net/net_namespace.h>
70 #include <net/ip.h>
71 #include <net/protocol.h>
72 #include <linux/skbuff.h>
73 #include <net/sock.h>
74 #include <linux/errno.h>
75 #include <linux/timer.h>
76 #include <asm/uaccess.h>
77 #include <asm/ioctls.h>
78 #include <asm/page.h>
79 #include <asm/cacheflush.h>
80 #include <asm/io.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/poll.h>
84 #include <linux/module.h>
85 #include <linux/init.h>
86 #include <linux/mutex.h>
87 #include <linux/if_vlan.h>
88 #include <linux/virtio_net.h>
89 #include <linux/errqueue.h>
90 #include <linux/net_tstamp.h>
91 #include <linux/percpu.h>
92 #ifdef CONFIG_INET
93 #include <net/inet_common.h>
94 #endif
95 
96 #include "internal.h"
97 
98 /*
99    Assumptions:
100    - if a device has no dev->hard_header routine, it adds and removes the ll
101      header itself. In this case the ll header is invisible outside of the
102      device, but higher levels should still reserve dev->hard_header_len.
103      Some devices are clever enough to reallocate the skb when the header
104      will not fit into the reserved space (tunnels); others are silly
105      (PPP).
106    - a packet socket receives packets with the ll header already pulled,
107      so SOCK_RAW should push it back.
108 
109 On receive:
110 -----------
111 
112 Incoming, dev->hard_header!=NULL
113    mac_header -> ll header
114    data       -> data
115 
116 Outgoing, dev->hard_header!=NULL
117    mac_header -> ll header
118    data       -> ll header
119 
120 Incoming, dev->hard_header==NULL
121    mac_header -> UNKNOWN position. It is very likely that it points to the ll
122 		 header.  PPP does this, which is wrong, because it introduces
123 		 asymmetry between the rx and tx paths.
124    data       -> data
125 
126 Outgoing, dev->hard_header==NULL
127    mac_header -> data. ll header is still not built!
128    data       -> data
129 
130 In summary:
131   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
132 
133 
134 On transmit:
135 ------------
136 
137 dev->hard_header != NULL
138    mac_header -> ll header
139    data       -> ll header
140 
141 dev->hard_header == NULL (ll header is added by device, we cannot control it)
142    mac_header -> data
143    data       -> data
144 
145    We should set nh.raw on output to the correct position;
146    the packet classifier depends on it.
147  */
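/* Illustrative sketch (user space, not part of this file): the header
 * placement described above is what a packet-socket consumer sees.  A
 * SOCK_RAW socket returns frames starting at the link-layer header, a
 * SOCK_DGRAM socket returns them starting at the network header.  The
 * helper below is a hypothetical example, assumes the usual user-space
 * headers, and is wrapped in #if 0 so it is never built here.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static int open_packet_socket(int type)
{
	/* type is SOCK_RAW (ll header included in received frames) or
	 * SOCK_DGRAM (ll header stripped, described via sockaddr_ll).
	 */
	return socket(AF_PACKET, type, htons(ETH_P_ALL));
}
#endif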
148 
149 /* Private packet socket structures. */
150 
151 /* identical to struct packet_mreq except it has
152  * a longer address field.
153  */
154 struct packet_mreq_max {
155 	int		mr_ifindex;
156 	unsigned short	mr_type;
157 	unsigned short	mr_alen;
158 	unsigned char	mr_address[MAX_ADDR_LEN];
159 };
160 
161 union tpacket_uhdr {
162 	struct tpacket_hdr  *h1;
163 	struct tpacket2_hdr *h2;
164 	struct tpacket3_hdr *h3;
165 	void *raw;
166 };
167 
168 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
169 		int closing, int tx_ring);
170 
171 #define V3_ALIGNMENT	(8)
172 
173 #define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
174 
175 #define BLK_PLUS_PRIV(sz_of_priv) \
176 	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
177 
178 #define PGV_FROM_VMALLOC 1
179 
180 #define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
181 #define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
182 #define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
183 #define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
184 #define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
185 #define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
186 #define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
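/* Rough picture of the TPACKET_V3 block that the accessors above index
 * into, as laid out by prb_open_block() (illustrative only):
 *
 *   pg_vec[i].buffer
 *   +---------------------------+
 *   | tpacket_block_desc        |  BLK_HDR_LEN (8-byte aligned header);
 *   +---------------------------+  BLOCK_O2PRIV points just past it
 *   | user private area         |  blk_sizeof_priv bytes
 *   +---------------------------+  BLOCK_O2FP: offset to first packet
 *   | tpacket3_hdr + frame      |
 *   | tpacket3_hdr + frame ...  |
 *   +---------------------------+  pkblk_start + kblk_size
 */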
187 
188 struct packet_sock;
189 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
190 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
191 		       struct packet_type *pt, struct net_device *orig_dev);
192 
193 static void *packet_previous_frame(struct packet_sock *po,
194 		struct packet_ring_buffer *rb,
195 		int status);
196 static void packet_increment_head(struct packet_ring_buffer *buff);
197 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
198 			struct tpacket_block_desc *);
199 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
200 			struct packet_sock *);
201 static void prb_retire_current_block(struct tpacket_kbdq_core *,
202 		struct packet_sock *, unsigned int status);
203 static int prb_queue_frozen(struct tpacket_kbdq_core *);
204 static void prb_open_block(struct tpacket_kbdq_core *,
205 		struct tpacket_block_desc *);
206 static void prb_retire_rx_blk_timer_expired(unsigned long);
207 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
208 static void prb_init_blk_timer(struct packet_sock *,
209 		struct tpacket_kbdq_core *,
210 		void (*func) (unsigned long));
211 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
212 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
213 		struct tpacket3_hdr *);
214 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
215 		struct tpacket3_hdr *);
216 static void packet_flush_mclist(struct sock *sk);
217 
218 struct packet_skb_cb {
219 	union {
220 		struct sockaddr_pkt pkt;
221 		union {
222 			/* Trick: alias skb original length with
223 			 * ll.sll_family and ll.protocol in order
224 			 * to save room.
225 			 */
226 			unsigned int origlen;
227 			struct sockaddr_ll ll;
228 		};
229 	} sa;
230 };
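/* The aliasing above works because sll_family and sll_protocol are the
 * first two 16-bit fields of struct sockaddr_ll, so the 32-bit origlen
 * overlays exactly the bytes that packet_recvmsg() overwrites later when
 * it fills in the address for delivery to user space (see the origlen
 * note in packet_rcv()).
 */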
231 
232 #define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
233 
234 #define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
235 #define GET_PBLOCK_DESC(x, bid)	\
236 	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
237 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
238 	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
239 #define GET_NEXT_PRB_BLK_NUM(x) \
240 	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
241 	((x)->kactive_blk_num+1) : 0)
242 
243 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
244 static void __fanout_link(struct sock *sk, struct packet_sock *po);
245 
246 static int packet_direct_xmit(struct sk_buff *skb)
247 {
248 	struct net_device *dev = skb->dev;
249 	netdev_features_t features;
250 	struct netdev_queue *txq;
251 	int ret = NETDEV_TX_BUSY;
252 
253 	if (unlikely(!netif_running(dev) ||
254 		     !netif_carrier_ok(dev)))
255 		goto drop;
256 
257 	features = netif_skb_features(skb);
258 	if (skb_needs_linearize(skb, features) &&
259 	    __skb_linearize(skb))
260 		goto drop;
261 
262 	txq = skb_get_tx_queue(dev, skb);
263 
264 	local_bh_disable();
265 
266 	HARD_TX_LOCK(dev, txq, smp_processor_id());
267 	if (!netif_xmit_frozen_or_drv_stopped(txq))
268 		ret = netdev_start_xmit(skb, dev, txq, false);
269 	HARD_TX_UNLOCK(dev, txq);
270 
271 	local_bh_enable();
272 
273 	if (!dev_xmit_complete(ret))
274 		kfree_skb(skb);
275 
276 	return ret;
277 drop:
278 	atomic_long_inc(&dev->tx_dropped);
279 	kfree_skb(skb);
280 	return NET_XMIT_DROP;
281 }
282 
283 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
284 {
285 	struct net_device *dev;
286 
287 	rcu_read_lock();
288 	dev = rcu_dereference(po->cached_dev);
289 	if (likely(dev))
290 		dev_hold(dev);
291 	rcu_read_unlock();
292 
293 	return dev;
294 }
295 
296 static void packet_cached_dev_assign(struct packet_sock *po,
297 				     struct net_device *dev)
298 {
299 	rcu_assign_pointer(po->cached_dev, dev);
300 }
301 
302 static void packet_cached_dev_reset(struct packet_sock *po)
303 {
304 	RCU_INIT_POINTER(po->cached_dev, NULL);
305 }
306 
307 static bool packet_use_direct_xmit(const struct packet_sock *po)
308 {
309 	return po->xmit == packet_direct_xmit;
310 }
311 
312 static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
313 {
314 	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
315 }
316 
317 static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
318 {
319 	const struct net_device_ops *ops = dev->netdev_ops;
320 	u16 queue_index;
321 
322 	if (ops->ndo_select_queue) {
323 		queue_index = ops->ndo_select_queue(dev, skb, NULL,
324 						    __packet_pick_tx_queue);
325 		queue_index = netdev_cap_txqueue(dev, queue_index);
326 	} else {
327 		queue_index = __packet_pick_tx_queue(dev, skb);
328 	}
329 
330 	skb_set_queue_mapping(skb, queue_index);
331 }
332 
333 /* register_prot_hook must be invoked with the po->bind_lock held,
334  * or from a context in which asynchronous accesses to the packet
335  * socket is not possible (packet_create()).
336  */
337 static void register_prot_hook(struct sock *sk)
338 {
339 	struct packet_sock *po = pkt_sk(sk);
340 
341 	if (!po->running) {
342 		if (po->fanout)
343 			__fanout_link(sk, po);
344 		else
345 			dev_add_pack(&po->prot_hook);
346 
347 		sock_hold(sk);
348 		po->running = 1;
349 	}
350 }
351 
352 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
353  * held.   If the sync parameter is true, we will temporarily drop
354  * the po->bind_lock and do a synchronize_net to make sure no
355  * asynchronous packet processing paths still refer to the elements
356  * of po->prot_hook.  If the sync parameter is false, it is the
357  * callers responsibility to take care of this.
358  */
359 static void __unregister_prot_hook(struct sock *sk, bool sync)
360 {
361 	struct packet_sock *po = pkt_sk(sk);
362 
363 	po->running = 0;
364 
365 	if (po->fanout)
366 		__fanout_unlink(sk, po);
367 	else
368 		__dev_remove_pack(&po->prot_hook);
369 
370 	__sock_put(sk);
371 
372 	if (sync) {
373 		spin_unlock(&po->bind_lock);
374 		synchronize_net();
375 		spin_lock(&po->bind_lock);
376 	}
377 }
378 
379 static void unregister_prot_hook(struct sock *sk, bool sync)
380 {
381 	struct packet_sock *po = pkt_sk(sk);
382 
383 	if (po->running)
384 		__unregister_prot_hook(sk, sync);
385 }
386 
387 static inline struct page * __pure pgv_to_page(void *addr)
388 {
389 	if (is_vmalloc_addr(addr))
390 		return vmalloc_to_page(addr);
391 	return virt_to_page(addr);
392 }
393 
394 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
395 {
396 	union tpacket_uhdr h;
397 
398 	h.raw = frame;
399 	switch (po->tp_version) {
400 	case TPACKET_V1:
401 		h.h1->tp_status = status;
402 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
403 		break;
404 	case TPACKET_V2:
405 		h.h2->tp_status = status;
406 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
407 		break;
408 	case TPACKET_V3:
409 	default:
410 		WARN(1, "TPACKET version not supported.\n");
411 		BUG();
412 	}
413 
414 	smp_wmb();
415 }
416 
417 static int __packet_get_status(struct packet_sock *po, void *frame)
418 {
419 	union tpacket_uhdr h;
420 
421 	smp_rmb();
422 
423 	h.raw = frame;
424 	switch (po->tp_version) {
425 	case TPACKET_V1:
426 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
427 		return h.h1->tp_status;
428 	case TPACKET_V2:
429 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
430 		return h.h2->tp_status;
431 	case TPACKET_V3:
432 	default:
433 		WARN(1, "TPACKET version not supported.\n");
434 		BUG();
435 		return 0;
436 	}
437 }
438 
439 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
440 				   unsigned int flags)
441 {
442 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
443 
444 	if (shhwtstamps &&
445 	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
446 	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
447 		return TP_STATUS_TS_RAW_HARDWARE;
448 
449 	if (ktime_to_timespec_cond(skb->tstamp, ts))
450 		return TP_STATUS_TS_SOFTWARE;
451 
452 	return 0;
453 }
454 
455 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
456 				    struct sk_buff *skb)
457 {
458 	union tpacket_uhdr h;
459 	struct timespec ts;
460 	__u32 ts_status;
461 
462 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
463 		return 0;
464 
465 	h.raw = frame;
466 	switch (po->tp_version) {
467 	case TPACKET_V1:
468 		h.h1->tp_sec = ts.tv_sec;
469 		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
470 		break;
471 	case TPACKET_V2:
472 		h.h2->tp_sec = ts.tv_sec;
473 		h.h2->tp_nsec = ts.tv_nsec;
474 		break;
475 	case TPACKET_V3:
476 	default:
477 		WARN(1, "TPACKET version not supported.\n");
478 		BUG();
479 	}
480 
481 	/* one flush is safe, as both fields always lie on the same cacheline */
482 	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
483 	smp_wmb();
484 
485 	return ts_status;
486 }
487 
488 static void *packet_lookup_frame(struct packet_sock *po,
489 		struct packet_ring_buffer *rb,
490 		unsigned int position,
491 		int status)
492 {
493 	unsigned int pg_vec_pos, frame_offset;
494 	union tpacket_uhdr h;
495 
496 	pg_vec_pos = position / rb->frames_per_block;
497 	frame_offset = position % rb->frames_per_block;
498 
499 	h.raw = rb->pg_vec[pg_vec_pos].buffer +
500 		(frame_offset * rb->frame_size);
501 
502 	if (status != __packet_get_status(po, h.raw))
503 		return NULL;
504 
505 	return h.raw;
506 }
507 
508 static void *packet_current_frame(struct packet_sock *po,
509 		struct packet_ring_buffer *rb,
510 		int status)
511 {
512 	return packet_lookup_frame(po, rb, rb->head, status);
513 }
514 
515 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
516 {
517 	del_timer_sync(&pkc->retire_blk_timer);
518 }
519 
520 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
521 		int tx_ring,
522 		struct sk_buff_head *rb_queue)
523 {
524 	struct tpacket_kbdq_core *pkc;
525 
526 	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
527 			GET_PBDQC_FROM_RB(&po->rx_ring);
528 
529 	spin_lock_bh(&rb_queue->lock);
530 	pkc->delete_blk_timer = 1;
531 	spin_unlock_bh(&rb_queue->lock);
532 
533 	prb_del_retire_blk_timer(pkc);
534 }
535 
536 static void prb_init_blk_timer(struct packet_sock *po,
537 		struct tpacket_kbdq_core *pkc,
538 		void (*func) (unsigned long))
539 {
540 	init_timer(&pkc->retire_blk_timer);
541 	pkc->retire_blk_timer.data = (long)po;
542 	pkc->retire_blk_timer.function = func;
543 	pkc->retire_blk_timer.expires = jiffies;
544 }
545 
546 static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
547 {
548 	struct tpacket_kbdq_core *pkc;
549 
550 	if (tx_ring)
551 		BUG();
552 
553 	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
554 			GET_PBDQC_FROM_RB(&po->rx_ring);
555 	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
556 }
557 
558 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
559 				int blk_size_in_bytes)
560 {
561 	struct net_device *dev;
562 	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
563 	struct ethtool_cmd ecmd;
564 	int err;
565 	u32 speed;
566 
567 	rtnl_lock();
568 	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
569 	if (unlikely(!dev)) {
570 		rtnl_unlock();
571 		return DEFAULT_PRB_RETIRE_TOV;
572 	}
573 	err = __ethtool_get_settings(dev, &ecmd);
574 	speed = ethtool_cmd_speed(&ecmd);
575 	rtnl_unlock();
576 	if (!err) {
577 		/*
578 		 * If the link speed is so slow that you don't really
579 		 * need to worry about perf anyway
580 		 */
581 		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
582 			return DEFAULT_PRB_RETIRE_TOV;
583 		} else {
584 			msec = 1;
585 			div = speed / 1000;
586 		}
587 	}
588 
589 	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
590 
591 	if (div)
592 		mbits /= div;
593 
594 	tmo = mbits * msec;
595 
596 	if (div)
597 		return tmo+1;
598 	return tmo;
599 }
600 
601 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
602 			union tpacket_req_u *req_u)
603 {
604 	p1->feature_req_word = req_u->req3.tp_feature_req_word;
605 }
606 
607 static void init_prb_bdqc(struct packet_sock *po,
608 			struct packet_ring_buffer *rb,
609 			struct pgv *pg_vec,
610 			union tpacket_req_u *req_u, int tx_ring)
611 {
612 	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
613 	struct tpacket_block_desc *pbd;
614 
615 	memset(p1, 0x0, sizeof(*p1));
616 
617 	p1->knxt_seq_num = 1;
618 	p1->pkbdq = pg_vec;
619 	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
620 	p1->pkblk_start	= pg_vec[0].buffer;
621 	p1->kblk_size = req_u->req3.tp_block_size;
622 	p1->knum_blocks	= req_u->req3.tp_block_nr;
623 	p1->hdrlen = po->tp_hdrlen;
624 	p1->version = po->tp_version;
625 	p1->last_kactive_blk_num = 0;
626 	po->stats.stats3.tp_freeze_q_cnt = 0;
627 	if (req_u->req3.tp_retire_blk_tov)
628 		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
629 	else
630 		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
631 						req_u->req3.tp_block_size);
632 	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
633 	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
634 
635 	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
636 	prb_init_ft_ops(p1, req_u);
637 	prb_setup_retire_blk_timer(po, tx_ring);
638 	prb_open_block(p1, pbd);
639 }
640 
641 /*  Do NOT update the last_blk_num first.
642  *  Assumes sk_buff_head lock is held.
643  */
644 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
645 {
646 	mod_timer(&pkc->retire_blk_timer,
647 			jiffies + pkc->tov_in_jiffies);
648 	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
649 }
650 
651 /*
652  * Timer logic:
653  * 1) We refresh the timer only when we open a block.
654  *    By doing this we don't waste cycles refreshing the timer
655  *	  on a packet-by-packet basis.
656  *
657  * With a 1MB block-size, on a 1Gbps line, it will take
658  * i) ~8 ms to fill a block + ii) memcpy etc.
659  * In this cut we are not accounting for the memcpy time.
660  *
661  * So, if the user sets the 'tmo' to 10ms then the timer
662  * will never fire while the block is still getting filled
663  * (which is what we want). However, the user could choose
664  * to close a block early and that's fine.
665  *
666  * But when the timer does fire, we check whether or not to refresh it.
667  * Since the tmo granularity is in msecs, it is not too expensive
668  * to refresh the timer, let's say every '8' msecs.
669  * Either the user can set the 'tmo' or we can derive it based on
670  * a) line-speed and b) block-size.
671  * prb_calc_retire_blk_tmo() calculates the tmo.
672  *
673  */
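/* Worked example of the numbers above (illustrative): a 1MB block is
 * 8 Mbit, so on a 1Gbps link it takes about 8 Mbit / 1000 Mbps = 8 ms to
 * fill.  prb_calc_retire_blk_tmo() below computes the same value as
 * mbits = (blk_size * 8) / 2^20, div = speed / 1000, tmo = mbits / div
 * msecs, plus one msec of slack when div is known.
 */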
674 static void prb_retire_rx_blk_timer_expired(unsigned long data)
675 {
676 	struct packet_sock *po = (struct packet_sock *)data;
677 	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
678 	unsigned int frozen;
679 	struct tpacket_block_desc *pbd;
680 
681 	spin_lock(&po->sk.sk_receive_queue.lock);
682 
683 	frozen = prb_queue_frozen(pkc);
684 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
685 
686 	if (unlikely(pkc->delete_blk_timer))
687 		goto out;
688 
689 	/* We only need to plug the race when the block is partially filled.
690 	 * tpacket_rcv:
691 	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
692 	 *		copy_bits() is in progress ...
693 	 *		timer fires on other cpu:
694 	 *		we can't retire the current block because copy_bits
695 	 *		is in progress.
696 	 *
697 	 */
698 	if (BLOCK_NUM_PKTS(pbd)) {
699 		while (atomic_read(&pkc->blk_fill_in_prog)) {
700 			/* Waiting for skb_copy_bits to finish... */
701 			cpu_relax();
702 		}
703 	}
704 
705 	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
706 		if (!frozen) {
707 			if (!BLOCK_NUM_PKTS(pbd)) {
708 				/* An empty block. Just refresh the timer. */
709 				goto refresh_timer;
710 			}
711 			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
712 			if (!prb_dispatch_next_block(pkc, po))
713 				goto refresh_timer;
714 			else
715 				goto out;
716 		} else {
717 			/* Case 1. Queue was frozen because user-space was
718 			 *	   lagging behind.
719 			 */
720 			if (prb_curr_blk_in_use(pkc, pbd)) {
721 				/*
722 				 * Ok, user-space is still behind.
723 				 * So just refresh the timer.
724 				 */
725 				goto refresh_timer;
726 			} else {
727 			       /* Case 2. The queue was frozen, user-space caught up,
728 				* now the link went idle && the timer fired.
729 				* We don't have a block to close, so we open this
730 				* block and restart the timer.
731 				* Opening a block thaws the queue and restarts the
732 				* timer; thawing/timer-refresh is a side effect.
733 				*/
734 				prb_open_block(pkc, pbd);
735 				goto out;
736 			}
737 		}
738 	}
739 
740 refresh_timer:
741 	_prb_refresh_rx_retire_blk_timer(pkc);
742 
743 out:
744 	spin_unlock(&po->sk.sk_receive_queue.lock);
745 }
746 
747 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
748 		struct tpacket_block_desc *pbd1, __u32 status)
749 {
750 	/* Flush everything minus the block header */
751 
752 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
753 	u8 *start, *end;
754 
755 	start = (u8 *)pbd1;
756 
757 	/* Skip the block header (we know header WILL fit in 4K) */
758 	start += PAGE_SIZE;
759 
760 	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
761 	for (; start < end; start += PAGE_SIZE)
762 		flush_dcache_page(pgv_to_page(start));
763 
764 	smp_wmb();
765 #endif
766 
767 	/* Now update the block status. */
768 
769 	BLOCK_STATUS(pbd1) = status;
770 
771 	/* Flush the block header */
772 
773 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
774 	start = (u8 *)pbd1;
775 	flush_dcache_page(pgv_to_page(start));
776 
777 	smp_wmb();
778 #endif
779 }
780 
781 /*
782  * Side effect:
783  *
784  * 1) flush the block
785  * 2) Increment active_blk_num
786  *
787  * Note: We DON'T refresh the timer on purpose,
788  *	because almost always the next block will be opened.
789  */
790 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
791 		struct tpacket_block_desc *pbd1,
792 		struct packet_sock *po, unsigned int stat)
793 {
794 	__u32 status = TP_STATUS_USER | stat;
795 
796 	struct tpacket3_hdr *last_pkt;
797 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
798 	struct sock *sk = &po->sk;
799 
800 	if (po->stats.stats3.tp_drops)
801 		status |= TP_STATUS_LOSING;
802 
803 	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
804 	last_pkt->tp_next_offset = 0;
805 
806 	/* Get the ts of the last pkt */
807 	if (BLOCK_NUM_PKTS(pbd1)) {
808 		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
809 		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
810 	} else {
811 		/* Ok, we tmo'd - so get the current time.
812 		 *
813 		 * It shouldn't really happen as we don't close empty
814 		 * blocks. See prb_retire_rx_blk_timer_expired().
815 		 */
816 		struct timespec ts;
817 		getnstimeofday(&ts);
818 		h1->ts_last_pkt.ts_sec = ts.tv_sec;
819 		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
820 	}
821 
822 	smp_wmb();
823 
824 	/* Flush the block */
825 	prb_flush_block(pkc1, pbd1, status);
826 
827 	sk->sk_data_ready(sk);
828 
829 	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
830 }
831 
832 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
833 {
834 	pkc->reset_pending_on_curr_blk = 0;
835 }
836 
837 /*
838  * Side effect of opening a block:
839  *
840  * 1) prb_queue is thawed.
841  * 2) retire_blk_timer is refreshed.
842  *
843  */
844 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
845 	struct tpacket_block_desc *pbd1)
846 {
847 	struct timespec ts;
848 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
849 
850 	smp_rmb();
851 
852 	/* We could have just memset this, but we would lose the
853 	 * flexibility of making the priv area sticky.
854 	 */
855 
856 	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
857 	BLOCK_NUM_PKTS(pbd1) = 0;
858 	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
859 
860 	getnstimeofday(&ts);
861 
862 	h1->ts_first_pkt.ts_sec = ts.tv_sec;
863 	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
864 
865 	pkc1->pkblk_start = (char *)pbd1;
866 	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
867 
868 	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
869 	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
870 
871 	pbd1->version = pkc1->version;
872 	pkc1->prev = pkc1->nxt_offset;
873 	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
874 
875 	prb_thaw_queue(pkc1);
876 	_prb_refresh_rx_retire_blk_timer(pkc1);
877 
878 	smp_wmb();
879 }
880 
881 /*
882  * Queue freeze logic:
883  * 1) Assume tp_block_nr = 8 blocks.
884  * 2) At time 't0', user opens Rx ring.
885  * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
886  * 4) user-space is either sleeping or processing block '0'.
887  * 5) tpacket_rcv is currently filling block '7', since there is no space left,
888  *    it will close block-7, loop around and try to fill block '0'.
889  *    call-flow:
890  *    __packet_lookup_frame_in_block
891  *      prb_retire_current_block()
892  *      prb_dispatch_next_block()
893  *        |->(BLOCK_STATUS == USER) evaluates to true
894  *    5.1) Since block-0 is currently in-use, we just freeze the queue.
895  * 6) Now there are two cases:
896  *    6.1) Link goes idle right after the queue is frozen.
897  *         But remember, the last open_block() refreshed the timer.
898  *         When this timer expires, it will refresh itself so that we can
899  *         re-open block-0 in the near future.
900  *    6.2) Link is busy and keeps on receiving packets. This is a simple
901  *         case and __packet_lookup_frame_in_block will check if block-0
902  *         is free and can now be re-used.
903  */
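/* In the code, the freeze in step 5.1 is prb_freeze_queue() below, and the
 * thaw in step 6 happens as a side effect of prb_open_block(), either from
 * the retire timer (6.1) or from __packet_lookup_frame_in_block() (6.2).
 */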
904 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
905 				  struct packet_sock *po)
906 {
907 	pkc->reset_pending_on_curr_blk = 1;
908 	po->stats.stats3.tp_freeze_q_cnt++;
909 }
910 
911 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
912 
913 /*
914  * If the next block is free then we will dispatch it
915  * and return a good offset.
916  * Else, we will freeze the queue.
917  * So, the caller must check the return value.
918  */
919 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
920 		struct packet_sock *po)
921 {
922 	struct tpacket_block_desc *pbd;
923 
924 	smp_rmb();
925 
926 	/* 1. Get current block num */
927 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
928 
929 	/* 2. If this block is currently in_use then freeze the queue */
930 	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
931 		prb_freeze_queue(pkc, po);
932 		return NULL;
933 	}
934 
935 	/*
936 	 * 3.
937 	 * open this block and return the offset where the first packet
938 	 * needs to get stored.
939 	 */
940 	prb_open_block(pkc, pbd);
941 	return (void *)pkc->nxt_offset;
942 }
943 
944 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
945 		struct packet_sock *po, unsigned int status)
946 {
947 	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
948 
949 	/* retire/close the current block */
950 	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
951 		/*
952 		 * Plug the case where copy_bits() is in progress on
953 		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
954 		 * have space to copy the pkt in the current block and
955 		 * called prb_retire_current_block()
956 		 *
957 		 * We don't need to worry about the TMO case because
958 		 * the timer-handler already handled this case.
959 		 */
960 		if (!(status & TP_STATUS_BLK_TMO)) {
961 			while (atomic_read(&pkc->blk_fill_in_prog)) {
962 				/* Waiting for skb_copy_bits to finish... */
963 				cpu_relax();
964 			}
965 		}
966 		prb_close_block(pkc, pbd, po, status);
967 		return;
968 	}
969 }
970 
971 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
972 				      struct tpacket_block_desc *pbd)
973 {
974 	return TP_STATUS_USER & BLOCK_STATUS(pbd);
975 }
976 
977 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
978 {
979 	return pkc->reset_pending_on_curr_blk;
980 }
981 
982 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
983 {
984 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
985 	atomic_dec(&pkc->blk_fill_in_prog);
986 }
987 
988 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
989 			struct tpacket3_hdr *ppd)
990 {
991 	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
992 }
993 
994 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
995 			struct tpacket3_hdr *ppd)
996 {
997 	ppd->hv1.tp_rxhash = 0;
998 }
999 
1000 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
1001 			struct tpacket3_hdr *ppd)
1002 {
1003 	if (skb_vlan_tag_present(pkc->skb)) {
1004 		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1005 		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1006 		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1007 	} else {
1008 		ppd->hv1.tp_vlan_tci = 0;
1009 		ppd->hv1.tp_vlan_tpid = 0;
1010 		ppd->tp_status = TP_STATUS_AVAILABLE;
1011 	}
1012 }
1013 
1014 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1015 			struct tpacket3_hdr *ppd)
1016 {
1017 	ppd->hv1.tp_padding = 0;
1018 	prb_fill_vlan_info(pkc, ppd);
1019 
1020 	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1021 		prb_fill_rxhash(pkc, ppd);
1022 	else
1023 		prb_clear_rxhash(pkc, ppd);
1024 }
1025 
1026 static void prb_fill_curr_block(char *curr,
1027 				struct tpacket_kbdq_core *pkc,
1028 				struct tpacket_block_desc *pbd,
1029 				unsigned int len)
1030 {
1031 	struct tpacket3_hdr *ppd;
1032 
1033 	ppd  = (struct tpacket3_hdr *)curr;
1034 	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1035 	pkc->prev = curr;
1036 	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1037 	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1038 	BLOCK_NUM_PKTS(pbd) += 1;
1039 	atomic_inc(&pkc->blk_fill_in_prog);
1040 	prb_run_all_ft_ops(pkc, ppd);
1041 }
1042 
1043 /* Assumes caller has the sk->rx_queue.lock */
1044 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1045 					    struct sk_buff *skb,
1046 						int status,
1047 					    unsigned int len
1048 					    )
1049 {
1050 	struct tpacket_kbdq_core *pkc;
1051 	struct tpacket_block_desc *pbd;
1052 	char *curr, *end;
1053 
1054 	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1055 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1056 
1057 	/* Queue is frozen when user space is lagging behind */
1058 	if (prb_queue_frozen(pkc)) {
1059 		/*
1060 		 * Check if the block that caused the queue to freeze
1061 		 * is still in_use by user-space.
1062 		 */
1063 		if (prb_curr_blk_in_use(pkc, pbd)) {
1064 			/* Can't record this packet */
1065 			return NULL;
1066 		} else {
1067 			/*
1068 			 * Ok, the block was released by user-space.
1069 			 * Now let's open that block.
1070 			 * opening a block also thaws the queue.
1071 			 * Thawing is a side effect.
1072 			 */
1073 			prb_open_block(pkc, pbd);
1074 		}
1075 	}
1076 
1077 	smp_mb();
1078 	curr = pkc->nxt_offset;
1079 	pkc->skb = skb;
1080 	end = (char *)pbd + pkc->kblk_size;
1081 
1082 	/* first try the current block */
1083 	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1084 		prb_fill_curr_block(curr, pkc, pbd, len);
1085 		return (void *)curr;
1086 	}
1087 
1088 	/* Ok, close the current block */
1089 	prb_retire_current_block(pkc, po, 0);
1090 
1091 	/* Now, try to dispatch the next block */
1092 	curr = (char *)prb_dispatch_next_block(pkc, po);
1093 	if (curr) {
1094 		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1095 		prb_fill_curr_block(curr, pkc, pbd, len);
1096 		return (void *)curr;
1097 	}
1098 
1099 	/*
1100 	 * No free blocks are available. User-space hasn't caught up yet.
1101 	 * Queue was just frozen and now this packet will get dropped.
1102 	 */
1103 	return NULL;
1104 }
1105 
1106 static void *packet_current_rx_frame(struct packet_sock *po,
1107 					    struct sk_buff *skb,
1108 					    int status, unsigned int len)
1109 {
1110 	char *curr = NULL;
1111 	switch (po->tp_version) {
1112 	case TPACKET_V1:
1113 	case TPACKET_V2:
1114 		curr = packet_lookup_frame(po, &po->rx_ring,
1115 					po->rx_ring.head, status);
1116 		return curr;
1117 	case TPACKET_V3:
1118 		return __packet_lookup_frame_in_block(po, skb, status, len);
1119 	default:
1120 		WARN(1, "TPACKET version not supported\n");
1121 		BUG();
1122 		return NULL;
1123 	}
1124 }
1125 
1126 static void *prb_lookup_block(struct packet_sock *po,
1127 				     struct packet_ring_buffer *rb,
1128 				     unsigned int idx,
1129 				     int status)
1130 {
1131 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1132 	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1133 
1134 	if (status != BLOCK_STATUS(pbd))
1135 		return NULL;
1136 	return pbd;
1137 }
1138 
1139 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1140 {
1141 	unsigned int prev;
1142 	if (rb->prb_bdqc.kactive_blk_num)
1143 		prev = rb->prb_bdqc.kactive_blk_num-1;
1144 	else
1145 		prev = rb->prb_bdqc.knum_blocks-1;
1146 	return prev;
1147 }
1148 
1149 /* Assumes caller has held the rx_queue.lock */
1150 static void *__prb_previous_block(struct packet_sock *po,
1151 					 struct packet_ring_buffer *rb,
1152 					 int status)
1153 {
1154 	unsigned int previous = prb_previous_blk_num(rb);
1155 	return prb_lookup_block(po, rb, previous, status);
1156 }
1157 
1158 static void *packet_previous_rx_frame(struct packet_sock *po,
1159 					     struct packet_ring_buffer *rb,
1160 					     int status)
1161 {
1162 	if (po->tp_version <= TPACKET_V2)
1163 		return packet_previous_frame(po, rb, status);
1164 
1165 	return __prb_previous_block(po, rb, status);
1166 }
1167 
1168 static void packet_increment_rx_head(struct packet_sock *po,
1169 					    struct packet_ring_buffer *rb)
1170 {
1171 	switch (po->tp_version) {
1172 	case TPACKET_V1:
1173 	case TPACKET_V2:
1174 		return packet_increment_head(rb);
1175 	case TPACKET_V3:
1176 	default:
1177 		WARN(1, "TPACKET version not supported.\n");
1178 		BUG();
1179 		return;
1180 	}
1181 }
1182 
1183 static void *packet_previous_frame(struct packet_sock *po,
1184 		struct packet_ring_buffer *rb,
1185 		int status)
1186 {
1187 	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1188 	return packet_lookup_frame(po, rb, previous, status);
1189 }
1190 
1191 static void packet_increment_head(struct packet_ring_buffer *buff)
1192 {
1193 	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1194 }
1195 
1196 static void packet_inc_pending(struct packet_ring_buffer *rb)
1197 {
1198 	this_cpu_inc(*rb->pending_refcnt);
1199 }
1200 
1201 static void packet_dec_pending(struct packet_ring_buffer *rb)
1202 {
1203 	this_cpu_dec(*rb->pending_refcnt);
1204 }
1205 
1206 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1207 {
1208 	unsigned int refcnt = 0;
1209 	int cpu;
1210 
1211 	/* We don't use pending refcount in rx_ring. */
1212 	if (rb->pending_refcnt == NULL)
1213 		return 0;
1214 
1215 	for_each_possible_cpu(cpu)
1216 		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1217 
1218 	return refcnt;
1219 }
1220 
1221 static int packet_alloc_pending(struct packet_sock *po)
1222 {
1223 	po->rx_ring.pending_refcnt = NULL;
1224 
1225 	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1226 	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1227 		return -ENOBUFS;
1228 
1229 	return 0;
1230 }
1231 
1232 static void packet_free_pending(struct packet_sock *po)
1233 {
1234 	free_percpu(po->tx_ring.pending_refcnt);
1235 }
1236 
1237 static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1238 {
1239 	struct sock *sk = &po->sk;
1240 	bool has_room;
1241 
1242 	if (po->prot_hook.func != tpacket_rcv)
1243 		return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
1244 			<= sk->sk_rcvbuf;
1245 
1246 	spin_lock(&sk->sk_receive_queue.lock);
1247 	if (po->tp_version == TPACKET_V3)
1248 		has_room = prb_lookup_block(po, &po->rx_ring,
1249 					    po->rx_ring.prb_bdqc.kactive_blk_num,
1250 					    TP_STATUS_KERNEL);
1251 	else
1252 		has_room = packet_lookup_frame(po, &po->rx_ring,
1253 					       po->rx_ring.head,
1254 					       TP_STATUS_KERNEL);
1255 	spin_unlock(&sk->sk_receive_queue.lock);
1256 
1257 	return has_room;
1258 }
1259 
1260 static void packet_sock_destruct(struct sock *sk)
1261 {
1262 	skb_queue_purge(&sk->sk_error_queue);
1263 
1264 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1265 	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1266 
1267 	if (!sock_flag(sk, SOCK_DEAD)) {
1268 		pr_err("Attempt to release alive packet socket: %p\n", sk);
1269 		return;
1270 	}
1271 
1272 	sk_refcnt_debug_dec(sk);
1273 }
1274 
1275 static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
1276 {
1277 	int x = atomic_read(&f->rr_cur) + 1;
1278 
1279 	if (x >= num)
1280 		x = 0;
1281 
1282 	return x;
1283 }
1284 
1285 static unsigned int fanout_demux_hash(struct packet_fanout *f,
1286 				      struct sk_buff *skb,
1287 				      unsigned int num)
1288 {
1289 	return reciprocal_scale(skb_get_hash(skb), num);
1290 }
1291 
1292 static unsigned int fanout_demux_lb(struct packet_fanout *f,
1293 				    struct sk_buff *skb,
1294 				    unsigned int num)
1295 {
1296 	int cur, old;
1297 
1298 	cur = atomic_read(&f->rr_cur);
1299 	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
1300 				     fanout_rr_next(f, num))) != cur)
1301 		cur = old;
1302 	return cur;
1303 }
1304 
1305 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1306 				     struct sk_buff *skb,
1307 				     unsigned int num)
1308 {
1309 	return smp_processor_id() % num;
1310 }
1311 
1312 static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1313 				     struct sk_buff *skb,
1314 				     unsigned int num)
1315 {
1316 	return prandom_u32_max(num);
1317 }
1318 
1319 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1320 					  struct sk_buff *skb,
1321 					  unsigned int idx, unsigned int skip,
1322 					  unsigned int num)
1323 {
1324 	unsigned int i, j;
1325 
1326 	i = j = min_t(int, f->next[idx], num - 1);
1327 	do {
1328 		if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
1329 			if (i != j)
1330 				f->next[idx] = i;
1331 			return i;
1332 		}
1333 		if (++i == num)
1334 			i = 0;
1335 	} while (i != j);
1336 
1337 	return idx;
1338 }
1339 
1340 static unsigned int fanout_demux_qm(struct packet_fanout *f,
1341 				    struct sk_buff *skb,
1342 				    unsigned int num)
1343 {
1344 	return skb_get_queue_mapping(skb) % num;
1345 }
1346 
1347 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1348 {
1349 	return f->flags & (flag >> 8);
1350 }
1351 
1352 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1353 			     struct packet_type *pt, struct net_device *orig_dev)
1354 {
1355 	struct packet_fanout *f = pt->af_packet_priv;
1356 	unsigned int num = f->num_members;
1357 	struct packet_sock *po;
1358 	unsigned int idx;
1359 
1360 	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
1361 	    !num) {
1362 		kfree_skb(skb);
1363 		return 0;
1364 	}
1365 
1366 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1367 		skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1368 		if (!skb)
1369 			return 0;
1370 	}
1371 	switch (f->type) {
1372 	case PACKET_FANOUT_HASH:
1373 	default:
1374 		idx = fanout_demux_hash(f, skb, num);
1375 		break;
1376 	case PACKET_FANOUT_LB:
1377 		idx = fanout_demux_lb(f, skb, num);
1378 		break;
1379 	case PACKET_FANOUT_CPU:
1380 		idx = fanout_demux_cpu(f, skb, num);
1381 		break;
1382 	case PACKET_FANOUT_RND:
1383 		idx = fanout_demux_rnd(f, skb, num);
1384 		break;
1385 	case PACKET_FANOUT_QM:
1386 		idx = fanout_demux_qm(f, skb, num);
1387 		break;
1388 	case PACKET_FANOUT_ROLLOVER:
1389 		idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
1390 		break;
1391 	}
1392 
1393 	po = pkt_sk(f->arr[idx]);
1394 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
1395 	    unlikely(!packet_rcv_has_room(po, skb))) {
1396 		idx = fanout_demux_rollover(f, skb, idx, idx, num);
1397 		po = pkt_sk(f->arr[idx]);
1398 	}
1399 
1400 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1401 }
1402 
1403 DEFINE_MUTEX(fanout_mutex);
1404 EXPORT_SYMBOL_GPL(fanout_mutex);
1405 static LIST_HEAD(fanout_list);
1406 
1407 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1408 {
1409 	struct packet_fanout *f = po->fanout;
1410 
1411 	spin_lock(&f->lock);
1412 	f->arr[f->num_members] = sk;
1413 	smp_wmb();
1414 	f->num_members++;
1415 	spin_unlock(&f->lock);
1416 }
1417 
1418 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1419 {
1420 	struct packet_fanout *f = po->fanout;
1421 	int i;
1422 
1423 	spin_lock(&f->lock);
1424 	for (i = 0; i < f->num_members; i++) {
1425 		if (f->arr[i] == sk)
1426 			break;
1427 	}
1428 	BUG_ON(i >= f->num_members);
1429 	f->arr[i] = f->arr[f->num_members - 1];
1430 	f->num_members--;
1431 	spin_unlock(&f->lock);
1432 }
1433 
1434 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1435 {
1436 	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
1437 		return true;
1438 
1439 	return false;
1440 }
1441 
1442 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1443 {
1444 	struct packet_sock *po = pkt_sk(sk);
1445 	struct packet_fanout *f, *match;
1446 	u8 type = type_flags & 0xff;
1447 	u8 flags = type_flags >> 8;
1448 	int err;
1449 
1450 	switch (type) {
1451 	case PACKET_FANOUT_ROLLOVER:
1452 		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1453 			return -EINVAL;
1454 	case PACKET_FANOUT_HASH:
1455 	case PACKET_FANOUT_LB:
1456 	case PACKET_FANOUT_CPU:
1457 	case PACKET_FANOUT_RND:
1458 	case PACKET_FANOUT_QM:
1459 		break;
1460 	default:
1461 		return -EINVAL;
1462 	}
1463 
1464 	if (!po->running)
1465 		return -EINVAL;
1466 
1467 	if (po->fanout)
1468 		return -EALREADY;
1469 
1470 	mutex_lock(&fanout_mutex);
1471 	match = NULL;
1472 	list_for_each_entry(f, &fanout_list, list) {
1473 		if (f->id == id &&
1474 		    read_pnet(&f->net) == sock_net(sk)) {
1475 			match = f;
1476 			break;
1477 		}
1478 	}
1479 	err = -EINVAL;
1480 	if (match && match->flags != flags)
1481 		goto out;
1482 	if (!match) {
1483 		err = -ENOMEM;
1484 		match = kzalloc(sizeof(*match), GFP_KERNEL);
1485 		if (!match)
1486 			goto out;
1487 		write_pnet(&match->net, sock_net(sk));
1488 		match->id = id;
1489 		match->type = type;
1490 		match->flags = flags;
1491 		atomic_set(&match->rr_cur, 0);
1492 		INIT_LIST_HEAD(&match->list);
1493 		spin_lock_init(&match->lock);
1494 		atomic_set(&match->sk_ref, 0);
1495 		match->prot_hook.type = po->prot_hook.type;
1496 		match->prot_hook.dev = po->prot_hook.dev;
1497 		match->prot_hook.func = packet_rcv_fanout;
1498 		match->prot_hook.af_packet_priv = match;
1499 		match->prot_hook.id_match = match_fanout_group;
1500 		dev_add_pack(&match->prot_hook);
1501 		list_add(&match->list, &fanout_list);
1502 	}
1503 	err = -EINVAL;
1504 	if (match->type == type &&
1505 	    match->prot_hook.type == po->prot_hook.type &&
1506 	    match->prot_hook.dev == po->prot_hook.dev) {
1507 		err = -ENOSPC;
1508 		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1509 			__dev_remove_pack(&po->prot_hook);
1510 			po->fanout = match;
1511 			atomic_inc(&match->sk_ref);
1512 			__fanout_link(sk, po);
1513 			err = 0;
1514 		}
1515 	}
1516 out:
1517 	mutex_unlock(&fanout_mutex);
1518 	return err;
1519 }
1520 
1521 static void fanout_release(struct sock *sk)
1522 {
1523 	struct packet_sock *po = pkt_sk(sk);
1524 	struct packet_fanout *f;
1525 
1526 	f = po->fanout;
1527 	if (!f)
1528 		return;
1529 
1530 	mutex_lock(&fanout_mutex);
1531 	po->fanout = NULL;
1532 
1533 	if (atomic_dec_and_test(&f->sk_ref)) {
1534 		list_del(&f->list);
1535 		dev_remove_pack(&f->prot_hook);
1536 		kfree(f);
1537 	}
1538 	mutex_unlock(&fanout_mutex);
1539 }
1540 
1541 static const struct proto_ops packet_ops;
1542 
1543 static const struct proto_ops packet_ops_spkt;
1544 
1545 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1546 			   struct packet_type *pt, struct net_device *orig_dev)
1547 {
1548 	struct sock *sk;
1549 	struct sockaddr_pkt *spkt;
1550 
1551 	/*
1552 	 *	When we registered the protocol we saved the socket in the data
1553 	 *	field for just this event.
1554 	 */
1555 
1556 	sk = pt->af_packet_priv;
1557 
1558 	/*
1559 	 *	Yank back the headers [hope the device set this
1560 	 *	right or kerboom...]
1561 	 *
1562 	 *	Incoming packets have ll header pulled,
1563 	 *	push it back.
1564 	 *
1565 	 *	For outgoing ones skb->data == skb_mac_header(skb)
1566 	 *	so that this procedure is a no-op.
1567 	 */
1568 
1569 	if (skb->pkt_type == PACKET_LOOPBACK)
1570 		goto out;
1571 
1572 	if (!net_eq(dev_net(dev), sock_net(sk)))
1573 		goto out;
1574 
1575 	skb = skb_share_check(skb, GFP_ATOMIC);
1576 	if (skb == NULL)
1577 		goto oom;
1578 
1579 	/* drop any routing info */
1580 	skb_dst_drop(skb);
1581 
1582 	/* drop conntrack reference */
1583 	nf_reset(skb);
1584 
1585 	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1586 
1587 	skb_push(skb, skb->data - skb_mac_header(skb));
1588 
1589 	/*
1590 	 *	The SOCK_PACKET socket receives _all_ frames.
1591 	 */
1592 
1593 	spkt->spkt_family = dev->type;
1594 	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1595 	spkt->spkt_protocol = skb->protocol;
1596 
1597 	/*
1598 	 *	Charge the memory to the socket. This is done specifically
1599 	 *	to prevent sockets from using up all the memory.
1600 	 */
1601 
1602 	if (sock_queue_rcv_skb(sk, skb) == 0)
1603 		return 0;
1604 
1605 out:
1606 	kfree_skb(skb);
1607 oom:
1608 	return 0;
1609 }
1610 
1611 
1612 /*
1613  *	Output a raw packet to a device layer. This bypasses all the other
1614  *	protocol layers and you must therefore supply it with a complete frame
1615  */
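/* Illustrative sketch (user space, not part of this file): sending one
 * complete frame through the legacy SOCK_PACKET path handled by
 * packet_sendmsg_spkt() below.  The buffer must already contain the full
 * link-layer header; the helper name and protocol are examples only and
 * the block is wrapped in #if 0 so it is never built here.
 */
#if 0
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static ssize_t send_raw_frame(int fd, const char *ifname,
			      const void *frame, size_t len)
{
	struct sockaddr_pkt spkt;

	/* fd is assumed to come from
	 * socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL)).
	 */
	memset(&spkt, 0, sizeof(spkt));
	spkt.spkt_family = AF_PACKET;
	strncpy((char *)spkt.spkt_device, ifname,
		sizeof(spkt.spkt_device) - 1);
	spkt.spkt_protocol = htons(ETH_P_IP);	/* example protocol */

	return sendto(fd, frame, len, 0,
		      (struct sockaddr *)&spkt, sizeof(spkt));
}
#endif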
1616 
1617 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1618 			       size_t len)
1619 {
1620 	struct sock *sk = sock->sk;
1621 	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1622 	struct sk_buff *skb = NULL;
1623 	struct net_device *dev;
1624 	__be16 proto = 0;
1625 	int err;
1626 	int extra_len = 0;
1627 
1628 	/*
1629 	 *	Get and verify the address.
1630 	 */
1631 
1632 	if (saddr) {
1633 		if (msg->msg_namelen < sizeof(struct sockaddr))
1634 			return -EINVAL;
1635 		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1636 			proto = saddr->spkt_protocol;
1637 	} else
1638 		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1639 
1640 	/*
1641 	 *	Find the device first to size check it
1642 	 */
1643 
1644 	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1645 retry:
1646 	rcu_read_lock();
1647 	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1648 	err = -ENODEV;
1649 	if (dev == NULL)
1650 		goto out_unlock;
1651 
1652 	err = -ENETDOWN;
1653 	if (!(dev->flags & IFF_UP))
1654 		goto out_unlock;
1655 
1656 	/*
1657 	 * You may not queue a frame bigger than the mtu. This is the lowest level
1658 	 * raw protocol and you must do your own fragmentation at this level.
1659 	 */
1660 
1661 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1662 		if (!netif_supports_nofcs(dev)) {
1663 			err = -EPROTONOSUPPORT;
1664 			goto out_unlock;
1665 		}
1666 		extra_len = 4; /* We're doing our own CRC */
1667 	}
1668 
1669 	err = -EMSGSIZE;
1670 	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1671 		goto out_unlock;
1672 
1673 	if (!skb) {
1674 		size_t reserved = LL_RESERVED_SPACE(dev);
1675 		int tlen = dev->needed_tailroom;
1676 		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1677 
1678 		rcu_read_unlock();
1679 		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1680 		if (skb == NULL)
1681 			return -ENOBUFS;
1682 		/* FIXME: Save some space for broken drivers that write a hard
1683 		 * header at transmission time by themselves. PPP is the notable
1684 		 * one here. This should really be fixed at the driver level.
1685 		 */
1686 		skb_reserve(skb, reserved);
1687 		skb_reset_network_header(skb);
1688 
1689 		/* Try to align data part correctly */
1690 		if (hhlen) {
1691 			skb->data -= hhlen;
1692 			skb->tail -= hhlen;
1693 			if (len < hhlen)
1694 				skb_reset_network_header(skb);
1695 		}
1696 		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1697 		if (err)
1698 			goto out_free;
1699 		goto retry;
1700 	}
1701 
1702 	if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
1703 		/* Earlier code assumed this would be a VLAN pkt,
1704 		 * double-check this now that we have the actual
1705 		 * packet in hand.
1706 		 */
1707 		struct ethhdr *ehdr;
1708 		skb_reset_mac_header(skb);
1709 		ehdr = eth_hdr(skb);
1710 		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1711 			err = -EMSGSIZE;
1712 			goto out_unlock;
1713 		}
1714 	}
1715 
1716 	skb->protocol = proto;
1717 	skb->dev = dev;
1718 	skb->priority = sk->sk_priority;
1719 	skb->mark = sk->sk_mark;
1720 
1721 	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1722 
1723 	if (unlikely(extra_len == 4))
1724 		skb->no_fcs = 1;
1725 
1726 	skb_probe_transport_header(skb, 0);
1727 
1728 	dev_queue_xmit(skb);
1729 	rcu_read_unlock();
1730 	return len;
1731 
1732 out_unlock:
1733 	rcu_read_unlock();
1734 out_free:
1735 	kfree_skb(skb);
1736 	return err;
1737 }
1738 
1739 static unsigned int run_filter(const struct sk_buff *skb,
1740 				      const struct sock *sk,
1741 				      unsigned int res)
1742 {
1743 	struct sk_filter *filter;
1744 
1745 	rcu_read_lock();
1746 	filter = rcu_dereference(sk->sk_filter);
1747 	if (filter != NULL)
1748 		res = SK_RUN_FILTER(filter, skb);
1749 	rcu_read_unlock();
1750 
1751 	return res;
1752 }
1753 
1754 /*
1755  * This function does lazy skb cloning in the hope that most packets
1756  * are discarded by BPF.
1757  *
1758  * Note tricky part: we DO mangle shared skb! skb->data, skb->len
1759  * and skb->cb are mangled. It works because (and until) packets
1760  * falling here are owned by current CPU. Output packets are cloned
1761  * by dev_queue_xmit_nit(), input packets are processed by net_bh
1762  * sequentially, so that if we return the skb to its original state on exit,
1763  * we will not harm anyone.
1764  */
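/* In packet_rcv() below, the save is the skb_head/skb_len pair captured at
 * entry and the restore is the drop_n_restore label that puts skb->data and
 * skb->len back before the shared skb is released.
 */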
1765 
1766 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1767 		      struct packet_type *pt, struct net_device *orig_dev)
1768 {
1769 	struct sock *sk;
1770 	struct sockaddr_ll *sll;
1771 	struct packet_sock *po;
1772 	u8 *skb_head = skb->data;
1773 	int skb_len = skb->len;
1774 	unsigned int snaplen, res;
1775 
1776 	if (skb->pkt_type == PACKET_LOOPBACK)
1777 		goto drop;
1778 
1779 	sk = pt->af_packet_priv;
1780 	po = pkt_sk(sk);
1781 
1782 	if (!net_eq(dev_net(dev), sock_net(sk)))
1783 		goto drop;
1784 
1785 	skb->dev = dev;
1786 
1787 	if (dev->header_ops) {
1788 		/* The device has an explicit notion of ll header,
1789 		 * exported to higher levels.
1790 		 *
1791 		 * Otherwise, the device hides details of its frame
1792 		 * structure, so that the corresponding packet head is
1793 		 * never delivered to the user.
1794 		 */
1795 		if (sk->sk_type != SOCK_DGRAM)
1796 			skb_push(skb, skb->data - skb_mac_header(skb));
1797 		else if (skb->pkt_type == PACKET_OUTGOING) {
1798 			/* Special case: outgoing packets have ll header at head */
1799 			skb_pull(skb, skb_network_offset(skb));
1800 		}
1801 	}
1802 
1803 	snaplen = skb->len;
1804 
1805 	res = run_filter(skb, sk, snaplen);
1806 	if (!res)
1807 		goto drop_n_restore;
1808 	if (snaplen > res)
1809 		snaplen = res;
1810 
1811 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1812 		goto drop_n_acct;
1813 
1814 	if (skb_shared(skb)) {
1815 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1816 		if (nskb == NULL)
1817 			goto drop_n_acct;
1818 
1819 		if (skb_head != skb->data) {
1820 			skb->data = skb_head;
1821 			skb->len = skb_len;
1822 		}
1823 		consume_skb(skb);
1824 		skb = nskb;
1825 	}
1826 
1827 	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
1828 
1829 	sll = &PACKET_SKB_CB(skb)->sa.ll;
1830 	sll->sll_hatype = dev->type;
1831 	sll->sll_pkttype = skb->pkt_type;
1832 	if (unlikely(po->origdev))
1833 		sll->sll_ifindex = orig_dev->ifindex;
1834 	else
1835 		sll->sll_ifindex = dev->ifindex;
1836 
1837 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1838 
1839 	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
1840 	 * Use their space for storing the original skb length.
1841 	 */
1842 	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
1843 
1844 	if (pskb_trim(skb, snaplen))
1845 		goto drop_n_acct;
1846 
1847 	skb_set_owner_r(skb, sk);
1848 	skb->dev = NULL;
1849 	skb_dst_drop(skb);
1850 
1851 	/* drop conntrack reference */
1852 	nf_reset(skb);
1853 
1854 	spin_lock(&sk->sk_receive_queue.lock);
1855 	po->stats.stats1.tp_packets++;
1856 	sock_skb_set_dropcount(sk, skb);
1857 	__skb_queue_tail(&sk->sk_receive_queue, skb);
1858 	spin_unlock(&sk->sk_receive_queue.lock);
1859 	sk->sk_data_ready(sk);
1860 	return 0;
1861 
1862 drop_n_acct:
1863 	spin_lock(&sk->sk_receive_queue.lock);
1864 	po->stats.stats1.tp_drops++;
1865 	atomic_inc(&sk->sk_drops);
1866 	spin_unlock(&sk->sk_receive_queue.lock);
1867 
1868 drop_n_restore:
1869 	if (skb_head != skb->data && skb_shared(skb)) {
1870 		skb->data = skb_head;
1871 		skb->len = skb_len;
1872 	}
1873 drop:
1874 	consume_skb(skb);
1875 	return 0;
1876 }
1877 
1878 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1879 		       struct packet_type *pt, struct net_device *orig_dev)
1880 {
1881 	struct sock *sk;
1882 	struct packet_sock *po;
1883 	struct sockaddr_ll *sll;
1884 	union tpacket_uhdr h;
1885 	u8 *skb_head = skb->data;
1886 	int skb_len = skb->len;
1887 	unsigned int snaplen, res;
1888 	unsigned long status = TP_STATUS_USER;
1889 	unsigned short macoff, netoff, hdrlen;
1890 	struct sk_buff *copy_skb = NULL;
1891 	struct timespec ts;
1892 	__u32 ts_status;
1893 
1894 	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
1895 	 * We may add members to them up to the current aligned size without forcing
1896 	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
1897 	 */
1898 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
1899 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
1900 
1901 	if (skb->pkt_type == PACKET_LOOPBACK)
1902 		goto drop;
1903 
1904 	sk = pt->af_packet_priv;
1905 	po = pkt_sk(sk);
1906 
1907 	if (!net_eq(dev_net(dev), sock_net(sk)))
1908 		goto drop;
1909 
1910 	if (dev->header_ops) {
1911 		if (sk->sk_type != SOCK_DGRAM)
1912 			skb_push(skb, skb->data - skb_mac_header(skb));
1913 		else if (skb->pkt_type == PACKET_OUTGOING) {
1914 			/* Special case: outgoing packets have ll header at head */
1915 			skb_pull(skb, skb_network_offset(skb));
1916 		}
1917 	}
1918 
1919 	snaplen = skb->len;
1920 
1921 	res = run_filter(skb, sk, snaplen);
1922 	if (!res)
1923 		goto drop_n_restore;
1924 
1925 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1926 		status |= TP_STATUS_CSUMNOTREADY;
1927 	else if (skb->pkt_type != PACKET_OUTGOING &&
1928 		 (skb->ip_summed == CHECKSUM_COMPLETE ||
1929 		  skb_csum_unnecessary(skb)))
1930 		status |= TP_STATUS_CSUM_VALID;
1931 
1932 	if (snaplen > res)
1933 		snaplen = res;
1934 
1935 	if (sk->sk_type == SOCK_DGRAM) {
1936 		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1937 				  po->tp_reserve;
1938 	} else {
1939 		unsigned int maclen = skb_network_offset(skb);
1940 		netoff = TPACKET_ALIGN(po->tp_hdrlen +
1941 				       (maclen < 16 ? 16 : maclen)) +
1942 			po->tp_reserve;
1943 		macoff = netoff - maclen;
1944 	}
1945 	if (po->tp_version <= TPACKET_V2) {
1946 		if (macoff + snaplen > po->rx_ring.frame_size) {
1947 			if (po->copy_thresh &&
1948 			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1949 				if (skb_shared(skb)) {
1950 					copy_skb = skb_clone(skb, GFP_ATOMIC);
1951 				} else {
1952 					copy_skb = skb_get(skb);
1953 					skb_head = skb->data;
1954 				}
1955 				if (copy_skb)
1956 					skb_set_owner_r(copy_skb, sk);
1957 			}
1958 			snaplen = po->rx_ring.frame_size - macoff;
1959 			if ((int)snaplen < 0)
1960 				snaplen = 0;
1961 		}
1962 	} else if (unlikely(macoff + snaplen >
1963 			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
1964 		u32 nval;
1965 
1966 		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
1967 		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
1968 			    snaplen, nval, macoff);
1969 		snaplen = nval;
1970 		if (unlikely((int)snaplen < 0)) {
1971 			snaplen = 0;
1972 			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
1973 		}
1974 	}
1975 	spin_lock(&sk->sk_receive_queue.lock);
1976 	h.raw = packet_current_rx_frame(po, skb,
1977 					TP_STATUS_KERNEL, (macoff+snaplen));
1978 	if (!h.raw)
1979 		goto ring_is_full;
1980 	if (po->tp_version <= TPACKET_V2) {
1981 		packet_increment_rx_head(po, &po->rx_ring);
1982 	/*
1983 	 * LOSING will be reported until you read the stats,
1984 	 * because it's COR - Clear On Read.
1985 	 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
1986 	 * at the packet level.
1987 	 */
1988 		if (po->stats.stats1.tp_drops)
1989 			status |= TP_STATUS_LOSING;
1990 	}
1991 	po->stats.stats1.tp_packets++;
1992 	if (copy_skb) {
1993 		status |= TP_STATUS_COPY;
1994 		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1995 	}
1996 	spin_unlock(&sk->sk_receive_queue.lock);
1997 
1998 	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
1999 
2000 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2001 		getnstimeofday(&ts);
2002 
2003 	status |= ts_status;
2004 
2005 	switch (po->tp_version) {
2006 	case TPACKET_V1:
2007 		h.h1->tp_len = skb->len;
2008 		h.h1->tp_snaplen = snaplen;
2009 		h.h1->tp_mac = macoff;
2010 		h.h1->tp_net = netoff;
2011 		h.h1->tp_sec = ts.tv_sec;
2012 		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2013 		hdrlen = sizeof(*h.h1);
2014 		break;
2015 	case TPACKET_V2:
2016 		h.h2->tp_len = skb->len;
2017 		h.h2->tp_snaplen = snaplen;
2018 		h.h2->tp_mac = macoff;
2019 		h.h2->tp_net = netoff;
2020 		h.h2->tp_sec = ts.tv_sec;
2021 		h.h2->tp_nsec = ts.tv_nsec;
2022 		if (skb_vlan_tag_present(skb)) {
2023 			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2024 			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2025 			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2026 		} else {
2027 			h.h2->tp_vlan_tci = 0;
2028 			h.h2->tp_vlan_tpid = 0;
2029 		}
2030 		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2031 		hdrlen = sizeof(*h.h2);
2032 		break;
2033 	case TPACKET_V3:
2034 		/* tp_next_offset and the vlan fields are already populated above,
2035 		 * so don't clear those fields here.
2036 		 */
2037 		h.h3->tp_status |= status;
2038 		h.h3->tp_len = skb->len;
2039 		h.h3->tp_snaplen = snaplen;
2040 		h.h3->tp_mac = macoff;
2041 		h.h3->tp_net = netoff;
2042 		h.h3->tp_sec  = ts.tv_sec;
2043 		h.h3->tp_nsec = ts.tv_nsec;
2044 		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2045 		hdrlen = sizeof(*h.h3);
2046 		break;
2047 	default:
2048 		BUG();
2049 	}
2050 
2051 	sll = h.raw + TPACKET_ALIGN(hdrlen);
2052 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2053 	sll->sll_family = AF_PACKET;
2054 	sll->sll_hatype = dev->type;
2055 	sll->sll_protocol = skb->protocol;
2056 	sll->sll_pkttype = skb->pkt_type;
2057 	if (unlikely(po->origdev))
2058 		sll->sll_ifindex = orig_dev->ifindex;
2059 	else
2060 		sll->sll_ifindex = dev->ifindex;
2061 
2062 	smp_mb();
2063 
2064 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2065 	if (po->tp_version <= TPACKET_V2) {
2066 		u8 *start, *end;
2067 
2068 		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2069 					macoff + snaplen);
2070 
2071 		for (start = h.raw; start < end; start += PAGE_SIZE)
2072 			flush_dcache_page(pgv_to_page(start));
2073 	}
2074 	smp_wmb();
2075 #endif
2076 
2077 	if (po->tp_version <= TPACKET_V2) {
2078 		__packet_set_status(po, h.raw, status);
2079 		sk->sk_data_ready(sk);
2080 	} else {
2081 		prb_clear_blk_fill_status(&po->rx_ring);
2082 	}
2083 
2084 drop_n_restore:
2085 	if (skb_head != skb->data && skb_shared(skb)) {
2086 		skb->data = skb_head;
2087 		skb->len = skb_len;
2088 	}
2089 drop:
2090 	kfree_skb(skb);
2091 	return 0;
2092 
2093 ring_is_full:
2094 	po->stats.stats1.tp_drops++;
2095 	spin_unlock(&sk->sk_receive_queue.lock);
2096 
2097 	sk->sk_data_ready(sk);
2098 	kfree_skb(copy_skb);
2099 	goto drop_n_restore;
2100 }
2101 
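/* Destructor for skbs built from TX ring frames: drop the pending count
 * and hand the frame back to user space as TP_STATUS_AVAILABLE (plus any
 * timestamp status bits).
 */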
2102 static void tpacket_destruct_skb(struct sk_buff *skb)
2103 {
2104 	struct packet_sock *po = pkt_sk(skb->sk);
2105 
2106 	if (likely(po->tx_ring.pg_vec)) {
2107 		void *ph;
2108 		__u32 ts;
2109 
2110 		ph = skb_shinfo(skb)->destructor_arg;
2111 		packet_dec_pending(&po->tx_ring);
2112 
2113 		ts = __packet_set_timestamp(po, ph, skb);
2114 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2115 	}
2116 
2117 	sock_wfree(skb);
2118 }
2119 
2120 static bool ll_header_truncated(const struct net_device *dev, int len)
2121 {
2122 	/* net device doesn't like empty head */
2123 	if (unlikely(len <= dev->hard_header_len)) {
2124 		net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
2125 				     current->comm, len, dev->hard_header_len);
2126 		return true;
2127 	}
2128 
2129 	return false;
2130 }
2131 
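/* Build an skb from a single TX ring frame: the link-layer header is
 * copied (or constructed via dev_hard_header() for SOCK_DGRAM) into the
 * linear area, while the payload is attached page by page as fragments
 * that reference the ring memory directly.
 */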
2132 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2133 		void *frame, struct net_device *dev, int size_max,
2134 		__be16 proto, unsigned char *addr, int hlen)
2135 {
2136 	union tpacket_uhdr ph;
2137 	int to_write, offset, len, tp_len, nr_frags, len_max;
2138 	struct socket *sock = po->sk.sk_socket;
2139 	struct page *page;
2140 	void *data;
2141 	int err;
2142 
2143 	ph.raw = frame;
2144 
2145 	skb->protocol = proto;
2146 	skb->dev = dev;
2147 	skb->priority = po->sk.sk_priority;
2148 	skb->mark = po->sk.sk_mark;
2149 	sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
2150 	skb_shinfo(skb)->destructor_arg = ph.raw;
2151 
2152 	switch (po->tp_version) {
2153 	case TPACKET_V2:
2154 		tp_len = ph.h2->tp_len;
2155 		break;
2156 	default:
2157 		tp_len = ph.h1->tp_len;
2158 		break;
2159 	}
2160 	if (unlikely(tp_len > size_max)) {
2161 		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2162 		return -EMSGSIZE;
2163 	}
2164 
2165 	skb_reserve(skb, hlen);
2166 	skb_reset_network_header(skb);
2167 
2168 	if (!packet_use_direct_xmit(po))
2169 		skb_probe_transport_header(skb, 0);
2170 	if (unlikely(po->tp_tx_has_off)) {
2171 		int off_min, off_max, off;
2172 		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2173 		off_max = po->tx_ring.frame_size - tp_len;
2174 		if (sock->type == SOCK_DGRAM) {
2175 			switch (po->tp_version) {
2176 			case TPACKET_V2:
2177 				off = ph.h2->tp_net;
2178 				break;
2179 			default:
2180 				off = ph.h1->tp_net;
2181 				break;
2182 			}
2183 		} else {
2184 			switch (po->tp_version) {
2185 			case TPACKET_V2:
2186 				off = ph.h2->tp_mac;
2187 				break;
2188 			default:
2189 				off = ph.h1->tp_mac;
2190 				break;
2191 			}
2192 		}
2193 		if (unlikely((off < off_min) || (off_max < off)))
2194 			return -EINVAL;
2195 		data = ph.raw + off;
2196 	} else {
2197 		data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
2198 	}
2199 	to_write = tp_len;
2200 
2201 	if (sock->type == SOCK_DGRAM) {
2202 		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2203 				NULL, tp_len);
2204 		if (unlikely(err < 0))
2205 			return -EINVAL;
2206 	} else if (dev->hard_header_len) {
2207 		if (ll_header_truncated(dev, tp_len))
2208 			return -EINVAL;
2209 
2210 		skb_push(skb, dev->hard_header_len);
2211 		err = skb_store_bits(skb, 0, data,
2212 				dev->hard_header_len);
2213 		if (unlikely(err))
2214 			return err;
2215 
2216 		data += dev->hard_header_len;
2217 		to_write -= dev->hard_header_len;
2218 	}
2219 
2220 	offset = offset_in_page(data);
2221 	len_max = PAGE_SIZE - offset;
2222 	len = ((to_write > len_max) ? len_max : to_write);
2223 
2224 	skb->data_len = to_write;
2225 	skb->len += to_write;
2226 	skb->truesize += to_write;
2227 	atomic_add(to_write, &po->sk.sk_wmem_alloc);
2228 
2229 	while (likely(to_write)) {
2230 		nr_frags = skb_shinfo(skb)->nr_frags;
2231 
2232 		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2233 			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2234 			       MAX_SKB_FRAGS);
2235 			return -EFAULT;
2236 		}
2237 
2238 		page = pgv_to_page(data);
2239 		data += len;
2240 		flush_dcache_page(page);
2241 		get_page(page);
2242 		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2243 		to_write -= len;
2244 		offset = 0;
2245 		len_max = PAGE_SIZE;
2246 		len = ((to_write > len_max) ? len_max : to_write);
2247 	}
2248 
2249 	return tp_len;
2250 }
2251 
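/* TX ring send loop: walk the ring, turn every frame marked
 * TP_STATUS_SEND_REQUEST into an skb and transmit it, marking the frame
 * TP_STATUS_SENDING until its destructor releases it.  Without
 * MSG_DONTWAIT the loop also waits for outstanding frames to complete.
 */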
2252 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2253 {
2254 	struct sk_buff *skb;
2255 	struct net_device *dev;
2256 	__be16 proto;
2257 	int err, reserve = 0;
2258 	void *ph;
2259 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2260 	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2261 	int tp_len, size_max;
2262 	unsigned char *addr;
2263 	int len_sum = 0;
2264 	int status = TP_STATUS_AVAILABLE;
2265 	int hlen, tlen;
2266 
2267 	mutex_lock(&po->pg_vec_lock);
2268 
2269 	if (likely(saddr == NULL)) {
2270 		dev	= packet_cached_dev_get(po);
2271 		proto	= po->num;
2272 		addr	= NULL;
2273 	} else {
2274 		err = -EINVAL;
2275 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2276 			goto out;
2277 		if (msg->msg_namelen < (saddr->sll_halen
2278 					+ offsetof(struct sockaddr_ll,
2279 						sll_addr)))
2280 			goto out;
2281 		proto	= saddr->sll_protocol;
2282 		addr	= saddr->sll_addr;
2283 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2284 	}
2285 
2286 	err = -ENXIO;
2287 	if (unlikely(dev == NULL))
2288 		goto out;
2289 	err = -ENETDOWN;
2290 	if (unlikely(!(dev->flags & IFF_UP)))
2291 		goto out_put;
2292 
2293 	reserve = dev->hard_header_len + VLAN_HLEN;
2294 	size_max = po->tx_ring.frame_size
2295 		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2296 
2297 	if (size_max > dev->mtu + reserve)
2298 		size_max = dev->mtu + reserve;
2299 
2300 	do {
2301 		ph = packet_current_frame(po, &po->tx_ring,
2302 					  TP_STATUS_SEND_REQUEST);
2303 		if (unlikely(ph == NULL)) {
2304 			if (need_wait && need_resched())
2305 				schedule();
2306 			continue;
2307 		}
2308 
2309 		status = TP_STATUS_SEND_REQUEST;
2310 		hlen = LL_RESERVED_SPACE(dev);
2311 		tlen = dev->needed_tailroom;
2312 		skb = sock_alloc_send_skb(&po->sk,
2313 				hlen + tlen + sizeof(struct sockaddr_ll),
2314 				!need_wait, &err);
2315 
2316 		if (unlikely(skb == NULL)) {
2317 			/* we assume the socket was initially writeable ... */
2318 			if (likely(len_sum > 0))
2319 				err = len_sum;
2320 			goto out_status;
2321 		}
2322 		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2323 					  addr, hlen);
2324 		if (tp_len > dev->mtu + dev->hard_header_len) {
2325 			struct ethhdr *ehdr;
2326 			/* Earlier code assumed this would be a VLAN pkt,
2327 			 * double-check this now that we have the actual
2328 			 * packet in hand.
2329 			 */
2330 
2331 			skb_reset_mac_header(skb);
2332 			ehdr = eth_hdr(skb);
2333 			if (ehdr->h_proto != htons(ETH_P_8021Q))
2334 				tp_len = -EMSGSIZE;
2335 		}
2336 		if (unlikely(tp_len < 0)) {
2337 			if (po->tp_loss) {
2338 				__packet_set_status(po, ph,
2339 						TP_STATUS_AVAILABLE);
2340 				packet_increment_head(&po->tx_ring);
2341 				kfree_skb(skb);
2342 				continue;
2343 			} else {
2344 				status = TP_STATUS_WRONG_FORMAT;
2345 				err = tp_len;
2346 				goto out_status;
2347 			}
2348 		}
2349 
2350 		packet_pick_tx_queue(dev, skb);
2351 
2352 		skb->destructor = tpacket_destruct_skb;
2353 		__packet_set_status(po, ph, TP_STATUS_SENDING);
2354 		packet_inc_pending(&po->tx_ring);
2355 
2356 		status = TP_STATUS_SEND_REQUEST;
2357 		err = po->xmit(skb);
2358 		if (unlikely(err > 0)) {
2359 			err = net_xmit_errno(err);
2360 			if (err && __packet_get_status(po, ph) ==
2361 				   TP_STATUS_AVAILABLE) {
2362 				/* skb was destructed already */
2363 				skb = NULL;
2364 				goto out_status;
2365 			}
2366 			/*
2367 			 * skb was dropped but not destructed yet;
2368 			 * let's treat it like congestion or err < 0
2369 			 */
2370 			err = 0;
2371 		}
2372 		packet_increment_head(&po->tx_ring);
2373 		len_sum += tp_len;
2374 	} while (likely((ph != NULL) ||
2375 		/* Note: packet_read_pending() might be slow if we have
2376 		 * to call it, as it's a per-cpu variable, but in the
2377 		 * fast path we already short-circuit the loop with the
2378 		 * first condition and luckily don't have to go down
2379 		 * that path anyway.
2380 		 */
2381 		 (need_wait && packet_read_pending(&po->tx_ring))));
2382 
2383 	err = len_sum;
2384 	goto out_put;
2385 
2386 out_status:
2387 	__packet_set_status(po, ph, status);
2388 	kfree_skb(skb);
2389 out_put:
2390 	dev_put(dev);
2391 out:
2392 	mutex_unlock(&po->pg_vec_lock);
2393 	return err;
2394 }
2395 
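/* Allocate an skb for the non-ring send path.  Packets that fit in a
 * page are made fully linear; larger ones keep only 'linear' bytes in
 * the linear area and take the rest as paged data.
 */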
2396 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2397 				        size_t reserve, size_t len,
2398 				        size_t linear, int noblock,
2399 				        int *err)
2400 {
2401 	struct sk_buff *skb;
2402 
2403 	/* Under a page?  Don't bother with paged skb. */
2404 	if (prepad + len < PAGE_SIZE || !linear)
2405 		linear = len;
2406 
2407 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2408 				   err, 0);
2409 	if (!skb)
2410 		return NULL;
2411 
2412 	skb_reserve(skb, reserve);
2413 	skb_put(skb, linear);
2414 	skb->data_len = len - linear;
2415 	skb->len += len - linear;
2416 
2417 	return skb;
2418 }
2419 
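/* Non-ring sendmsg() path: resolve the output device, optionally parse a
 * leading virtio_net_hdr (PACKET_VNET_HDR) to set up checksum offload and
 * GSO, copy the user data into a fresh skb and transmit it via po->xmit.
 */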
2420 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2421 {
2422 	struct sock *sk = sock->sk;
2423 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2424 	struct sk_buff *skb;
2425 	struct net_device *dev;
2426 	__be16 proto;
2427 	unsigned char *addr;
2428 	int err, reserve = 0;
2429 	struct virtio_net_hdr vnet_hdr = { 0 };
2430 	int offset = 0;
2431 	int vnet_hdr_len;
2432 	struct packet_sock *po = pkt_sk(sk);
2433 	unsigned short gso_type = 0;
2434 	int hlen, tlen;
2435 	int extra_len = 0;
2436 	ssize_t n;
2437 
2438 	/*
2439 	 *	Get and verify the address.
2440 	 */
2441 
2442 	if (likely(saddr == NULL)) {
2443 		dev	= packet_cached_dev_get(po);
2444 		proto	= po->num;
2445 		addr	= NULL;
2446 	} else {
2447 		err = -EINVAL;
2448 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2449 			goto out;
2450 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2451 			goto out;
2452 		proto	= saddr->sll_protocol;
2453 		addr	= saddr->sll_addr;
2454 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2455 	}
2456 
2457 	err = -ENXIO;
2458 	if (unlikely(dev == NULL))
2459 		goto out_unlock;
2460 	err = -ENETDOWN;
2461 	if (unlikely(!(dev->flags & IFF_UP)))
2462 		goto out_unlock;
2463 
2464 	if (sock->type == SOCK_RAW)
2465 		reserve = dev->hard_header_len;
2466 	if (po->has_vnet_hdr) {
2467 		vnet_hdr_len = sizeof(vnet_hdr);
2468 
2469 		err = -EINVAL;
2470 		if (len < vnet_hdr_len)
2471 			goto out_unlock;
2472 
2473 		len -= vnet_hdr_len;
2474 
2475 		err = -EFAULT;
2476 		n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
2477 		if (n != vnet_hdr_len)
2478 			goto out_unlock;
2479 
2480 		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2481 		    (__virtio16_to_cpu(false, vnet_hdr.csum_start) +
2482 		     __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 >
2483 		      __virtio16_to_cpu(false, vnet_hdr.hdr_len)))
2484 			vnet_hdr.hdr_len = __cpu_to_virtio16(false,
2485 				 __virtio16_to_cpu(false, vnet_hdr.csum_start) +
2486 				__virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2);
2487 
2488 		err = -EINVAL;
2489 		if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len)
2490 			goto out_unlock;
2491 
2492 		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2493 			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2494 			case VIRTIO_NET_HDR_GSO_TCPV4:
2495 				gso_type = SKB_GSO_TCPV4;
2496 				break;
2497 			case VIRTIO_NET_HDR_GSO_TCPV6:
2498 				gso_type = SKB_GSO_TCPV6;
2499 				break;
2500 			case VIRTIO_NET_HDR_GSO_UDP:
2501 				gso_type = SKB_GSO_UDP;
2502 				break;
2503 			default:
2504 				goto out_unlock;
2505 			}
2506 
2507 			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2508 				gso_type |= SKB_GSO_TCP_ECN;
2509 
2510 			if (vnet_hdr.gso_size == 0)
2511 				goto out_unlock;
2512 
2513 		}
2514 	}
2515 
2516 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2517 		if (!netif_supports_nofcs(dev)) {
2518 			err = -EPROTONOSUPPORT;
2519 			goto out_unlock;
2520 		}
2521 		extra_len = 4; /* We're doing our own CRC */
2522 	}
2523 
2524 	err = -EMSGSIZE;
2525 	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2526 		goto out_unlock;
2527 
2528 	err = -ENOBUFS;
2529 	hlen = LL_RESERVED_SPACE(dev);
2530 	tlen = dev->needed_tailroom;
2531 	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
2532 			       __virtio16_to_cpu(false, vnet_hdr.hdr_len),
2533 			       msg->msg_flags & MSG_DONTWAIT, &err);
2534 	if (skb == NULL)
2535 		goto out_unlock;
2536 
2537 	skb_set_network_header(skb, reserve);
2538 
2539 	err = -EINVAL;
2540 	if (sock->type == SOCK_DGRAM) {
2541 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2542 		if (unlikely(offset < 0))
2543 			goto out_free;
2544 	} else {
2545 		if (ll_header_truncated(dev, len))
2546 			goto out_free;
2547 	}
2548 
2549 	/* Returns -EFAULT on error */
2550 	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2551 	if (err)
2552 		goto out_free;
2553 
2554 	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2555 
2556 	if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
2557 		/* Earlier code assumed this would be a VLAN pkt,
2558 		 * double-check this now that we have the actual
2559 		 * packet in hand.
2560 		 */
2561 		struct ethhdr *ehdr;
2562 		skb_reset_mac_header(skb);
2563 		ehdr = eth_hdr(skb);
2564 		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2565 			err = -EMSGSIZE;
2566 			goto out_free;
2567 		}
2568 	}
2569 
2570 	skb->protocol = proto;
2571 	skb->dev = dev;
2572 	skb->priority = sk->sk_priority;
2573 	skb->mark = sk->sk_mark;
2574 
2575 	packet_pick_tx_queue(dev, skb);
2576 
2577 	if (po->has_vnet_hdr) {
2578 		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2579 			u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start);
2580 			u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset);
2581 			if (!skb_partial_csum_set(skb, s, o)) {
2582 				err = -EINVAL;
2583 				goto out_free;
2584 			}
2585 		}
2586 
2587 		skb_shinfo(skb)->gso_size =
2588 			__virtio16_to_cpu(false, vnet_hdr.gso_size);
2589 		skb_shinfo(skb)->gso_type = gso_type;
2590 
2591 		/* Header must be checked, and gso_segs computed. */
2592 		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2593 		skb_shinfo(skb)->gso_segs = 0;
2594 
2595 		len += vnet_hdr_len;
2596 	}
2597 
2598 	if (!packet_use_direct_xmit(po))
2599 		skb_probe_transport_header(skb, reserve);
2600 	if (unlikely(extra_len == 4))
2601 		skb->no_fcs = 1;
2602 
2603 	err = po->xmit(skb);
2604 	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2605 		goto out_unlock;
2606 
2607 	dev_put(dev);
2608 
2609 	return len;
2610 
2611 out_free:
2612 	kfree_skb(skb);
2613 out_unlock:
2614 	if (dev)
2615 		dev_put(dev);
2616 out:
2617 	return err;
2618 }
2619 
2620 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2621 {
2622 	struct sock *sk = sock->sk;
2623 	struct packet_sock *po = pkt_sk(sk);
2624 
2625 	if (po->tx_ring.pg_vec)
2626 		return tpacket_snd(po, msg);
2627 	else
2628 		return packet_snd(sock, msg, len);
2629 }
2630 
2631 /*
2632  *	Close a PACKET socket. This is fairly simple. We immediately go
2633  *	to 'closed' state and remove our protocol entry in the device list.
2634  */
2635 
2636 static int packet_release(struct socket *sock)
2637 {
2638 	struct sock *sk = sock->sk;
2639 	struct packet_sock *po;
2640 	struct net *net;
2641 	union tpacket_req_u req_u;
2642 
2643 	if (!sk)
2644 		return 0;
2645 
2646 	net = sock_net(sk);
2647 	po = pkt_sk(sk);
2648 
2649 	mutex_lock(&net->packet.sklist_lock);
2650 	sk_del_node_init_rcu(sk);
2651 	mutex_unlock(&net->packet.sklist_lock);
2652 
2653 	preempt_disable();
2654 	sock_prot_inuse_add(net, sk->sk_prot, -1);
2655 	preempt_enable();
2656 
2657 	spin_lock(&po->bind_lock);
2658 	unregister_prot_hook(sk, false);
2659 	packet_cached_dev_reset(po);
2660 
2661 	if (po->prot_hook.dev) {
2662 		dev_put(po->prot_hook.dev);
2663 		po->prot_hook.dev = NULL;
2664 	}
2665 	spin_unlock(&po->bind_lock);
2666 
2667 	packet_flush_mclist(sk);
2668 
2669 	if (po->rx_ring.pg_vec) {
2670 		memset(&req_u, 0, sizeof(req_u));
2671 		packet_set_ring(sk, &req_u, 1, 0);
2672 	}
2673 
2674 	if (po->tx_ring.pg_vec) {
2675 		memset(&req_u, 0, sizeof(req_u));
2676 		packet_set_ring(sk, &req_u, 1, 1);
2677 	}
2678 
2679 	fanout_release(sk);
2680 
2681 	synchronize_net();
2682 	/*
2683 	 *	Now the socket is dead. No more input will appear.
2684 	 */
2685 	sock_orphan(sk);
2686 	sock->sk = NULL;
2687 
2688 	/* Purge queues */
2689 
2690 	skb_queue_purge(&sk->sk_receive_queue);
2691 	packet_free_pending(po);
2692 	sk_refcnt_debug_release(sk);
2693 
2694 	sock_put(sk);
2695 	return 0;
2696 }
2697 
2698 /*
2699  *	Attach a packet hook.
2700  */
2701 
2702 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
2703 {
2704 	struct packet_sock *po = pkt_sk(sk);
2705 	const struct net_device *dev_curr;
2706 	__be16 proto_curr;
2707 	bool need_rehook;
2708 
2709 	if (po->fanout) {
2710 		if (dev)
2711 			dev_put(dev);
2712 
2713 		return -EINVAL;
2714 	}
2715 
2716 	lock_sock(sk);
2717 	spin_lock(&po->bind_lock);
2718 
2719 	proto_curr = po->prot_hook.type;
2720 	dev_curr = po->prot_hook.dev;
2721 
2722 	need_rehook = proto_curr != proto || dev_curr != dev;
2723 
2724 	if (need_rehook) {
2725 		unregister_prot_hook(sk, true);
2726 
2727 		po->num = proto;
2728 		po->prot_hook.type = proto;
2729 
2730 		if (po->prot_hook.dev)
2731 			dev_put(po->prot_hook.dev);
2732 
2733 		po->prot_hook.dev = dev;
2734 
2735 		po->ifindex = dev ? dev->ifindex : 0;
2736 		packet_cached_dev_assign(po, dev);
2737 	}
2738 
2739 	if (proto == 0 || !need_rehook)
2740 		goto out_unlock;
2741 
2742 	if (!dev || (dev->flags & IFF_UP)) {
2743 		register_prot_hook(sk);
2744 	} else {
2745 		sk->sk_err = ENETDOWN;
2746 		if (!sock_flag(sk, SOCK_DEAD))
2747 			sk->sk_error_report(sk);
2748 	}
2749 
2750 out_unlock:
2751 	spin_unlock(&po->bind_lock);
2752 	release_sock(sk);
2753 	return 0;
2754 }
2755 
2756 /*
2757  *	Bind a packet socket to a device
2758  */
2759 
2760 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2761 			    int addr_len)
2762 {
2763 	struct sock *sk = sock->sk;
2764 	char name[15];
2765 	struct net_device *dev;
2766 	int err = -ENODEV;
2767 
2768 	/*
2769 	 *	Check legality
2770 	 */
2771 
2772 	if (addr_len != sizeof(struct sockaddr))
2773 		return -EINVAL;
2774 	strlcpy(name, uaddr->sa_data, sizeof(name));
2775 
2776 	dev = dev_get_by_name(sock_net(sk), name);
2777 	if (dev)
2778 		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
2779 	return err;
2780 }
2781 
2782 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2783 {
2784 	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2785 	struct sock *sk = sock->sk;
2786 	struct net_device *dev = NULL;
2787 	int err;
2788 
2789 
2790 	/*
2791 	 *	Check legality
2792 	 */
2793 
2794 	if (addr_len < sizeof(struct sockaddr_ll))
2795 		return -EINVAL;
2796 	if (sll->sll_family != AF_PACKET)
2797 		return -EINVAL;
2798 
2799 	if (sll->sll_ifindex) {
2800 		err = -ENODEV;
2801 		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
2802 		if (dev == NULL)
2803 			goto out;
2804 	}
2805 	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
2806 
2807 out:
2808 	return err;
2809 }
2810 
2811 static struct proto packet_proto = {
2812 	.name	  = "PACKET",
2813 	.owner	  = THIS_MODULE,
2814 	.obj_size = sizeof(struct packet_sock),
2815 };
2816 
2817 /*
2818  *	Create a packet socket (SOCK_RAW, SOCK_DGRAM or legacy SOCK_PACKET).
2819  */
2820 
2821 static int packet_create(struct net *net, struct socket *sock, int protocol,
2822 			 int kern)
2823 {
2824 	struct sock *sk;
2825 	struct packet_sock *po;
2826 	__be16 proto = (__force __be16)protocol; /* weird, but documented */
2827 	int err;
2828 
2829 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
2830 		return -EPERM;
2831 	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2832 	    sock->type != SOCK_PACKET)
2833 		return -ESOCKTNOSUPPORT;
2834 
2835 	sock->state = SS_UNCONNECTED;
2836 
2837 	err = -ENOBUFS;
2838 	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
2839 	if (sk == NULL)
2840 		goto out;
2841 
2842 	sock->ops = &packet_ops;
2843 	if (sock->type == SOCK_PACKET)
2844 		sock->ops = &packet_ops_spkt;
2845 
2846 	sock_init_data(sock, sk);
2847 
2848 	po = pkt_sk(sk);
2849 	sk->sk_family = PF_PACKET;
2850 	po->num = proto;
2851 	po->xmit = dev_queue_xmit;
2852 
2853 	err = packet_alloc_pending(po);
2854 	if (err)
2855 		goto out2;
2856 
2857 	packet_cached_dev_reset(po);
2858 
2859 	sk->sk_destruct = packet_sock_destruct;
2860 	sk_refcnt_debug_inc(sk);
2861 
2862 	/*
2863 	 *	Attach a protocol block
2864 	 */
2865 
2866 	spin_lock_init(&po->bind_lock);
2867 	mutex_init(&po->pg_vec_lock);
2868 	po->prot_hook.func = packet_rcv;
2869 
2870 	if (sock->type == SOCK_PACKET)
2871 		po->prot_hook.func = packet_rcv_spkt;
2872 
2873 	po->prot_hook.af_packet_priv = sk;
2874 
2875 	if (proto) {
2876 		po->prot_hook.type = proto;
2877 		register_prot_hook(sk);
2878 	}
2879 
2880 	mutex_lock(&net->packet.sklist_lock);
2881 	sk_add_node_rcu(sk, &net->packet.sklist);
2882 	mutex_unlock(&net->packet.sklist_lock);
2883 
2884 	preempt_disable();
2885 	sock_prot_inuse_add(net, &packet_proto, 1);
2886 	preempt_enable();
2887 
2888 	return 0;
2889 out2:
2890 	sk_free(sk);
2891 out:
2892 	return err;
2893 }
2894 
2895 /*
2896  *	Pull a packet from our receive queue and hand it to the user.
2897  *	If necessary we block.
2898  */
2899 
2900 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2901 			  int flags)
2902 {
2903 	struct sock *sk = sock->sk;
2904 	struct sk_buff *skb;
2905 	int copied, err;
2906 	int vnet_hdr_len = 0;
2907 	unsigned int origlen = 0;
2908 
2909 	err = -EINVAL;
2910 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
2911 		goto out;
2912 
2913 #if 0
2914 	/* What error should we return now? EUNATTACH? */
2915 	if (pkt_sk(sk)->ifindex < 0)
2916 		return -ENODEV;
2917 #endif
2918 
2919 	if (flags & MSG_ERRQUEUE) {
2920 		err = sock_recv_errqueue(sk, msg, len,
2921 					 SOL_PACKET, PACKET_TX_TIMESTAMP);
2922 		goto out;
2923 	}
2924 
2925 	/*
2926 	 *	Call the generic datagram receiver. This handles all sorts
2927 	 *	of horrible races and re-entrancy so we can forget about it
2928 	 *	in the protocol layers.
2929 	 *
2930 	 *	Now it will return ENETDOWN if the device has just gone down,
2931 	 *	but then it will block.
2932 	 */
2933 
2934 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
2935 
2936 	/*
2937 	 *	An error occurred, so return it. Because skb_recv_datagram()
2938 	 *	handles the blocking, we don't need to see or worry about
2939 	 *	blocking retries.
2940 	 */
2941 
2942 	if (skb == NULL)
2943 		goto out;
2944 
2945 	if (pkt_sk(sk)->has_vnet_hdr) {
2946 		struct virtio_net_hdr vnet_hdr = { 0 };
2947 
2948 		err = -EINVAL;
2949 		vnet_hdr_len = sizeof(vnet_hdr);
2950 		if (len < vnet_hdr_len)
2951 			goto out_free;
2952 
2953 		len -= vnet_hdr_len;
2954 
2955 		if (skb_is_gso(skb)) {
2956 			struct skb_shared_info *sinfo = skb_shinfo(skb);
2957 
2958 			/* This is a hint as to how much should be linear. */
2959 			vnet_hdr.hdr_len =
2960 				__cpu_to_virtio16(false, skb_headlen(skb));
2961 			vnet_hdr.gso_size =
2962 				__cpu_to_virtio16(false, sinfo->gso_size);
2963 			if (sinfo->gso_type & SKB_GSO_TCPV4)
2964 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2965 			else if (sinfo->gso_type & SKB_GSO_TCPV6)
2966 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2967 			else if (sinfo->gso_type & SKB_GSO_UDP)
2968 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2969 			else if (sinfo->gso_type & SKB_GSO_FCOE)
2970 				goto out_free;
2971 			else
2972 				BUG();
2973 			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2974 				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2975 		} else
2976 			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2977 
2978 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
2979 			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
2980 			vnet_hdr.csum_start = __cpu_to_virtio16(false,
2981 					  skb_checksum_start_offset(skb));
2982 			vnet_hdr.csum_offset = __cpu_to_virtio16(false,
2983 							 skb->csum_offset);
2984 		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2985 			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
2986 		} /* else everything is zero */
2987 
2988 		err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
2989 		if (err < 0)
2990 			goto out_free;
2991 	}
2992 
2993 	/* You lose any data beyond the buffer you gave. If this worries
2994 	 * a user program, it can ask the device for its MTU
2995 	 * anyway.
2996 	 */
2997 	copied = skb->len;
2998 	if (copied > len) {
2999 		copied = len;
3000 		msg->msg_flags |= MSG_TRUNC;
3001 	}
3002 
3003 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3004 	if (err)
3005 		goto out_free;
3006 
3007 	if (sock->type != SOCK_PACKET) {
3008 		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3009 
3010 		/* Original length was stored in sockaddr_ll fields */
3011 		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3012 		sll->sll_family = AF_PACKET;
3013 		sll->sll_protocol = skb->protocol;
3014 	}
3015 
3016 	sock_recv_ts_and_drops(msg, sk, skb);
3017 
3018 	if (msg->msg_name) {
3019 		/* If the address length field is there to be filled
3020 		 * in, we fill it in now.
3021 		 */
3022 		if (sock->type == SOCK_PACKET) {
3023 			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3024 			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3025 		} else {
3026 			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3027 
3028 			msg->msg_namelen = sll->sll_halen +
3029 				offsetof(struct sockaddr_ll, sll_addr);
3030 		}
3031 		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3032 		       msg->msg_namelen);
3033 	}
3034 
3035 	if (pkt_sk(sk)->auxdata) {
3036 		struct tpacket_auxdata aux;
3037 
3038 		aux.tp_status = TP_STATUS_USER;
3039 		if (skb->ip_summed == CHECKSUM_PARTIAL)
3040 			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3041 		else if (skb->pkt_type != PACKET_OUTGOING &&
3042 			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3043 			  skb_csum_unnecessary(skb)))
3044 			aux.tp_status |= TP_STATUS_CSUM_VALID;
3045 
3046 		aux.tp_len = origlen;
3047 		aux.tp_snaplen = skb->len;
3048 		aux.tp_mac = 0;
3049 		aux.tp_net = skb_network_offset(skb);
3050 		if (skb_vlan_tag_present(skb)) {
3051 			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3052 			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3053 			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3054 		} else {
3055 			aux.tp_vlan_tci = 0;
3056 			aux.tp_vlan_tpid = 0;
3057 		}
3058 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3059 	}
3060 
3061 	/*
3062 	 *	Free or return the buffer as appropriate. Again this
3063 	 *	hides all the races and re-entrancy issues from us.
3064 	 */
3065 	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3066 
3067 out_free:
3068 	skb_free_datagram(sk, skb);
3069 out:
3070 	return err;
3071 }
3072 
3073 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3074 			       int *uaddr_len, int peer)
3075 {
3076 	struct net_device *dev;
3077 	struct sock *sk	= sock->sk;
3078 
3079 	if (peer)
3080 		return -EOPNOTSUPP;
3081 
3082 	uaddr->sa_family = AF_PACKET;
3083 	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3084 	rcu_read_lock();
3085 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3086 	if (dev)
3087 		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3088 	rcu_read_unlock();
3089 	*uaddr_len = sizeof(*uaddr);
3090 
3091 	return 0;
3092 }
3093 
3094 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3095 			  int *uaddr_len, int peer)
3096 {
3097 	struct net_device *dev;
3098 	struct sock *sk = sock->sk;
3099 	struct packet_sock *po = pkt_sk(sk);
3100 	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3101 
3102 	if (peer)
3103 		return -EOPNOTSUPP;
3104 
3105 	sll->sll_family = AF_PACKET;
3106 	sll->sll_ifindex = po->ifindex;
3107 	sll->sll_protocol = po->num;
3108 	sll->sll_pkttype = 0;
3109 	rcu_read_lock();
3110 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3111 	if (dev) {
3112 		sll->sll_hatype = dev->type;
3113 		sll->sll_halen = dev->addr_len;
3114 		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3115 	} else {
3116 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3117 		sll->sll_halen = 0;
3118 	}
3119 	rcu_read_unlock();
3120 	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3121 
3122 	return 0;
3123 }
3124 
3125 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3126 			 int what)
3127 {
3128 	switch (i->type) {
3129 	case PACKET_MR_MULTICAST:
3130 		if (i->alen != dev->addr_len)
3131 			return -EINVAL;
3132 		if (what > 0)
3133 			return dev_mc_add(dev, i->addr);
3134 		else
3135 			return dev_mc_del(dev, i->addr);
3136 		break;
3137 	case PACKET_MR_PROMISC:
3138 		return dev_set_promiscuity(dev, what);
3139 	case PACKET_MR_ALLMULTI:
3140 		return dev_set_allmulti(dev, what);
3141 	case PACKET_MR_UNICAST:
3142 		if (i->alen != dev->addr_len)
3143 			return -EINVAL;
3144 		if (what > 0)
3145 			return dev_uc_add(dev, i->addr);
3146 		else
3147 			return dev_uc_del(dev, i->addr);
3148 		break;
3149 	default:
3150 		break;
3151 	}
3152 	return 0;
3153 }
3154 
3155 static void packet_dev_mclist_delete(struct net_device *dev,
3156 				     struct packet_mclist **mlp)
3157 {
3158 	struct packet_mclist *ml;
3159 
3160 	while ((ml = *mlp) != NULL) {
3161 		if (ml->ifindex == dev->ifindex) {
3162 			packet_dev_mc(dev, ml, -1);
3163 			*mlp = ml->next;
3164 			kfree(ml);
3165 		} else
3166 			mlp = &ml->next;
3167 	}
3168 }
3169 
3170 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3171 {
3172 	struct packet_sock *po = pkt_sk(sk);
3173 	struct packet_mclist *ml, *i;
3174 	struct net_device *dev;
3175 	int err;
3176 
3177 	rtnl_lock();
3178 
3179 	err = -ENODEV;
3180 	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3181 	if (!dev)
3182 		goto done;
3183 
3184 	err = -EINVAL;
3185 	if (mreq->mr_alen > dev->addr_len)
3186 		goto done;
3187 
3188 	err = -ENOBUFS;
3189 	i = kmalloc(sizeof(*i), GFP_KERNEL);
3190 	if (i == NULL)
3191 		goto done;
3192 
3193 	err = 0;
3194 	for (ml = po->mclist; ml; ml = ml->next) {
3195 		if (ml->ifindex == mreq->mr_ifindex &&
3196 		    ml->type == mreq->mr_type &&
3197 		    ml->alen == mreq->mr_alen &&
3198 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3199 			ml->count++;
3200 			/* Free the new element ... */
3201 			kfree(i);
3202 			goto done;
3203 		}
3204 	}
3205 
3206 	i->type = mreq->mr_type;
3207 	i->ifindex = mreq->mr_ifindex;
3208 	i->alen = mreq->mr_alen;
3209 	memcpy(i->addr, mreq->mr_address, i->alen);
3210 	i->count = 1;
3211 	i->next = po->mclist;
3212 	po->mclist = i;
3213 	err = packet_dev_mc(dev, i, 1);
3214 	if (err) {
3215 		po->mclist = i->next;
3216 		kfree(i);
3217 	}
3218 
3219 done:
3220 	rtnl_unlock();
3221 	return err;
3222 }
3223 
3224 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3225 {
3226 	struct packet_mclist *ml, **mlp;
3227 
3228 	rtnl_lock();
3229 
3230 	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3231 		if (ml->ifindex == mreq->mr_ifindex &&
3232 		    ml->type == mreq->mr_type &&
3233 		    ml->alen == mreq->mr_alen &&
3234 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3235 			if (--ml->count == 0) {
3236 				struct net_device *dev;
3237 				*mlp = ml->next;
3238 				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3239 				if (dev)
3240 					packet_dev_mc(dev, ml, -1);
3241 				kfree(ml);
3242 			}
3243 			break;
3244 		}
3245 	}
3246 	rtnl_unlock();
3247 	return 0;
3248 }
3249 
3250 static void packet_flush_mclist(struct sock *sk)
3251 {
3252 	struct packet_sock *po = pkt_sk(sk);
3253 	struct packet_mclist *ml;
3254 
3255 	if (!po->mclist)
3256 		return;
3257 
3258 	rtnl_lock();
3259 	while ((ml = po->mclist) != NULL) {
3260 		struct net_device *dev;
3261 
3262 		po->mclist = ml->next;
3263 		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3264 		if (dev != NULL)
3265 			packet_dev_mc(dev, ml, -1);
3266 		kfree(ml);
3267 	}
3268 	rtnl_unlock();
3269 }
3270 
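/* SOL_PACKET socket options.  Options that affect ring or header layout
 * (PACKET_VERSION, PACKET_RESERVE, PACKET_LOSS, PACKET_VNET_HDR,
 * PACKET_TX_HAS_OFF) are refused with -EBUSY once a ring has been set up.
 */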
3271 static int
3272 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3273 {
3274 	struct sock *sk = sock->sk;
3275 	struct packet_sock *po = pkt_sk(sk);
3276 	int ret;
3277 
3278 	if (level != SOL_PACKET)
3279 		return -ENOPROTOOPT;
3280 
3281 	switch (optname) {
3282 	case PACKET_ADD_MEMBERSHIP:
3283 	case PACKET_DROP_MEMBERSHIP:
3284 	{
3285 		struct packet_mreq_max mreq;
3286 		int len = optlen;
3287 		memset(&mreq, 0, sizeof(mreq));
3288 		if (len < sizeof(struct packet_mreq))
3289 			return -EINVAL;
3290 		if (len > sizeof(mreq))
3291 			len = sizeof(mreq);
3292 		if (copy_from_user(&mreq, optval, len))
3293 			return -EFAULT;
3294 		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3295 			return -EINVAL;
3296 		if (optname == PACKET_ADD_MEMBERSHIP)
3297 			ret = packet_mc_add(sk, &mreq);
3298 		else
3299 			ret = packet_mc_drop(sk, &mreq);
3300 		return ret;
3301 	}
3302 
3303 	case PACKET_RX_RING:
3304 	case PACKET_TX_RING:
3305 	{
3306 		union tpacket_req_u req_u;
3307 		int len;
3308 
3309 		switch (po->tp_version) {
3310 		case TPACKET_V1:
3311 		case TPACKET_V2:
3312 			len = sizeof(req_u.req);
3313 			break;
3314 		case TPACKET_V3:
3315 		default:
3316 			len = sizeof(req_u.req3);
3317 			break;
3318 		}
3319 		if (optlen < len)
3320 			return -EINVAL;
3321 		if (pkt_sk(sk)->has_vnet_hdr)
3322 			return -EINVAL;
3323 		if (copy_from_user(&req_u.req, optval, len))
3324 			return -EFAULT;
3325 		return packet_set_ring(sk, &req_u, 0,
3326 			optname == PACKET_TX_RING);
3327 	}
3328 	case PACKET_COPY_THRESH:
3329 	{
3330 		int val;
3331 
3332 		if (optlen != sizeof(val))
3333 			return -EINVAL;
3334 		if (copy_from_user(&val, optval, sizeof(val)))
3335 			return -EFAULT;
3336 
3337 		pkt_sk(sk)->copy_thresh = val;
3338 		return 0;
3339 	}
3340 	case PACKET_VERSION:
3341 	{
3342 		int val;
3343 
3344 		if (optlen != sizeof(val))
3345 			return -EINVAL;
3346 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3347 			return -EBUSY;
3348 		if (copy_from_user(&val, optval, sizeof(val)))
3349 			return -EFAULT;
3350 		switch (val) {
3351 		case TPACKET_V1:
3352 		case TPACKET_V2:
3353 		case TPACKET_V3:
3354 			po->tp_version = val;
3355 			return 0;
3356 		default:
3357 			return -EINVAL;
3358 		}
3359 	}
3360 	case PACKET_RESERVE:
3361 	{
3362 		unsigned int val;
3363 
3364 		if (optlen != sizeof(val))
3365 			return -EINVAL;
3366 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3367 			return -EBUSY;
3368 		if (copy_from_user(&val, optval, sizeof(val)))
3369 			return -EFAULT;
3370 		po->tp_reserve = val;
3371 		return 0;
3372 	}
3373 	case PACKET_LOSS:
3374 	{
3375 		unsigned int val;
3376 
3377 		if (optlen != sizeof(val))
3378 			return -EINVAL;
3379 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3380 			return -EBUSY;
3381 		if (copy_from_user(&val, optval, sizeof(val)))
3382 			return -EFAULT;
3383 		po->tp_loss = !!val;
3384 		return 0;
3385 	}
3386 	case PACKET_AUXDATA:
3387 	{
3388 		int val;
3389 
3390 		if (optlen < sizeof(val))
3391 			return -EINVAL;
3392 		if (copy_from_user(&val, optval, sizeof(val)))
3393 			return -EFAULT;
3394 
3395 		po->auxdata = !!val;
3396 		return 0;
3397 	}
3398 	case PACKET_ORIGDEV:
3399 	{
3400 		int val;
3401 
3402 		if (optlen < sizeof(val))
3403 			return -EINVAL;
3404 		if (copy_from_user(&val, optval, sizeof(val)))
3405 			return -EFAULT;
3406 
3407 		po->origdev = !!val;
3408 		return 0;
3409 	}
3410 	case PACKET_VNET_HDR:
3411 	{
3412 		int val;
3413 
3414 		if (sock->type != SOCK_RAW)
3415 			return -EINVAL;
3416 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3417 			return -EBUSY;
3418 		if (optlen < sizeof(val))
3419 			return -EINVAL;
3420 		if (copy_from_user(&val, optval, sizeof(val)))
3421 			return -EFAULT;
3422 
3423 		po->has_vnet_hdr = !!val;
3424 		return 0;
3425 	}
3426 	case PACKET_TIMESTAMP:
3427 	{
3428 		int val;
3429 
3430 		if (optlen != sizeof(val))
3431 			return -EINVAL;
3432 		if (copy_from_user(&val, optval, sizeof(val)))
3433 			return -EFAULT;
3434 
3435 		po->tp_tstamp = val;
3436 		return 0;
3437 	}
3438 	case PACKET_FANOUT:
3439 	{
3440 		int val;
3441 
3442 		if (optlen != sizeof(val))
3443 			return -EINVAL;
3444 		if (copy_from_user(&val, optval, sizeof(val)))
3445 			return -EFAULT;
3446 
3447 		return fanout_add(sk, val & 0xffff, val >> 16);
3448 	}
3449 	case PACKET_TX_HAS_OFF:
3450 	{
3451 		unsigned int val;
3452 
3453 		if (optlen != sizeof(val))
3454 			return -EINVAL;
3455 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3456 			return -EBUSY;
3457 		if (copy_from_user(&val, optval, sizeof(val)))
3458 			return -EFAULT;
3459 		po->tp_tx_has_off = !!val;
3460 		return 0;
3461 	}
3462 	case PACKET_QDISC_BYPASS:
3463 	{
3464 		int val;
3465 
3466 		if (optlen != sizeof(val))
3467 			return -EINVAL;
3468 		if (copy_from_user(&val, optval, sizeof(val)))
3469 			return -EFAULT;
3470 
3471 		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3472 		return 0;
3473 	}
3474 	default:
3475 		return -ENOPROTOOPT;
3476 	}
3477 }
3478 
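/* SOL_PACKET getsockopt.  PACKET_STATISTICS is clear-on-read: the
 * counters are copied out and reset under the receive queue lock.
 */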
3479 static int packet_getsockopt(struct socket *sock, int level, int optname,
3480 			     char __user *optval, int __user *optlen)
3481 {
3482 	int len;
3483 	int val, lv = sizeof(val);
3484 	struct sock *sk = sock->sk;
3485 	struct packet_sock *po = pkt_sk(sk);
3486 	void *data = &val;
3487 	union tpacket_stats_u st;
3488 
3489 	if (level != SOL_PACKET)
3490 		return -ENOPROTOOPT;
3491 
3492 	if (get_user(len, optlen))
3493 		return -EFAULT;
3494 
3495 	if (len < 0)
3496 		return -EINVAL;
3497 
3498 	switch (optname) {
3499 	case PACKET_STATISTICS:
3500 		spin_lock_bh(&sk->sk_receive_queue.lock);
3501 		memcpy(&st, &po->stats, sizeof(st));
3502 		memset(&po->stats, 0, sizeof(po->stats));
3503 		spin_unlock_bh(&sk->sk_receive_queue.lock);
3504 
3505 		if (po->tp_version == TPACKET_V3) {
3506 			lv = sizeof(struct tpacket_stats_v3);
3507 			st.stats3.tp_packets += st.stats3.tp_drops;
3508 			data = &st.stats3;
3509 		} else {
3510 			lv = sizeof(struct tpacket_stats);
3511 			st.stats1.tp_packets += st.stats1.tp_drops;
3512 			data = &st.stats1;
3513 		}
3514 
3515 		break;
3516 	case PACKET_AUXDATA:
3517 		val = po->auxdata;
3518 		break;
3519 	case PACKET_ORIGDEV:
3520 		val = po->origdev;
3521 		break;
3522 	case PACKET_VNET_HDR:
3523 		val = po->has_vnet_hdr;
3524 		break;
3525 	case PACKET_VERSION:
3526 		val = po->tp_version;
3527 		break;
3528 	case PACKET_HDRLEN:
3529 		if (len > sizeof(int))
3530 			len = sizeof(int);
3531 		if (copy_from_user(&val, optval, len))
3532 			return -EFAULT;
3533 		switch (val) {
3534 		case TPACKET_V1:
3535 			val = sizeof(struct tpacket_hdr);
3536 			break;
3537 		case TPACKET_V2:
3538 			val = sizeof(struct tpacket2_hdr);
3539 			break;
3540 		case TPACKET_V3:
3541 			val = sizeof(struct tpacket3_hdr);
3542 			break;
3543 		default:
3544 			return -EINVAL;
3545 		}
3546 		break;
3547 	case PACKET_RESERVE:
3548 		val = po->tp_reserve;
3549 		break;
3550 	case PACKET_LOSS:
3551 		val = po->tp_loss;
3552 		break;
3553 	case PACKET_TIMESTAMP:
3554 		val = po->tp_tstamp;
3555 		break;
3556 	case PACKET_FANOUT:
3557 		val = (po->fanout ?
3558 		       ((u32)po->fanout->id |
3559 			((u32)po->fanout->type << 16) |
3560 			((u32)po->fanout->flags << 24)) :
3561 		       0);
3562 		break;
3563 	case PACKET_TX_HAS_OFF:
3564 		val = po->tp_tx_has_off;
3565 		break;
3566 	case PACKET_QDISC_BYPASS:
3567 		val = packet_use_direct_xmit(po);
3568 		break;
3569 	default:
3570 		return -ENOPROTOOPT;
3571 	}
3572 
3573 	if (len > lv)
3574 		len = lv;
3575 	if (put_user(len, optlen))
3576 		return -EFAULT;
3577 	if (copy_to_user(optval, data, len))
3578 		return -EFAULT;
3579 	return 0;
3580 }
3581 
3582 
3583 static int packet_notifier(struct notifier_block *this,
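/* netdevice notifier: when a device goes down or is unregistered, unhook
 * any packet socket bound to it (dropping cached device state on
 * unregister); when it comes back up, re-register the protocol hook.
 */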
3584 			   unsigned long msg, void *ptr)
3585 {
3586 	struct sock *sk;
3587 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3588 	struct net *net = dev_net(dev);
3589 
3590 	rcu_read_lock();
3591 	sk_for_each_rcu(sk, &net->packet.sklist) {
3592 		struct packet_sock *po = pkt_sk(sk);
3593 
3594 		switch (msg) {
3595 		case NETDEV_UNREGISTER:
3596 			if (po->mclist)
3597 				packet_dev_mclist_delete(dev, &po->mclist);
3598 			/* fallthrough */
3599 
3600 		case NETDEV_DOWN:
3601 			if (dev->ifindex == po->ifindex) {
3602 				spin_lock(&po->bind_lock);
3603 				if (po->running) {
3604 					__unregister_prot_hook(sk, false);
3605 					sk->sk_err = ENETDOWN;
3606 					if (!sock_flag(sk, SOCK_DEAD))
3607 						sk->sk_error_report(sk);
3608 				}
3609 				if (msg == NETDEV_UNREGISTER) {
3610 					packet_cached_dev_reset(po);
3611 					po->ifindex = -1;
3612 					if (po->prot_hook.dev)
3613 						dev_put(po->prot_hook.dev);
3614 					po->prot_hook.dev = NULL;
3615 				}
3616 				spin_unlock(&po->bind_lock);
3617 			}
3618 			break;
3619 		case NETDEV_UP:
3620 			if (dev->ifindex == po->ifindex) {
3621 				spin_lock(&po->bind_lock);
3622 				if (po->num)
3623 					register_prot_hook(sk);
3624 				spin_unlock(&po->bind_lock);
3625 			}
3626 			break;
3627 		}
3628 	}
3629 	rcu_read_unlock();
3630 	return NOTIFY_DONE;
3631 }
3632 
3633 
3634 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3635 			unsigned long arg)
3636 {
3637 	struct sock *sk = sock->sk;
3638 
3639 	switch (cmd) {
3640 	case SIOCOUTQ:
3641 	{
3642 		int amount = sk_wmem_alloc_get(sk);
3643 
3644 		return put_user(amount, (int __user *)arg);
3645 	}
3646 	case SIOCINQ:
3647 	{
3648 		struct sk_buff *skb;
3649 		int amount = 0;
3650 
3651 		spin_lock_bh(&sk->sk_receive_queue.lock);
3652 		skb = skb_peek(&sk->sk_receive_queue);
3653 		if (skb)
3654 			amount = skb->len;
3655 		spin_unlock_bh(&sk->sk_receive_queue.lock);
3656 		return put_user(amount, (int __user *)arg);
3657 	}
3658 	case SIOCGSTAMP:
3659 		return sock_get_timestamp(sk, (struct timeval __user *)arg);
3660 	case SIOCGSTAMPNS:
3661 		return sock_get_timestampns(sk, (struct timespec __user *)arg);
3662 
3663 #ifdef CONFIG_INET
3664 	case SIOCADDRT:
3665 	case SIOCDELRT:
3666 	case SIOCDARP:
3667 	case SIOCGARP:
3668 	case SIOCSARP:
3669 	case SIOCGIFADDR:
3670 	case SIOCSIFADDR:
3671 	case SIOCGIFBRDADDR:
3672 	case SIOCSIFBRDADDR:
3673 	case SIOCGIFNETMASK:
3674 	case SIOCSIFNETMASK:
3675 	case SIOCGIFDSTADDR:
3676 	case SIOCSIFDSTADDR:
3677 	case SIOCSIFFLAGS:
3678 		return inet_dgram_ops.ioctl(sock, cmd, arg);
3679 #endif
3680 
3681 	default:
3682 		return -ENOIOCTLCMD;
3683 	}
3684 	return 0;
3685 }
3686 
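/* poll(): in addition to the normal datagram semantics, report POLLIN
 * when a filled RX ring frame is owned by user space and POLLOUT when a
 * TX ring frame is free (TP_STATUS_AVAILABLE) to be filled.
 */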
3687 static unsigned int packet_poll(struct file *file, struct socket *sock,
3688 				poll_table *wait)
3689 {
3690 	struct sock *sk = sock->sk;
3691 	struct packet_sock *po = pkt_sk(sk);
3692 	unsigned int mask = datagram_poll(file, sock, wait);
3693 
3694 	spin_lock_bh(&sk->sk_receive_queue.lock);
3695 	if (po->rx_ring.pg_vec) {
3696 		if (!packet_previous_rx_frame(po, &po->rx_ring,
3697 			TP_STATUS_KERNEL))
3698 			mask |= POLLIN | POLLRDNORM;
3699 	}
3700 	spin_unlock_bh(&sk->sk_receive_queue.lock);
3701 	spin_lock_bh(&sk->sk_write_queue.lock);
3702 	if (po->tx_ring.pg_vec) {
3703 		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3704 			mask |= POLLOUT | POLLWRNORM;
3705 	}
3706 	spin_unlock_bh(&sk->sk_write_queue.lock);
3707 	return mask;
3708 }
3709 
3710 
3711 /* Dirty? Well, I still have not learned a better way to account
3712  * for user mmaps.
3713  */
3714 
3715 static void packet_mm_open(struct vm_area_struct *vma)
3716 {
3717 	struct file *file = vma->vm_file;
3718 	struct socket *sock = file->private_data;
3719 	struct sock *sk = sock->sk;
3720 
3721 	if (sk)
3722 		atomic_inc(&pkt_sk(sk)->mapped);
3723 }
3724 
3725 static void packet_mm_close(struct vm_area_struct *vma)
3726 {
3727 	struct file *file = vma->vm_file;
3728 	struct socket *sock = file->private_data;
3729 	struct sock *sk = sock->sk;
3730 
3731 	if (sk)
3732 		atomic_dec(&pkt_sk(sk)->mapped);
3733 }
3734 
3735 static const struct vm_operations_struct packet_mmap_ops = {
3736 	.open	=	packet_mm_open,
3737 	.close	=	packet_mm_close,
3738 };
3739 
3740 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3741 			unsigned int len)
3742 {
3743 	int i;
3744 
3745 	for (i = 0; i < len; i++) {
3746 		if (likely(pg_vec[i].buffer)) {
3747 			if (is_vmalloc_addr(pg_vec[i].buffer))
3748 				vfree(pg_vec[i].buffer);
3749 			else
3750 				free_pages((unsigned long)pg_vec[i].buffer,
3751 					   order);
3752 			pg_vec[i].buffer = NULL;
3753 		}
3754 	}
3755 	kfree(pg_vec);
3756 }
3757 
3758 static char *alloc_one_pg_vec_page(unsigned long order)
3759 {
3760 	char *buffer;
3761 	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3762 			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3763 
3764 	buffer = (char *) __get_free_pages(gfp_flags, order);
3765 	if (buffer)
3766 		return buffer;
3767 
3768 	/* __get_free_pages failed, fall back to vmalloc */
3769 	buffer = vzalloc((1 << order) * PAGE_SIZE);
3770 	if (buffer)
3771 		return buffer;
3772 
3773 	/* vmalloc failed, let's dig into swap here */
3774 	gfp_flags &= ~__GFP_NORETRY;
3775 	buffer = (char *) __get_free_pages(gfp_flags, order);
3776 	if (buffer)
3777 		return buffer;
3778 
3779 	/* complete and utter failure */
3780 	return NULL;
3781 }
3782 
3783 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3784 {
3785 	unsigned int block_nr = req->tp_block_nr;
3786 	struct pgv *pg_vec;
3787 	int i;
3788 
3789 	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3790 	if (unlikely(!pg_vec))
3791 		goto out;
3792 
3793 	for (i = 0; i < block_nr; i++) {
3794 		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
3795 		if (unlikely(!pg_vec[i].buffer))
3796 			goto out_free_pgvec;
3797 	}
3798 
3799 out:
3800 	return pg_vec;
3801 
3802 out_free_pgvec:
3803 	free_pg_vec(pg_vec, order, block_nr);
3804 	pg_vec = NULL;
3805 	goto out;
3806 }
3807 
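/* Set up or tear down an RX/TX ring (PACKET_RX_RING / PACKET_TX_RING):
 * validate the requested block/frame geometry, allocate a page vector,
 * then swap it in under the pg_vec mutex with the protocol hook
 * temporarily unregistered.  A request with tp_block_nr == 0 releases an
 * existing ring.
 */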
3808 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3809 		int closing, int tx_ring)
3810 {
3811 	struct pgv *pg_vec = NULL;
3812 	struct packet_sock *po = pkt_sk(sk);
3813 	int was_running, order = 0;
3814 	struct packet_ring_buffer *rb;
3815 	struct sk_buff_head *rb_queue;
3816 	__be16 num;
3817 	int err = -EINVAL;
3818 	/* Added to keep code churn minimal */
3819 	struct tpacket_req *req = &req_u->req;
3820 
3821 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3822 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3823 		WARN(1, "Tx-ring is not supported.\n");
3824 		goto out;
3825 	}
3826 
3827 	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3828 	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3829 
3830 	err = -EBUSY;
3831 	if (!closing) {
3832 		if (atomic_read(&po->mapped))
3833 			goto out;
3834 		if (packet_read_pending(rb))
3835 			goto out;
3836 	}
3837 
3838 	if (req->tp_block_nr) {
3839 		/* Sanity tests and some calculations */
3840 		err = -EBUSY;
3841 		if (unlikely(rb->pg_vec))
3842 			goto out;
3843 
3844 		switch (po->tp_version) {
3845 		case TPACKET_V1:
3846 			po->tp_hdrlen = TPACKET_HDRLEN;
3847 			break;
3848 		case TPACKET_V2:
3849 			po->tp_hdrlen = TPACKET2_HDRLEN;
3850 			break;
3851 		case TPACKET_V3:
3852 			po->tp_hdrlen = TPACKET3_HDRLEN;
3853 			break;
3854 		}
3855 
3856 		err = -EINVAL;
3857 		if (unlikely((int)req->tp_block_size <= 0))
3858 			goto out;
3859 		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
3860 			goto out;
3861 		if (po->tp_version >= TPACKET_V3 &&
3862 		    (int)(req->tp_block_size -
3863 			  BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
3864 			goto out;
3865 		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
3866 					po->tp_reserve))
3867 			goto out;
3868 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
3869 			goto out;
3870 
3871 		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3872 		if (unlikely(rb->frames_per_block <= 0))
3873 			goto out;
3874 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3875 					req->tp_frame_nr))
3876 			goto out;
3877 
3878 		err = -ENOMEM;
3879 		order = get_order(req->tp_block_size);
3880 		pg_vec = alloc_pg_vec(req, order);
3881 		if (unlikely(!pg_vec))
3882 			goto out;
3883 		switch (po->tp_version) {
3884 		case TPACKET_V3:
3885 		/* Transmit path is not supported. We checked
3886 		 * it above, but just being paranoid.
3887 		 */
3888 			if (!tx_ring)
3889 				init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3890 			break;
3891 		default:
3892 			break;
3893 		}
3894 	}
3895 	/* Done */
3896 	else {
3897 		err = -EINVAL;
3898 		if (unlikely(req->tp_frame_nr))
3899 			goto out;
3900 	}
3901 
3902 	lock_sock(sk);
3903 
3904 	/* Detach socket from network */
3905 	spin_lock(&po->bind_lock);
3906 	was_running = po->running;
3907 	num = po->num;
3908 	if (was_running) {
3909 		po->num = 0;
3910 		__unregister_prot_hook(sk, false);
3911 	}
3912 	spin_unlock(&po->bind_lock);
3913 
3914 	synchronize_net();
3915 
3916 	err = -EBUSY;
3917 	mutex_lock(&po->pg_vec_lock);
3918 	if (closing || atomic_read(&po->mapped) == 0) {
3919 		err = 0;
3920 		spin_lock_bh(&rb_queue->lock);
3921 		swap(rb->pg_vec, pg_vec);
3922 		rb->frame_max = (req->tp_frame_nr - 1);
3923 		rb->head = 0;
3924 		rb->frame_size = req->tp_frame_size;
3925 		spin_unlock_bh(&rb_queue->lock);
3926 
3927 		swap(rb->pg_vec_order, order);
3928 		swap(rb->pg_vec_len, req->tp_block_nr);
3929 
3930 		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3931 		po->prot_hook.func = (po->rx_ring.pg_vec) ?
3932 						tpacket_rcv : packet_rcv;
3933 		skb_queue_purge(rb_queue);
3934 		if (atomic_read(&po->mapped))
3935 			pr_err("packet_mmap: vma is busy: %d\n",
3936 			       atomic_read(&po->mapped));
3937 	}
3938 	mutex_unlock(&po->pg_vec_lock);
3939 
3940 	spin_lock(&po->bind_lock);
3941 	if (was_running) {
3942 		po->num = num;
3943 		register_prot_hook(sk);
3944 	}
3945 	spin_unlock(&po->bind_lock);
3946 	if (closing && (po->tp_version > TPACKET_V2)) {
3947 		/* Because we don't support block-based V3 on tx-ring */
3948 		if (!tx_ring)
3949 			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3950 	}
3951 	release_sock(sk);
3952 
3953 	if (pg_vec)
3954 		free_pg_vec(pg_vec, order, req->tp_block_nr);
3955 out:
3956 	return err;
3957 }
3958 
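/* mmap() the configured rings: the RX ring (if any) followed by the TX
 * ring is mapped as one contiguous area, one page at a time via
 * vm_insert_page(), and the VMA size must match the rings exactly.
 */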
3959 static int packet_mmap(struct file *file, struct socket *sock,
3960 		struct vm_area_struct *vma)
3961 {
3962 	struct sock *sk = sock->sk;
3963 	struct packet_sock *po = pkt_sk(sk);
3964 	unsigned long size, expected_size;
3965 	struct packet_ring_buffer *rb;
3966 	unsigned long start;
3967 	int err = -EINVAL;
3968 	int i;
3969 
3970 	if (vma->vm_pgoff)
3971 		return -EINVAL;
3972 
3973 	mutex_lock(&po->pg_vec_lock);
3974 
3975 	expected_size = 0;
3976 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3977 		if (rb->pg_vec) {
3978 			expected_size += rb->pg_vec_len
3979 						* rb->pg_vec_pages
3980 						* PAGE_SIZE;
3981 		}
3982 	}
3983 
3984 	if (expected_size == 0)
3985 		goto out;
3986 
3987 	size = vma->vm_end - vma->vm_start;
3988 	if (size != expected_size)
3989 		goto out;
3990 
3991 	start = vma->vm_start;
3992 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3993 		if (rb->pg_vec == NULL)
3994 			continue;
3995 
3996 		for (i = 0; i < rb->pg_vec_len; i++) {
3997 			struct page *page;
3998 			void *kaddr = rb->pg_vec[i].buffer;
3999 			int pg_num;
4000 
4001 			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4002 				page = pgv_to_page(kaddr);
4003 				err = vm_insert_page(vma, start, page);
4004 				if (unlikely(err))
4005 					goto out;
4006 				start += PAGE_SIZE;
4007 				kaddr += PAGE_SIZE;
4008 			}
4009 		}
4010 	}
4011 
4012 	atomic_inc(&po->mapped);
4013 	vma->vm_ops = &packet_mmap_ops;
4014 	err = 0;
4015 
4016 out:
4017 	mutex_unlock(&po->pg_vec_lock);
4018 	return err;
4019 }
4020 
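/* Two ops tables: packet_ops_spkt backs the legacy SOCK_PACKET sockets and
 * deliberately stubs out mmap and the packet-specific socket options, while
 * packet_ops backs SOCK_RAW/SOCK_DGRAM sockets with the full feature set
 * (memory-mapped rings via packet_mmap, packet_setsockopt/packet_getsockopt,
 * packet_poll).
 */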
4021 static const struct proto_ops packet_ops_spkt = {
4022 	.family =	PF_PACKET,
4023 	.owner =	THIS_MODULE,
4024 	.release =	packet_release,
4025 	.bind =		packet_bind_spkt,
4026 	.connect =	sock_no_connect,
4027 	.socketpair =	sock_no_socketpair,
4028 	.accept =	sock_no_accept,
4029 	.getname =	packet_getname_spkt,
4030 	.poll =		datagram_poll,
4031 	.ioctl =	packet_ioctl,
4032 	.listen =	sock_no_listen,
4033 	.shutdown =	sock_no_shutdown,
4034 	.setsockopt =	sock_no_setsockopt,
4035 	.getsockopt =	sock_no_getsockopt,
4036 	.sendmsg =	packet_sendmsg_spkt,
4037 	.recvmsg =	packet_recvmsg,
4038 	.mmap =		sock_no_mmap,
4039 	.sendpage =	sock_no_sendpage,
4040 };
4041 
4042 static const struct proto_ops packet_ops = {
4043 	.family =	PF_PACKET,
4044 	.owner =	THIS_MODULE,
4045 	.release =	packet_release,
4046 	.bind =		packet_bind,
4047 	.connect =	sock_no_connect,
4048 	.socketpair =	sock_no_socketpair,
4049 	.accept =	sock_no_accept,
4050 	.getname =	packet_getname,
4051 	.poll =		packet_poll,
4052 	.ioctl =	packet_ioctl,
4053 	.listen =	sock_no_listen,
4054 	.shutdown =	sock_no_shutdown,
4055 	.setsockopt =	packet_setsockopt,
4056 	.getsockopt =	packet_getsockopt,
4057 	.sendmsg =	packet_sendmsg,
4058 	.recvmsg =	packet_recvmsg,
4059 	.mmap =		packet_mmap,
4060 	.sendpage =	sock_no_sendpage,
4061 };
4062 
4063 static const struct net_proto_family packet_family_ops = {
4064 	.family =	PF_PACKET,
4065 	.create =	packet_create,
4066 	.owner	=	THIS_MODULE,
4067 };
4068 
4069 static struct notifier_block packet_netdev_notifier = {
4070 	.notifier_call =	packet_notifier,
4071 };
4072 
4073 #ifdef CONFIG_PROC_FS
4074 
4075 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4076 	__acquires(RCU)
4077 {
4078 	struct net *net = seq_file_net(seq);
4079 
4080 	rcu_read_lock();
4081 	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4082 }
4083 
4084 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4085 {
4086 	struct net *net = seq_file_net(seq);
4087 	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4088 }
4089 
4090 static void packet_seq_stop(struct seq_file *seq, void *v)
4091 	__releases(RCU)
4092 {
4093 	rcu_read_unlock();
4094 }
4095 
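/* One line per packet socket in /proc/net/packet: the (hashed) socket
 * pointer, refcount, socket type, protocol number in hex, bound ifindex,
 * whether the protocol hook is running, receive-queue memory, owning uid
 * and inode number, matching the header printed for SEQ_START_TOKEN below.
 */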
4096 static int packet_seq_show(struct seq_file *seq, void *v)
4097 {
4098 	if (v == SEQ_START_TOKEN)
4099 		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4100 	else {
4101 		struct sock *s = sk_entry(v);
4102 		const struct packet_sock *po = pkt_sk(s);
4103 
4104 		seq_printf(seq,
4105 			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4106 			   s,
4107 			   atomic_read(&s->sk_refcnt),
4108 			   s->sk_type,
4109 			   ntohs(po->num),
4110 			   po->ifindex,
4111 			   po->running,
4112 			   atomic_read(&s->sk_rmem_alloc),
4113 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4114 			   sock_i_ino(s));
4115 	}
4116 
4117 	return 0;
4118 }
4119 
4120 static const struct seq_operations packet_seq_ops = {
4121 	.start	= packet_seq_start,
4122 	.next	= packet_seq_next,
4123 	.stop	= packet_seq_stop,
4124 	.show	= packet_seq_show,
4125 };
4126 
4127 static int packet_seq_open(struct inode *inode, struct file *file)
4128 {
4129 	return seq_open_net(inode, file, &packet_seq_ops,
4130 			    sizeof(struct seq_net_private));
4131 }
4132 
4133 static const struct file_operations packet_seq_fops = {
4134 	.owner		= THIS_MODULE,
4135 	.open		= packet_seq_open,
4136 	.read		= seq_read,
4137 	.llseek		= seq_lseek,
4138 	.release	= seq_release_net,
4139 };
4140 
4141 #endif
4142 
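/* Per-namespace state: each net namespace gets its own packet-socket list
 * and its own /proc/net/packet entry; packet_net_exit() removes the proc
 * entry again when the namespace goes away.
 */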
4143 static int __net_init packet_net_init(struct net *net)
4144 {
4145 	mutex_init(&net->packet.sklist_lock);
4146 	INIT_HLIST_HEAD(&net->packet.sklist);
4147 
4148 	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4149 		return -ENOMEM;
4150 
4151 	return 0;
4152 }
4153 
4154 static void __net_exit packet_net_exit(struct net *net)
4155 {
4156 	remove_proc_entry("packet", net->proc_net);
4157 }
4158 
4159 static struct pernet_operations packet_net_ops = {
4160 	.init = packet_net_init,
4161 	.exit = packet_net_exit,
4162 };
4163 
4164 
4165 static void __exit packet_exit(void)
4166 {
4167 	unregister_netdevice_notifier(&packet_netdev_notifier);
4168 	unregister_pernet_subsys(&packet_net_ops);
4169 	sock_unregister(PF_PACKET);
4170 	proto_unregister(&packet_proto);
4171 }
4172 
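/* Module bring-up: register the proto backing packet sockets first, then
 * expose PF_PACKET to socket(2), set up the per-namespace /proc state and
 * finally subscribe to netdevice events (used to drop bindings when a
 * device goes down or is unregistered). packet_exit() above unwinds this
 * in reverse order.
 */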
4173 static int __init packet_init(void)
4174 {
4175 	int rc = proto_register(&packet_proto, 0);
4176 
4177 	if (rc != 0)
4178 		goto out;
4179 
4180 	sock_register(&packet_family_ops);
4181 	register_pernet_subsys(&packet_net_ops);
4182 	register_netdevice_notifier(&packet_netdev_notifier);
4183 out:
4184 	return rc;
4185 }
4186 
4187 module_init(packet_init);
4188 module_exit(packet_exit);
4189 MODULE_LICENSE("GPL");
4190 MODULE_ALIAS_NETPROTO(PF_PACKET);
4191