xref: /openbmc/linux/net/packet/af_packet.c (revision d198b34f3855eee2571dda03eea75a09c7c31480)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		PACKET - implements raw packet sockets.
8  *
9  * Authors:	Ross Biro
10  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
12  *
13  * Fixes:
14  *		Alan Cox	:	verify_area() now used correctly
15  *		Alan Cox	:	new skbuff lists, look ma no backlogs!
16  *		Alan Cox	:	tidied skbuff lists.
17  *		Alan Cox	:	Now uses generic datagram routines I
18  *					added. Also fixed the peek/read crash
19  *					from all old Linux datagram code.
20  *		Alan Cox	:	Uses the improved datagram code.
21  *		Alan Cox	:	Added NULL's for socket options.
22  *		Alan Cox	:	Re-commented the code.
23  *		Alan Cox	:	Use new kernel side addressing
24  *		Rob Janssen	:	Correct MTU usage.
25  *		Dave Platt	:	Counter leaks caused by incorrect
26  *					interrupt locking and some slightly
27  *					dubious gcc output. Can you read
28  *					compiler: it said _VOLATILE_
29  *	Richard Kooijman	:	Timestamp fixes.
30  *		Alan Cox	:	New buffers. Use sk->mac.raw.
31  *		Alan Cox	:	sendmsg/recvmsg support.
32  *		Alan Cox	:	Protocol setting support
33  *	Alexey Kuznetsov	:	Untied from IPv4 stack.
34  *	Cyrus Durgin		:	Fixed kerneld for kmod.
35  *	Michal Ostrowski        :       Module initialization cleanup.
36  *         Ulises Alonso        :       Frame number limit removal and
37  *                                      packet_set_ring memory leak.
38  *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
39  *					The convention is that longer addresses
40  *					will simply extend the hardware address
41  *					byte arrays at the end of sockaddr_ll
42  *					and packet_mreq.
43  *		Johann Baudy	:	Added TX RING.
44  *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
45  *					layer.
46  *					Copyright (C) 2011, <lokec@ccs.neu.edu>
47  */
48 
49 #include <linux/types.h>
50 #include <linux/mm.h>
51 #include <linux/capability.h>
52 #include <linux/fcntl.h>
53 #include <linux/socket.h>
54 #include <linux/in.h>
55 #include <linux/inet.h>
56 #include <linux/netdevice.h>
57 #include <linux/if_packet.h>
58 #include <linux/wireless.h>
59 #include <linux/kernel.h>
60 #include <linux/kmod.h>
61 #include <linux/slab.h>
62 #include <linux/vmalloc.h>
63 #include <net/net_namespace.h>
64 #include <net/ip.h>
65 #include <net/protocol.h>
66 #include <linux/skbuff.h>
67 #include <net/sock.h>
68 #include <linux/errno.h>
69 #include <linux/timer.h>
70 #include <linux/uaccess.h>
71 #include <asm/ioctls.h>
72 #include <asm/page.h>
73 #include <asm/cacheflush.h>
74 #include <asm/io.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77 #include <linux/poll.h>
78 #include <linux/module.h>
79 #include <linux/init.h>
80 #include <linux/mutex.h>
81 #include <linux/if_vlan.h>
82 #include <linux/virtio_net.h>
83 #include <linux/errqueue.h>
84 #include <linux/net_tstamp.h>
85 #include <linux/percpu.h>
86 #ifdef CONFIG_INET
87 #include <net/inet_common.h>
88 #endif
89 #include <linux/bpf.h>
90 #include <net/compat.h>
91 
92 #include "internal.h"
93 
94 /*
95    Assumptions:
96    - if device has no dev->hard_header routine, it adds and removes ll header
97      inside itself. In this case ll header is invisible outside of device,
98      but higher levels still should reserve dev->hard_header_len.
99      Some devices are enough clever to reallocate skb, when header
100      will not fit to reserved space (tunnel), another ones are silly
101      (PPP).
102    - packet socket receives packets with pulled ll header,
103      so that SOCK_RAW should push it back.
104 
105 On receive:
106 -----------
107 
108 Incoming, dev->hard_header!=NULL
109    mac_header -> ll header
110    data       -> data
111 
112 Outgoing, dev->hard_header!=NULL
113    mac_header -> ll header
114    data       -> ll header
115 
116 Incoming, dev->hard_header==NULL
117    mac_header -> UNKNOWN position. It is very likely, that it points to ll
118 		 header.  PPP makes it, that is wrong, because introduce
119 		 assymetry between rx and tx paths.
120    data       -> data
121 
122 Outgoing, dev->hard_header==NULL
123    mac_header -> data. ll header is still not built!
124    data       -> data
125 
126 Resume
127   If dev->hard_header==NULL we are unlikely to restore sensible ll header.
128 
129 
130 On transmit:
131 ------------
132 
133 dev->hard_header != NULL
134    mac_header -> ll header
135    data       -> ll header
136 
137 dev->hard_header == NULL (ll header is added by device, we cannot control it)
138    mac_header -> data
139    data       -> data
140 
141    We should set nh.raw on output to correct posistion,
142    packet classifier depends on it.
143  */
144 
145 /* Private packet socket structures. */
146 
147 /* identical to struct packet_mreq except it has
148  * a longer address field.
149  */
150 struct packet_mreq_max {
151 	int		mr_ifindex;
152 	unsigned short	mr_type;
153 	unsigned short	mr_alen;
154 	unsigned char	mr_address[MAX_ADDR_LEN];
155 };
156 
157 union tpacket_uhdr {
158 	struct tpacket_hdr  *h1;
159 	struct tpacket2_hdr *h2;
160 	struct tpacket3_hdr *h3;
161 	void *raw;
162 };
163 
164 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
165 		int closing, int tx_ring);
166 
167 #define V3_ALIGNMENT	(8)
168 
169 #define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
170 
171 #define BLK_PLUS_PRIV(sz_of_priv) \
172 	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
173 
174 #define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
175 #define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
176 #define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
177 #define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
178 #define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
179 #define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
180 #define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
181 
182 struct packet_sock;
183 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
184 		       struct packet_type *pt, struct net_device *orig_dev);
185 
186 static void *packet_previous_frame(struct packet_sock *po,
187 		struct packet_ring_buffer *rb,
188 		int status);
189 static void packet_increment_head(struct packet_ring_buffer *buff);
190 static int prb_curr_blk_in_use(struct tpacket_block_desc *);
191 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
192 			struct packet_sock *);
193 static void prb_retire_current_block(struct tpacket_kbdq_core *,
194 		struct packet_sock *, unsigned int status);
195 static int prb_queue_frozen(struct tpacket_kbdq_core *);
196 static void prb_open_block(struct tpacket_kbdq_core *,
197 		struct tpacket_block_desc *);
198 static void prb_retire_rx_blk_timer_expired(struct timer_list *);
199 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
200 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
201 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
202 		struct tpacket3_hdr *);
203 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
204 		struct tpacket3_hdr *);
205 static void packet_flush_mclist(struct sock *sk);
206 static u16 packet_pick_tx_queue(struct sk_buff *skb);
207 
208 struct packet_skb_cb {
209 	union {
210 		struct sockaddr_pkt pkt;
211 		union {
212 			/* Trick: alias skb original length with
213 			 * ll.sll_family and ll.protocol in order
214 			 * to save room.
215 			 */
216 			unsigned int origlen;
217 			struct sockaddr_ll ll;
218 		};
219 	} sa;
220 };
221 
222 #define vio_le() virtio_legacy_is_little_endian()
223 
224 #define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
225 
226 #define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
227 #define GET_PBLOCK_DESC(x, bid)	\
228 	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
229 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
230 	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
231 #define GET_NEXT_PRB_BLK_NUM(x) \
232 	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
233 	((x)->kactive_blk_num+1) : 0)
234 
235 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
236 static void __fanout_link(struct sock *sk, struct packet_sock *po);
237 
238 static int packet_direct_xmit(struct sk_buff *skb)
239 {
240 	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
241 }
242 
243 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
244 {
245 	struct net_device *dev;
246 
247 	rcu_read_lock();
248 	dev = rcu_dereference(po->cached_dev);
249 	if (likely(dev))
250 		dev_hold(dev);
251 	rcu_read_unlock();
252 
253 	return dev;
254 }
255 
256 static void packet_cached_dev_assign(struct packet_sock *po,
257 				     struct net_device *dev)
258 {
259 	rcu_assign_pointer(po->cached_dev, dev);
260 }
261 
262 static void packet_cached_dev_reset(struct packet_sock *po)
263 {
264 	RCU_INIT_POINTER(po->cached_dev, NULL);
265 }
266 
267 static bool packet_use_direct_xmit(const struct packet_sock *po)
268 {
269 	return po->xmit == packet_direct_xmit;
270 }
271 
272 static u16 packet_pick_tx_queue(struct sk_buff *skb)
273 {
274 	struct net_device *dev = skb->dev;
275 	const struct net_device_ops *ops = dev->netdev_ops;
276 	int cpu = raw_smp_processor_id();
277 	u16 queue_index;
278 
279 #ifdef CONFIG_XPS
280 	skb->sender_cpu = cpu + 1;
281 #endif
282 	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
283 	if (ops->ndo_select_queue) {
284 		queue_index = ops->ndo_select_queue(dev, skb, NULL);
285 		queue_index = netdev_cap_txqueue(dev, queue_index);
286 	} else {
287 		queue_index = netdev_pick_tx(dev, skb, NULL);
288 	}
289 
290 	return queue_index;
291 }
292 
293 /* __register_prot_hook must be invoked through register_prot_hook
294  * or from a context in which asynchronous accesses to the packet
295  * socket is not possible (packet_create()).
296  */
297 static void __register_prot_hook(struct sock *sk)
298 {
299 	struct packet_sock *po = pkt_sk(sk);
300 
301 	if (!po->running) {
302 		if (po->fanout)
303 			__fanout_link(sk, po);
304 		else
305 			dev_add_pack(&po->prot_hook);
306 
307 		sock_hold(sk);
308 		po->running = 1;
309 	}
310 }
311 
312 static void register_prot_hook(struct sock *sk)
313 {
314 	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
315 	__register_prot_hook(sk);
316 }
317 
318 /* If the sync parameter is true, we will temporarily drop
319  * the po->bind_lock and do a synchronize_net to make sure no
320  * asynchronous packet processing paths still refer to the elements
321  * of po->prot_hook.  If the sync parameter is false, it is the
322  * callers responsibility to take care of this.
323  */
324 static void __unregister_prot_hook(struct sock *sk, bool sync)
325 {
326 	struct packet_sock *po = pkt_sk(sk);
327 
328 	lockdep_assert_held_once(&po->bind_lock);
329 
330 	po->running = 0;
331 
332 	if (po->fanout)
333 		__fanout_unlink(sk, po);
334 	else
335 		__dev_remove_pack(&po->prot_hook);
336 
337 	__sock_put(sk);
338 
339 	if (sync) {
340 		spin_unlock(&po->bind_lock);
341 		synchronize_net();
342 		spin_lock(&po->bind_lock);
343 	}
344 }
345 
346 static void unregister_prot_hook(struct sock *sk, bool sync)
347 {
348 	struct packet_sock *po = pkt_sk(sk);
349 
350 	if (po->running)
351 		__unregister_prot_hook(sk, sync);
352 }
353 
354 static inline struct page * __pure pgv_to_page(void *addr)
355 {
356 	if (is_vmalloc_addr(addr))
357 		return vmalloc_to_page(addr);
358 	return virt_to_page(addr);
359 }
360 
361 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
362 {
363 	union tpacket_uhdr h;
364 
365 	h.raw = frame;
366 	switch (po->tp_version) {
367 	case TPACKET_V1:
368 		h.h1->tp_status = status;
369 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
370 		break;
371 	case TPACKET_V2:
372 		h.h2->tp_status = status;
373 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
374 		break;
375 	case TPACKET_V3:
376 		h.h3->tp_status = status;
377 		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
378 		break;
379 	default:
380 		WARN(1, "TPACKET version not supported.\n");
381 		BUG();
382 	}
383 
384 	smp_wmb();
385 }
386 
387 static int __packet_get_status(const struct packet_sock *po, void *frame)
388 {
389 	union tpacket_uhdr h;
390 
391 	smp_rmb();
392 
393 	h.raw = frame;
394 	switch (po->tp_version) {
395 	case TPACKET_V1:
396 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
397 		return h.h1->tp_status;
398 	case TPACKET_V2:
399 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
400 		return h.h2->tp_status;
401 	case TPACKET_V3:
402 		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
403 		return h.h3->tp_status;
404 	default:
405 		WARN(1, "TPACKET version not supported.\n");
406 		BUG();
407 		return 0;
408 	}
409 }
410 
411 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
412 				   unsigned int flags)
413 {
414 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
415 
416 	if (shhwtstamps &&
417 	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
418 	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
419 		return TP_STATUS_TS_RAW_HARDWARE;
420 
421 	if (ktime_to_timespec64_cond(skb->tstamp, ts))
422 		return TP_STATUS_TS_SOFTWARE;
423 
424 	return 0;
425 }
426 
427 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
428 				    struct sk_buff *skb)
429 {
430 	union tpacket_uhdr h;
431 	struct timespec64 ts;
432 	__u32 ts_status;
433 
434 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
435 		return 0;
436 
437 	h.raw = frame;
438 	/*
439 	 * versions 1 through 3 overflow the timestamps in y2106, since they
440 	 * all store the seconds in a 32-bit unsigned integer.
441 	 * If we create a version 4, that should have a 64-bit timestamp,
442 	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
443 	 * nanoseconds.
444 	 */
445 	switch (po->tp_version) {
446 	case TPACKET_V1:
447 		h.h1->tp_sec = ts.tv_sec;
448 		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
449 		break;
450 	case TPACKET_V2:
451 		h.h2->tp_sec = ts.tv_sec;
452 		h.h2->tp_nsec = ts.tv_nsec;
453 		break;
454 	case TPACKET_V3:
455 		h.h3->tp_sec = ts.tv_sec;
456 		h.h3->tp_nsec = ts.tv_nsec;
457 		break;
458 	default:
459 		WARN(1, "TPACKET version not supported.\n");
460 		BUG();
461 	}
462 
463 	/* one flush is safe, as both fields always lie on the same cacheline */
464 	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
465 	smp_wmb();
466 
467 	return ts_status;
468 }
469 
470 static void *packet_lookup_frame(const struct packet_sock *po,
471 				 const struct packet_ring_buffer *rb,
472 				 unsigned int position,
473 				 int status)
474 {
475 	unsigned int pg_vec_pos, frame_offset;
476 	union tpacket_uhdr h;
477 
478 	pg_vec_pos = position / rb->frames_per_block;
479 	frame_offset = position % rb->frames_per_block;
480 
481 	h.raw = rb->pg_vec[pg_vec_pos].buffer +
482 		(frame_offset * rb->frame_size);
483 
484 	if (status != __packet_get_status(po, h.raw))
485 		return NULL;
486 
487 	return h.raw;
488 }
489 
490 static void *packet_current_frame(struct packet_sock *po,
491 		struct packet_ring_buffer *rb,
492 		int status)
493 {
494 	return packet_lookup_frame(po, rb, rb->head, status);
495 }
496 
497 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
498 {
499 	del_timer_sync(&pkc->retire_blk_timer);
500 }
501 
502 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
503 		struct sk_buff_head *rb_queue)
504 {
505 	struct tpacket_kbdq_core *pkc;
506 
507 	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
508 
509 	spin_lock_bh(&rb_queue->lock);
510 	pkc->delete_blk_timer = 1;
511 	spin_unlock_bh(&rb_queue->lock);
512 
513 	prb_del_retire_blk_timer(pkc);
514 }
515 
516 static void prb_setup_retire_blk_timer(struct packet_sock *po)
517 {
518 	struct tpacket_kbdq_core *pkc;
519 
520 	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
521 	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
522 		    0);
523 	pkc->retire_blk_timer.expires = jiffies;
524 }
525 
526 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
527 				int blk_size_in_bytes)
528 {
529 	struct net_device *dev;
530 	unsigned int mbits, div;
531 	struct ethtool_link_ksettings ecmd;
532 	int err;
533 
534 	rtnl_lock();
535 	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
536 	if (unlikely(!dev)) {
537 		rtnl_unlock();
538 		return DEFAULT_PRB_RETIRE_TOV;
539 	}
540 	err = __ethtool_get_link_ksettings(dev, &ecmd);
541 	rtnl_unlock();
542 	if (err)
543 		return DEFAULT_PRB_RETIRE_TOV;
544 
545 	/* If the link speed is so slow you don't really
546 	 * need to worry about perf anyways
547 	 */
548 	if (ecmd.base.speed < SPEED_1000 ||
549 	    ecmd.base.speed == SPEED_UNKNOWN)
550 		return DEFAULT_PRB_RETIRE_TOV;
551 
552 	div = ecmd.base.speed / 1000;
553 	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
554 
555 	if (div)
556 		mbits /= div;
557 
558 	if (div)
559 		return mbits + 1;
560 	return mbits;
561 }
562 
563 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
564 			union tpacket_req_u *req_u)
565 {
566 	p1->feature_req_word = req_u->req3.tp_feature_req_word;
567 }
568 
569 static void init_prb_bdqc(struct packet_sock *po,
570 			struct packet_ring_buffer *rb,
571 			struct pgv *pg_vec,
572 			union tpacket_req_u *req_u)
573 {
574 	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
575 	struct tpacket_block_desc *pbd;
576 
577 	memset(p1, 0x0, sizeof(*p1));
578 
579 	p1->knxt_seq_num = 1;
580 	p1->pkbdq = pg_vec;
581 	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
582 	p1->pkblk_start	= pg_vec[0].buffer;
583 	p1->kblk_size = req_u->req3.tp_block_size;
584 	p1->knum_blocks	= req_u->req3.tp_block_nr;
585 	p1->hdrlen = po->tp_hdrlen;
586 	p1->version = po->tp_version;
587 	p1->last_kactive_blk_num = 0;
588 	po->stats.stats3.tp_freeze_q_cnt = 0;
589 	if (req_u->req3.tp_retire_blk_tov)
590 		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
591 	else
592 		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
593 						req_u->req3.tp_block_size);
594 	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
595 	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
596 
597 	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
598 	prb_init_ft_ops(p1, req_u);
599 	prb_setup_retire_blk_timer(po);
600 	prb_open_block(p1, pbd);
601 }
602 
603 /*  Do NOT update the last_blk_num first.
604  *  Assumes sk_buff_head lock is held.
605  */
606 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
607 {
608 	mod_timer(&pkc->retire_blk_timer,
609 			jiffies + pkc->tov_in_jiffies);
610 	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
611 }
612 
613 /*
614  * Timer logic:
615  * 1) We refresh the timer only when we open a block.
616  *    By doing this we don't waste cycles refreshing the timer
617  *	  on packet-by-packet basis.
618  *
619  * With a 1MB block-size, on a 1Gbps line, it will take
620  * i) ~8 ms to fill a block + ii) memcpy etc.
621  * In this cut we are not accounting for the memcpy time.
622  *
623  * So, if the user sets the 'tmo' to 10ms then the timer
624  * will never fire while the block is still getting filled
625  * (which is what we want). However, the user could choose
626  * to close a block early and that's fine.
627  *
628  * But when the timer does fire, we check whether or not to refresh it.
629  * Since the tmo granularity is in msecs, it is not too expensive
630  * to refresh the timer, lets say every '8' msecs.
631  * Either the user can set the 'tmo' or we can derive it based on
632  * a) line-speed and b) block-size.
633  * prb_calc_retire_blk_tmo() calculates the tmo.
634  *
635  */
636 static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
637 {
638 	struct packet_sock *po =
639 		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
640 	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
641 	unsigned int frozen;
642 	struct tpacket_block_desc *pbd;
643 
644 	spin_lock(&po->sk.sk_receive_queue.lock);
645 
646 	frozen = prb_queue_frozen(pkc);
647 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
648 
649 	if (unlikely(pkc->delete_blk_timer))
650 		goto out;
651 
652 	/* We only need to plug the race when the block is partially filled.
653 	 * tpacket_rcv:
654 	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
655 	 *		copy_bits() is in progress ...
656 	 *		timer fires on other cpu:
657 	 *		we can't retire the current block because copy_bits
658 	 *		is in progress.
659 	 *
660 	 */
661 	if (BLOCK_NUM_PKTS(pbd)) {
662 		while (atomic_read(&pkc->blk_fill_in_prog)) {
663 			/* Waiting for skb_copy_bits to finish... */
664 			cpu_relax();
665 		}
666 	}
667 
668 	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
669 		if (!frozen) {
670 			if (!BLOCK_NUM_PKTS(pbd)) {
671 				/* An empty block. Just refresh the timer. */
672 				goto refresh_timer;
673 			}
674 			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
675 			if (!prb_dispatch_next_block(pkc, po))
676 				goto refresh_timer;
677 			else
678 				goto out;
679 		} else {
680 			/* Case 1. Queue was frozen because user-space was
681 			 *	   lagging behind.
682 			 */
683 			if (prb_curr_blk_in_use(pbd)) {
684 				/*
685 				 * Ok, user-space is still behind.
686 				 * So just refresh the timer.
687 				 */
688 				goto refresh_timer;
689 			} else {
690 			       /* Case 2. queue was frozen,user-space caught up,
691 				* now the link went idle && the timer fired.
692 				* We don't have a block to close.So we open this
693 				* block and restart the timer.
694 				* opening a block thaws the queue,restarts timer
695 				* Thawing/timer-refresh is a side effect.
696 				*/
697 				prb_open_block(pkc, pbd);
698 				goto out;
699 			}
700 		}
701 	}
702 
703 refresh_timer:
704 	_prb_refresh_rx_retire_blk_timer(pkc);
705 
706 out:
707 	spin_unlock(&po->sk.sk_receive_queue.lock);
708 }
709 
710 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
711 		struct tpacket_block_desc *pbd1, __u32 status)
712 {
713 	/* Flush everything minus the block header */
714 
715 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
716 	u8 *start, *end;
717 
718 	start = (u8 *)pbd1;
719 
720 	/* Skip the block header(we know header WILL fit in 4K) */
721 	start += PAGE_SIZE;
722 
723 	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
724 	for (; start < end; start += PAGE_SIZE)
725 		flush_dcache_page(pgv_to_page(start));
726 
727 	smp_wmb();
728 #endif
729 
730 	/* Now update the block status. */
731 
732 	BLOCK_STATUS(pbd1) = status;
733 
734 	/* Flush the block header */
735 
736 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
737 	start = (u8 *)pbd1;
738 	flush_dcache_page(pgv_to_page(start));
739 
740 	smp_wmb();
741 #endif
742 }
743 
744 /*
745  * Side effect:
746  *
747  * 1) flush the block
748  * 2) Increment active_blk_num
749  *
750  * Note:We DONT refresh the timer on purpose.
751  *	Because almost always the next block will be opened.
752  */
753 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
754 		struct tpacket_block_desc *pbd1,
755 		struct packet_sock *po, unsigned int stat)
756 {
757 	__u32 status = TP_STATUS_USER | stat;
758 
759 	struct tpacket3_hdr *last_pkt;
760 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
761 	struct sock *sk = &po->sk;
762 
763 	if (atomic_read(&po->tp_drops))
764 		status |= TP_STATUS_LOSING;
765 
766 	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
767 	last_pkt->tp_next_offset = 0;
768 
769 	/* Get the ts of the last pkt */
770 	if (BLOCK_NUM_PKTS(pbd1)) {
771 		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
772 		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
773 	} else {
774 		/* Ok, we tmo'd - so get the current time.
775 		 *
776 		 * It shouldn't really happen as we don't close empty
777 		 * blocks. See prb_retire_rx_blk_timer_expired().
778 		 */
779 		struct timespec64 ts;
780 		ktime_get_real_ts64(&ts);
781 		h1->ts_last_pkt.ts_sec = ts.tv_sec;
782 		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
783 	}
784 
785 	smp_wmb();
786 
787 	/* Flush the block */
788 	prb_flush_block(pkc1, pbd1, status);
789 
790 	sk->sk_data_ready(sk);
791 
792 	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
793 }
794 
795 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
796 {
797 	pkc->reset_pending_on_curr_blk = 0;
798 }
799 
800 /*
801  * Side effect of opening a block:
802  *
803  * 1) prb_queue is thawed.
804  * 2) retire_blk_timer is refreshed.
805  *
806  */
807 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
808 	struct tpacket_block_desc *pbd1)
809 {
810 	struct timespec64 ts;
811 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
812 
813 	smp_rmb();
814 
815 	/* We could have just memset this but we will lose the
816 	 * flexibility of making the priv area sticky
817 	 */
818 
819 	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
820 	BLOCK_NUM_PKTS(pbd1) = 0;
821 	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
822 
823 	ktime_get_real_ts64(&ts);
824 
825 	h1->ts_first_pkt.ts_sec = ts.tv_sec;
826 	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
827 
828 	pkc1->pkblk_start = (char *)pbd1;
829 	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
830 
831 	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
832 	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
833 
834 	pbd1->version = pkc1->version;
835 	pkc1->prev = pkc1->nxt_offset;
836 	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
837 
838 	prb_thaw_queue(pkc1);
839 	_prb_refresh_rx_retire_blk_timer(pkc1);
840 
841 	smp_wmb();
842 }
843 
844 /*
845  * Queue freeze logic:
846  * 1) Assume tp_block_nr = 8 blocks.
847  * 2) At time 't0', user opens Rx ring.
848  * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
849  * 4) user-space is either sleeping or processing block '0'.
850  * 5) tpacket_rcv is currently filling block '7', since there is no space left,
851  *    it will close block-7,loop around and try to fill block '0'.
852  *    call-flow:
853  *    __packet_lookup_frame_in_block
854  *      prb_retire_current_block()
855  *      prb_dispatch_next_block()
856  *        |->(BLOCK_STATUS == USER) evaluates to true
857  *    5.1) Since block-0 is currently in-use, we just freeze the queue.
858  * 6) Now there are two cases:
859  *    6.1) Link goes idle right after the queue is frozen.
860  *         But remember, the last open_block() refreshed the timer.
861  *         When this timer expires,it will refresh itself so that we can
862  *         re-open block-0 in near future.
863  *    6.2) Link is busy and keeps on receiving packets. This is a simple
864  *         case and __packet_lookup_frame_in_block will check if block-0
865  *         is free and can now be re-used.
866  */
867 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
868 				  struct packet_sock *po)
869 {
870 	pkc->reset_pending_on_curr_blk = 1;
871 	po->stats.stats3.tp_freeze_q_cnt++;
872 }
873 
874 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
875 
876 /*
877  * If the next block is free then we will dispatch it
878  * and return a good offset.
879  * Else, we will freeze the queue.
880  * So, caller must check the return value.
881  */
882 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
883 		struct packet_sock *po)
884 {
885 	struct tpacket_block_desc *pbd;
886 
887 	smp_rmb();
888 
889 	/* 1. Get current block num */
890 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
891 
892 	/* 2. If this block is currently in_use then freeze the queue */
893 	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
894 		prb_freeze_queue(pkc, po);
895 		return NULL;
896 	}
897 
898 	/*
899 	 * 3.
900 	 * open this block and return the offset where the first packet
901 	 * needs to get stored.
902 	 */
903 	prb_open_block(pkc, pbd);
904 	return (void *)pkc->nxt_offset;
905 }
906 
907 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
908 		struct packet_sock *po, unsigned int status)
909 {
910 	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
911 
912 	/* retire/close the current block */
913 	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
914 		/*
915 		 * Plug the case where copy_bits() is in progress on
916 		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
917 		 * have space to copy the pkt in the current block and
918 		 * called prb_retire_current_block()
919 		 *
920 		 * We don't need to worry about the TMO case because
921 		 * the timer-handler already handled this case.
922 		 */
923 		if (!(status & TP_STATUS_BLK_TMO)) {
924 			while (atomic_read(&pkc->blk_fill_in_prog)) {
925 				/* Waiting for skb_copy_bits to finish... */
926 				cpu_relax();
927 			}
928 		}
929 		prb_close_block(pkc, pbd, po, status);
930 		return;
931 	}
932 }
933 
934 static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
935 {
936 	return TP_STATUS_USER & BLOCK_STATUS(pbd);
937 }
938 
939 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
940 {
941 	return pkc->reset_pending_on_curr_blk;
942 }
943 
944 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
945 {
946 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
947 	atomic_dec(&pkc->blk_fill_in_prog);
948 }
949 
950 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
951 			struct tpacket3_hdr *ppd)
952 {
953 	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
954 }
955 
956 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
957 			struct tpacket3_hdr *ppd)
958 {
959 	ppd->hv1.tp_rxhash = 0;
960 }
961 
962 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
963 			struct tpacket3_hdr *ppd)
964 {
965 	if (skb_vlan_tag_present(pkc->skb)) {
966 		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
967 		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
968 		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
969 	} else {
970 		ppd->hv1.tp_vlan_tci = 0;
971 		ppd->hv1.tp_vlan_tpid = 0;
972 		ppd->tp_status = TP_STATUS_AVAILABLE;
973 	}
974 }
975 
976 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
977 			struct tpacket3_hdr *ppd)
978 {
979 	ppd->hv1.tp_padding = 0;
980 	prb_fill_vlan_info(pkc, ppd);
981 
982 	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
983 		prb_fill_rxhash(pkc, ppd);
984 	else
985 		prb_clear_rxhash(pkc, ppd);
986 }
987 
988 static void prb_fill_curr_block(char *curr,
989 				struct tpacket_kbdq_core *pkc,
990 				struct tpacket_block_desc *pbd,
991 				unsigned int len)
992 {
993 	struct tpacket3_hdr *ppd;
994 
995 	ppd  = (struct tpacket3_hdr *)curr;
996 	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
997 	pkc->prev = curr;
998 	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
999 	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1000 	BLOCK_NUM_PKTS(pbd) += 1;
1001 	atomic_inc(&pkc->blk_fill_in_prog);
1002 	prb_run_all_ft_ops(pkc, ppd);
1003 }
1004 
1005 /* Assumes caller has the sk->rx_queue.lock */
1006 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1007 					    struct sk_buff *skb,
1008 					    unsigned int len
1009 					    )
1010 {
1011 	struct tpacket_kbdq_core *pkc;
1012 	struct tpacket_block_desc *pbd;
1013 	char *curr, *end;
1014 
1015 	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1016 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1017 
1018 	/* Queue is frozen when user space is lagging behind */
1019 	if (prb_queue_frozen(pkc)) {
1020 		/*
1021 		 * Check if that last block which caused the queue to freeze,
1022 		 * is still in_use by user-space.
1023 		 */
1024 		if (prb_curr_blk_in_use(pbd)) {
1025 			/* Can't record this packet */
1026 			return NULL;
1027 		} else {
1028 			/*
1029 			 * Ok, the block was released by user-space.
1030 			 * Now let's open that block.
1031 			 * opening a block also thaws the queue.
1032 			 * Thawing is a side effect.
1033 			 */
1034 			prb_open_block(pkc, pbd);
1035 		}
1036 	}
1037 
1038 	smp_mb();
1039 	curr = pkc->nxt_offset;
1040 	pkc->skb = skb;
1041 	end = (char *)pbd + pkc->kblk_size;
1042 
1043 	/* first try the current block */
1044 	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1045 		prb_fill_curr_block(curr, pkc, pbd, len);
1046 		return (void *)curr;
1047 	}
1048 
1049 	/* Ok, close the current block */
1050 	prb_retire_current_block(pkc, po, 0);
1051 
1052 	/* Now, try to dispatch the next block */
1053 	curr = (char *)prb_dispatch_next_block(pkc, po);
1054 	if (curr) {
1055 		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1056 		prb_fill_curr_block(curr, pkc, pbd, len);
1057 		return (void *)curr;
1058 	}
1059 
1060 	/*
1061 	 * No free blocks are available.user_space hasn't caught up yet.
1062 	 * Queue was just frozen and now this packet will get dropped.
1063 	 */
1064 	return NULL;
1065 }
1066 
1067 static void *packet_current_rx_frame(struct packet_sock *po,
1068 					    struct sk_buff *skb,
1069 					    int status, unsigned int len)
1070 {
1071 	char *curr = NULL;
1072 	switch (po->tp_version) {
1073 	case TPACKET_V1:
1074 	case TPACKET_V2:
1075 		curr = packet_lookup_frame(po, &po->rx_ring,
1076 					po->rx_ring.head, status);
1077 		return curr;
1078 	case TPACKET_V3:
1079 		return __packet_lookup_frame_in_block(po, skb, len);
1080 	default:
1081 		WARN(1, "TPACKET version not supported\n");
1082 		BUG();
1083 		return NULL;
1084 	}
1085 }
1086 
1087 static void *prb_lookup_block(const struct packet_sock *po,
1088 			      const struct packet_ring_buffer *rb,
1089 			      unsigned int idx,
1090 			      int status)
1091 {
1092 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1093 	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1094 
1095 	if (status != BLOCK_STATUS(pbd))
1096 		return NULL;
1097 	return pbd;
1098 }
1099 
1100 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1101 {
1102 	unsigned int prev;
1103 	if (rb->prb_bdqc.kactive_blk_num)
1104 		prev = rb->prb_bdqc.kactive_blk_num-1;
1105 	else
1106 		prev = rb->prb_bdqc.knum_blocks-1;
1107 	return prev;
1108 }
1109 
1110 /* Assumes caller has held the rx_queue.lock */
1111 static void *__prb_previous_block(struct packet_sock *po,
1112 					 struct packet_ring_buffer *rb,
1113 					 int status)
1114 {
1115 	unsigned int previous = prb_previous_blk_num(rb);
1116 	return prb_lookup_block(po, rb, previous, status);
1117 }
1118 
1119 static void *packet_previous_rx_frame(struct packet_sock *po,
1120 					     struct packet_ring_buffer *rb,
1121 					     int status)
1122 {
1123 	if (po->tp_version <= TPACKET_V2)
1124 		return packet_previous_frame(po, rb, status);
1125 
1126 	return __prb_previous_block(po, rb, status);
1127 }
1128 
1129 static void packet_increment_rx_head(struct packet_sock *po,
1130 					    struct packet_ring_buffer *rb)
1131 {
1132 	switch (po->tp_version) {
1133 	case TPACKET_V1:
1134 	case TPACKET_V2:
1135 		return packet_increment_head(rb);
1136 	case TPACKET_V3:
1137 	default:
1138 		WARN(1, "TPACKET version not supported.\n");
1139 		BUG();
1140 		return;
1141 	}
1142 }
1143 
1144 static void *packet_previous_frame(struct packet_sock *po,
1145 		struct packet_ring_buffer *rb,
1146 		int status)
1147 {
1148 	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1149 	return packet_lookup_frame(po, rb, previous, status);
1150 }
1151 
1152 static void packet_increment_head(struct packet_ring_buffer *buff)
1153 {
1154 	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1155 }
1156 
1157 static void packet_inc_pending(struct packet_ring_buffer *rb)
1158 {
1159 	this_cpu_inc(*rb->pending_refcnt);
1160 }
1161 
1162 static void packet_dec_pending(struct packet_ring_buffer *rb)
1163 {
1164 	this_cpu_dec(*rb->pending_refcnt);
1165 }
1166 
1167 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1168 {
1169 	unsigned int refcnt = 0;
1170 	int cpu;
1171 
1172 	/* We don't use pending refcount in rx_ring. */
1173 	if (rb->pending_refcnt == NULL)
1174 		return 0;
1175 
1176 	for_each_possible_cpu(cpu)
1177 		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1178 
1179 	return refcnt;
1180 }
1181 
1182 static int packet_alloc_pending(struct packet_sock *po)
1183 {
1184 	po->rx_ring.pending_refcnt = NULL;
1185 
1186 	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1187 	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1188 		return -ENOBUFS;
1189 
1190 	return 0;
1191 }
1192 
1193 static void packet_free_pending(struct packet_sock *po)
1194 {
1195 	free_percpu(po->tx_ring.pending_refcnt);
1196 }
1197 
1198 #define ROOM_POW_OFF	2
1199 #define ROOM_NONE	0x0
1200 #define ROOM_LOW	0x1
1201 #define ROOM_NORMAL	0x2
1202 
1203 static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1204 {
1205 	int idx, len;
1206 
1207 	len = READ_ONCE(po->rx_ring.frame_max) + 1;
1208 	idx = READ_ONCE(po->rx_ring.head);
1209 	if (pow_off)
1210 		idx += len >> pow_off;
1211 	if (idx >= len)
1212 		idx -= len;
1213 	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1214 }
1215 
1216 static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1217 {
1218 	int idx, len;
1219 
1220 	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1221 	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1222 	if (pow_off)
1223 		idx += len >> pow_off;
1224 	if (idx >= len)
1225 		idx -= len;
1226 	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1227 }
1228 
1229 static int __packet_rcv_has_room(const struct packet_sock *po,
1230 				 const struct sk_buff *skb)
1231 {
1232 	const struct sock *sk = &po->sk;
1233 	int ret = ROOM_NONE;
1234 
1235 	if (po->prot_hook.func != tpacket_rcv) {
1236 		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1237 		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1238 				   - (skb ? skb->truesize : 0);
1239 
1240 		if (avail > (rcvbuf >> ROOM_POW_OFF))
1241 			return ROOM_NORMAL;
1242 		else if (avail > 0)
1243 			return ROOM_LOW;
1244 		else
1245 			return ROOM_NONE;
1246 	}
1247 
1248 	if (po->tp_version == TPACKET_V3) {
1249 		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1250 			ret = ROOM_NORMAL;
1251 		else if (__tpacket_v3_has_room(po, 0))
1252 			ret = ROOM_LOW;
1253 	} else {
1254 		if (__tpacket_has_room(po, ROOM_POW_OFF))
1255 			ret = ROOM_NORMAL;
1256 		else if (__tpacket_has_room(po, 0))
1257 			ret = ROOM_LOW;
1258 	}
1259 
1260 	return ret;
1261 }
1262 
1263 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1264 {
1265 	int pressure, ret;
1266 
1267 	ret = __packet_rcv_has_room(po, skb);
1268 	pressure = ret != ROOM_NORMAL;
1269 
1270 	if (READ_ONCE(po->pressure) != pressure)
1271 		WRITE_ONCE(po->pressure, pressure);
1272 
1273 	return ret;
1274 }
1275 
1276 static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1277 {
1278 	if (READ_ONCE(po->pressure) &&
1279 	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1280 		WRITE_ONCE(po->pressure,  0);
1281 }
1282 
1283 static void packet_sock_destruct(struct sock *sk)
1284 {
1285 	skb_queue_purge(&sk->sk_error_queue);
1286 
1287 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1288 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1289 
1290 	if (!sock_flag(sk, SOCK_DEAD)) {
1291 		pr_err("Attempt to release alive packet socket: %p\n", sk);
1292 		return;
1293 	}
1294 
1295 	sk_refcnt_debug_dec(sk);
1296 }
1297 
1298 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1299 {
1300 	u32 *history = po->rollover->history;
1301 	u32 victim, rxhash;
1302 	int i, count = 0;
1303 
1304 	rxhash = skb_get_hash(skb);
1305 	for (i = 0; i < ROLLOVER_HLEN; i++)
1306 		if (READ_ONCE(history[i]) == rxhash)
1307 			count++;
1308 
1309 	victim = prandom_u32() % ROLLOVER_HLEN;
1310 
1311 	/* Avoid dirtying the cache line if possible */
1312 	if (READ_ONCE(history[victim]) != rxhash)
1313 		WRITE_ONCE(history[victim], rxhash);
1314 
1315 	return count > (ROLLOVER_HLEN >> 1);
1316 }
1317 
1318 static unsigned int fanout_demux_hash(struct packet_fanout *f,
1319 				      struct sk_buff *skb,
1320 				      unsigned int num)
1321 {
1322 	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1323 }
1324 
1325 static unsigned int fanout_demux_lb(struct packet_fanout *f,
1326 				    struct sk_buff *skb,
1327 				    unsigned int num)
1328 {
1329 	unsigned int val = atomic_inc_return(&f->rr_cur);
1330 
1331 	return val % num;
1332 }
1333 
1334 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1335 				     struct sk_buff *skb,
1336 				     unsigned int num)
1337 {
1338 	return smp_processor_id() % num;
1339 }
1340 
1341 static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1342 				     struct sk_buff *skb,
1343 				     unsigned int num)
1344 {
1345 	return prandom_u32_max(num);
1346 }
1347 
1348 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1349 					  struct sk_buff *skb,
1350 					  unsigned int idx, bool try_self,
1351 					  unsigned int num)
1352 {
1353 	struct packet_sock *po, *po_next, *po_skip = NULL;
1354 	unsigned int i, j, room = ROOM_NONE;
1355 
1356 	po = pkt_sk(f->arr[idx]);
1357 
1358 	if (try_self) {
1359 		room = packet_rcv_has_room(po, skb);
1360 		if (room == ROOM_NORMAL ||
1361 		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1362 			return idx;
1363 		po_skip = po;
1364 	}
1365 
1366 	i = j = min_t(int, po->rollover->sock, num - 1);
1367 	do {
1368 		po_next = pkt_sk(f->arr[i]);
1369 		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
1370 		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1371 			if (i != j)
1372 				po->rollover->sock = i;
1373 			atomic_long_inc(&po->rollover->num);
1374 			if (room == ROOM_LOW)
1375 				atomic_long_inc(&po->rollover->num_huge);
1376 			return i;
1377 		}
1378 
1379 		if (++i == num)
1380 			i = 0;
1381 	} while (i != j);
1382 
1383 	atomic_long_inc(&po->rollover->num_failed);
1384 	return idx;
1385 }
1386 
1387 static unsigned int fanout_demux_qm(struct packet_fanout *f,
1388 				    struct sk_buff *skb,
1389 				    unsigned int num)
1390 {
1391 	return skb_get_queue_mapping(skb) % num;
1392 }
1393 
1394 static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1395 				     struct sk_buff *skb,
1396 				     unsigned int num)
1397 {
1398 	struct bpf_prog *prog;
1399 	unsigned int ret = 0;
1400 
1401 	rcu_read_lock();
1402 	prog = rcu_dereference(f->bpf_prog);
1403 	if (prog)
1404 		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1405 	rcu_read_unlock();
1406 
1407 	return ret;
1408 }
1409 
1410 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1411 {
1412 	return f->flags & (flag >> 8);
1413 }
1414 
1415 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1416 			     struct packet_type *pt, struct net_device *orig_dev)
1417 {
1418 	struct packet_fanout *f = pt->af_packet_priv;
1419 	unsigned int num = READ_ONCE(f->num_members);
1420 	struct net *net = read_pnet(&f->net);
1421 	struct packet_sock *po;
1422 	unsigned int idx;
1423 
1424 	if (!net_eq(dev_net(dev), net) || !num) {
1425 		kfree_skb(skb);
1426 		return 0;
1427 	}
1428 
1429 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1430 		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1431 		if (!skb)
1432 			return 0;
1433 	}
1434 	switch (f->type) {
1435 	case PACKET_FANOUT_HASH:
1436 	default:
1437 		idx = fanout_demux_hash(f, skb, num);
1438 		break;
1439 	case PACKET_FANOUT_LB:
1440 		idx = fanout_demux_lb(f, skb, num);
1441 		break;
1442 	case PACKET_FANOUT_CPU:
1443 		idx = fanout_demux_cpu(f, skb, num);
1444 		break;
1445 	case PACKET_FANOUT_RND:
1446 		idx = fanout_demux_rnd(f, skb, num);
1447 		break;
1448 	case PACKET_FANOUT_QM:
1449 		idx = fanout_demux_qm(f, skb, num);
1450 		break;
1451 	case PACKET_FANOUT_ROLLOVER:
1452 		idx = fanout_demux_rollover(f, skb, 0, false, num);
1453 		break;
1454 	case PACKET_FANOUT_CBPF:
1455 	case PACKET_FANOUT_EBPF:
1456 		idx = fanout_demux_bpf(f, skb, num);
1457 		break;
1458 	}
1459 
1460 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1461 		idx = fanout_demux_rollover(f, skb, idx, true, num);
1462 
1463 	po = pkt_sk(f->arr[idx]);
1464 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1465 }
1466 
1467 DEFINE_MUTEX(fanout_mutex);
1468 EXPORT_SYMBOL_GPL(fanout_mutex);
1469 static LIST_HEAD(fanout_list);
1470 static u16 fanout_next_id;
1471 
1472 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1473 {
1474 	struct packet_fanout *f = po->fanout;
1475 
1476 	spin_lock(&f->lock);
1477 	f->arr[f->num_members] = sk;
1478 	smp_wmb();
1479 	f->num_members++;
1480 	if (f->num_members == 1)
1481 		dev_add_pack(&f->prot_hook);
1482 	spin_unlock(&f->lock);
1483 }
1484 
1485 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1486 {
1487 	struct packet_fanout *f = po->fanout;
1488 	int i;
1489 
1490 	spin_lock(&f->lock);
1491 	for (i = 0; i < f->num_members; i++) {
1492 		if (f->arr[i] == sk)
1493 			break;
1494 	}
1495 	BUG_ON(i >= f->num_members);
1496 	f->arr[i] = f->arr[f->num_members - 1];
1497 	f->num_members--;
1498 	if (f->num_members == 0)
1499 		__dev_remove_pack(&f->prot_hook);
1500 	spin_unlock(&f->lock);
1501 }
1502 
1503 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1504 {
1505 	if (sk->sk_family != PF_PACKET)
1506 		return false;
1507 
1508 	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1509 }
1510 
1511 static void fanout_init_data(struct packet_fanout *f)
1512 {
1513 	switch (f->type) {
1514 	case PACKET_FANOUT_LB:
1515 		atomic_set(&f->rr_cur, 0);
1516 		break;
1517 	case PACKET_FANOUT_CBPF:
1518 	case PACKET_FANOUT_EBPF:
1519 		RCU_INIT_POINTER(f->bpf_prog, NULL);
1520 		break;
1521 	}
1522 }
1523 
1524 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1525 {
1526 	struct bpf_prog *old;
1527 
1528 	spin_lock(&f->lock);
1529 	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1530 	rcu_assign_pointer(f->bpf_prog, new);
1531 	spin_unlock(&f->lock);
1532 
1533 	if (old) {
1534 		synchronize_net();
1535 		bpf_prog_destroy(old);
1536 	}
1537 }
1538 
1539 static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
1540 				unsigned int len)
1541 {
1542 	struct bpf_prog *new;
1543 	struct sock_fprog fprog;
1544 	int ret;
1545 
1546 	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1547 		return -EPERM;
1548 	if (len != sizeof(fprog))
1549 		return -EINVAL;
1550 	if (copy_from_user(&fprog, data, len))
1551 		return -EFAULT;
1552 
1553 	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1554 	if (ret)
1555 		return ret;
1556 
1557 	__fanout_set_data_bpf(po->fanout, new);
1558 	return 0;
1559 }
1560 
1561 static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1562 				unsigned int len)
1563 {
1564 	struct bpf_prog *new;
1565 	u32 fd;
1566 
1567 	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1568 		return -EPERM;
1569 	if (len != sizeof(fd))
1570 		return -EINVAL;
1571 	if (copy_from_user(&fd, data, len))
1572 		return -EFAULT;
1573 
1574 	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1575 	if (IS_ERR(new))
1576 		return PTR_ERR(new);
1577 
1578 	__fanout_set_data_bpf(po->fanout, new);
1579 	return 0;
1580 }
1581 
1582 static int fanout_set_data(struct packet_sock *po, char __user *data,
1583 			   unsigned int len)
1584 {
1585 	switch (po->fanout->type) {
1586 	case PACKET_FANOUT_CBPF:
1587 		return fanout_set_data_cbpf(po, data, len);
1588 	case PACKET_FANOUT_EBPF:
1589 		return fanout_set_data_ebpf(po, data, len);
1590 	default:
1591 		return -EINVAL;
1592 	}
1593 }
1594 
1595 static void fanout_release_data(struct packet_fanout *f)
1596 {
1597 	switch (f->type) {
1598 	case PACKET_FANOUT_CBPF:
1599 	case PACKET_FANOUT_EBPF:
1600 		__fanout_set_data_bpf(f, NULL);
1601 	}
1602 }
1603 
1604 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1605 {
1606 	struct packet_fanout *f;
1607 
1608 	list_for_each_entry(f, &fanout_list, list) {
1609 		if (f->id == candidate_id &&
1610 		    read_pnet(&f->net) == sock_net(sk)) {
1611 			return false;
1612 		}
1613 	}
1614 	return true;
1615 }
1616 
1617 static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1618 {
1619 	u16 id = fanout_next_id;
1620 
1621 	do {
1622 		if (__fanout_id_is_free(sk, id)) {
1623 			*new_id = id;
1624 			fanout_next_id = id + 1;
1625 			return true;
1626 		}
1627 
1628 		id++;
1629 	} while (id != fanout_next_id);
1630 
1631 	return false;
1632 }
1633 
1634 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1635 {
1636 	struct packet_rollover *rollover = NULL;
1637 	struct packet_sock *po = pkt_sk(sk);
1638 	struct packet_fanout *f, *match;
1639 	u8 type = type_flags & 0xff;
1640 	u8 flags = type_flags >> 8;
1641 	int err;
1642 
1643 	switch (type) {
1644 	case PACKET_FANOUT_ROLLOVER:
1645 		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1646 			return -EINVAL;
1647 	case PACKET_FANOUT_HASH:
1648 	case PACKET_FANOUT_LB:
1649 	case PACKET_FANOUT_CPU:
1650 	case PACKET_FANOUT_RND:
1651 	case PACKET_FANOUT_QM:
1652 	case PACKET_FANOUT_CBPF:
1653 	case PACKET_FANOUT_EBPF:
1654 		break;
1655 	default:
1656 		return -EINVAL;
1657 	}
1658 
1659 	mutex_lock(&fanout_mutex);
1660 
1661 	err = -EALREADY;
1662 	if (po->fanout)
1663 		goto out;
1664 
1665 	if (type == PACKET_FANOUT_ROLLOVER ||
1666 	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1667 		err = -ENOMEM;
1668 		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1669 		if (!rollover)
1670 			goto out;
1671 		atomic_long_set(&rollover->num, 0);
1672 		atomic_long_set(&rollover->num_huge, 0);
1673 		atomic_long_set(&rollover->num_failed, 0);
1674 	}
1675 
1676 	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1677 		if (id != 0) {
1678 			err = -EINVAL;
1679 			goto out;
1680 		}
1681 		if (!fanout_find_new_id(sk, &id)) {
1682 			err = -ENOMEM;
1683 			goto out;
1684 		}
1685 		/* ephemeral flag for the first socket in the group: drop it */
1686 		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1687 	}
1688 
1689 	match = NULL;
1690 	list_for_each_entry(f, &fanout_list, list) {
1691 		if (f->id == id &&
1692 		    read_pnet(&f->net) == sock_net(sk)) {
1693 			match = f;
1694 			break;
1695 		}
1696 	}
1697 	err = -EINVAL;
1698 	if (match && match->flags != flags)
1699 		goto out;
1700 	if (!match) {
1701 		err = -ENOMEM;
1702 		match = kzalloc(sizeof(*match), GFP_KERNEL);
1703 		if (!match)
1704 			goto out;
1705 		write_pnet(&match->net, sock_net(sk));
1706 		match->id = id;
1707 		match->type = type;
1708 		match->flags = flags;
1709 		INIT_LIST_HEAD(&match->list);
1710 		spin_lock_init(&match->lock);
1711 		refcount_set(&match->sk_ref, 0);
1712 		fanout_init_data(match);
1713 		match->prot_hook.type = po->prot_hook.type;
1714 		match->prot_hook.dev = po->prot_hook.dev;
1715 		match->prot_hook.func = packet_rcv_fanout;
1716 		match->prot_hook.af_packet_priv = match;
1717 		match->prot_hook.id_match = match_fanout_group;
1718 		list_add(&match->list, &fanout_list);
1719 	}
1720 	err = -EINVAL;
1721 
1722 	spin_lock(&po->bind_lock);
1723 	if (po->running &&
1724 	    match->type == type &&
1725 	    match->prot_hook.type == po->prot_hook.type &&
1726 	    match->prot_hook.dev == po->prot_hook.dev) {
1727 		err = -ENOSPC;
1728 		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1729 			__dev_remove_pack(&po->prot_hook);
1730 			po->fanout = match;
1731 			po->rollover = rollover;
1732 			rollover = NULL;
1733 			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1734 			__fanout_link(sk, po);
1735 			err = 0;
1736 		}
1737 	}
1738 	spin_unlock(&po->bind_lock);
1739 
1740 	if (err && !refcount_read(&match->sk_ref)) {
1741 		list_del(&match->list);
1742 		kfree(match);
1743 	}
1744 
1745 out:
1746 	kfree(rollover);
1747 	mutex_unlock(&fanout_mutex);
1748 	return err;
1749 }
1750 
1751 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1752  * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1753  * It is the responsibility of the caller to call fanout_release_data() and
1754  * free the returned packet_fanout (after synchronize_net())
1755  */
1756 static struct packet_fanout *fanout_release(struct sock *sk)
1757 {
1758 	struct packet_sock *po = pkt_sk(sk);
1759 	struct packet_fanout *f;
1760 
1761 	mutex_lock(&fanout_mutex);
1762 	f = po->fanout;
1763 	if (f) {
1764 		po->fanout = NULL;
1765 
1766 		if (refcount_dec_and_test(&f->sk_ref))
1767 			list_del(&f->list);
1768 		else
1769 			f = NULL;
1770 	}
1771 	mutex_unlock(&fanout_mutex);
1772 
1773 	return f;
1774 }
1775 
1776 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1777 					  struct sk_buff *skb)
1778 {
1779 	/* Earlier code assumed this would be a VLAN pkt, double-check
1780 	 * this now that we have the actual packet in hand. We can only
1781 	 * do this check on Ethernet devices.
1782 	 */
1783 	if (unlikely(dev->type != ARPHRD_ETHER))
1784 		return false;
1785 
1786 	skb_reset_mac_header(skb);
1787 	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1788 }
1789 
1790 static const struct proto_ops packet_ops;
1791 
1792 static const struct proto_ops packet_ops_spkt;
1793 
1794 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1795 			   struct packet_type *pt, struct net_device *orig_dev)
1796 {
1797 	struct sock *sk;
1798 	struct sockaddr_pkt *spkt;
1799 
1800 	/*
1801 	 *	When we registered the protocol we saved the socket in the data
1802 	 *	field for just this event.
1803 	 */
1804 
1805 	sk = pt->af_packet_priv;
1806 
1807 	/*
1808 	 *	Yank back the headers [hope the device set this
1809 	 *	right or kerboom...]
1810 	 *
1811 	 *	Incoming packets have ll header pulled,
1812 	 *	push it back.
1813 	 *
1814 	 *	For outgoing ones skb->data == skb_mac_header(skb)
1815 	 *	so that this procedure is noop.
1816 	 */
1817 
1818 	if (skb->pkt_type == PACKET_LOOPBACK)
1819 		goto out;
1820 
1821 	if (!net_eq(dev_net(dev), sock_net(sk)))
1822 		goto out;
1823 
1824 	skb = skb_share_check(skb, GFP_ATOMIC);
1825 	if (skb == NULL)
1826 		goto oom;
1827 
1828 	/* drop any routing info */
1829 	skb_dst_drop(skb);
1830 
1831 	/* drop conntrack reference */
1832 	nf_reset_ct(skb);
1833 
1834 	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1835 
1836 	skb_push(skb, skb->data - skb_mac_header(skb));
1837 
1838 	/*
1839 	 *	The SOCK_PACKET socket receives _all_ frames.
1840 	 */
1841 
1842 	spkt->spkt_family = dev->type;
1843 	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1844 	spkt->spkt_protocol = skb->protocol;
1845 
1846 	/*
1847 	 *	Charge the memory to the socket. This is done specifically
1848 	 *	to prevent sockets using all the memory up.
1849 	 */
1850 
1851 	if (sock_queue_rcv_skb(sk, skb) == 0)
1852 		return 0;
1853 
1854 out:
1855 	kfree_skb(skb);
1856 oom:
1857 	return 0;
1858 }
1859 
1860 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1861 {
1862 	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1863 	    sock->type == SOCK_RAW) {
1864 		skb_reset_mac_header(skb);
1865 		skb->protocol = dev_parse_header_protocol(skb);
1866 	}
1867 
1868 	skb_probe_transport_header(skb);
1869 }
1870 
1871 /*
1872  *	Output a raw packet to a device layer. This bypasses all the other
1873  *	protocol layers and you must therefore supply it with a complete frame
1874  */
1875 
1876 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1877 			       size_t len)
1878 {
1879 	struct sock *sk = sock->sk;
1880 	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1881 	struct sk_buff *skb = NULL;
1882 	struct net_device *dev;
1883 	struct sockcm_cookie sockc;
1884 	__be16 proto = 0;
1885 	int err;
1886 	int extra_len = 0;
1887 
1888 	/*
1889 	 *	Get and verify the address.
1890 	 */
1891 
1892 	if (saddr) {
1893 		if (msg->msg_namelen < sizeof(struct sockaddr))
1894 			return -EINVAL;
1895 		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1896 			proto = saddr->spkt_protocol;
1897 	} else
1898 		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1899 
1900 	/*
1901 	 *	Find the device first to size check it
1902 	 */
1903 
1904 	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1905 retry:
1906 	rcu_read_lock();
1907 	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1908 	err = -ENODEV;
1909 	if (dev == NULL)
1910 		goto out_unlock;
1911 
1912 	err = -ENETDOWN;
1913 	if (!(dev->flags & IFF_UP))
1914 		goto out_unlock;
1915 
1916 	/*
1917 	 * You may not queue a frame bigger than the mtu. This is the lowest level
1918 	 * raw protocol and you must do your own fragmentation at this level.
1919 	 */
1920 
1921 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1922 		if (!netif_supports_nofcs(dev)) {
1923 			err = -EPROTONOSUPPORT;
1924 			goto out_unlock;
1925 		}
1926 		extra_len = 4; /* We're doing our own CRC */
1927 	}
1928 
1929 	err = -EMSGSIZE;
1930 	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1931 		goto out_unlock;
1932 
1933 	if (!skb) {
1934 		size_t reserved = LL_RESERVED_SPACE(dev);
1935 		int tlen = dev->needed_tailroom;
1936 		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1937 
1938 		rcu_read_unlock();
1939 		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1940 		if (skb == NULL)
1941 			return -ENOBUFS;
1942 		/* FIXME: Save some space for broken drivers that write a hard
1943 		 * header at transmission time by themselves. PPP is the notable
1944 		 * one here. This should really be fixed at the driver level.
1945 		 */
1946 		skb_reserve(skb, reserved);
1947 		skb_reset_network_header(skb);
1948 
1949 		/* Try to align data part correctly */
1950 		if (hhlen) {
1951 			skb->data -= hhlen;
1952 			skb->tail -= hhlen;
1953 			if (len < hhlen)
1954 				skb_reset_network_header(skb);
1955 		}
1956 		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1957 		if (err)
1958 			goto out_free;
1959 		goto retry;
1960 	}
1961 
1962 	if (!dev_validate_header(dev, skb->data, len)) {
1963 		err = -EINVAL;
1964 		goto out_unlock;
1965 	}
1966 	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1967 	    !packet_extra_vlan_len_allowed(dev, skb)) {
1968 		err = -EMSGSIZE;
1969 		goto out_unlock;
1970 	}
1971 
1972 	sockcm_init(&sockc, sk);
1973 	if (msg->msg_controllen) {
1974 		err = sock_cmsg_send(sk, msg, &sockc);
1975 		if (unlikely(err))
1976 			goto out_unlock;
1977 	}
1978 
1979 	skb->protocol = proto;
1980 	skb->dev = dev;
1981 	skb->priority = sk->sk_priority;
1982 	skb->mark = sk->sk_mark;
1983 	skb->tstamp = sockc.transmit_time;
1984 
1985 	skb_setup_tx_timestamp(skb, sockc.tsflags);
1986 
1987 	if (unlikely(extra_len == 4))
1988 		skb->no_fcs = 1;
1989 
1990 	packet_parse_headers(skb, sock);
1991 
1992 	dev_queue_xmit(skb);
1993 	rcu_read_unlock();
1994 	return len;
1995 
1996 out_unlock:
1997 	rcu_read_unlock();
1998 out_free:
1999 	kfree_skb(skb);
2000 	return err;
2001 }
2002 
2003 static unsigned int run_filter(struct sk_buff *skb,
2004 			       const struct sock *sk,
2005 			       unsigned int res)
2006 {
2007 	struct sk_filter *filter;
2008 
2009 	rcu_read_lock();
2010 	filter = rcu_dereference(sk->sk_filter);
2011 	if (filter != NULL)
2012 		res = bpf_prog_run_clear_cb(filter->prog, skb);
2013 	rcu_read_unlock();
2014 
2015 	return res;
2016 }
2017 
2018 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2019 			   size_t *len)
2020 {
2021 	struct virtio_net_hdr vnet_hdr;
2022 
2023 	if (*len < sizeof(vnet_hdr))
2024 		return -EINVAL;
2025 	*len -= sizeof(vnet_hdr);
2026 
2027 	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2028 		return -EINVAL;
2029 
2030 	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2031 }
2032 
2033 /*
2034  * This function makes lazy skb cloning in hope that most of packets
2035  * are discarded by BPF.
2036  *
2037  * Note tricky part: we DO mangle shared skb! skb->data, skb->len
2038  * and skb->cb are mangled. It works because (and until) packets
2039  * falling here are owned by current CPU. Output packets are cloned
2040  * by dev_queue_xmit_nit(), input packets are processed by net_bh
2041  * sequencially, so that if we return skb to original state on exit,
2042  * we will not harm anyone.
2043  */
2044 
2045 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2046 		      struct packet_type *pt, struct net_device *orig_dev)
2047 {
2048 	struct sock *sk;
2049 	struct sockaddr_ll *sll;
2050 	struct packet_sock *po;
2051 	u8 *skb_head = skb->data;
2052 	int skb_len = skb->len;
2053 	unsigned int snaplen, res;
2054 	bool is_drop_n_account = false;
2055 
2056 	if (skb->pkt_type == PACKET_LOOPBACK)
2057 		goto drop;
2058 
2059 	sk = pt->af_packet_priv;
2060 	po = pkt_sk(sk);
2061 
2062 	if (!net_eq(dev_net(dev), sock_net(sk)))
2063 		goto drop;
2064 
2065 	skb->dev = dev;
2066 
2067 	if (dev->header_ops) {
2068 		/* The device has an explicit notion of ll header,
2069 		 * exported to higher levels.
2070 		 *
2071 		 * Otherwise, the device hides details of its frame
2072 		 * structure, so that corresponding packet head is
2073 		 * never delivered to user.
2074 		 */
2075 		if (sk->sk_type != SOCK_DGRAM)
2076 			skb_push(skb, skb->data - skb_mac_header(skb));
2077 		else if (skb->pkt_type == PACKET_OUTGOING) {
2078 			/* Special case: outgoing packets have ll header at head */
2079 			skb_pull(skb, skb_network_offset(skb));
2080 		}
2081 	}
2082 
2083 	snaplen = skb->len;
2084 
2085 	res = run_filter(skb, sk, snaplen);
2086 	if (!res)
2087 		goto drop_n_restore;
2088 	if (snaplen > res)
2089 		snaplen = res;
2090 
2091 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2092 		goto drop_n_acct;
2093 
2094 	if (skb_shared(skb)) {
2095 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2096 		if (nskb == NULL)
2097 			goto drop_n_acct;
2098 
2099 		if (skb_head != skb->data) {
2100 			skb->data = skb_head;
2101 			skb->len = skb_len;
2102 		}
2103 		consume_skb(skb);
2104 		skb = nskb;
2105 	}
2106 
2107 	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2108 
2109 	sll = &PACKET_SKB_CB(skb)->sa.ll;
2110 	sll->sll_hatype = dev->type;
2111 	sll->sll_pkttype = skb->pkt_type;
2112 	if (unlikely(po->origdev))
2113 		sll->sll_ifindex = orig_dev->ifindex;
2114 	else
2115 		sll->sll_ifindex = dev->ifindex;
2116 
2117 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2118 
2119 	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2120 	 * Use their space for storing the original skb length.
2121 	 */
2122 	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2123 
2124 	if (pskb_trim(skb, snaplen))
2125 		goto drop_n_acct;
2126 
2127 	skb_set_owner_r(skb, sk);
2128 	skb->dev = NULL;
2129 	skb_dst_drop(skb);
2130 
2131 	/* drop conntrack reference */
2132 	nf_reset_ct(skb);
2133 
2134 	spin_lock(&sk->sk_receive_queue.lock);
2135 	po->stats.stats1.tp_packets++;
2136 	sock_skb_set_dropcount(sk, skb);
2137 	__skb_queue_tail(&sk->sk_receive_queue, skb);
2138 	spin_unlock(&sk->sk_receive_queue.lock);
2139 	sk->sk_data_ready(sk);
2140 	return 0;
2141 
2142 drop_n_acct:
2143 	is_drop_n_account = true;
2144 	atomic_inc(&po->tp_drops);
2145 	atomic_inc(&sk->sk_drops);
2146 
2147 drop_n_restore:
2148 	if (skb_head != skb->data && skb_shared(skb)) {
2149 		skb->data = skb_head;
2150 		skb->len = skb_len;
2151 	}
2152 drop:
2153 	if (!is_drop_n_account)
2154 		consume_skb(skb);
2155 	else
2156 		kfree_skb(skb);
2157 	return 0;
2158 }
2159 
2160 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2161 		       struct packet_type *pt, struct net_device *orig_dev)
2162 {
2163 	struct sock *sk;
2164 	struct packet_sock *po;
2165 	struct sockaddr_ll *sll;
2166 	union tpacket_uhdr h;
2167 	u8 *skb_head = skb->data;
2168 	int skb_len = skb->len;
2169 	unsigned int snaplen, res;
2170 	unsigned long status = TP_STATUS_USER;
2171 	unsigned short macoff, netoff, hdrlen;
2172 	struct sk_buff *copy_skb = NULL;
2173 	struct timespec64 ts;
2174 	__u32 ts_status;
2175 	bool is_drop_n_account = false;
2176 	bool do_vnet = false;
2177 
2178 	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2179 	 * We may add members to them until current aligned size without forcing
2180 	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2181 	 */
2182 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2183 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2184 
2185 	if (skb->pkt_type == PACKET_LOOPBACK)
2186 		goto drop;
2187 
2188 	sk = pt->af_packet_priv;
2189 	po = pkt_sk(sk);
2190 
2191 	if (!net_eq(dev_net(dev), sock_net(sk)))
2192 		goto drop;
2193 
2194 	if (dev->header_ops) {
2195 		if (sk->sk_type != SOCK_DGRAM)
2196 			skb_push(skb, skb->data - skb_mac_header(skb));
2197 		else if (skb->pkt_type == PACKET_OUTGOING) {
2198 			/* Special case: outgoing packets have ll header at head */
2199 			skb_pull(skb, skb_network_offset(skb));
2200 		}
2201 	}
2202 
2203 	snaplen = skb->len;
2204 
2205 	res = run_filter(skb, sk, snaplen);
2206 	if (!res)
2207 		goto drop_n_restore;
2208 
2209 	/* If we are flooded, just give up */
2210 	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2211 		atomic_inc(&po->tp_drops);
2212 		goto drop_n_restore;
2213 	}
2214 
2215 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2216 		status |= TP_STATUS_CSUMNOTREADY;
2217 	else if (skb->pkt_type != PACKET_OUTGOING &&
2218 		 (skb->ip_summed == CHECKSUM_COMPLETE ||
2219 		  skb_csum_unnecessary(skb)))
2220 		status |= TP_STATUS_CSUM_VALID;
2221 
2222 	if (snaplen > res)
2223 		snaplen = res;
2224 
2225 	if (sk->sk_type == SOCK_DGRAM) {
2226 		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2227 				  po->tp_reserve;
2228 	} else {
2229 		unsigned int maclen = skb_network_offset(skb);
2230 		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2231 				       (maclen < 16 ? 16 : maclen)) +
2232 				       po->tp_reserve;
2233 		if (po->has_vnet_hdr) {
2234 			netoff += sizeof(struct virtio_net_hdr);
2235 			do_vnet = true;
2236 		}
2237 		macoff = netoff - maclen;
2238 	}
2239 	if (po->tp_version <= TPACKET_V2) {
2240 		if (macoff + snaplen > po->rx_ring.frame_size) {
2241 			if (po->copy_thresh &&
2242 			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2243 				if (skb_shared(skb)) {
2244 					copy_skb = skb_clone(skb, GFP_ATOMIC);
2245 				} else {
2246 					copy_skb = skb_get(skb);
2247 					skb_head = skb->data;
2248 				}
2249 				if (copy_skb)
2250 					skb_set_owner_r(copy_skb, sk);
2251 			}
2252 			snaplen = po->rx_ring.frame_size - macoff;
2253 			if ((int)snaplen < 0) {
2254 				snaplen = 0;
2255 				do_vnet = false;
2256 			}
2257 		}
2258 	} else if (unlikely(macoff + snaplen >
2259 			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2260 		u32 nval;
2261 
2262 		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2263 		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2264 			    snaplen, nval, macoff);
2265 		snaplen = nval;
2266 		if (unlikely((int)snaplen < 0)) {
2267 			snaplen = 0;
2268 			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2269 			do_vnet = false;
2270 		}
2271 	}
2272 	spin_lock(&sk->sk_receive_queue.lock);
2273 	h.raw = packet_current_rx_frame(po, skb,
2274 					TP_STATUS_KERNEL, (macoff+snaplen));
2275 	if (!h.raw)
2276 		goto drop_n_account;
2277 
2278 	if (do_vnet &&
2279 	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
2280 				    sizeof(struct virtio_net_hdr),
2281 				    vio_le(), true, 0))
2282 		goto drop_n_account;
2283 
2284 	if (po->tp_version <= TPACKET_V2) {
2285 		packet_increment_rx_head(po, &po->rx_ring);
2286 	/*
2287 	 * LOSING will be reported till you read the stats,
2288 	 * because it's COR - Clear On Read.
2289 	 * Anyways, moving it for V1/V2 only as V3 doesn't need this
2290 	 * at packet level.
2291 	 */
2292 		if (atomic_read(&po->tp_drops))
2293 			status |= TP_STATUS_LOSING;
2294 	}
2295 
2296 	po->stats.stats1.tp_packets++;
2297 	if (copy_skb) {
2298 		status |= TP_STATUS_COPY;
2299 		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2300 	}
2301 	spin_unlock(&sk->sk_receive_queue.lock);
2302 
2303 	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2304 
2305 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2306 		ktime_get_real_ts64(&ts);
2307 
2308 	status |= ts_status;
2309 
2310 	switch (po->tp_version) {
2311 	case TPACKET_V1:
2312 		h.h1->tp_len = skb->len;
2313 		h.h1->tp_snaplen = snaplen;
2314 		h.h1->tp_mac = macoff;
2315 		h.h1->tp_net = netoff;
2316 		h.h1->tp_sec = ts.tv_sec;
2317 		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2318 		hdrlen = sizeof(*h.h1);
2319 		break;
2320 	case TPACKET_V2:
2321 		h.h2->tp_len = skb->len;
2322 		h.h2->tp_snaplen = snaplen;
2323 		h.h2->tp_mac = macoff;
2324 		h.h2->tp_net = netoff;
2325 		h.h2->tp_sec = ts.tv_sec;
2326 		h.h2->tp_nsec = ts.tv_nsec;
2327 		if (skb_vlan_tag_present(skb)) {
2328 			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2329 			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2330 			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2331 		} else {
2332 			h.h2->tp_vlan_tci = 0;
2333 			h.h2->tp_vlan_tpid = 0;
2334 		}
2335 		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2336 		hdrlen = sizeof(*h.h2);
2337 		break;
2338 	case TPACKET_V3:
2339 		/* tp_nxt_offset,vlan are already populated above.
2340 		 * So DONT clear those fields here
2341 		 */
2342 		h.h3->tp_status |= status;
2343 		h.h3->tp_len = skb->len;
2344 		h.h3->tp_snaplen = snaplen;
2345 		h.h3->tp_mac = macoff;
2346 		h.h3->tp_net = netoff;
2347 		h.h3->tp_sec  = ts.tv_sec;
2348 		h.h3->tp_nsec = ts.tv_nsec;
2349 		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2350 		hdrlen = sizeof(*h.h3);
2351 		break;
2352 	default:
2353 		BUG();
2354 	}
2355 
2356 	sll = h.raw + TPACKET_ALIGN(hdrlen);
2357 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2358 	sll->sll_family = AF_PACKET;
2359 	sll->sll_hatype = dev->type;
2360 	sll->sll_protocol = skb->protocol;
2361 	sll->sll_pkttype = skb->pkt_type;
2362 	if (unlikely(po->origdev))
2363 		sll->sll_ifindex = orig_dev->ifindex;
2364 	else
2365 		sll->sll_ifindex = dev->ifindex;
2366 
2367 	smp_mb();
2368 
2369 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2370 	if (po->tp_version <= TPACKET_V2) {
2371 		u8 *start, *end;
2372 
2373 		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2374 					macoff + snaplen);
2375 
2376 		for (start = h.raw; start < end; start += PAGE_SIZE)
2377 			flush_dcache_page(pgv_to_page(start));
2378 	}
2379 	smp_wmb();
2380 #endif
2381 
2382 	if (po->tp_version <= TPACKET_V2) {
2383 		__packet_set_status(po, h.raw, status);
2384 		sk->sk_data_ready(sk);
2385 	} else {
2386 		prb_clear_blk_fill_status(&po->rx_ring);
2387 	}
2388 
2389 drop_n_restore:
2390 	if (skb_head != skb->data && skb_shared(skb)) {
2391 		skb->data = skb_head;
2392 		skb->len = skb_len;
2393 	}
2394 drop:
2395 	if (!is_drop_n_account)
2396 		consume_skb(skb);
2397 	else
2398 		kfree_skb(skb);
2399 	return 0;
2400 
2401 drop_n_account:
2402 	spin_unlock(&sk->sk_receive_queue.lock);
2403 	atomic_inc(&po->tp_drops);
2404 	is_drop_n_account = true;
2405 
2406 	sk->sk_data_ready(sk);
2407 	kfree_skb(copy_skb);
2408 	goto drop_n_restore;
2409 }
2410 
2411 static void tpacket_destruct_skb(struct sk_buff *skb)
2412 {
2413 	struct packet_sock *po = pkt_sk(skb->sk);
2414 
2415 	if (likely(po->tx_ring.pg_vec)) {
2416 		void *ph;
2417 		__u32 ts;
2418 
2419 		ph = skb_zcopy_get_nouarg(skb);
2420 		packet_dec_pending(&po->tx_ring);
2421 
2422 		ts = __packet_set_timestamp(po, ph, skb);
2423 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2424 
2425 		if (!packet_read_pending(&po->tx_ring))
2426 			complete(&po->skb_completion);
2427 	}
2428 
2429 	sock_wfree(skb);
2430 }
2431 
2432 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2433 {
2434 	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2435 	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2436 	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2437 	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2438 		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2439 			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2440 			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2441 
2442 	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2443 		return -EINVAL;
2444 
2445 	return 0;
2446 }
2447 
2448 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2449 				 struct virtio_net_hdr *vnet_hdr)
2450 {
2451 	if (*len < sizeof(*vnet_hdr))
2452 		return -EINVAL;
2453 	*len -= sizeof(*vnet_hdr);
2454 
2455 	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2456 		return -EFAULT;
2457 
2458 	return __packet_snd_vnet_parse(vnet_hdr, *len);
2459 }
2460 
2461 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2462 		void *frame, struct net_device *dev, void *data, int tp_len,
2463 		__be16 proto, unsigned char *addr, int hlen, int copylen,
2464 		const struct sockcm_cookie *sockc)
2465 {
2466 	union tpacket_uhdr ph;
2467 	int to_write, offset, len, nr_frags, len_max;
2468 	struct socket *sock = po->sk.sk_socket;
2469 	struct page *page;
2470 	int err;
2471 
2472 	ph.raw = frame;
2473 
2474 	skb->protocol = proto;
2475 	skb->dev = dev;
2476 	skb->priority = po->sk.sk_priority;
2477 	skb->mark = po->sk.sk_mark;
2478 	skb->tstamp = sockc->transmit_time;
2479 	skb_setup_tx_timestamp(skb, sockc->tsflags);
2480 	skb_zcopy_set_nouarg(skb, ph.raw);
2481 
2482 	skb_reserve(skb, hlen);
2483 	skb_reset_network_header(skb);
2484 
2485 	to_write = tp_len;
2486 
2487 	if (sock->type == SOCK_DGRAM) {
2488 		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2489 				NULL, tp_len);
2490 		if (unlikely(err < 0))
2491 			return -EINVAL;
2492 	} else if (copylen) {
2493 		int hdrlen = min_t(int, copylen, tp_len);
2494 
2495 		skb_push(skb, dev->hard_header_len);
2496 		skb_put(skb, copylen - dev->hard_header_len);
2497 		err = skb_store_bits(skb, 0, data, hdrlen);
2498 		if (unlikely(err))
2499 			return err;
2500 		if (!dev_validate_header(dev, skb->data, hdrlen))
2501 			return -EINVAL;
2502 
2503 		data += hdrlen;
2504 		to_write -= hdrlen;
2505 	}
2506 
2507 	offset = offset_in_page(data);
2508 	len_max = PAGE_SIZE - offset;
2509 	len = ((to_write > len_max) ? len_max : to_write);
2510 
2511 	skb->data_len = to_write;
2512 	skb->len += to_write;
2513 	skb->truesize += to_write;
2514 	refcount_add(to_write, &po->sk.sk_wmem_alloc);
2515 
2516 	while (likely(to_write)) {
2517 		nr_frags = skb_shinfo(skb)->nr_frags;
2518 
2519 		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2520 			pr_err("Packet exceed the number of skb frags(%lu)\n",
2521 			       MAX_SKB_FRAGS);
2522 			return -EFAULT;
2523 		}
2524 
2525 		page = pgv_to_page(data);
2526 		data += len;
2527 		flush_dcache_page(page);
2528 		get_page(page);
2529 		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2530 		to_write -= len;
2531 		offset = 0;
2532 		len_max = PAGE_SIZE;
2533 		len = ((to_write > len_max) ? len_max : to_write);
2534 	}
2535 
2536 	packet_parse_headers(skb, sock);
2537 
2538 	return tp_len;
2539 }
2540 
2541 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2542 				int size_max, void **data)
2543 {
2544 	union tpacket_uhdr ph;
2545 	int tp_len, off;
2546 
2547 	ph.raw = frame;
2548 
2549 	switch (po->tp_version) {
2550 	case TPACKET_V3:
2551 		if (ph.h3->tp_next_offset != 0) {
2552 			pr_warn_once("variable sized slot not supported");
2553 			return -EINVAL;
2554 		}
2555 		tp_len = ph.h3->tp_len;
2556 		break;
2557 	case TPACKET_V2:
2558 		tp_len = ph.h2->tp_len;
2559 		break;
2560 	default:
2561 		tp_len = ph.h1->tp_len;
2562 		break;
2563 	}
2564 	if (unlikely(tp_len > size_max)) {
2565 		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2566 		return -EMSGSIZE;
2567 	}
2568 
2569 	if (unlikely(po->tp_tx_has_off)) {
2570 		int off_min, off_max;
2571 
2572 		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2573 		off_max = po->tx_ring.frame_size - tp_len;
2574 		if (po->sk.sk_type == SOCK_DGRAM) {
2575 			switch (po->tp_version) {
2576 			case TPACKET_V3:
2577 				off = ph.h3->tp_net;
2578 				break;
2579 			case TPACKET_V2:
2580 				off = ph.h2->tp_net;
2581 				break;
2582 			default:
2583 				off = ph.h1->tp_net;
2584 				break;
2585 			}
2586 		} else {
2587 			switch (po->tp_version) {
2588 			case TPACKET_V3:
2589 				off = ph.h3->tp_mac;
2590 				break;
2591 			case TPACKET_V2:
2592 				off = ph.h2->tp_mac;
2593 				break;
2594 			default:
2595 				off = ph.h1->tp_mac;
2596 				break;
2597 			}
2598 		}
2599 		if (unlikely((off < off_min) || (off_max < off)))
2600 			return -EINVAL;
2601 	} else {
2602 		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2603 	}
2604 
2605 	*data = frame + off;
2606 	return tp_len;
2607 }
2608 
2609 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2610 {
2611 	struct sk_buff *skb = NULL;
2612 	struct net_device *dev;
2613 	struct virtio_net_hdr *vnet_hdr = NULL;
2614 	struct sockcm_cookie sockc;
2615 	__be16 proto;
2616 	int err, reserve = 0;
2617 	void *ph;
2618 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2619 	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2620 	unsigned char *addr = NULL;
2621 	int tp_len, size_max;
2622 	void *data;
2623 	int len_sum = 0;
2624 	int status = TP_STATUS_AVAILABLE;
2625 	int hlen, tlen, copylen = 0;
2626 	long timeo = 0;
2627 
2628 	mutex_lock(&po->pg_vec_lock);
2629 
2630 	/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2631 	 * we need to confirm it under protection of pg_vec_lock.
2632 	 */
2633 	if (unlikely(!po->tx_ring.pg_vec)) {
2634 		err = -EBUSY;
2635 		goto out;
2636 	}
2637 	if (likely(saddr == NULL)) {
2638 		dev	= packet_cached_dev_get(po);
2639 		proto	= po->num;
2640 	} else {
2641 		err = -EINVAL;
2642 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2643 			goto out;
2644 		if (msg->msg_namelen < (saddr->sll_halen
2645 					+ offsetof(struct sockaddr_ll,
2646 						sll_addr)))
2647 			goto out;
2648 		proto	= saddr->sll_protocol;
2649 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2650 		if (po->sk.sk_socket->type == SOCK_DGRAM) {
2651 			if (dev && msg->msg_namelen < dev->addr_len +
2652 				   offsetof(struct sockaddr_ll, sll_addr))
2653 				goto out_put;
2654 			addr = saddr->sll_addr;
2655 		}
2656 	}
2657 
2658 	err = -ENXIO;
2659 	if (unlikely(dev == NULL))
2660 		goto out;
2661 	err = -ENETDOWN;
2662 	if (unlikely(!(dev->flags & IFF_UP)))
2663 		goto out_put;
2664 
2665 	sockcm_init(&sockc, &po->sk);
2666 	if (msg->msg_controllen) {
2667 		err = sock_cmsg_send(&po->sk, msg, &sockc);
2668 		if (unlikely(err))
2669 			goto out_put;
2670 	}
2671 
2672 	if (po->sk.sk_socket->type == SOCK_RAW)
2673 		reserve = dev->hard_header_len;
2674 	size_max = po->tx_ring.frame_size
2675 		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2676 
2677 	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2678 		size_max = dev->mtu + reserve + VLAN_HLEN;
2679 
2680 	reinit_completion(&po->skb_completion);
2681 
2682 	do {
2683 		ph = packet_current_frame(po, &po->tx_ring,
2684 					  TP_STATUS_SEND_REQUEST);
2685 		if (unlikely(ph == NULL)) {
2686 			if (need_wait && skb) {
2687 				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2688 				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2689 				if (timeo <= 0) {
2690 					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2691 					goto out_put;
2692 				}
2693 			}
2694 			/* check for additional frames */
2695 			continue;
2696 		}
2697 
2698 		skb = NULL;
2699 		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2700 		if (tp_len < 0)
2701 			goto tpacket_error;
2702 
2703 		status = TP_STATUS_SEND_REQUEST;
2704 		hlen = LL_RESERVED_SPACE(dev);
2705 		tlen = dev->needed_tailroom;
2706 		if (po->has_vnet_hdr) {
2707 			vnet_hdr = data;
2708 			data += sizeof(*vnet_hdr);
2709 			tp_len -= sizeof(*vnet_hdr);
2710 			if (tp_len < 0 ||
2711 			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2712 				tp_len = -EINVAL;
2713 				goto tpacket_error;
2714 			}
2715 			copylen = __virtio16_to_cpu(vio_le(),
2716 						    vnet_hdr->hdr_len);
2717 		}
2718 		copylen = max_t(int, copylen, dev->hard_header_len);
2719 		skb = sock_alloc_send_skb(&po->sk,
2720 				hlen + tlen + sizeof(struct sockaddr_ll) +
2721 				(copylen - dev->hard_header_len),
2722 				!need_wait, &err);
2723 
2724 		if (unlikely(skb == NULL)) {
2725 			/* we assume the socket was initially writeable ... */
2726 			if (likely(len_sum > 0))
2727 				err = len_sum;
2728 			goto out_status;
2729 		}
2730 		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2731 					  addr, hlen, copylen, &sockc);
2732 		if (likely(tp_len >= 0) &&
2733 		    tp_len > dev->mtu + reserve &&
2734 		    !po->has_vnet_hdr &&
2735 		    !packet_extra_vlan_len_allowed(dev, skb))
2736 			tp_len = -EMSGSIZE;
2737 
2738 		if (unlikely(tp_len < 0)) {
2739 tpacket_error:
2740 			if (po->tp_loss) {
2741 				__packet_set_status(po, ph,
2742 						TP_STATUS_AVAILABLE);
2743 				packet_increment_head(&po->tx_ring);
2744 				kfree_skb(skb);
2745 				continue;
2746 			} else {
2747 				status = TP_STATUS_WRONG_FORMAT;
2748 				err = tp_len;
2749 				goto out_status;
2750 			}
2751 		}
2752 
2753 		if (po->has_vnet_hdr) {
2754 			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2755 				tp_len = -EINVAL;
2756 				goto tpacket_error;
2757 			}
2758 			virtio_net_hdr_set_proto(skb, vnet_hdr);
2759 		}
2760 
2761 		skb->destructor = tpacket_destruct_skb;
2762 		__packet_set_status(po, ph, TP_STATUS_SENDING);
2763 		packet_inc_pending(&po->tx_ring);
2764 
2765 		status = TP_STATUS_SEND_REQUEST;
2766 		err = po->xmit(skb);
2767 		if (unlikely(err > 0)) {
2768 			err = net_xmit_errno(err);
2769 			if (err && __packet_get_status(po, ph) ==
2770 				   TP_STATUS_AVAILABLE) {
2771 				/* skb was destructed already */
2772 				skb = NULL;
2773 				goto out_status;
2774 			}
2775 			/*
2776 			 * skb was dropped but not destructed yet;
2777 			 * let's treat it like congestion or err < 0
2778 			 */
2779 			err = 0;
2780 		}
2781 		packet_increment_head(&po->tx_ring);
2782 		len_sum += tp_len;
2783 	} while (likely((ph != NULL) ||
2784 		/* Note: packet_read_pending() might be slow if we have
2785 		 * to call it as it's per_cpu variable, but in fast-path
2786 		 * we already short-circuit the loop with the first
2787 		 * condition, and luckily don't have to go that path
2788 		 * anyway.
2789 		 */
2790 		 (need_wait && packet_read_pending(&po->tx_ring))));
2791 
2792 	err = len_sum;
2793 	goto out_put;
2794 
2795 out_status:
2796 	__packet_set_status(po, ph, status);
2797 	kfree_skb(skb);
2798 out_put:
2799 	dev_put(dev);
2800 out:
2801 	mutex_unlock(&po->pg_vec_lock);
2802 	return err;
2803 }
2804 
2805 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2806 				        size_t reserve, size_t len,
2807 				        size_t linear, int noblock,
2808 				        int *err)
2809 {
2810 	struct sk_buff *skb;
2811 
2812 	/* Under a page?  Don't bother with paged skb. */
2813 	if (prepad + len < PAGE_SIZE || !linear)
2814 		linear = len;
2815 
2816 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2817 				   err, 0);
2818 	if (!skb)
2819 		return NULL;
2820 
2821 	skb_reserve(skb, reserve);
2822 	skb_put(skb, linear);
2823 	skb->data_len = len - linear;
2824 	skb->len += len - linear;
2825 
2826 	return skb;
2827 }
2828 
2829 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2830 {
2831 	struct sock *sk = sock->sk;
2832 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2833 	struct sk_buff *skb;
2834 	struct net_device *dev;
2835 	__be16 proto;
2836 	unsigned char *addr = NULL;
2837 	int err, reserve = 0;
2838 	struct sockcm_cookie sockc;
2839 	struct virtio_net_hdr vnet_hdr = { 0 };
2840 	int offset = 0;
2841 	struct packet_sock *po = pkt_sk(sk);
2842 	bool has_vnet_hdr = false;
2843 	int hlen, tlen, linear;
2844 	int extra_len = 0;
2845 
2846 	/*
2847 	 *	Get and verify the address.
2848 	 */
2849 
2850 	if (likely(saddr == NULL)) {
2851 		dev	= packet_cached_dev_get(po);
2852 		proto	= po->num;
2853 	} else {
2854 		err = -EINVAL;
2855 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2856 			goto out;
2857 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2858 			goto out;
2859 		proto	= saddr->sll_protocol;
2860 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2861 		if (sock->type == SOCK_DGRAM) {
2862 			if (dev && msg->msg_namelen < dev->addr_len +
2863 				   offsetof(struct sockaddr_ll, sll_addr))
2864 				goto out_unlock;
2865 			addr = saddr->sll_addr;
2866 		}
2867 	}
2868 
2869 	err = -ENXIO;
2870 	if (unlikely(dev == NULL))
2871 		goto out_unlock;
2872 	err = -ENETDOWN;
2873 	if (unlikely(!(dev->flags & IFF_UP)))
2874 		goto out_unlock;
2875 
2876 	sockcm_init(&sockc, sk);
2877 	sockc.mark = sk->sk_mark;
2878 	if (msg->msg_controllen) {
2879 		err = sock_cmsg_send(sk, msg, &sockc);
2880 		if (unlikely(err))
2881 			goto out_unlock;
2882 	}
2883 
2884 	if (sock->type == SOCK_RAW)
2885 		reserve = dev->hard_header_len;
2886 	if (po->has_vnet_hdr) {
2887 		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2888 		if (err)
2889 			goto out_unlock;
2890 		has_vnet_hdr = true;
2891 	}
2892 
2893 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2894 		if (!netif_supports_nofcs(dev)) {
2895 			err = -EPROTONOSUPPORT;
2896 			goto out_unlock;
2897 		}
2898 		extra_len = 4; /* We're doing our own CRC */
2899 	}
2900 
2901 	err = -EMSGSIZE;
2902 	if (!vnet_hdr.gso_type &&
2903 	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2904 		goto out_unlock;
2905 
2906 	err = -ENOBUFS;
2907 	hlen = LL_RESERVED_SPACE(dev);
2908 	tlen = dev->needed_tailroom;
2909 	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2910 	linear = max(linear, min_t(int, len, dev->hard_header_len));
2911 	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2912 			       msg->msg_flags & MSG_DONTWAIT, &err);
2913 	if (skb == NULL)
2914 		goto out_unlock;
2915 
2916 	skb_reset_network_header(skb);
2917 
2918 	err = -EINVAL;
2919 	if (sock->type == SOCK_DGRAM) {
2920 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2921 		if (unlikely(offset < 0))
2922 			goto out_free;
2923 	} else if (reserve) {
2924 		skb_reserve(skb, -reserve);
2925 		if (len < reserve + sizeof(struct ipv6hdr) &&
2926 		    dev->min_header_len != dev->hard_header_len)
2927 			skb_reset_network_header(skb);
2928 	}
2929 
2930 	/* Returns -EFAULT on error */
2931 	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2932 	if (err)
2933 		goto out_free;
2934 
2935 	if (sock->type == SOCK_RAW &&
2936 	    !dev_validate_header(dev, skb->data, len)) {
2937 		err = -EINVAL;
2938 		goto out_free;
2939 	}
2940 
2941 	skb_setup_tx_timestamp(skb, sockc.tsflags);
2942 
2943 	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2944 	    !packet_extra_vlan_len_allowed(dev, skb)) {
2945 		err = -EMSGSIZE;
2946 		goto out_free;
2947 	}
2948 
2949 	skb->protocol = proto;
2950 	skb->dev = dev;
2951 	skb->priority = sk->sk_priority;
2952 	skb->mark = sockc.mark;
2953 	skb->tstamp = sockc.transmit_time;
2954 
2955 	if (has_vnet_hdr) {
2956 		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2957 		if (err)
2958 			goto out_free;
2959 		len += sizeof(vnet_hdr);
2960 		virtio_net_hdr_set_proto(skb, &vnet_hdr);
2961 	}
2962 
2963 	packet_parse_headers(skb, sock);
2964 
2965 	if (unlikely(extra_len == 4))
2966 		skb->no_fcs = 1;
2967 
2968 	err = po->xmit(skb);
2969 	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2970 		goto out_unlock;
2971 
2972 	dev_put(dev);
2973 
2974 	return len;
2975 
2976 out_free:
2977 	kfree_skb(skb);
2978 out_unlock:
2979 	if (dev)
2980 		dev_put(dev);
2981 out:
2982 	return err;
2983 }
2984 
2985 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2986 {
2987 	struct sock *sk = sock->sk;
2988 	struct packet_sock *po = pkt_sk(sk);
2989 
2990 	if (po->tx_ring.pg_vec)
2991 		return tpacket_snd(po, msg);
2992 	else
2993 		return packet_snd(sock, msg, len);
2994 }
2995 
2996 /*
2997  *	Close a PACKET socket. This is fairly simple. We immediately go
2998  *	to 'closed' state and remove our protocol entry in the device list.
2999  */
3000 
3001 static int packet_release(struct socket *sock)
3002 {
3003 	struct sock *sk = sock->sk;
3004 	struct packet_sock *po;
3005 	struct packet_fanout *f;
3006 	struct net *net;
3007 	union tpacket_req_u req_u;
3008 
3009 	if (!sk)
3010 		return 0;
3011 
3012 	net = sock_net(sk);
3013 	po = pkt_sk(sk);
3014 
3015 	mutex_lock(&net->packet.sklist_lock);
3016 	sk_del_node_init_rcu(sk);
3017 	mutex_unlock(&net->packet.sklist_lock);
3018 
3019 	preempt_disable();
3020 	sock_prot_inuse_add(net, sk->sk_prot, -1);
3021 	preempt_enable();
3022 
3023 	spin_lock(&po->bind_lock);
3024 	unregister_prot_hook(sk, false);
3025 	packet_cached_dev_reset(po);
3026 
3027 	if (po->prot_hook.dev) {
3028 		dev_put(po->prot_hook.dev);
3029 		po->prot_hook.dev = NULL;
3030 	}
3031 	spin_unlock(&po->bind_lock);
3032 
3033 	packet_flush_mclist(sk);
3034 
3035 	lock_sock(sk);
3036 	if (po->rx_ring.pg_vec) {
3037 		memset(&req_u, 0, sizeof(req_u));
3038 		packet_set_ring(sk, &req_u, 1, 0);
3039 	}
3040 
3041 	if (po->tx_ring.pg_vec) {
3042 		memset(&req_u, 0, sizeof(req_u));
3043 		packet_set_ring(sk, &req_u, 1, 1);
3044 	}
3045 	release_sock(sk);
3046 
3047 	f = fanout_release(sk);
3048 
3049 	synchronize_net();
3050 
3051 	kfree(po->rollover);
3052 	if (f) {
3053 		fanout_release_data(f);
3054 		kfree(f);
3055 	}
3056 	/*
3057 	 *	Now the socket is dead. No more input will appear.
3058 	 */
3059 	sock_orphan(sk);
3060 	sock->sk = NULL;
3061 
3062 	/* Purge queues */
3063 
3064 	skb_queue_purge(&sk->sk_receive_queue);
3065 	packet_free_pending(po);
3066 	sk_refcnt_debug_release(sk);
3067 
3068 	sock_put(sk);
3069 	return 0;
3070 }
3071 
3072 /*
3073  *	Attach a packet hook.
3074  */
3075 
3076 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3077 			  __be16 proto)
3078 {
3079 	struct packet_sock *po = pkt_sk(sk);
3080 	struct net_device *dev_curr;
3081 	__be16 proto_curr;
3082 	bool need_rehook;
3083 	struct net_device *dev = NULL;
3084 	int ret = 0;
3085 	bool unlisted = false;
3086 
3087 	lock_sock(sk);
3088 	spin_lock(&po->bind_lock);
3089 	rcu_read_lock();
3090 
3091 	if (po->fanout) {
3092 		ret = -EINVAL;
3093 		goto out_unlock;
3094 	}
3095 
3096 	if (name) {
3097 		dev = dev_get_by_name_rcu(sock_net(sk), name);
3098 		if (!dev) {
3099 			ret = -ENODEV;
3100 			goto out_unlock;
3101 		}
3102 	} else if (ifindex) {
3103 		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3104 		if (!dev) {
3105 			ret = -ENODEV;
3106 			goto out_unlock;
3107 		}
3108 	}
3109 
3110 	if (dev)
3111 		dev_hold(dev);
3112 
3113 	proto_curr = po->prot_hook.type;
3114 	dev_curr = po->prot_hook.dev;
3115 
3116 	need_rehook = proto_curr != proto || dev_curr != dev;
3117 
3118 	if (need_rehook) {
3119 		if (po->running) {
3120 			rcu_read_unlock();
3121 			/* prevents packet_notifier() from calling
3122 			 * register_prot_hook()
3123 			 */
3124 			po->num = 0;
3125 			__unregister_prot_hook(sk, true);
3126 			rcu_read_lock();
3127 			dev_curr = po->prot_hook.dev;
3128 			if (dev)
3129 				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3130 								 dev->ifindex);
3131 		}
3132 
3133 		BUG_ON(po->running);
3134 		po->num = proto;
3135 		po->prot_hook.type = proto;
3136 
3137 		if (unlikely(unlisted)) {
3138 			dev_put(dev);
3139 			po->prot_hook.dev = NULL;
3140 			po->ifindex = -1;
3141 			packet_cached_dev_reset(po);
3142 		} else {
3143 			po->prot_hook.dev = dev;
3144 			po->ifindex = dev ? dev->ifindex : 0;
3145 			packet_cached_dev_assign(po, dev);
3146 		}
3147 	}
3148 	if (dev_curr)
3149 		dev_put(dev_curr);
3150 
3151 	if (proto == 0 || !need_rehook)
3152 		goto out_unlock;
3153 
3154 	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3155 		register_prot_hook(sk);
3156 	} else {
3157 		sk->sk_err = ENETDOWN;
3158 		if (!sock_flag(sk, SOCK_DEAD))
3159 			sk->sk_error_report(sk);
3160 	}
3161 
3162 out_unlock:
3163 	rcu_read_unlock();
3164 	spin_unlock(&po->bind_lock);
3165 	release_sock(sk);
3166 	return ret;
3167 }
3168 
3169 /*
3170  *	Bind a packet socket to a device
3171  */
3172 
3173 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3174 			    int addr_len)
3175 {
3176 	struct sock *sk = sock->sk;
3177 	char name[sizeof(uaddr->sa_data) + 1];
3178 
3179 	/*
3180 	 *	Check legality
3181 	 */
3182 
3183 	if (addr_len != sizeof(struct sockaddr))
3184 		return -EINVAL;
3185 	/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
3186 	 * zero-terminated.
3187 	 */
3188 	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3189 	name[sizeof(uaddr->sa_data)] = 0;
3190 
3191 	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3192 }
3193 
3194 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3195 {
3196 	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3197 	struct sock *sk = sock->sk;
3198 
3199 	/*
3200 	 *	Check legality
3201 	 */
3202 
3203 	if (addr_len < sizeof(struct sockaddr_ll))
3204 		return -EINVAL;
3205 	if (sll->sll_family != AF_PACKET)
3206 		return -EINVAL;
3207 
3208 	return packet_do_bind(sk, NULL, sll->sll_ifindex,
3209 			      sll->sll_protocol ? : pkt_sk(sk)->num);
3210 }
3211 
3212 static struct proto packet_proto = {
3213 	.name	  = "PACKET",
3214 	.owner	  = THIS_MODULE,
3215 	.obj_size = sizeof(struct packet_sock),
3216 };
3217 
3218 /*
3219  *	Create a packet of type SOCK_PACKET.
3220  */
3221 
3222 static int packet_create(struct net *net, struct socket *sock, int protocol,
3223 			 int kern)
3224 {
3225 	struct sock *sk;
3226 	struct packet_sock *po;
3227 	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3228 	int err;
3229 
3230 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3231 		return -EPERM;
3232 	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3233 	    sock->type != SOCK_PACKET)
3234 		return -ESOCKTNOSUPPORT;
3235 
3236 	sock->state = SS_UNCONNECTED;
3237 
3238 	err = -ENOBUFS;
3239 	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3240 	if (sk == NULL)
3241 		goto out;
3242 
3243 	sock->ops = &packet_ops;
3244 	if (sock->type == SOCK_PACKET)
3245 		sock->ops = &packet_ops_spkt;
3246 
3247 	sock_init_data(sock, sk);
3248 
3249 	po = pkt_sk(sk);
3250 	init_completion(&po->skb_completion);
3251 	sk->sk_family = PF_PACKET;
3252 	po->num = proto;
3253 	po->xmit = dev_queue_xmit;
3254 
3255 	err = packet_alloc_pending(po);
3256 	if (err)
3257 		goto out2;
3258 
3259 	packet_cached_dev_reset(po);
3260 
3261 	sk->sk_destruct = packet_sock_destruct;
3262 	sk_refcnt_debug_inc(sk);
3263 
3264 	/*
3265 	 *	Attach a protocol block
3266 	 */
3267 
3268 	spin_lock_init(&po->bind_lock);
3269 	mutex_init(&po->pg_vec_lock);
3270 	po->rollover = NULL;
3271 	po->prot_hook.func = packet_rcv;
3272 
3273 	if (sock->type == SOCK_PACKET)
3274 		po->prot_hook.func = packet_rcv_spkt;
3275 
3276 	po->prot_hook.af_packet_priv = sk;
3277 
3278 	if (proto) {
3279 		po->prot_hook.type = proto;
3280 		__register_prot_hook(sk);
3281 	}
3282 
3283 	mutex_lock(&net->packet.sklist_lock);
3284 	sk_add_node_tail_rcu(sk, &net->packet.sklist);
3285 	mutex_unlock(&net->packet.sklist_lock);
3286 
3287 	preempt_disable();
3288 	sock_prot_inuse_add(net, &packet_proto, 1);
3289 	preempt_enable();
3290 
3291 	return 0;
3292 out2:
3293 	sk_free(sk);
3294 out:
3295 	return err;
3296 }
3297 
3298 /*
3299  *	Pull a packet from our receive queue and hand it to the user.
3300  *	If necessary we block.
3301  */
3302 
3303 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3304 			  int flags)
3305 {
3306 	struct sock *sk = sock->sk;
3307 	struct sk_buff *skb;
3308 	int copied, err;
3309 	int vnet_hdr_len = 0;
3310 	unsigned int origlen = 0;
3311 
3312 	err = -EINVAL;
3313 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3314 		goto out;
3315 
3316 #if 0
3317 	/* What error should we return now? EUNATTACH? */
3318 	if (pkt_sk(sk)->ifindex < 0)
3319 		return -ENODEV;
3320 #endif
3321 
3322 	if (flags & MSG_ERRQUEUE) {
3323 		err = sock_recv_errqueue(sk, msg, len,
3324 					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3325 		goto out;
3326 	}
3327 
3328 	/*
3329 	 *	Call the generic datagram receiver. This handles all sorts
3330 	 *	of horrible races and re-entrancy so we can forget about it
3331 	 *	in the protocol layers.
3332 	 *
3333 	 *	Now it will return ENETDOWN, if device have just gone down,
3334 	 *	but then it will block.
3335 	 */
3336 
3337 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3338 
3339 	/*
3340 	 *	An error occurred so return it. Because skb_recv_datagram()
3341 	 *	handles the blocking we don't see and worry about blocking
3342 	 *	retries.
3343 	 */
3344 
3345 	if (skb == NULL)
3346 		goto out;
3347 
3348 	packet_rcv_try_clear_pressure(pkt_sk(sk));
3349 
3350 	if (pkt_sk(sk)->has_vnet_hdr) {
3351 		err = packet_rcv_vnet(msg, skb, &len);
3352 		if (err)
3353 			goto out_free;
3354 		vnet_hdr_len = sizeof(struct virtio_net_hdr);
3355 	}
3356 
3357 	/* You lose any data beyond the buffer you gave. If it worries
3358 	 * a user program they can ask the device for its MTU
3359 	 * anyway.
3360 	 */
3361 	copied = skb->len;
3362 	if (copied > len) {
3363 		copied = len;
3364 		msg->msg_flags |= MSG_TRUNC;
3365 	}
3366 
3367 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3368 	if (err)
3369 		goto out_free;
3370 
3371 	if (sock->type != SOCK_PACKET) {
3372 		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3373 
3374 		/* Original length was stored in sockaddr_ll fields */
3375 		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3376 		sll->sll_family = AF_PACKET;
3377 		sll->sll_protocol = skb->protocol;
3378 	}
3379 
3380 	sock_recv_ts_and_drops(msg, sk, skb);
3381 
3382 	if (msg->msg_name) {
3383 		int copy_len;
3384 
3385 		/* If the address length field is there to be filled
3386 		 * in, we fill it in now.
3387 		 */
3388 		if (sock->type == SOCK_PACKET) {
3389 			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3390 			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3391 			copy_len = msg->msg_namelen;
3392 		} else {
3393 			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3394 
3395 			msg->msg_namelen = sll->sll_halen +
3396 				offsetof(struct sockaddr_ll, sll_addr);
3397 			copy_len = msg->msg_namelen;
3398 			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3399 				memset(msg->msg_name +
3400 				       offsetof(struct sockaddr_ll, sll_addr),
3401 				       0, sizeof(sll->sll_addr));
3402 				msg->msg_namelen = sizeof(struct sockaddr_ll);
3403 			}
3404 		}
3405 		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3406 	}
3407 
3408 	if (pkt_sk(sk)->auxdata) {
3409 		struct tpacket_auxdata aux;
3410 
3411 		aux.tp_status = TP_STATUS_USER;
3412 		if (skb->ip_summed == CHECKSUM_PARTIAL)
3413 			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3414 		else if (skb->pkt_type != PACKET_OUTGOING &&
3415 			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3416 			  skb_csum_unnecessary(skb)))
3417 			aux.tp_status |= TP_STATUS_CSUM_VALID;
3418 
3419 		aux.tp_len = origlen;
3420 		aux.tp_snaplen = skb->len;
3421 		aux.tp_mac = 0;
3422 		aux.tp_net = skb_network_offset(skb);
3423 		if (skb_vlan_tag_present(skb)) {
3424 			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3425 			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3426 			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3427 		} else {
3428 			aux.tp_vlan_tci = 0;
3429 			aux.tp_vlan_tpid = 0;
3430 		}
3431 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3432 	}
3433 
3434 	/*
3435 	 *	Free or return the buffer as appropriate. Again this
3436 	 *	hides all the races and re-entrancy issues from us.
3437 	 */
3438 	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3439 
3440 out_free:
3441 	skb_free_datagram(sk, skb);
3442 out:
3443 	return err;
3444 }
3445 
3446 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3447 			       int peer)
3448 {
3449 	struct net_device *dev;
3450 	struct sock *sk	= sock->sk;
3451 
3452 	if (peer)
3453 		return -EOPNOTSUPP;
3454 
3455 	uaddr->sa_family = AF_PACKET;
3456 	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3457 	rcu_read_lock();
3458 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3459 	if (dev)
3460 		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3461 	rcu_read_unlock();
3462 
3463 	return sizeof(*uaddr);
3464 }
3465 
3466 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3467 			  int peer)
3468 {
3469 	struct net_device *dev;
3470 	struct sock *sk = sock->sk;
3471 	struct packet_sock *po = pkt_sk(sk);
3472 	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3473 
3474 	if (peer)
3475 		return -EOPNOTSUPP;
3476 
3477 	sll->sll_family = AF_PACKET;
3478 	sll->sll_ifindex = po->ifindex;
3479 	sll->sll_protocol = po->num;
3480 	sll->sll_pkttype = 0;
3481 	rcu_read_lock();
3482 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3483 	if (dev) {
3484 		sll->sll_hatype = dev->type;
3485 		sll->sll_halen = dev->addr_len;
3486 		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3487 	} else {
3488 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3489 		sll->sll_halen = 0;
3490 	}
3491 	rcu_read_unlock();
3492 
3493 	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3494 }
3495 
3496 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3497 			 int what)
3498 {
3499 	switch (i->type) {
3500 	case PACKET_MR_MULTICAST:
3501 		if (i->alen != dev->addr_len)
3502 			return -EINVAL;
3503 		if (what > 0)
3504 			return dev_mc_add(dev, i->addr);
3505 		else
3506 			return dev_mc_del(dev, i->addr);
3507 		break;
3508 	case PACKET_MR_PROMISC:
3509 		return dev_set_promiscuity(dev, what);
3510 	case PACKET_MR_ALLMULTI:
3511 		return dev_set_allmulti(dev, what);
3512 	case PACKET_MR_UNICAST:
3513 		if (i->alen != dev->addr_len)
3514 			return -EINVAL;
3515 		if (what > 0)
3516 			return dev_uc_add(dev, i->addr);
3517 		else
3518 			return dev_uc_del(dev, i->addr);
3519 		break;
3520 	default:
3521 		break;
3522 	}
3523 	return 0;
3524 }
3525 
3526 static void packet_dev_mclist_delete(struct net_device *dev,
3527 				     struct packet_mclist **mlp)
3528 {
3529 	struct packet_mclist *ml;
3530 
3531 	while ((ml = *mlp) != NULL) {
3532 		if (ml->ifindex == dev->ifindex) {
3533 			packet_dev_mc(dev, ml, -1);
3534 			*mlp = ml->next;
3535 			kfree(ml);
3536 		} else
3537 			mlp = &ml->next;
3538 	}
3539 }
3540 
3541 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3542 {
3543 	struct packet_sock *po = pkt_sk(sk);
3544 	struct packet_mclist *ml, *i;
3545 	struct net_device *dev;
3546 	int err;
3547 
3548 	rtnl_lock();
3549 
3550 	err = -ENODEV;
3551 	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3552 	if (!dev)
3553 		goto done;
3554 
3555 	err = -EINVAL;
3556 	if (mreq->mr_alen > dev->addr_len)
3557 		goto done;
3558 
3559 	err = -ENOBUFS;
3560 	i = kmalloc(sizeof(*i), GFP_KERNEL);
3561 	if (i == NULL)
3562 		goto done;
3563 
3564 	err = 0;
3565 	for (ml = po->mclist; ml; ml = ml->next) {
3566 		if (ml->ifindex == mreq->mr_ifindex &&
3567 		    ml->type == mreq->mr_type &&
3568 		    ml->alen == mreq->mr_alen &&
3569 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3570 			ml->count++;
3571 			/* Free the new element ... */
3572 			kfree(i);
3573 			goto done;
3574 		}
3575 	}
3576 
3577 	i->type = mreq->mr_type;
3578 	i->ifindex = mreq->mr_ifindex;
3579 	i->alen = mreq->mr_alen;
3580 	memcpy(i->addr, mreq->mr_address, i->alen);
3581 	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3582 	i->count = 1;
3583 	i->next = po->mclist;
3584 	po->mclist = i;
3585 	err = packet_dev_mc(dev, i, 1);
3586 	if (err) {
3587 		po->mclist = i->next;
3588 		kfree(i);
3589 	}
3590 
3591 done:
3592 	rtnl_unlock();
3593 	return err;
3594 }
3595 
3596 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3597 {
3598 	struct packet_mclist *ml, **mlp;
3599 
3600 	rtnl_lock();
3601 
3602 	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3603 		if (ml->ifindex == mreq->mr_ifindex &&
3604 		    ml->type == mreq->mr_type &&
3605 		    ml->alen == mreq->mr_alen &&
3606 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3607 			if (--ml->count == 0) {
3608 				struct net_device *dev;
3609 				*mlp = ml->next;
3610 				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3611 				if (dev)
3612 					packet_dev_mc(dev, ml, -1);
3613 				kfree(ml);
3614 			}
3615 			break;
3616 		}
3617 	}
3618 	rtnl_unlock();
3619 	return 0;
3620 }
3621 
3622 static void packet_flush_mclist(struct sock *sk)
3623 {
3624 	struct packet_sock *po = pkt_sk(sk);
3625 	struct packet_mclist *ml;
3626 
3627 	if (!po->mclist)
3628 		return;
3629 
3630 	rtnl_lock();
3631 	while ((ml = po->mclist) != NULL) {
3632 		struct net_device *dev;
3633 
3634 		po->mclist = ml->next;
3635 		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3636 		if (dev != NULL)
3637 			packet_dev_mc(dev, ml, -1);
3638 		kfree(ml);
3639 	}
3640 	rtnl_unlock();
3641 }
3642 
3643 static int
3644 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3645 {
3646 	struct sock *sk = sock->sk;
3647 	struct packet_sock *po = pkt_sk(sk);
3648 	int ret;
3649 
3650 	if (level != SOL_PACKET)
3651 		return -ENOPROTOOPT;
3652 
3653 	switch (optname) {
3654 	case PACKET_ADD_MEMBERSHIP:
3655 	case PACKET_DROP_MEMBERSHIP:
3656 	{
3657 		struct packet_mreq_max mreq;
3658 		int len = optlen;
3659 		memset(&mreq, 0, sizeof(mreq));
3660 		if (len < sizeof(struct packet_mreq))
3661 			return -EINVAL;
3662 		if (len > sizeof(mreq))
3663 			len = sizeof(mreq);
3664 		if (copy_from_user(&mreq, optval, len))
3665 			return -EFAULT;
3666 		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3667 			return -EINVAL;
3668 		if (optname == PACKET_ADD_MEMBERSHIP)
3669 			ret = packet_mc_add(sk, &mreq);
3670 		else
3671 			ret = packet_mc_drop(sk, &mreq);
3672 		return ret;
3673 	}
3674 
3675 	case PACKET_RX_RING:
3676 	case PACKET_TX_RING:
3677 	{
3678 		union tpacket_req_u req_u;
3679 		int len;
3680 
3681 		lock_sock(sk);
3682 		switch (po->tp_version) {
3683 		case TPACKET_V1:
3684 		case TPACKET_V2:
3685 			len = sizeof(req_u.req);
3686 			break;
3687 		case TPACKET_V3:
3688 		default:
3689 			len = sizeof(req_u.req3);
3690 			break;
3691 		}
3692 		if (optlen < len) {
3693 			ret = -EINVAL;
3694 		} else {
3695 			if (copy_from_user(&req_u.req, optval, len))
3696 				ret = -EFAULT;
3697 			else
3698 				ret = packet_set_ring(sk, &req_u, 0,
3699 						    optname == PACKET_TX_RING);
3700 		}
3701 		release_sock(sk);
3702 		return ret;
3703 	}
3704 	case PACKET_COPY_THRESH:
3705 	{
3706 		int val;
3707 
3708 		if (optlen != sizeof(val))
3709 			return -EINVAL;
3710 		if (copy_from_user(&val, optval, sizeof(val)))
3711 			return -EFAULT;
3712 
3713 		pkt_sk(sk)->copy_thresh = val;
3714 		return 0;
3715 	}
3716 	case PACKET_VERSION:
3717 	{
3718 		int val;
3719 
3720 		if (optlen != sizeof(val))
3721 			return -EINVAL;
3722 		if (copy_from_user(&val, optval, sizeof(val)))
3723 			return -EFAULT;
3724 		switch (val) {
3725 		case TPACKET_V1:
3726 		case TPACKET_V2:
3727 		case TPACKET_V3:
3728 			break;
3729 		default:
3730 			return -EINVAL;
3731 		}
3732 		lock_sock(sk);
3733 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3734 			ret = -EBUSY;
3735 		} else {
3736 			po->tp_version = val;
3737 			ret = 0;
3738 		}
3739 		release_sock(sk);
3740 		return ret;
3741 	}
3742 	case PACKET_RESERVE:
3743 	{
3744 		unsigned int val;
3745 
3746 		if (optlen != sizeof(val))
3747 			return -EINVAL;
3748 		if (copy_from_user(&val, optval, sizeof(val)))
3749 			return -EFAULT;
3750 		if (val > INT_MAX)
3751 			return -EINVAL;
3752 		lock_sock(sk);
3753 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3754 			ret = -EBUSY;
3755 		} else {
3756 			po->tp_reserve = val;
3757 			ret = 0;
3758 		}
3759 		release_sock(sk);
3760 		return ret;
3761 	}
3762 	case PACKET_LOSS:
3763 	{
3764 		unsigned int val;
3765 
3766 		if (optlen != sizeof(val))
3767 			return -EINVAL;
3768 		if (copy_from_user(&val, optval, sizeof(val)))
3769 			return -EFAULT;
3770 
3771 		lock_sock(sk);
3772 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3773 			ret = -EBUSY;
3774 		} else {
3775 			po->tp_loss = !!val;
3776 			ret = 0;
3777 		}
3778 		release_sock(sk);
3779 		return ret;
3780 	}
3781 	case PACKET_AUXDATA:
3782 	{
3783 		int val;
3784 
3785 		if (optlen < sizeof(val))
3786 			return -EINVAL;
3787 		if (copy_from_user(&val, optval, sizeof(val)))
3788 			return -EFAULT;
3789 
3790 		lock_sock(sk);
3791 		po->auxdata = !!val;
3792 		release_sock(sk);
3793 		return 0;
3794 	}
3795 	case PACKET_ORIGDEV:
3796 	{
3797 		int val;
3798 
3799 		if (optlen < sizeof(val))
3800 			return -EINVAL;
3801 		if (copy_from_user(&val, optval, sizeof(val)))
3802 			return -EFAULT;
3803 
3804 		lock_sock(sk);
3805 		po->origdev = !!val;
3806 		release_sock(sk);
3807 		return 0;
3808 	}
3809 	case PACKET_VNET_HDR:
3810 	{
3811 		int val;
3812 
3813 		if (sock->type != SOCK_RAW)
3814 			return -EINVAL;
3815 		if (optlen < sizeof(val))
3816 			return -EINVAL;
3817 		if (copy_from_user(&val, optval, sizeof(val)))
3818 			return -EFAULT;
3819 
3820 		lock_sock(sk);
3821 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3822 			ret = -EBUSY;
3823 		} else {
3824 			po->has_vnet_hdr = !!val;
3825 			ret = 0;
3826 		}
3827 		release_sock(sk);
3828 		return ret;
3829 	}
3830 	case PACKET_TIMESTAMP:
3831 	{
3832 		int val;
3833 
3834 		if (optlen != sizeof(val))
3835 			return -EINVAL;
3836 		if (copy_from_user(&val, optval, sizeof(val)))
3837 			return -EFAULT;
3838 
3839 		po->tp_tstamp = val;
3840 		return 0;
3841 	}
3842 	case PACKET_FANOUT:
3843 	{
3844 		int val;
3845 
3846 		if (optlen != sizeof(val))
3847 			return -EINVAL;
3848 		if (copy_from_user(&val, optval, sizeof(val)))
3849 			return -EFAULT;
3850 
3851 		return fanout_add(sk, val & 0xffff, val >> 16);
3852 	}
3853 	case PACKET_FANOUT_DATA:
3854 	{
3855 		if (!po->fanout)
3856 			return -EINVAL;
3857 
3858 		return fanout_set_data(po, optval, optlen);
3859 	}
3860 	case PACKET_IGNORE_OUTGOING:
3861 	{
3862 		int val;
3863 
3864 		if (optlen != sizeof(val))
3865 			return -EINVAL;
3866 		if (copy_from_user(&val, optval, sizeof(val)))
3867 			return -EFAULT;
3868 		if (val < 0 || val > 1)
3869 			return -EINVAL;
3870 
3871 		po->prot_hook.ignore_outgoing = !!val;
3872 		return 0;
3873 	}
3874 	case PACKET_TX_HAS_OFF:
3875 	{
3876 		unsigned int val;
3877 
3878 		if (optlen != sizeof(val))
3879 			return -EINVAL;
3880 		if (copy_from_user(&val, optval, sizeof(val)))
3881 			return -EFAULT;
3882 
3883 		lock_sock(sk);
3884 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3885 			ret = -EBUSY;
3886 		} else {
3887 			po->tp_tx_has_off = !!val;
3888 			ret = 0;
3889 		}
3890 		release_sock(sk);
3891 		return 0;
3892 	}
3893 	case PACKET_QDISC_BYPASS:
3894 	{
3895 		int val;
3896 
3897 		if (optlen != sizeof(val))
3898 			return -EINVAL;
3899 		if (copy_from_user(&val, optval, sizeof(val)))
3900 			return -EFAULT;
3901 
3902 		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3903 		return 0;
3904 	}
3905 	default:
3906 		return -ENOPROTOOPT;
3907 	}
3908 }
3909 
3910 static int packet_getsockopt(struct socket *sock, int level, int optname,
3911 			     char __user *optval, int __user *optlen)
3912 {
3913 	int len;
3914 	int val, lv = sizeof(val);
3915 	struct sock *sk = sock->sk;
3916 	struct packet_sock *po = pkt_sk(sk);
3917 	void *data = &val;
3918 	union tpacket_stats_u st;
3919 	struct tpacket_rollover_stats rstats;
3920 	int drops;
3921 
3922 	if (level != SOL_PACKET)
3923 		return -ENOPROTOOPT;
3924 
3925 	if (get_user(len, optlen))
3926 		return -EFAULT;
3927 
3928 	if (len < 0)
3929 		return -EINVAL;
3930 
3931 	switch (optname) {
3932 	case PACKET_STATISTICS:
3933 		spin_lock_bh(&sk->sk_receive_queue.lock);
3934 		memcpy(&st, &po->stats, sizeof(st));
3935 		memset(&po->stats, 0, sizeof(po->stats));
3936 		spin_unlock_bh(&sk->sk_receive_queue.lock);
3937 		drops = atomic_xchg(&po->tp_drops, 0);
3938 
3939 		if (po->tp_version == TPACKET_V3) {
3940 			lv = sizeof(struct tpacket_stats_v3);
3941 			st.stats3.tp_drops = drops;
3942 			st.stats3.tp_packets += drops;
3943 			data = &st.stats3;
3944 		} else {
3945 			lv = sizeof(struct tpacket_stats);
3946 			st.stats1.tp_drops = drops;
3947 			st.stats1.tp_packets += drops;
3948 			data = &st.stats1;
3949 		}
3950 
3951 		break;
3952 	case PACKET_AUXDATA:
3953 		val = po->auxdata;
3954 		break;
3955 	case PACKET_ORIGDEV:
3956 		val = po->origdev;
3957 		break;
3958 	case PACKET_VNET_HDR:
3959 		val = po->has_vnet_hdr;
3960 		break;
3961 	case PACKET_VERSION:
3962 		val = po->tp_version;
3963 		break;
3964 	case PACKET_HDRLEN:
3965 		if (len > sizeof(int))
3966 			len = sizeof(int);
3967 		if (len < sizeof(int))
3968 			return -EINVAL;
3969 		if (copy_from_user(&val, optval, len))
3970 			return -EFAULT;
3971 		switch (val) {
3972 		case TPACKET_V1:
3973 			val = sizeof(struct tpacket_hdr);
3974 			break;
3975 		case TPACKET_V2:
3976 			val = sizeof(struct tpacket2_hdr);
3977 			break;
3978 		case TPACKET_V3:
3979 			val = sizeof(struct tpacket3_hdr);
3980 			break;
3981 		default:
3982 			return -EINVAL;
3983 		}
3984 		break;
3985 	case PACKET_RESERVE:
3986 		val = po->tp_reserve;
3987 		break;
3988 	case PACKET_LOSS:
3989 		val = po->tp_loss;
3990 		break;
3991 	case PACKET_TIMESTAMP:
3992 		val = po->tp_tstamp;
3993 		break;
3994 	case PACKET_FANOUT:
3995 		val = (po->fanout ?
3996 		       ((u32)po->fanout->id |
3997 			((u32)po->fanout->type << 16) |
3998 			((u32)po->fanout->flags << 24)) :
3999 		       0);
4000 		break;
4001 	case PACKET_IGNORE_OUTGOING:
4002 		val = po->prot_hook.ignore_outgoing;
4003 		break;
4004 	case PACKET_ROLLOVER_STATS:
4005 		if (!po->rollover)
4006 			return -EINVAL;
4007 		rstats.tp_all = atomic_long_read(&po->rollover->num);
4008 		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4009 		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4010 		data = &rstats;
4011 		lv = sizeof(rstats);
4012 		break;
4013 	case PACKET_TX_HAS_OFF:
4014 		val = po->tp_tx_has_off;
4015 		break;
4016 	case PACKET_QDISC_BYPASS:
4017 		val = packet_use_direct_xmit(po);
4018 		break;
4019 	default:
4020 		return -ENOPROTOOPT;
4021 	}
4022 
4023 	if (len > lv)
4024 		len = lv;
4025 	if (put_user(len, optlen))
4026 		return -EFAULT;
4027 	if (copy_to_user(optval, data, len))
4028 		return -EFAULT;
4029 	return 0;
4030 }
4031 
4032 
4033 #ifdef CONFIG_COMPAT
4034 static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
4035 				    char __user *optval, unsigned int optlen)
4036 {
4037 	struct packet_sock *po = pkt_sk(sock->sk);
4038 
4039 	if (level != SOL_PACKET)
4040 		return -ENOPROTOOPT;
4041 
4042 	if (optname == PACKET_FANOUT_DATA &&
4043 	    po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
4044 		optval = (char __user *)get_compat_bpf_fprog(optval);
4045 		if (!optval)
4046 			return -EFAULT;
4047 		optlen = sizeof(struct sock_fprog);
4048 	}
4049 
4050 	return packet_setsockopt(sock, level, optname, optval, optlen);
4051 }
4052 #endif
4053 
4054 static int packet_notifier(struct notifier_block *this,
4055 			   unsigned long msg, void *ptr)
4056 {
4057 	struct sock *sk;
4058 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4059 	struct net *net = dev_net(dev);
4060 
4061 	rcu_read_lock();
4062 	sk_for_each_rcu(sk, &net->packet.sklist) {
4063 		struct packet_sock *po = pkt_sk(sk);
4064 
4065 		switch (msg) {
4066 		case NETDEV_UNREGISTER:
4067 			if (po->mclist)
4068 				packet_dev_mclist_delete(dev, &po->mclist);
4069 			/* fallthrough */
4070 
4071 		case NETDEV_DOWN:
4072 			if (dev->ifindex == po->ifindex) {
4073 				spin_lock(&po->bind_lock);
4074 				if (po->running) {
4075 					__unregister_prot_hook(sk, false);
4076 					sk->sk_err = ENETDOWN;
4077 					if (!sock_flag(sk, SOCK_DEAD))
4078 						sk->sk_error_report(sk);
4079 				}
4080 				if (msg == NETDEV_UNREGISTER) {
4081 					packet_cached_dev_reset(po);
4082 					po->ifindex = -1;
4083 					if (po->prot_hook.dev)
4084 						dev_put(po->prot_hook.dev);
4085 					po->prot_hook.dev = NULL;
4086 				}
4087 				spin_unlock(&po->bind_lock);
4088 			}
4089 			break;
4090 		case NETDEV_UP:
4091 			if (dev->ifindex == po->ifindex) {
4092 				spin_lock(&po->bind_lock);
4093 				if (po->num)
4094 					register_prot_hook(sk);
4095 				spin_unlock(&po->bind_lock);
4096 			}
4097 			break;
4098 		}
4099 	}
4100 	rcu_read_unlock();
4101 	return NOTIFY_DONE;
4102 }
4103 
4104 
4105 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4106 			unsigned long arg)
4107 {
4108 	struct sock *sk = sock->sk;
4109 
4110 	switch (cmd) {
4111 	case SIOCOUTQ:
4112 	{
4113 		int amount = sk_wmem_alloc_get(sk);
4114 
4115 		return put_user(amount, (int __user *)arg);
4116 	}
4117 	case SIOCINQ:
4118 	{
4119 		struct sk_buff *skb;
4120 		int amount = 0;
4121 
4122 		spin_lock_bh(&sk->sk_receive_queue.lock);
4123 		skb = skb_peek(&sk->sk_receive_queue);
4124 		if (skb)
4125 			amount = skb->len;
4126 		spin_unlock_bh(&sk->sk_receive_queue.lock);
4127 		return put_user(amount, (int __user *)arg);
4128 	}
4129 #ifdef CONFIG_INET
4130 	case SIOCADDRT:
4131 	case SIOCDELRT:
4132 	case SIOCDARP:
4133 	case SIOCGARP:
4134 	case SIOCSARP:
4135 	case SIOCGIFADDR:
4136 	case SIOCSIFADDR:
4137 	case SIOCGIFBRDADDR:
4138 	case SIOCSIFBRDADDR:
4139 	case SIOCGIFNETMASK:
4140 	case SIOCSIFNETMASK:
4141 	case SIOCGIFDSTADDR:
4142 	case SIOCSIFDSTADDR:
4143 	case SIOCSIFFLAGS:
4144 		return inet_dgram_ops.ioctl(sock, cmd, arg);
4145 #endif
4146 
4147 	default:
4148 		return -ENOIOCTLCMD;
4149 	}
4150 	return 0;
4151 }
4152 
4153 static __poll_t packet_poll(struct file *file, struct socket *sock,
4154 				poll_table *wait)
4155 {
4156 	struct sock *sk = sock->sk;
4157 	struct packet_sock *po = pkt_sk(sk);
4158 	__poll_t mask = datagram_poll(file, sock, wait);
4159 
4160 	spin_lock_bh(&sk->sk_receive_queue.lock);
4161 	if (po->rx_ring.pg_vec) {
4162 		if (!packet_previous_rx_frame(po, &po->rx_ring,
4163 			TP_STATUS_KERNEL))
4164 			mask |= EPOLLIN | EPOLLRDNORM;
4165 	}
4166 	packet_rcv_try_clear_pressure(po);
4167 	spin_unlock_bh(&sk->sk_receive_queue.lock);
4168 	spin_lock_bh(&sk->sk_write_queue.lock);
4169 	if (po->tx_ring.pg_vec) {
4170 		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4171 			mask |= EPOLLOUT | EPOLLWRNORM;
4172 	}
4173 	spin_unlock_bh(&sk->sk_write_queue.lock);
4174 	return mask;
4175 }
4176 
4177 
4178 /* Dirty? Well, I still did not learn better way to account
4179  * for user mmaps.
4180  */
4181 
4182 static void packet_mm_open(struct vm_area_struct *vma)
4183 {
4184 	struct file *file = vma->vm_file;
4185 	struct socket *sock = file->private_data;
4186 	struct sock *sk = sock->sk;
4187 
4188 	if (sk)
4189 		atomic_inc(&pkt_sk(sk)->mapped);
4190 }
4191 
4192 static void packet_mm_close(struct vm_area_struct *vma)
4193 {
4194 	struct file *file = vma->vm_file;
4195 	struct socket *sock = file->private_data;
4196 	struct sock *sk = sock->sk;
4197 
4198 	if (sk)
4199 		atomic_dec(&pkt_sk(sk)->mapped);
4200 }
4201 
4202 static const struct vm_operations_struct packet_mmap_ops = {
4203 	.open	=	packet_mm_open,
4204 	.close	=	packet_mm_close,
4205 };
4206 
4207 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4208 			unsigned int len)
4209 {
4210 	int i;
4211 
4212 	for (i = 0; i < len; i++) {
4213 		if (likely(pg_vec[i].buffer)) {
4214 			if (is_vmalloc_addr(pg_vec[i].buffer))
4215 				vfree(pg_vec[i].buffer);
4216 			else
4217 				free_pages((unsigned long)pg_vec[i].buffer,
4218 					   order);
4219 			pg_vec[i].buffer = NULL;
4220 		}
4221 	}
4222 	kfree(pg_vec);
4223 }
4224 
4225 static char *alloc_one_pg_vec_page(unsigned long order)
4226 {
4227 	char *buffer;
4228 	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4229 			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4230 
4231 	buffer = (char *) __get_free_pages(gfp_flags, order);
4232 	if (buffer)
4233 		return buffer;
4234 
4235 	/* __get_free_pages failed, fall back to vmalloc */
4236 	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4237 	if (buffer)
4238 		return buffer;
4239 
4240 	/* vmalloc failed, lets dig into swap here */
4241 	gfp_flags &= ~__GFP_NORETRY;
4242 	buffer = (char *) __get_free_pages(gfp_flags, order);
4243 	if (buffer)
4244 		return buffer;
4245 
4246 	/* complete and utter failure */
4247 	return NULL;
4248 }
4249 
4250 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4251 {
4252 	unsigned int block_nr = req->tp_block_nr;
4253 	struct pgv *pg_vec;
4254 	int i;
4255 
4256 	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4257 	if (unlikely(!pg_vec))
4258 		goto out;
4259 
4260 	for (i = 0; i < block_nr; i++) {
4261 		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4262 		if (unlikely(!pg_vec[i].buffer))
4263 			goto out_free_pgvec;
4264 	}
4265 
4266 out:
4267 	return pg_vec;
4268 
4269 out_free_pgvec:
4270 	free_pg_vec(pg_vec, order, block_nr);
4271 	pg_vec = NULL;
4272 	goto out;
4273 }
4274 
4275 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4276 		int closing, int tx_ring)
4277 {
4278 	struct pgv *pg_vec = NULL;
4279 	struct packet_sock *po = pkt_sk(sk);
4280 	int was_running, order = 0;
4281 	struct packet_ring_buffer *rb;
4282 	struct sk_buff_head *rb_queue;
4283 	__be16 num;
4284 	int err = -EINVAL;
4285 	/* Added to avoid minimal code churn */
4286 	struct tpacket_req *req = &req_u->req;
4287 
4288 	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4289 	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4290 
4291 	err = -EBUSY;
4292 	if (!closing) {
4293 		if (atomic_read(&po->mapped))
4294 			goto out;
4295 		if (packet_read_pending(rb))
4296 			goto out;
4297 	}
4298 
4299 	if (req->tp_block_nr) {
4300 		unsigned int min_frame_size;
4301 
4302 		/* Sanity tests and some calculations */
4303 		err = -EBUSY;
4304 		if (unlikely(rb->pg_vec))
4305 			goto out;
4306 
4307 		switch (po->tp_version) {
4308 		case TPACKET_V1:
4309 			po->tp_hdrlen = TPACKET_HDRLEN;
4310 			break;
4311 		case TPACKET_V2:
4312 			po->tp_hdrlen = TPACKET2_HDRLEN;
4313 			break;
4314 		case TPACKET_V3:
4315 			po->tp_hdrlen = TPACKET3_HDRLEN;
4316 			break;
4317 		}
4318 
4319 		err = -EINVAL;
4320 		if (unlikely((int)req->tp_block_size <= 0))
4321 			goto out;
4322 		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4323 			goto out;
4324 		min_frame_size = po->tp_hdrlen + po->tp_reserve;
4325 		if (po->tp_version >= TPACKET_V3 &&
4326 		    req->tp_block_size <
4327 		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4328 			goto out;
4329 		if (unlikely(req->tp_frame_size < min_frame_size))
4330 			goto out;
4331 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4332 			goto out;
4333 
4334 		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4335 		if (unlikely(rb->frames_per_block == 0))
4336 			goto out;
4337 		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4338 			goto out;
4339 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4340 					req->tp_frame_nr))
4341 			goto out;
4342 
4343 		err = -ENOMEM;
4344 		order = get_order(req->tp_block_size);
4345 		pg_vec = alloc_pg_vec(req, order);
4346 		if (unlikely(!pg_vec))
4347 			goto out;
4348 		switch (po->tp_version) {
4349 		case TPACKET_V3:
4350 			/* Block transmit is not supported yet */
4351 			if (!tx_ring) {
4352 				init_prb_bdqc(po, rb, pg_vec, req_u);
4353 			} else {
4354 				struct tpacket_req3 *req3 = &req_u->req3;
4355 
4356 				if (req3->tp_retire_blk_tov ||
4357 				    req3->tp_sizeof_priv ||
4358 				    req3->tp_feature_req_word) {
4359 					err = -EINVAL;
4360 					goto out_free_pg_vec;
4361 				}
4362 			}
4363 			break;
4364 		default:
4365 			break;
4366 		}
4367 	}
4368 	/* Done */
4369 	else {
4370 		err = -EINVAL;
4371 		if (unlikely(req->tp_frame_nr))
4372 			goto out;
4373 	}
4374 
4375 
4376 	/* Detach socket from network */
4377 	spin_lock(&po->bind_lock);
4378 	was_running = po->running;
4379 	num = po->num;
4380 	if (was_running) {
4381 		po->num = 0;
4382 		__unregister_prot_hook(sk, false);
4383 	}
4384 	spin_unlock(&po->bind_lock);
4385 
4386 	synchronize_net();
4387 
4388 	err = -EBUSY;
4389 	mutex_lock(&po->pg_vec_lock);
4390 	if (closing || atomic_read(&po->mapped) == 0) {
4391 		err = 0;
4392 		spin_lock_bh(&rb_queue->lock);
4393 		swap(rb->pg_vec, pg_vec);
4394 		rb->frame_max = (req->tp_frame_nr - 1);
4395 		rb->head = 0;
4396 		rb->frame_size = req->tp_frame_size;
4397 		spin_unlock_bh(&rb_queue->lock);
4398 
4399 		swap(rb->pg_vec_order, order);
4400 		swap(rb->pg_vec_len, req->tp_block_nr);
4401 
4402 		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4403 		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4404 						tpacket_rcv : packet_rcv;
4405 		skb_queue_purge(rb_queue);
4406 		if (atomic_read(&po->mapped))
4407 			pr_err("packet_mmap: vma is busy: %d\n",
4408 			       atomic_read(&po->mapped));
4409 	}
4410 	mutex_unlock(&po->pg_vec_lock);
4411 
4412 	spin_lock(&po->bind_lock);
4413 	if (was_running) {
4414 		po->num = num;
4415 		register_prot_hook(sk);
4416 	}
4417 	spin_unlock(&po->bind_lock);
4418 	if (pg_vec && (po->tp_version > TPACKET_V2)) {
4419 		/* Because we don't support block-based V3 on tx-ring */
4420 		if (!tx_ring)
4421 			prb_shutdown_retire_blk_timer(po, rb_queue);
4422 	}
4423 
4424 out_free_pg_vec:
4425 	if (pg_vec)
4426 		free_pg_vec(pg_vec, order, req->tp_block_nr);
4427 out:
4428 	return err;
4429 }
4430 
4431 static int packet_mmap(struct file *file, struct socket *sock,
4432 		struct vm_area_struct *vma)
4433 {
4434 	struct sock *sk = sock->sk;
4435 	struct packet_sock *po = pkt_sk(sk);
4436 	unsigned long size, expected_size;
4437 	struct packet_ring_buffer *rb;
4438 	unsigned long start;
4439 	int err = -EINVAL;
4440 	int i;
4441 
4442 	if (vma->vm_pgoff)
4443 		return -EINVAL;
4444 
4445 	mutex_lock(&po->pg_vec_lock);
4446 
4447 	expected_size = 0;
4448 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4449 		if (rb->pg_vec) {
4450 			expected_size += rb->pg_vec_len
4451 						* rb->pg_vec_pages
4452 						* PAGE_SIZE;
4453 		}
4454 	}
4455 
4456 	if (expected_size == 0)
4457 		goto out;
4458 
4459 	size = vma->vm_end - vma->vm_start;
4460 	if (size != expected_size)
4461 		goto out;
4462 
4463 	start = vma->vm_start;
4464 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4465 		if (rb->pg_vec == NULL)
4466 			continue;
4467 
4468 		for (i = 0; i < rb->pg_vec_len; i++) {
4469 			struct page *page;
4470 			void *kaddr = rb->pg_vec[i].buffer;
4471 			int pg_num;
4472 
4473 			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4474 				page = pgv_to_page(kaddr);
4475 				err = vm_insert_page(vma, start, page);
4476 				if (unlikely(err))
4477 					goto out;
4478 				start += PAGE_SIZE;
4479 				kaddr += PAGE_SIZE;
4480 			}
4481 		}
4482 	}
4483 
4484 	atomic_inc(&po->mapped);
4485 	vma->vm_ops = &packet_mmap_ops;
4486 	err = 0;
4487 
4488 out:
4489 	mutex_unlock(&po->pg_vec_lock);
4490 	return err;
4491 }
4492 
4493 static const struct proto_ops packet_ops_spkt = {
4494 	.family =	PF_PACKET,
4495 	.owner =	THIS_MODULE,
4496 	.release =	packet_release,
4497 	.bind =		packet_bind_spkt,
4498 	.connect =	sock_no_connect,
4499 	.socketpair =	sock_no_socketpair,
4500 	.accept =	sock_no_accept,
4501 	.getname =	packet_getname_spkt,
4502 	.poll =		datagram_poll,
4503 	.ioctl =	packet_ioctl,
4504 	.gettstamp =	sock_gettstamp,
4505 	.listen =	sock_no_listen,
4506 	.shutdown =	sock_no_shutdown,
4507 	.setsockopt =	sock_no_setsockopt,
4508 	.getsockopt =	sock_no_getsockopt,
4509 	.sendmsg =	packet_sendmsg_spkt,
4510 	.recvmsg =	packet_recvmsg,
4511 	.mmap =		sock_no_mmap,
4512 	.sendpage =	sock_no_sendpage,
4513 };
4514 
4515 static const struct proto_ops packet_ops = {
4516 	.family =	PF_PACKET,
4517 	.owner =	THIS_MODULE,
4518 	.release =	packet_release,
4519 	.bind =		packet_bind,
4520 	.connect =	sock_no_connect,
4521 	.socketpair =	sock_no_socketpair,
4522 	.accept =	sock_no_accept,
4523 	.getname =	packet_getname,
4524 	.poll =		packet_poll,
4525 	.ioctl =	packet_ioctl,
4526 	.gettstamp =	sock_gettstamp,
4527 	.listen =	sock_no_listen,
4528 	.shutdown =	sock_no_shutdown,
4529 	.setsockopt =	packet_setsockopt,
4530 	.getsockopt =	packet_getsockopt,
4531 #ifdef CONFIG_COMPAT
4532 	.compat_setsockopt = compat_packet_setsockopt,
4533 #endif
4534 	.sendmsg =	packet_sendmsg,
4535 	.recvmsg =	packet_recvmsg,
4536 	.mmap =		packet_mmap,
4537 	.sendpage =	sock_no_sendpage,
4538 };
4539 
4540 static const struct net_proto_family packet_family_ops = {
4541 	.family =	PF_PACKET,
4542 	.create =	packet_create,
4543 	.owner	=	THIS_MODULE,
4544 };
4545 
4546 static struct notifier_block packet_netdev_notifier = {
4547 	.notifier_call =	packet_notifier,
4548 };
4549 
4550 #ifdef CONFIG_PROC_FS
4551 
4552 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4553 	__acquires(RCU)
4554 {
4555 	struct net *net = seq_file_net(seq);
4556 
4557 	rcu_read_lock();
4558 	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4559 }
4560 
4561 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4562 {
4563 	struct net *net = seq_file_net(seq);
4564 	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4565 }
4566 
4567 static void packet_seq_stop(struct seq_file *seq, void *v)
4568 	__releases(RCU)
4569 {
4570 	rcu_read_unlock();
4571 }
4572 
4573 static int packet_seq_show(struct seq_file *seq, void *v)
4574 {
4575 	if (v == SEQ_START_TOKEN)
4576 		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4577 	else {
4578 		struct sock *s = sk_entry(v);
4579 		const struct packet_sock *po = pkt_sk(s);
4580 
4581 		seq_printf(seq,
4582 			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4583 			   s,
4584 			   refcount_read(&s->sk_refcnt),
4585 			   s->sk_type,
4586 			   ntohs(po->num),
4587 			   po->ifindex,
4588 			   po->running,
4589 			   atomic_read(&s->sk_rmem_alloc),
4590 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4591 			   sock_i_ino(s));
4592 	}
4593 
4594 	return 0;
4595 }
4596 
4597 static const struct seq_operations packet_seq_ops = {
4598 	.start	= packet_seq_start,
4599 	.next	= packet_seq_next,
4600 	.stop	= packet_seq_stop,
4601 	.show	= packet_seq_show,
4602 };
4603 #endif
4604 
4605 static int __net_init packet_net_init(struct net *net)
4606 {
4607 	mutex_init(&net->packet.sklist_lock);
4608 	INIT_HLIST_HEAD(&net->packet.sklist);
4609 
4610 	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4611 			sizeof(struct seq_net_private)))
4612 		return -ENOMEM;
4613 
4614 	return 0;
4615 }
4616 
4617 static void __net_exit packet_net_exit(struct net *net)
4618 {
4619 	remove_proc_entry("packet", net->proc_net);
4620 	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4621 }
4622 
4623 static struct pernet_operations packet_net_ops = {
4624 	.init = packet_net_init,
4625 	.exit = packet_net_exit,
4626 };
4627 
4628 
4629 static void __exit packet_exit(void)
4630 {
4631 	unregister_netdevice_notifier(&packet_netdev_notifier);
4632 	unregister_pernet_subsys(&packet_net_ops);
4633 	sock_unregister(PF_PACKET);
4634 	proto_unregister(&packet_proto);
4635 }
4636 
4637 static int __init packet_init(void)
4638 {
4639 	int rc;
4640 
4641 	rc = proto_register(&packet_proto, 0);
4642 	if (rc)
4643 		goto out;
4644 	rc = sock_register(&packet_family_ops);
4645 	if (rc)
4646 		goto out_proto;
4647 	rc = register_pernet_subsys(&packet_net_ops);
4648 	if (rc)
4649 		goto out_sock;
4650 	rc = register_netdevice_notifier(&packet_netdev_notifier);
4651 	if (rc)
4652 		goto out_pernet;
4653 
4654 	return 0;
4655 
4656 out_pernet:
4657 	unregister_pernet_subsys(&packet_net_ops);
4658 out_sock:
4659 	sock_unregister(PF_PACKET);
4660 out_proto:
4661 	proto_unregister(&packet_proto);
4662 out:
4663 	return rc;
4664 }
4665 
4666 module_init(packet_init);
4667 module_exit(packet_exit);
4668 MODULE_LICENSE("GPL");
4669 MODULE_ALIAS_NETPROTO(PF_PACKET);
4670