xref: /openbmc/linux/net/packet/af_packet.c (revision 7663edc1)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		PACKET - implements raw packet sockets.
8  *
9  * Authors:	Ross Biro
10  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
12  *
13  * Fixes:
14  *		Alan Cox	:	verify_area() now used correctly
15  *		Alan Cox	:	new skbuff lists, look ma no backlogs!
16  *		Alan Cox	:	tidied skbuff lists.
17  *		Alan Cox	:	Now uses generic datagram routines I
18  *					added. Also fixed the peek/read crash
19  *					from all old Linux datagram code.
20  *		Alan Cox	:	Uses the improved datagram code.
21  *		Alan Cox	:	Added NULL's for socket options.
22  *		Alan Cox	:	Re-commented the code.
23  *		Alan Cox	:	Use new kernel side addressing
24  *		Rob Janssen	:	Correct MTU usage.
25  *		Dave Platt	:	Counter leaks caused by incorrect
26  *					interrupt locking and some slightly
27  *					dubious gcc output. Can you read
28  *					compiler: it said _VOLATILE_
29  *	Richard Kooijman	:	Timestamp fixes.
30  *		Alan Cox	:	New buffers. Use sk->mac.raw.
31  *		Alan Cox	:	sendmsg/recvmsg support.
32  *		Alan Cox	:	Protocol setting support
33  *	Alexey Kuznetsov	:	Untied from IPv4 stack.
34  *	Cyrus Durgin		:	Fixed kerneld for kmod.
35  *	Michal Ostrowski	:	Module initialization cleanup.
36  *	Ulises Alonso		:	Frame number limit removal and
37  *					packet_set_ring memory leak.
38  *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
39  *					The convention is that longer addresses
40  *					will simply extend the hardware address
41  *					byte arrays at the end of sockaddr_ll
42  *					and packet_mreq.
43  *		Johann Baudy	:	Added TX RING.
44  *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
45  *					layer.
46  *					Copyright (C) 2011, <lokec@ccs.neu.edu>
47  */
48 
49 #include <linux/types.h>
50 #include <linux/mm.h>
51 #include <linux/capability.h>
52 #include <linux/fcntl.h>
53 #include <linux/socket.h>
54 #include <linux/in.h>
55 #include <linux/inet.h>
56 #include <linux/netdevice.h>
57 #include <linux/if_packet.h>
58 #include <linux/wireless.h>
59 #include <linux/kernel.h>
60 #include <linux/kmod.h>
61 #include <linux/slab.h>
62 #include <linux/vmalloc.h>
63 #include <net/net_namespace.h>
64 #include <net/ip.h>
65 #include <net/protocol.h>
66 #include <linux/skbuff.h>
67 #include <net/sock.h>
68 #include <linux/errno.h>
69 #include <linux/timer.h>
70 #include <linux/uaccess.h>
71 #include <asm/ioctls.h>
72 #include <asm/page.h>
73 #include <asm/cacheflush.h>
74 #include <asm/io.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77 #include <linux/poll.h>
78 #include <linux/module.h>
79 #include <linux/init.h>
80 #include <linux/mutex.h>
81 #include <linux/if_vlan.h>
82 #include <linux/virtio_net.h>
83 #include <linux/errqueue.h>
84 #include <linux/net_tstamp.h>
85 #include <linux/percpu.h>
86 #ifdef CONFIG_INET
87 #include <net/inet_common.h>
88 #endif
89 #include <linux/bpf.h>
90 #include <net/compat.h>
91 
92 #include "internal.h"
93 
94 /*
95    Assumptions:
96    - if a device has no dev->hard_header routine, it adds and removes the ll
97      header inside itself. In this case the ll header is invisible outside of
98      the device, but higher levels still should reserve dev->hard_header_len.
99      Some devices are clever enough to reallocate the skb when the header
100      will not fit in the reserved space (tunnels); others are not
101      (PPP).
102    - a packet socket receives packets with the ll header already pulled,
103      so SOCK_RAW should push it back.
104 
105 On receive:
106 -----------
107 
108 Incoming, dev->hard_header!=NULL
109    mac_header -> ll header
110    data       -> data
111 
112 Outgoing, dev->hard_header!=NULL
113    mac_header -> ll header
114    data       -> ll header
115 
116 Incoming, dev->hard_header==NULL
117    mac_header -> UNKNOWN position. It is very likely that it points to the
118 		 ll header.  PPP does this, which is wrong, because it introduces
119 		 asymmetry between the rx and tx paths.
120    data       -> data
121 
122 Outgoing, dev->hard_header==NULL
123    mac_header -> data. ll header is still not built!
124    data       -> data
125 
126 Summary
127   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
128 
129 
130 On transmit:
131 ------------
132 
133 dev->hard_header != NULL
134    mac_header -> ll header
135    data       -> ll header
136 
137 dev->hard_header == NULL (ll header is added by device, we cannot control it)
138    mac_header -> data
139    data       -> data
140 
141    We should set nh.raw on output to the correct position;
142    the packet classifier depends on it.
143  */
144 
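/* A user-space sketch of the SOCK_RAW/SOCK_DGRAM distinction described
 * above (illustrative only; the buffer size and the use of ETH_P_ALL are
 * assumptions, not taken from this file):
 *
 *	int raw   = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	char buf[2048];
 *
 *	recv(raw,   buf, sizeof(buf), 0); // buf starts at the ll (e.g. Ethernet) header
 *	recv(dgram, buf, sizeof(buf), 0); // ll header already pulled; buf starts at data
 */
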
145 /* Private packet socket structures. */
146 
147 /* identical to struct packet_mreq except it has
148  * a longer address field.
149  */
150 struct packet_mreq_max {
151 	int		mr_ifindex;
152 	unsigned short	mr_type;
153 	unsigned short	mr_alen;
154 	unsigned char	mr_address[MAX_ADDR_LEN];
155 };
156 
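/* A minimal user-space sketch of how the shorter uapi twin of the struct
 * above, struct packet_mreq, is typically used, e.g. to put one interface
 * into promiscuous mode (illustrative; "fd" is assumed to be an AF_PACKET
 * socket and "eth0" an arbitrary interface name):
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */
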
157 union tpacket_uhdr {
158 	struct tpacket_hdr  *h1;
159 	struct tpacket2_hdr *h2;
160 	struct tpacket3_hdr *h3;
161 	void *raw;
162 };
163 
164 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
165 		int closing, int tx_ring);
166 
167 #define V3_ALIGNMENT	(8)
168 
169 #define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
170 
171 #define BLK_PLUS_PRIV(sz_of_priv) \
172 	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
173 
174 #define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
175 #define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
176 #define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
177 #define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
178 #define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
179 #define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
180 #define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
181 
182 struct packet_sock;
183 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
184 		       struct packet_type *pt, struct net_device *orig_dev);
185 
186 static void *packet_previous_frame(struct packet_sock *po,
187 		struct packet_ring_buffer *rb,
188 		int status);
189 static void packet_increment_head(struct packet_ring_buffer *buff);
190 static int prb_curr_blk_in_use(struct tpacket_block_desc *);
191 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
192 			struct packet_sock *);
193 static void prb_retire_current_block(struct tpacket_kbdq_core *,
194 		struct packet_sock *, unsigned int status);
195 static int prb_queue_frozen(struct tpacket_kbdq_core *);
196 static void prb_open_block(struct tpacket_kbdq_core *,
197 		struct tpacket_block_desc *);
198 static void prb_retire_rx_blk_timer_expired(struct timer_list *);
199 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
200 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
201 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
202 		struct tpacket3_hdr *);
203 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
204 		struct tpacket3_hdr *);
205 static void packet_flush_mclist(struct sock *sk);
206 static u16 packet_pick_tx_queue(struct sk_buff *skb);
207 
208 struct packet_skb_cb {
209 	union {
210 		struct sockaddr_pkt pkt;
211 		union {
212 			/* Trick: alias the skb's original length with
213 			 * ll.sll_family and ll.protocol in order
214 			 * to save room.
215 			 */
216 			unsigned int origlen;
217 			struct sockaddr_ll ll;
218 		};
219 	} sa;
220 };
221 
222 #define vio_le() virtio_legacy_is_little_endian()
223 
224 #define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
225 
226 #define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
227 #define GET_PBLOCK_DESC(x, bid)	\
228 	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
229 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
230 	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
231 #define GET_NEXT_PRB_BLK_NUM(x) \
232 	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
233 	((x)->kactive_blk_num+1) : 0)
234 
235 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
236 static void __fanout_link(struct sock *sk, struct packet_sock *po);
237 
238 static int packet_direct_xmit(struct sk_buff *skb)
239 {
240 	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
241 }
242 
243 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
244 {
245 	struct net_device *dev;
246 
247 	rcu_read_lock();
248 	dev = rcu_dereference(po->cached_dev);
249 	if (likely(dev))
250 		dev_hold(dev);
251 	rcu_read_unlock();
252 
253 	return dev;
254 }
255 
256 static void packet_cached_dev_assign(struct packet_sock *po,
257 				     struct net_device *dev)
258 {
259 	rcu_assign_pointer(po->cached_dev, dev);
260 }
261 
262 static void packet_cached_dev_reset(struct packet_sock *po)
263 {
264 	RCU_INIT_POINTER(po->cached_dev, NULL);
265 }
266 
267 static bool packet_use_direct_xmit(const struct packet_sock *po)
268 {
269 	return po->xmit == packet_direct_xmit;
270 }
271 
272 static u16 packet_pick_tx_queue(struct sk_buff *skb)
273 {
274 	struct net_device *dev = skb->dev;
275 	const struct net_device_ops *ops = dev->netdev_ops;
276 	int cpu = raw_smp_processor_id();
277 	u16 queue_index;
278 
279 #ifdef CONFIG_XPS
280 	skb->sender_cpu = cpu + 1;
281 #endif
282 	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
283 	if (ops->ndo_select_queue) {
284 		queue_index = ops->ndo_select_queue(dev, skb, NULL);
285 		queue_index = netdev_cap_txqueue(dev, queue_index);
286 	} else {
287 		queue_index = netdev_pick_tx(dev, skb, NULL);
288 	}
289 
290 	return queue_index;
291 }
292 
293 /* __register_prot_hook must be invoked through register_prot_hook
294  * or from a context in which asynchronous accesses to the packet
295  * socket are not possible (packet_create()).
296  */
297 static void __register_prot_hook(struct sock *sk)
298 {
299 	struct packet_sock *po = pkt_sk(sk);
300 
301 	if (!po->running) {
302 		if (po->fanout)
303 			__fanout_link(sk, po);
304 		else
305 			dev_add_pack(&po->prot_hook);
306 
307 		sock_hold(sk);
308 		po->running = 1;
309 	}
310 }
311 
312 static void register_prot_hook(struct sock *sk)
313 {
314 	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
315 	__register_prot_hook(sk);
316 }
317 
318 /* If the sync parameter is true, we will temporarily drop
319  * the po->bind_lock and do a synchronize_net to make sure no
320  * asynchronous packet processing paths still refer to the elements
321  * of po->prot_hook.  If the sync parameter is false, it is the
322  * caller's responsibility to take care of this.
323  */
324 static void __unregister_prot_hook(struct sock *sk, bool sync)
325 {
326 	struct packet_sock *po = pkt_sk(sk);
327 
328 	lockdep_assert_held_once(&po->bind_lock);
329 
330 	po->running = 0;
331 
332 	if (po->fanout)
333 		__fanout_unlink(sk, po);
334 	else
335 		__dev_remove_pack(&po->prot_hook);
336 
337 	__sock_put(sk);
338 
339 	if (sync) {
340 		spin_unlock(&po->bind_lock);
341 		synchronize_net();
342 		spin_lock(&po->bind_lock);
343 	}
344 }
345 
346 static void unregister_prot_hook(struct sock *sk, bool sync)
347 {
348 	struct packet_sock *po = pkt_sk(sk);
349 
350 	if (po->running)
351 		__unregister_prot_hook(sk, sync);
352 }
353 
354 static inline struct page * __pure pgv_to_page(void *addr)
355 {
356 	if (is_vmalloc_addr(addr))
357 		return vmalloc_to_page(addr);
358 	return virt_to_page(addr);
359 }
360 
361 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
362 {
363 	union tpacket_uhdr h;
364 
365 	h.raw = frame;
366 	switch (po->tp_version) {
367 	case TPACKET_V1:
368 		h.h1->tp_status = status;
369 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
370 		break;
371 	case TPACKET_V2:
372 		h.h2->tp_status = status;
373 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
374 		break;
375 	case TPACKET_V3:
376 		h.h3->tp_status = status;
377 		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
378 		break;
379 	default:
380 		WARN(1, "TPACKET version not supported.\n");
381 		BUG();
382 	}
383 
384 	smp_wmb();
385 }
386 
387 static int __packet_get_status(const struct packet_sock *po, void *frame)
388 {
389 	union tpacket_uhdr h;
390 
391 	smp_rmb();
392 
393 	h.raw = frame;
394 	switch (po->tp_version) {
395 	case TPACKET_V1:
396 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
397 		return h.h1->tp_status;
398 	case TPACKET_V2:
399 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
400 		return h.h2->tp_status;
401 	case TPACKET_V3:
402 		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
403 		return h.h3->tp_status;
404 	default:
405 		WARN(1, "TPACKET version not supported.\n");
406 		BUG();
407 		return 0;
408 	}
409 }
410 
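/* The user-space half of this status handshake, as a rough TPACKET_V2
 * sketch (illustrative; "ring", "frame_size", "i" and "nr_frames" are
 * assumptions, and a real consumer would poll() the socket and use proper
 * compiler/memory barriers rather than this simplified loop):
 *
 *	struct tpacket2_hdr *hdr = ring + i * frame_size;
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		// wait for the kernel to fill the slot
 *	// ... consume the frame at (char *)hdr + hdr->tp_mac ...
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back to the kernel
 *	i = (i + 1) % nr_frames;
 */
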
411 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
412 				   unsigned int flags)
413 {
414 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
415 
416 	if (shhwtstamps &&
417 	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
418 	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
419 		return TP_STATUS_TS_RAW_HARDWARE;
420 
421 	if (ktime_to_timespec64_cond(skb->tstamp, ts))
422 		return TP_STATUS_TS_SOFTWARE;
423 
424 	return 0;
425 }
426 
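/* Which branch above can succeed is selected from user space via the
 * PACKET_TIMESTAMP socket option; a sketch requesting raw hardware stamps
 * (illustrative; "fd" is assumed to be an AF_PACKET socket):
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 */
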
427 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
428 				    struct sk_buff *skb)
429 {
430 	union tpacket_uhdr h;
431 	struct timespec64 ts;
432 	__u32 ts_status;
433 
434 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
435 		return 0;
436 
437 	h.raw = frame;
438 	/*
439 	 * versions 1 through 3 overflow the timestamps in y2106, since they
440 	 * all store the seconds in a 32-bit unsigned integer.
441 	 * If we create a version 4, that should have a 64-bit timestamp,
442 	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
443 	 * nanoseconds.
444 	 */
445 	switch (po->tp_version) {
446 	case TPACKET_V1:
447 		h.h1->tp_sec = ts.tv_sec;
448 		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
449 		break;
450 	case TPACKET_V2:
451 		h.h2->tp_sec = ts.tv_sec;
452 		h.h2->tp_nsec = ts.tv_nsec;
453 		break;
454 	case TPACKET_V3:
455 		h.h3->tp_sec = ts.tv_sec;
456 		h.h3->tp_nsec = ts.tv_nsec;
457 		break;
458 	default:
459 		WARN(1, "TPACKET version not supported.\n");
460 		BUG();
461 	}
462 
463 	/* one flush is safe, as both fields always lie on the same cacheline */
464 	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
465 	smp_wmb();
466 
467 	return ts_status;
468 }
469 
470 static void *packet_lookup_frame(const struct packet_sock *po,
471 				 const struct packet_ring_buffer *rb,
472 				 unsigned int position,
473 				 int status)
474 {
475 	unsigned int pg_vec_pos, frame_offset;
476 	union tpacket_uhdr h;
477 
478 	pg_vec_pos = position / rb->frames_per_block;
479 	frame_offset = position % rb->frames_per_block;
480 
481 	h.raw = rb->pg_vec[pg_vec_pos].buffer +
482 		(frame_offset * rb->frame_size);
483 
484 	if (status != __packet_get_status(po, h.raw))
485 		return NULL;
486 
487 	return h.raw;
488 }
489 
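/* Worked example of the index arithmetic above: with tp_block_size = 4096
 * and tp_frame_size = 2048, frames_per_block is 2, so position 5 resolves
 * to pg_vec_pos = 5 / 2 = 2 and frame_offset = 5 % 2 = 1, i.e.
 *
 *	h.raw = rb->pg_vec[2].buffer + 1 * 2048;
 *
 * the second frame of the third block.
 */
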
490 static void *packet_current_frame(struct packet_sock *po,
491 		struct packet_ring_buffer *rb,
492 		int status)
493 {
494 	return packet_lookup_frame(po, rb, rb->head, status);
495 }
496 
497 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
498 {
499 	del_timer_sync(&pkc->retire_blk_timer);
500 }
501 
502 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
503 		struct sk_buff_head *rb_queue)
504 {
505 	struct tpacket_kbdq_core *pkc;
506 
507 	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
508 
509 	spin_lock_bh(&rb_queue->lock);
510 	pkc->delete_blk_timer = 1;
511 	spin_unlock_bh(&rb_queue->lock);
512 
513 	prb_del_retire_blk_timer(pkc);
514 }
515 
516 static void prb_setup_retire_blk_timer(struct packet_sock *po)
517 {
518 	struct tpacket_kbdq_core *pkc;
519 
520 	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
521 	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
522 		    0);
523 	pkc->retire_blk_timer.expires = jiffies;
524 }
525 
526 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
527 				int blk_size_in_bytes)
528 {
529 	struct net_device *dev;
530 	unsigned int mbits, div;
531 	struct ethtool_link_ksettings ecmd;
532 	int err;
533 
534 	rtnl_lock();
535 	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
536 	if (unlikely(!dev)) {
537 		rtnl_unlock();
538 		return DEFAULT_PRB_RETIRE_TOV;
539 	}
540 	err = __ethtool_get_link_ksettings(dev, &ecmd);
541 	rtnl_unlock();
542 	if (err)
543 		return DEFAULT_PRB_RETIRE_TOV;
544 
545 	/* If the link speed is this slow, you don't really
546 	 * need to worry about perf anyway.
547 	 */
548 	if (ecmd.base.speed < SPEED_1000 ||
549 	    ecmd.base.speed == SPEED_UNKNOWN)
550 		return DEFAULT_PRB_RETIRE_TOV;
551 
552 	div = ecmd.base.speed / 1000;
553 	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
554 
555 	if (div)
556 		mbits /= div;
557 
558 	if (div)
559 		return mbits + 1;
560 	return mbits;
561 }
562 
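/* Worked example of the timeout math above, assuming a 1 MiB block on a
 * 1 Gb/s link: mbits = (1048576 * 8) / (1024 * 1024) = 8 and
 * div = 1000 / 1000 = 1, so the function returns 8 + 1 = 9 ms - just
 * above the ~8 ms the timer-logic comment below estimates it takes to
 * fill such a block. On a 10 Gb/s link, div = 10, mbits becomes
 * 8 / 10 = 0, and the result is the 1 ms floor.
 */
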
563 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
564 			union tpacket_req_u *req_u)
565 {
566 	p1->feature_req_word = req_u->req3.tp_feature_req_word;
567 }
568 
569 static void init_prb_bdqc(struct packet_sock *po,
570 			struct packet_ring_buffer *rb,
571 			struct pgv *pg_vec,
572 			union tpacket_req_u *req_u)
573 {
574 	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
575 	struct tpacket_block_desc *pbd;
576 
577 	memset(p1, 0x0, sizeof(*p1));
578 
579 	p1->knxt_seq_num = 1;
580 	p1->pkbdq = pg_vec;
581 	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
582 	p1->pkblk_start	= pg_vec[0].buffer;
583 	p1->kblk_size = req_u->req3.tp_block_size;
584 	p1->knum_blocks	= req_u->req3.tp_block_nr;
585 	p1->hdrlen = po->tp_hdrlen;
586 	p1->version = po->tp_version;
587 	p1->last_kactive_blk_num = 0;
588 	po->stats.stats3.tp_freeze_q_cnt = 0;
589 	if (req_u->req3.tp_retire_blk_tov)
590 		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
591 	else
592 		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
593 						req_u->req3.tp_block_size);
594 	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
595 	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
596 	rwlock_init(&p1->blk_fill_in_prog_lock);
597 
598 	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
599 	prb_init_ft_ops(p1, req_u);
600 	prb_setup_retire_blk_timer(po);
601 	prb_open_block(p1, pbd);
602 }
603 
604 /*  Do NOT update the last_blk_num first.
605  *  Assumes sk_buff_head lock is held.
606  */
607 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
608 {
609 	mod_timer(&pkc->retire_blk_timer,
610 			jiffies + pkc->tov_in_jiffies);
611 	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
612 }
613 
614 /*
615  * Timer logic:
616  * 1) We refresh the timer only when we open a block.
617  *    By doing this we don't waste cycles refreshing the timer
618  *    on a packet-by-packet basis.
619  *
620  * With a 1MB block-size, on a 1Gbps line, it will take
621  * i) ~8 ms to fill a block + ii) memcpy etc.
622  * In this cut we are not accounting for the memcpy time.
623  *
624  * So, if the user sets the 'tmo' to 10ms then the timer
625  * will never fire while the block is still getting filled
626  * (which is what we want). However, the user could choose
627  * to close a block early and that's fine.
628  *
629  * But when the timer does fire, we check whether or not to refresh it.
630  * Since the tmo granularity is in msecs, it is not too expensive
631  * to refresh the timer, let's say every '8' msecs.
632  * Either the user can set the 'tmo' or we can derive it based on
633  * a) line-speed and b) block-size.
634  * prb_calc_retire_blk_tmo() calculates the tmo.
635  *
636  */
637 static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
638 {
639 	struct packet_sock *po =
640 		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
641 	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
642 	unsigned int frozen;
643 	struct tpacket_block_desc *pbd;
644 
645 	spin_lock(&po->sk.sk_receive_queue.lock);
646 
647 	frozen = prb_queue_frozen(pkc);
648 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
649 
650 	if (unlikely(pkc->delete_blk_timer))
651 		goto out;
652 
653 	/* We only need to plug the race when the block is partially filled.
654 	 * tpacket_rcv:
655 	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
656 	 *		copy_bits() is in progress ...
657 	 *		the timer fires on another cpu:
658 	 *		we can't retire the current block because copy_bits
659 	 *		is in progress.
660 	 *
661 	 */
662 	if (BLOCK_NUM_PKTS(pbd)) {
663 		/* Waiting for skb_copy_bits to finish... */
664 		write_lock(&pkc->blk_fill_in_prog_lock);
665 		write_unlock(&pkc->blk_fill_in_prog_lock);
666 	}
667 
668 	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
669 		if (!frozen) {
670 			if (!BLOCK_NUM_PKTS(pbd)) {
671 				/* An empty block. Just refresh the timer. */
672 				goto refresh_timer;
673 			}
674 			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
675 			if (!prb_dispatch_next_block(pkc, po))
676 				goto refresh_timer;
677 			else
678 				goto out;
679 		} else {
680 			/* Case 1. Queue was frozen because user-space was
681 			 *	   lagging behind.
682 			 */
683 			if (prb_curr_blk_in_use(pbd)) {
684 				/*
685 				 * Ok, user-space is still behind.
686 				 * So just refresh the timer.
687 				 */
688 				goto refresh_timer;
689 			} else {
690 			       /* Case 2. The queue was frozen, user-space caught
691 				* up, now the link went idle and the timer fired.
692 				* We don't have a block to close, so we open this
693 				* block and restart the timer.
694 				* Opening a block thaws the queue and restarts the
695 				* timer; thawing/timer-refresh is a side effect.
696 				*/
697 				prb_open_block(pkc, pbd);
698 				goto out;
699 			}
700 		}
701 	}
702 
703 refresh_timer:
704 	_prb_refresh_rx_retire_blk_timer(pkc);
705 
706 out:
707 	spin_unlock(&po->sk.sk_receive_queue.lock);
708 }
709 
710 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
711 		struct tpacket_block_desc *pbd1, __u32 status)
712 {
713 	/* Flush everything minus the block header */
714 
715 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
716 	u8 *start, *end;
717 
718 	start = (u8 *)pbd1;
719 
720 	/* Skip the block header (we know the header WILL fit in 4K) */
721 	start += PAGE_SIZE;
722 
723 	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
724 	for (; start < end; start += PAGE_SIZE)
725 		flush_dcache_page(pgv_to_page(start));
726 
727 	smp_wmb();
728 #endif
729 
730 	/* Now update the block status. */
731 
732 	BLOCK_STATUS(pbd1) = status;
733 
734 	/* Flush the block header */
735 
736 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
737 	start = (u8 *)pbd1;
738 	flush_dcache_page(pgv_to_page(start));
739 
740 	smp_wmb();
741 #endif
742 }
743 
744 /*
745  * Side effect:
746  *
747  * 1) Flush the block
748  * 2) Increment active_blk_num
749  *
750  * Note: We DON'T refresh the timer on purpose,
751  *	because almost always the next block will be opened.
752  */
753 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
754 		struct tpacket_block_desc *pbd1,
755 		struct packet_sock *po, unsigned int stat)
756 {
757 	__u32 status = TP_STATUS_USER | stat;
758 
759 	struct tpacket3_hdr *last_pkt;
760 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
761 	struct sock *sk = &po->sk;
762 
763 	if (atomic_read(&po->tp_drops))
764 		status |= TP_STATUS_LOSING;
765 
766 	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
767 	last_pkt->tp_next_offset = 0;
768 
769 	/* Get the ts of the last pkt */
770 	if (BLOCK_NUM_PKTS(pbd1)) {
771 		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
772 		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
773 	} else {
774 		/* Ok, we tmo'd - so get the current time.
775 		 *
776 		 * It shouldn't really happen as we don't close empty
777 		 * blocks. See prb_retire_rx_blk_timer_expired().
778 		 */
779 		struct timespec64 ts;
780 		ktime_get_real_ts64(&ts);
781 		h1->ts_last_pkt.ts_sec = ts.tv_sec;
782 		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
783 	}
784 
785 	smp_wmb();
786 
787 	/* Flush the block */
788 	prb_flush_block(pkc1, pbd1, status);
789 
790 	sk->sk_data_ready(sk);
791 
792 	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
793 }
794 
795 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
796 {
797 	pkc->reset_pending_on_curr_blk = 0;
798 }
799 
800 /*
801  * Side effect of opening a block:
802  *
803  * 1) prb_queue is thawed.
804  * 2) retire_blk_timer is refreshed.
805  *
806  */
807 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
808 	struct tpacket_block_desc *pbd1)
809 {
810 	struct timespec64 ts;
811 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
812 
813 	smp_rmb();
814 
815 	/* We could have just memset this, but we would lose the
816 	 * flexibility of making the priv area sticky.
817 	 */
818 
819 	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
820 	BLOCK_NUM_PKTS(pbd1) = 0;
821 	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
822 
823 	ktime_get_real_ts64(&ts);
824 
825 	h1->ts_first_pkt.ts_sec = ts.tv_sec;
826 	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
827 
828 	pkc1->pkblk_start = (char *)pbd1;
829 	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
830 
831 	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
832 	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
833 
834 	pbd1->version = pkc1->version;
835 	pkc1->prev = pkc1->nxt_offset;
836 	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
837 
838 	prb_thaw_queue(pkc1);
839 	_prb_refresh_rx_retire_blk_timer(pkc1);
840 
841 	smp_wmb();
842 }
843 
844 /*
845  * Queue freeze logic:
846  * 1) Assume tp_block_nr = 8 blocks.
847  * 2) At time 't0', user opens Rx ring.
848  * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
849  * 4) user-space is either sleeping or processing block '0'.
850  * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
851  *    it will close block-7, loop around and try to fill block '0'.
852  *    call-flow:
853  *    __packet_lookup_frame_in_block
854  *      prb_retire_current_block()
855  *      prb_dispatch_next_block()
856  *        |->(BLOCK_STATUS == USER) evaluates to true
857  *    5.1) Since block-0 is currently in-use, we just freeze the queue.
858  * 6) Now there are two cases:
859  *    6.1) Link goes idle right after the queue is frozen.
860  *         But remember, the last open_block() refreshed the timer.
861  *         When this timer expires, it will refresh itself so that we can
862  *         re-open block-0 in near future.
863  *    6.2) Link is busy and keeps on receiving packets. This is a simple
864  *         case and __packet_lookup_frame_in_block will check if block-0
865  *         is free and can now be re-used.
866  */
867 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
868 				  struct packet_sock *po)
869 {
870 	pkc->reset_pending_on_curr_blk = 1;
871 	po->stats.stats3.tp_freeze_q_cnt++;
872 }
873 
874 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
875 
876 /*
877  * If the next block is free then we will dispatch it
878  * and return a good offset.
879  * Else, we will freeze the queue.
880  * So, the caller must check the return value.
881  */
882 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
883 		struct packet_sock *po)
884 {
885 	struct tpacket_block_desc *pbd;
886 
887 	smp_rmb();
888 
889 	/* 1. Get current block num */
890 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
891 
892 	/* 2. If this block is currently in_use then freeze the queue */
893 	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
894 		prb_freeze_queue(pkc, po);
895 		return NULL;
896 	}
897 
898 	/*
899 	 * 3.
900 	 * open this block and return the offset where the first packet
901 	 * needs to get stored.
902 	 */
903 	prb_open_block(pkc, pbd);
904 	return (void *)pkc->nxt_offset;
905 }
906 
907 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
908 		struct packet_sock *po, unsigned int status)
909 {
910 	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
911 
912 	/* retire/close the current block */
913 	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
914 		/*
915 		 * Plug the case where copy_bits() is in progress on
916 		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
917 		 * have space to copy the pkt in the current block and
918 		 * called prb_retire_current_block()
919 		 *
920 		 * We don't need to worry about the TMO case because
921 		 * the timer-handler already handled this case.
922 		 */
923 		if (!(status & TP_STATUS_BLK_TMO)) {
924 			/* Waiting for skb_copy_bits to finish... */
925 			write_lock(&pkc->blk_fill_in_prog_lock);
926 			write_unlock(&pkc->blk_fill_in_prog_lock);
927 		}
928 		prb_close_block(pkc, pbd, po, status);
929 		return;
930 	}
931 }
932 
933 static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
934 {
935 	return TP_STATUS_USER & BLOCK_STATUS(pbd);
936 }
937 
938 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
939 {
940 	return pkc->reset_pending_on_curr_blk;
941 }
942 
943 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
944 	__releases(&pkc->blk_fill_in_prog_lock)
945 {
946 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
947 
948 	read_unlock(&pkc->blk_fill_in_prog_lock);
949 }
950 
951 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
952 			struct tpacket3_hdr *ppd)
953 {
954 	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
955 }
956 
957 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
958 			struct tpacket3_hdr *ppd)
959 {
960 	ppd->hv1.tp_rxhash = 0;
961 }
962 
963 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
964 			struct tpacket3_hdr *ppd)
965 {
966 	if (skb_vlan_tag_present(pkc->skb)) {
967 		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
968 		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
969 		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
970 	} else {
971 		ppd->hv1.tp_vlan_tci = 0;
972 		ppd->hv1.tp_vlan_tpid = 0;
973 		ppd->tp_status = TP_STATUS_AVAILABLE;
974 	}
975 }
976 
977 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
978 			struct tpacket3_hdr *ppd)
979 {
980 	ppd->hv1.tp_padding = 0;
981 	prb_fill_vlan_info(pkc, ppd);
982 
983 	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
984 		prb_fill_rxhash(pkc, ppd);
985 	else
986 		prb_clear_rxhash(pkc, ppd);
987 }
988 
989 static void prb_fill_curr_block(char *curr,
990 				struct tpacket_kbdq_core *pkc,
991 				struct tpacket_block_desc *pbd,
992 				unsigned int len)
993 	__acquires(&pkc->blk_fill_in_prog_lock)
994 {
995 	struct tpacket3_hdr *ppd;
996 
997 	ppd  = (struct tpacket3_hdr *)curr;
998 	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
999 	pkc->prev = curr;
1000 	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1001 	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1002 	BLOCK_NUM_PKTS(pbd) += 1;
1003 	read_lock(&pkc->blk_fill_in_prog_lock);
1004 	prb_run_all_ft_ops(pkc, ppd);
1005 }
1006 
1007 /* Assumes caller has the sk->rx_queue.lock */
1008 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1009 					    struct sk_buff *skb,
1010 					    unsigned int len
1011 					    )
1012 {
1013 	struct tpacket_kbdq_core *pkc;
1014 	struct tpacket_block_desc *pbd;
1015 	char *curr, *end;
1016 
1017 	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1018 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1019 
1020 	/* Queue is frozen when user space is lagging behind */
1021 	if (prb_queue_frozen(pkc)) {
1022 		/*
1023 		 * Check if the last block, which caused the queue to freeze,
1024 		 * is still in use by user-space.
1025 		 */
1026 		if (prb_curr_blk_in_use(pbd)) {
1027 			/* Can't record this packet */
1028 			return NULL;
1029 		} else {
1030 			/*
1031 			 * Ok, the block was released by user-space.
1032 			 * Now let's open that block.
1033 			 * Opening a block also thaws the queue;
1034 			 * thawing is a side effect.
1035 			 */
1036 			prb_open_block(pkc, pbd);
1037 		}
1038 	}
1039 
1040 	smp_mb();
1041 	curr = pkc->nxt_offset;
1042 	pkc->skb = skb;
1043 	end = (char *)pbd + pkc->kblk_size;
1044 
1045 	/* first try the current block */
1046 	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1047 		prb_fill_curr_block(curr, pkc, pbd, len);
1048 		return (void *)curr;
1049 	}
1050 
1051 	/* Ok, close the current block */
1052 	prb_retire_current_block(pkc, po, 0);
1053 
1054 	/* Now, try to dispatch the next block */
1055 	curr = (char *)prb_dispatch_next_block(pkc, po);
1056 	if (curr) {
1057 		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1058 		prb_fill_curr_block(curr, pkc, pbd, len);
1059 		return (void *)curr;
1060 	}
1061 
1062 	/*
1063 	 * No free blocks are available. User-space hasn't caught up yet.
1064 	 * The queue was just frozen, and now this packet will get dropped.
1065 	 */
1066 	return NULL;
1067 }
1068 
1069 static void *packet_current_rx_frame(struct packet_sock *po,
1070 					    struct sk_buff *skb,
1071 					    int status, unsigned int len)
1072 {
1073 	char *curr = NULL;
1074 	switch (po->tp_version) {
1075 	case TPACKET_V1:
1076 	case TPACKET_V2:
1077 		curr = packet_lookup_frame(po, &po->rx_ring,
1078 					po->rx_ring.head, status);
1079 		return curr;
1080 	case TPACKET_V3:
1081 		return __packet_lookup_frame_in_block(po, skb, len);
1082 	default:
1083 		WARN(1, "TPACKET version not supported\n");
1084 		BUG();
1085 		return NULL;
1086 	}
1087 }
1088 
1089 static void *prb_lookup_block(const struct packet_sock *po,
1090 			      const struct packet_ring_buffer *rb,
1091 			      unsigned int idx,
1092 			      int status)
1093 {
1094 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1095 	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1096 
1097 	if (status != BLOCK_STATUS(pbd))
1098 		return NULL;
1099 	return pbd;
1100 }
1101 
1102 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1103 {
1104 	unsigned int prev;
1105 	if (rb->prb_bdqc.kactive_blk_num)
1106 		prev = rb->prb_bdqc.kactive_blk_num-1;
1107 	else
1108 		prev = rb->prb_bdqc.knum_blocks-1;
1109 	return prev;
1110 }
1111 
1112 /* Assumes caller has held the rx_queue.lock */
1113 static void *__prb_previous_block(struct packet_sock *po,
1114 					 struct packet_ring_buffer *rb,
1115 					 int status)
1116 {
1117 	unsigned int previous = prb_previous_blk_num(rb);
1118 	return prb_lookup_block(po, rb, previous, status);
1119 }
1120 
1121 static void *packet_previous_rx_frame(struct packet_sock *po,
1122 					     struct packet_ring_buffer *rb,
1123 					     int status)
1124 {
1125 	if (po->tp_version <= TPACKET_V2)
1126 		return packet_previous_frame(po, rb, status);
1127 
1128 	return __prb_previous_block(po, rb, status);
1129 }
1130 
1131 static void packet_increment_rx_head(struct packet_sock *po,
1132 					    struct packet_ring_buffer *rb)
1133 {
1134 	switch (po->tp_version) {
1135 	case TPACKET_V1:
1136 	case TPACKET_V2:
1137 		return packet_increment_head(rb);
1138 	case TPACKET_V3:
1139 	default:
1140 		WARN(1, "TPACKET version not supported.\n");
1141 		BUG();
1142 		return;
1143 	}
1144 }
1145 
1146 static void *packet_previous_frame(struct packet_sock *po,
1147 		struct packet_ring_buffer *rb,
1148 		int status)
1149 {
1150 	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1151 	return packet_lookup_frame(po, rb, previous, status);
1152 }
1153 
1154 static void packet_increment_head(struct packet_ring_buffer *buff)
1155 {
1156 	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1157 }
1158 
1159 static void packet_inc_pending(struct packet_ring_buffer *rb)
1160 {
1161 	this_cpu_inc(*rb->pending_refcnt);
1162 }
1163 
1164 static void packet_dec_pending(struct packet_ring_buffer *rb)
1165 {
1166 	this_cpu_dec(*rb->pending_refcnt);
1167 }
1168 
1169 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1170 {
1171 	unsigned int refcnt = 0;
1172 	int cpu;
1173 
1174 	/* We don't use pending refcount in rx_ring. */
1175 	if (rb->pending_refcnt == NULL)
1176 		return 0;
1177 
1178 	for_each_possible_cpu(cpu)
1179 		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1180 
1181 	return refcnt;
1182 }
1183 
1184 static int packet_alloc_pending(struct packet_sock *po)
1185 {
1186 	po->rx_ring.pending_refcnt = NULL;
1187 
1188 	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1189 	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1190 		return -ENOBUFS;
1191 
1192 	return 0;
1193 }
1194 
1195 static void packet_free_pending(struct packet_sock *po)
1196 {
1197 	free_percpu(po->tx_ring.pending_refcnt);
1198 }
1199 
1200 #define ROOM_POW_OFF	2
1201 #define ROOM_NONE	0x0
1202 #define ROOM_LOW	0x1
1203 #define ROOM_NORMAL	0x2
1204 
1205 static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1206 {
1207 	int idx, len;
1208 
1209 	len = READ_ONCE(po->rx_ring.frame_max) + 1;
1210 	idx = READ_ONCE(po->rx_ring.head);
1211 	if (pow_off)
1212 		idx += len >> pow_off;
1213 	if (idx >= len)
1214 		idx -= len;
1215 	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1216 }
1217 
1218 static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1219 {
1220 	int idx, len;
1221 
1222 	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1223 	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1224 	if (pow_off)
1225 		idx += len >> pow_off;
1226 	if (idx >= len)
1227 		idx -= len;
1228 	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1229 }
1230 
1231 static int __packet_rcv_has_room(const struct packet_sock *po,
1232 				 const struct sk_buff *skb)
1233 {
1234 	const struct sock *sk = &po->sk;
1235 	int ret = ROOM_NONE;
1236 
1237 	if (po->prot_hook.func != tpacket_rcv) {
1238 		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1239 		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1240 				   - (skb ? skb->truesize : 0);
1241 
1242 		if (avail > (rcvbuf >> ROOM_POW_OFF))
1243 			return ROOM_NORMAL;
1244 		else if (avail > 0)
1245 			return ROOM_LOW;
1246 		else
1247 			return ROOM_NONE;
1248 	}
1249 
1250 	if (po->tp_version == TPACKET_V3) {
1251 		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1252 			ret = ROOM_NORMAL;
1253 		else if (__tpacket_v3_has_room(po, 0))
1254 			ret = ROOM_LOW;
1255 	} else {
1256 		if (__tpacket_has_room(po, ROOM_POW_OFF))
1257 			ret = ROOM_NORMAL;
1258 		else if (__tpacket_has_room(po, 0))
1259 			ret = ROOM_LOW;
1260 	}
1261 
1262 	return ret;
1263 }
1264 
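/* Worked example of the ROOM_POW_OFF probing above, for a V1/V2 ring of
 * 256 frames: __tpacket_has_room(po, ROOM_POW_OFF) tests the frame
 * 256 >> 2 = 64 slots ahead of head, so ROOM_NORMAL means at least a
 * quarter of the ring is still kernel-owned, while ROOM_LOW only needs
 * the very next frame to be free. The non-mmap branch applies the same
 * one-quarter threshold to sk_rcvbuf.
 */
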
1265 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1266 {
1267 	int pressure, ret;
1268 
1269 	ret = __packet_rcv_has_room(po, skb);
1270 	pressure = ret != ROOM_NORMAL;
1271 
1272 	if (READ_ONCE(po->pressure) != pressure)
1273 		WRITE_ONCE(po->pressure, pressure);
1274 
1275 	return ret;
1276 }
1277 
1278 static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1279 {
1280 	if (READ_ONCE(po->pressure) &&
1281 	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1282 		WRITE_ONCE(po->pressure,  0);
1283 }
1284 
1285 static void packet_sock_destruct(struct sock *sk)
1286 {
1287 	skb_queue_purge(&sk->sk_error_queue);
1288 
1289 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1290 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1291 
1292 	if (!sock_flag(sk, SOCK_DEAD)) {
1293 		pr_err("Attempt to release alive packet socket: %p\n", sk);
1294 		return;
1295 	}
1296 
1297 	sk_refcnt_debug_dec(sk);
1298 }
1299 
1300 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1301 {
1302 	u32 *history = po->rollover->history;
1303 	u32 victim, rxhash;
1304 	int i, count = 0;
1305 
1306 	rxhash = skb_get_hash(skb);
1307 	for (i = 0; i < ROLLOVER_HLEN; i++)
1308 		if (READ_ONCE(history[i]) == rxhash)
1309 			count++;
1310 
1311 	victim = prandom_u32() % ROLLOVER_HLEN;
1312 
1313 	/* Avoid dirtying the cache line if possible */
1314 	if (READ_ONCE(history[victim]) != rxhash)
1315 		WRITE_ONCE(history[victim], rxhash);
1316 
1317 	return count > (ROLLOVER_HLEN >> 1);
1318 }
1319 
1320 static unsigned int fanout_demux_hash(struct packet_fanout *f,
1321 				      struct sk_buff *skb,
1322 				      unsigned int num)
1323 {
1324 	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1325 }
1326 
1327 static unsigned int fanout_demux_lb(struct packet_fanout *f,
1328 				    struct sk_buff *skb,
1329 				    unsigned int num)
1330 {
1331 	unsigned int val = atomic_inc_return(&f->rr_cur);
1332 
1333 	return val % num;
1334 }
1335 
1336 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1337 				     struct sk_buff *skb,
1338 				     unsigned int num)
1339 {
1340 	return smp_processor_id() % num;
1341 }
1342 
1343 static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1344 				     struct sk_buff *skb,
1345 				     unsigned int num)
1346 {
1347 	return prandom_u32_max(num);
1348 }
1349 
1350 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1351 					  struct sk_buff *skb,
1352 					  unsigned int idx, bool try_self,
1353 					  unsigned int num)
1354 {
1355 	struct packet_sock *po, *po_next, *po_skip = NULL;
1356 	unsigned int i, j, room = ROOM_NONE;
1357 
1358 	po = pkt_sk(f->arr[idx]);
1359 
1360 	if (try_self) {
1361 		room = packet_rcv_has_room(po, skb);
1362 		if (room == ROOM_NORMAL ||
1363 		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1364 			return idx;
1365 		po_skip = po;
1366 	}
1367 
1368 	i = j = min_t(int, po->rollover->sock, num - 1);
1369 	do {
1370 		po_next = pkt_sk(f->arr[i]);
1371 		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
1372 		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1373 			if (i != j)
1374 				po->rollover->sock = i;
1375 			atomic_long_inc(&po->rollover->num);
1376 			if (room == ROOM_LOW)
1377 				atomic_long_inc(&po->rollover->num_huge);
1378 			return i;
1379 		}
1380 
1381 		if (++i == num)
1382 			i = 0;
1383 	} while (i != j);
1384 
1385 	atomic_long_inc(&po->rollover->num_failed);
1386 	return idx;
1387 }
1388 
1389 static unsigned int fanout_demux_qm(struct packet_fanout *f,
1390 				    struct sk_buff *skb,
1391 				    unsigned int num)
1392 {
1393 	return skb_get_queue_mapping(skb) % num;
1394 }
1395 
1396 static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1397 				     struct sk_buff *skb,
1398 				     unsigned int num)
1399 {
1400 	struct bpf_prog *prog;
1401 	unsigned int ret = 0;
1402 
1403 	rcu_read_lock();
1404 	prog = rcu_dereference(f->bpf_prog);
1405 	if (prog)
1406 		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1407 	rcu_read_unlock();
1408 
1409 	return ret;
1410 }
1411 
1412 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1413 {
1414 	return f->flags & (flag >> 8);
1415 }
1416 
1417 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1418 			     struct packet_type *pt, struct net_device *orig_dev)
1419 {
1420 	struct packet_fanout *f = pt->af_packet_priv;
1421 	unsigned int num = READ_ONCE(f->num_members);
1422 	struct net *net = read_pnet(&f->net);
1423 	struct packet_sock *po;
1424 	unsigned int idx;
1425 
1426 	if (!net_eq(dev_net(dev), net) || !num) {
1427 		kfree_skb(skb);
1428 		return 0;
1429 	}
1430 
1431 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1432 		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1433 		if (!skb)
1434 			return 0;
1435 	}
1436 	switch (f->type) {
1437 	case PACKET_FANOUT_HASH:
1438 	default:
1439 		idx = fanout_demux_hash(f, skb, num);
1440 		break;
1441 	case PACKET_FANOUT_LB:
1442 		idx = fanout_demux_lb(f, skb, num);
1443 		break;
1444 	case PACKET_FANOUT_CPU:
1445 		idx = fanout_demux_cpu(f, skb, num);
1446 		break;
1447 	case PACKET_FANOUT_RND:
1448 		idx = fanout_demux_rnd(f, skb, num);
1449 		break;
1450 	case PACKET_FANOUT_QM:
1451 		idx = fanout_demux_qm(f, skb, num);
1452 		break;
1453 	case PACKET_FANOUT_ROLLOVER:
1454 		idx = fanout_demux_rollover(f, skb, 0, false, num);
1455 		break;
1456 	case PACKET_FANOUT_CBPF:
1457 	case PACKET_FANOUT_EBPF:
1458 		idx = fanout_demux_bpf(f, skb, num);
1459 		break;
1460 	}
1461 
1462 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1463 		idx = fanout_demux_rollover(f, skb, idx, true, num);
1464 
1465 	po = pkt_sk(f->arr[idx]);
1466 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1467 }
1468 
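/* How a fanout group is joined from user space, as a sketch
 * (illustrative; "fd" is assumed to be a bound AF_PACKET socket and 42
 * an arbitrary group id). The low 16 bits of the argument carry the id;
 * the high 16 bits carry the type and flags that fanout_add() unpacks
 * below:
 *
 *	int arg = 42 | (PACKET_FANOUT_HASH << 16);
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */
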
1469 DEFINE_MUTEX(fanout_mutex);
1470 EXPORT_SYMBOL_GPL(fanout_mutex);
1471 static LIST_HEAD(fanout_list);
1472 static u16 fanout_next_id;
1473 
1474 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1475 {
1476 	struct packet_fanout *f = po->fanout;
1477 
1478 	spin_lock(&f->lock);
1479 	f->arr[f->num_members] = sk;
1480 	smp_wmb();
1481 	f->num_members++;
1482 	if (f->num_members == 1)
1483 		dev_add_pack(&f->prot_hook);
1484 	spin_unlock(&f->lock);
1485 }
1486 
1487 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1488 {
1489 	struct packet_fanout *f = po->fanout;
1490 	int i;
1491 
1492 	spin_lock(&f->lock);
1493 	for (i = 0; i < f->num_members; i++) {
1494 		if (f->arr[i] == sk)
1495 			break;
1496 	}
1497 	BUG_ON(i >= f->num_members);
1498 	f->arr[i] = f->arr[f->num_members - 1];
1499 	f->num_members--;
1500 	if (f->num_members == 0)
1501 		__dev_remove_pack(&f->prot_hook);
1502 	spin_unlock(&f->lock);
1503 }
1504 
1505 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1506 {
1507 	if (sk->sk_family != PF_PACKET)
1508 		return false;
1509 
1510 	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1511 }
1512 
1513 static void fanout_init_data(struct packet_fanout *f)
1514 {
1515 	switch (f->type) {
1516 	case PACKET_FANOUT_LB:
1517 		atomic_set(&f->rr_cur, 0);
1518 		break;
1519 	case PACKET_FANOUT_CBPF:
1520 	case PACKET_FANOUT_EBPF:
1521 		RCU_INIT_POINTER(f->bpf_prog, NULL);
1522 		break;
1523 	}
1524 }
1525 
1526 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1527 {
1528 	struct bpf_prog *old;
1529 
1530 	spin_lock(&f->lock);
1531 	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1532 	rcu_assign_pointer(f->bpf_prog, new);
1533 	spin_unlock(&f->lock);
1534 
1535 	if (old) {
1536 		synchronize_net();
1537 		bpf_prog_destroy(old);
1538 	}
1539 }
1540 
1541 static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1542 				unsigned int len)
1543 {
1544 	struct bpf_prog *new;
1545 	struct sock_fprog fprog;
1546 	int ret;
1547 
1548 	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1549 		return -EPERM;
1550 
1551 	ret = copy_bpf_fprog_from_user(&fprog, data, len);
1552 	if (ret)
1553 		return ret;
1554 
1555 	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1556 	if (ret)
1557 		return ret;
1558 
1559 	__fanout_set_data_bpf(po->fanout, new);
1560 	return 0;
1561 }
1562 
1563 static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1564 				unsigned int len)
1565 {
1566 	struct bpf_prog *new;
1567 	u32 fd;
1568 
1569 	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1570 		return -EPERM;
1571 	if (len != sizeof(fd))
1572 		return -EINVAL;
1573 	if (copy_from_sockptr(&fd, data, len))
1574 		return -EFAULT;
1575 
1576 	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1577 	if (IS_ERR(new))
1578 		return PTR_ERR(new);
1579 
1580 	__fanout_set_data_bpf(po->fanout, new);
1581 	return 0;
1582 }
1583 
1584 static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1585 			   unsigned int len)
1586 {
1587 	switch (po->fanout->type) {
1588 	case PACKET_FANOUT_CBPF:
1589 		return fanout_set_data_cbpf(po, data, len);
1590 	case PACKET_FANOUT_EBPF:
1591 		return fanout_set_data_ebpf(po, data, len);
1592 	default:
1593 		return -EINVAL;
1594 	}
1595 }
1596 
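/* Both setters above are reached through the same socket option; a
 * sketch of the eBPF case (illustrative; "fd" is assumed to be a socket
 * already in a PACKET_FANOUT_EBPF group, and "prog_fd" a loaded
 * BPF_PROG_TYPE_SOCKET_FILTER program whose return value selects the
 * group member):
 *
 *	__u32 prog_fd = ...;	// fd returned by bpf(BPF_PROG_LOAD, ...)
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
 *		   &prog_fd, sizeof(prog_fd));
 */
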
1597 static void fanout_release_data(struct packet_fanout *f)
1598 {
1599 	switch (f->type) {
1600 	case PACKET_FANOUT_CBPF:
1601 	case PACKET_FANOUT_EBPF:
1602 		__fanout_set_data_bpf(f, NULL);
1603 	}
1604 }
1605 
1606 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1607 {
1608 	struct packet_fanout *f;
1609 
1610 	list_for_each_entry(f, &fanout_list, list) {
1611 		if (f->id == candidate_id &&
1612 		    read_pnet(&f->net) == sock_net(sk)) {
1613 			return false;
1614 		}
1615 	}
1616 	return true;
1617 }
1618 
1619 static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1620 {
1621 	u16 id = fanout_next_id;
1622 
1623 	do {
1624 		if (__fanout_id_is_free(sk, id)) {
1625 			*new_id = id;
1626 			fanout_next_id = id + 1;
1627 			return true;
1628 		}
1629 
1630 		id++;
1631 	} while (id != fanout_next_id);
1632 
1633 	return false;
1634 }
1635 
1636 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1637 {
1638 	struct packet_rollover *rollover = NULL;
1639 	struct packet_sock *po = pkt_sk(sk);
1640 	struct packet_fanout *f, *match;
1641 	u8 type = type_flags & 0xff;
1642 	u8 flags = type_flags >> 8;
1643 	int err;
1644 
1645 	switch (type) {
1646 	case PACKET_FANOUT_ROLLOVER:
1647 		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1648 			return -EINVAL;
1649 	case PACKET_FANOUT_HASH:
1650 	case PACKET_FANOUT_LB:
1651 	case PACKET_FANOUT_CPU:
1652 	case PACKET_FANOUT_RND:
1653 	case PACKET_FANOUT_QM:
1654 	case PACKET_FANOUT_CBPF:
1655 	case PACKET_FANOUT_EBPF:
1656 		break;
1657 	default:
1658 		return -EINVAL;
1659 	}
1660 
1661 	mutex_lock(&fanout_mutex);
1662 
1663 	err = -EALREADY;
1664 	if (po->fanout)
1665 		goto out;
1666 
1667 	if (type == PACKET_FANOUT_ROLLOVER ||
1668 	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1669 		err = -ENOMEM;
1670 		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1671 		if (!rollover)
1672 			goto out;
1673 		atomic_long_set(&rollover->num, 0);
1674 		atomic_long_set(&rollover->num_huge, 0);
1675 		atomic_long_set(&rollover->num_failed, 0);
1676 	}
1677 
1678 	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1679 		if (id != 0) {
1680 			err = -EINVAL;
1681 			goto out;
1682 		}
1683 		if (!fanout_find_new_id(sk, &id)) {
1684 			err = -ENOMEM;
1685 			goto out;
1686 		}
1687 		/* ephemeral flag for the first socket in the group: drop it */
1688 		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1689 	}
1690 
1691 	match = NULL;
1692 	list_for_each_entry(f, &fanout_list, list) {
1693 		if (f->id == id &&
1694 		    read_pnet(&f->net) == sock_net(sk)) {
1695 			match = f;
1696 			break;
1697 		}
1698 	}
1699 	err = -EINVAL;
1700 	if (match && match->flags != flags)
1701 		goto out;
1702 	if (!match) {
1703 		err = -ENOMEM;
1704 		match = kzalloc(sizeof(*match), GFP_KERNEL);
1705 		if (!match)
1706 			goto out;
1707 		write_pnet(&match->net, sock_net(sk));
1708 		match->id = id;
1709 		match->type = type;
1710 		match->flags = flags;
1711 		INIT_LIST_HEAD(&match->list);
1712 		spin_lock_init(&match->lock);
1713 		refcount_set(&match->sk_ref, 0);
1714 		fanout_init_data(match);
1715 		match->prot_hook.type = po->prot_hook.type;
1716 		match->prot_hook.dev = po->prot_hook.dev;
1717 		match->prot_hook.func = packet_rcv_fanout;
1718 		match->prot_hook.af_packet_priv = match;
1719 		match->prot_hook.id_match = match_fanout_group;
1720 		list_add(&match->list, &fanout_list);
1721 	}
1722 	err = -EINVAL;
1723 
1724 	spin_lock(&po->bind_lock);
1725 	if (po->running &&
1726 	    match->type == type &&
1727 	    match->prot_hook.type == po->prot_hook.type &&
1728 	    match->prot_hook.dev == po->prot_hook.dev) {
1729 		err = -ENOSPC;
1730 		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1731 			__dev_remove_pack(&po->prot_hook);
1732 			po->fanout = match;
1733 			po->rollover = rollover;
1734 			rollover = NULL;
1735 			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1736 			__fanout_link(sk, po);
1737 			err = 0;
1738 		}
1739 	}
1740 	spin_unlock(&po->bind_lock);
1741 
1742 	if (err && !refcount_read(&match->sk_ref)) {
1743 		list_del(&match->list);
1744 		kfree(match);
1745 	}
1746 
1747 out:
1748 	kfree(rollover);
1749 	mutex_unlock(&fanout_mutex);
1750 	return err;
1751 }
1752 
1753 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1754  * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1755  * It is the responsibility of the caller to call fanout_release_data() and
1756  * free the returned packet_fanout (after synchronize_net())
1757  */
1758 static struct packet_fanout *fanout_release(struct sock *sk)
1759 {
1760 	struct packet_sock *po = pkt_sk(sk);
1761 	struct packet_fanout *f;
1762 
1763 	mutex_lock(&fanout_mutex);
1764 	f = po->fanout;
1765 	if (f) {
1766 		po->fanout = NULL;
1767 
1768 		if (refcount_dec_and_test(&f->sk_ref))
1769 			list_del(&f->list);
1770 		else
1771 			f = NULL;
1772 	}
1773 	mutex_unlock(&fanout_mutex);
1774 
1775 	return f;
1776 }
1777 
1778 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1779 					  struct sk_buff *skb)
1780 {
1781 	/* Earlier code assumed this would be a VLAN pkt; double-check
1782 	 * this now that we have the actual packet in hand. We can only
1783 	 * do this check on Ethernet devices.
1784 	 */
1785 	if (unlikely(dev->type != ARPHRD_ETHER))
1786 		return false;
1787 
1788 	skb_reset_mac_header(skb);
1789 	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1790 }
1791 
1792 static const struct proto_ops packet_ops;
1793 
1794 static const struct proto_ops packet_ops_spkt;
1795 
1796 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1797 			   struct packet_type *pt, struct net_device *orig_dev)
1798 {
1799 	struct sock *sk;
1800 	struct sockaddr_pkt *spkt;
1801 
1802 	/*
1803 	 *	When we registered the protocol we saved the socket in the data
1804 	 *	field for just this event.
1805 	 */
1806 
1807 	sk = pt->af_packet_priv;
1808 
1809 	/*
1810 	 *	Yank back the headers [hope the device set this
1811 	 *	right or kerboom...]
1812 	 *
1813 	 *	Incoming packets have ll header pulled,
1814 	 *	push it back.
1815 	 *
1816 	 *	For outgoing ones skb->data == skb_mac_header(skb)
1817 	 *	so that this procedure is noop.
1818 	 */
1819 
1820 	if (skb->pkt_type == PACKET_LOOPBACK)
1821 		goto out;
1822 
1823 	if (!net_eq(dev_net(dev), sock_net(sk)))
1824 		goto out;
1825 
1826 	skb = skb_share_check(skb, GFP_ATOMIC);
1827 	if (skb == NULL)
1828 		goto oom;
1829 
1830 	/* drop any routing info */
1831 	skb_dst_drop(skb);
1832 
1833 	/* drop conntrack reference */
1834 	nf_reset_ct(skb);
1835 
1836 	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1837 
1838 	skb_push(skb, skb->data - skb_mac_header(skb));
1839 
1840 	/*
1841 	 *	The SOCK_PACKET socket receives _all_ frames.
1842 	 */
1843 
1844 	spkt->spkt_family = dev->type;
1845 	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1846 	spkt->spkt_protocol = skb->protocol;
1847 
1848 	/*
1849 	 *	Charge the memory to the socket. This is done specifically
1850 	 *	to prevent sockets from using up all the memory.
1851 	 */
1852 
1853 	if (sock_queue_rcv_skb(sk, skb) == 0)
1854 		return 0;
1855 
1856 out:
1857 	kfree_skb(skb);
1858 oom:
1859 	return 0;
1860 }
1861 
1862 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1863 {
1864 	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1865 	    sock->type == SOCK_RAW) {
1866 		skb_reset_mac_header(skb);
1867 		skb->protocol = dev_parse_header_protocol(skb);
1868 	}
1869 
1870 	skb_probe_transport_header(skb);
1871 }
1872 
1873 /*
1874  *	Output a raw packet to a device layer. This bypasses all the other
1875  *	protocol layers and you must therefore supply it with a complete frame
1876  */
1877 
1878 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1879 			       size_t len)
1880 {
1881 	struct sock *sk = sock->sk;
1882 	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1883 	struct sk_buff *skb = NULL;
1884 	struct net_device *dev;
1885 	struct sockcm_cookie sockc;
1886 	__be16 proto = 0;
1887 	int err;
1888 	int extra_len = 0;
1889 
1890 	/*
1891 	 *	Get and verify the address.
1892 	 */
1893 
1894 	if (saddr) {
1895 		if (msg->msg_namelen < sizeof(struct sockaddr))
1896 			return -EINVAL;
1897 		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1898 			proto = saddr->spkt_protocol;
1899 	} else
1900 		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1901 
1902 	/*
1903 	 *	Find the device first to size check it
1904 	 */
1905 
1906 	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1907 retry:
1908 	rcu_read_lock();
1909 	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1910 	err = -ENODEV;
1911 	if (dev == NULL)
1912 		goto out_unlock;
1913 
1914 	err = -ENETDOWN;
1915 	if (!(dev->flags & IFF_UP))
1916 		goto out_unlock;
1917 
1918 	/*
1919 	 * You may not queue a frame bigger than the MTU. This is the lowest level
1920 	 * raw protocol and you must do your own fragmentation at this level.
1921 	 */
1922 
1923 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1924 		if (!netif_supports_nofcs(dev)) {
1925 			err = -EPROTONOSUPPORT;
1926 			goto out_unlock;
1927 		}
1928 		extra_len = 4; /* We're doing our own CRC */
1929 	}
1930 
1931 	err = -EMSGSIZE;
1932 	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1933 		goto out_unlock;
1934 
1935 	if (!skb) {
1936 		size_t reserved = LL_RESERVED_SPACE(dev);
1937 		int tlen = dev->needed_tailroom;
1938 		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1939 
1940 		rcu_read_unlock();
1941 		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1942 		if (skb == NULL)
1943 			return -ENOBUFS;
1944 		/* FIXME: Save some space for broken drivers that write a hard
1945 		 * header at transmission time by themselves. PPP is the notable
1946 		 * one here. This should really be fixed at the driver level.
1947 		 */
1948 		skb_reserve(skb, reserved);
1949 		skb_reset_network_header(skb);
1950 
1951 		/* Try to align data part correctly */
1952 		if (hhlen) {
1953 			skb->data -= hhlen;
1954 			skb->tail -= hhlen;
1955 			if (len < hhlen)
1956 				skb_reset_network_header(skb);
1957 		}
1958 		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1959 		if (err)
1960 			goto out_free;
1961 		goto retry;
1962 	}
1963 
1964 	if (!dev_validate_header(dev, skb->data, len)) {
1965 		err = -EINVAL;
1966 		goto out_unlock;
1967 	}
1968 	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1969 	    !packet_extra_vlan_len_allowed(dev, skb)) {
1970 		err = -EMSGSIZE;
1971 		goto out_unlock;
1972 	}
1973 
1974 	sockcm_init(&sockc, sk);
1975 	if (msg->msg_controllen) {
1976 		err = sock_cmsg_send(sk, msg, &sockc);
1977 		if (unlikely(err))
1978 			goto out_unlock;
1979 	}
1980 
1981 	skb->protocol = proto;
1982 	skb->dev = dev;
1983 	skb->priority = sk->sk_priority;
1984 	skb->mark = sk->sk_mark;
1985 	skb->tstamp = sockc.transmit_time;
1986 
1987 	skb_setup_tx_timestamp(skb, sockc.tsflags);
1988 
1989 	if (unlikely(extra_len == 4))
1990 		skb->no_fcs = 1;
1991 
1992 	packet_parse_headers(skb, sock);
1993 
1994 	dev_queue_xmit(skb);
1995 	rcu_read_unlock();
1996 	return len;
1997 
1998 out_unlock:
1999 	rcu_read_unlock();
2000 out_free:
2001 	kfree_skb(skb);
2002 	return err;
2003 }
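/*
 * The send side, sketched from userspace (hedged; `frame`/`frame_len`
 * are assumed to hold a complete link-layer frame, per the comment
 * above, and the -ENOTCONN check means an address is mandatory):
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *	strncpy((char *)spkt.spkt_device, "eth0",
 *		sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */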
2004 
2005 static unsigned int run_filter(struct sk_buff *skb,
2006 			       const struct sock *sk,
2007 			       unsigned int res)
2008 {
2009 	struct sk_filter *filter;
2010 
2011 	rcu_read_lock();
2012 	filter = rcu_dereference(sk->sk_filter);
2013 	if (filter != NULL)
2014 		res = bpf_prog_run_clear_cb(filter->prog, skb);
2015 	rcu_read_unlock();
2016 
2017 	return res;
2018 }
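/*
 * run_filter() executes whatever filter userspace attached. A minimal
 * sketch (illustrative) attaching a classic BPF program that keeps
 * only IPv4 frames; the value the filter returns caps the snaplen:
 *
 *	struct sock_filter code[] = {
 *		{ 0x28, 0, 0, 0x0000000c },	// ldh [12] (EtherType)
 *		{ 0x15, 0, 1, 0x00000800 },	// jeq #ETH_P_IP ? keep : drop
 *		{ 0x06, 0, 0, 0x00040000 },	// ret #262144 (keep packet)
 *		{ 0x06, 0, 0, 0x00000000 },	// ret #0 (drop packet)
 *	};
 *	struct sock_fprog prog = { .len = 4, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */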
2019 
2020 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2021 			   size_t *len)
2022 {
2023 	struct virtio_net_hdr vnet_hdr;
2024 
2025 	if (*len < sizeof(vnet_hdr))
2026 		return -EINVAL;
2027 	*len -= sizeof(vnet_hdr);
2028 
2029 	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2030 		return -EINVAL;
2031 
2032 	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2033 }
2034 
2035 /*
2036  * This function performs lazy skb cloning in the hope that most
2037  * packets are discarded by BPF.
2038  *
2039  * Note the tricky part: we DO mangle the shared skb! skb->data,
2040  * skb->len and skb->cb are all mangled. It works because (and until)
2041  * packets falling here are owned by the current CPU. Output packets
2042  * are cloned by dev_queue_xmit_nit(), input packets are processed by
2043  * net_bh sequentially, so if we return the skb to its original state
2044  * on exit, we will not harm anyone.
2045  */
2046 
2047 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2048 		      struct packet_type *pt, struct net_device *orig_dev)
2049 {
2050 	struct sock *sk;
2051 	struct sockaddr_ll *sll;
2052 	struct packet_sock *po;
2053 	u8 *skb_head = skb->data;
2054 	int skb_len = skb->len;
2055 	unsigned int snaplen, res;
2056 	bool is_drop_n_account = false;
2057 
2058 	if (skb->pkt_type == PACKET_LOOPBACK)
2059 		goto drop;
2060 
2061 	sk = pt->af_packet_priv;
2062 	po = pkt_sk(sk);
2063 
2064 	if (!net_eq(dev_net(dev), sock_net(sk)))
2065 		goto drop;
2066 
2067 	skb->dev = dev;
2068 
2069 	if (dev->header_ops) {
2070 		/* The device has an explicit notion of ll header,
2071 		 * exported to higher levels.
2072 		 *
2073 		 * Otherwise, the device hides the details of its frame
2074 		 * structure, so that the corresponding packet head is
2075 		 * never delivered to the user.
2076 		 */
2077 		if (sk->sk_type != SOCK_DGRAM)
2078 			skb_push(skb, skb->data - skb_mac_header(skb));
2079 		else if (skb->pkt_type == PACKET_OUTGOING) {
2080 			/* Special case: outgoing packets have ll header at head */
2081 			skb_pull(skb, skb_network_offset(skb));
2082 		}
2083 	}
2084 
2085 	snaplen = skb->len;
2086 
2087 	res = run_filter(skb, sk, snaplen);
2088 	if (!res)
2089 		goto drop_n_restore;
2090 	if (snaplen > res)
2091 		snaplen = res;
2092 
2093 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2094 		goto drop_n_acct;
2095 
2096 	if (skb_shared(skb)) {
2097 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2098 		if (nskb == NULL)
2099 			goto drop_n_acct;
2100 
2101 		if (skb_head != skb->data) {
2102 			skb->data = skb_head;
2103 			skb->len = skb_len;
2104 		}
2105 		consume_skb(skb);
2106 		skb = nskb;
2107 	}
2108 
2109 	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2110 
2111 	sll = &PACKET_SKB_CB(skb)->sa.ll;
2112 	sll->sll_hatype = dev->type;
2113 	sll->sll_pkttype = skb->pkt_type;
2114 	if (unlikely(po->origdev))
2115 		sll->sll_ifindex = orig_dev->ifindex;
2116 	else
2117 		sll->sll_ifindex = dev->ifindex;
2118 
2119 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2120 
2121 	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2122 	 * Use their space for storing the original skb length.
2123 	 */
2124 	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2125 
2126 	if (pskb_trim(skb, snaplen))
2127 		goto drop_n_acct;
2128 
2129 	skb_set_owner_r(skb, sk);
2130 	skb->dev = NULL;
2131 	skb_dst_drop(skb);
2132 
2133 	/* drop conntrack reference */
2134 	nf_reset_ct(skb);
2135 
2136 	spin_lock(&sk->sk_receive_queue.lock);
2137 	po->stats.stats1.tp_packets++;
2138 	sock_skb_set_dropcount(sk, skb);
2139 	__skb_queue_tail(&sk->sk_receive_queue, skb);
2140 	spin_unlock(&sk->sk_receive_queue.lock);
2141 	sk->sk_data_ready(sk);
2142 	return 0;
2143 
2144 drop_n_acct:
2145 	is_drop_n_account = true;
2146 	atomic_inc(&po->tp_drops);
2147 	atomic_inc(&sk->sk_drops);
2148 
2149 drop_n_restore:
2150 	if (skb_head != skb->data && skb_shared(skb)) {
2151 		skb->data = skb_head;
2152 		skb->len = skb_len;
2153 	}
2154 drop:
2155 	if (!is_drop_n_account)
2156 		consume_skb(skb);
2157 	else
2158 		kfree_skb(skb);
2159 	return 0;
2160 }
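/*
 * The sockaddr_ll that packet_rcv() builds in the skb cb is what
 * recvfrom() later returns as the source address. A hedged consumer
 * sketch (names illustrative, error handling omitted):
 *
 *	struct sockaddr_ll sll;
 *	socklen_t alen = sizeof(sll);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&sll, &alen);
 *
 * sll.sll_ifindex is the receiving (or, with PACKET_ORIGDEV, the
 * original) device, sll.sll_pkttype one of PACKET_HOST/BROADCAST/
 * OUTGOING/..., and sll.sll_addr/sll_halen the link-layer address
 * parsed by dev_parse_header() above.
 */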
2161 
2162 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2163 		       struct packet_type *pt, struct net_device *orig_dev)
2164 {
2165 	struct sock *sk;
2166 	struct packet_sock *po;
2167 	struct sockaddr_ll *sll;
2168 	union tpacket_uhdr h;
2169 	u8 *skb_head = skb->data;
2170 	int skb_len = skb->len;
2171 	unsigned int snaplen, res;
2172 	unsigned long status = TP_STATUS_USER;
2173 	unsigned short macoff, hdrlen;
2174 	unsigned int netoff;
2175 	struct sk_buff *copy_skb = NULL;
2176 	struct timespec64 ts;
2177 	__u32 ts_status;
2178 	bool is_drop_n_account = false;
2179 	unsigned int slot_id = 0;
2180 	bool do_vnet = false;
2181 
2182 	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2183 	 * We may add members to them up to the current aligned size without
2184 	 * forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2185 	 */
2186 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2187 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2188 
2189 	if (skb->pkt_type == PACKET_LOOPBACK)
2190 		goto drop;
2191 
2192 	sk = pt->af_packet_priv;
2193 	po = pkt_sk(sk);
2194 
2195 	if (!net_eq(dev_net(dev), sock_net(sk)))
2196 		goto drop;
2197 
2198 	if (dev->header_ops) {
2199 		if (sk->sk_type != SOCK_DGRAM)
2200 			skb_push(skb, skb->data - skb_mac_header(skb));
2201 		else if (skb->pkt_type == PACKET_OUTGOING) {
2202 			/* Special case: outgoing packets have ll header at head */
2203 			skb_pull(skb, skb_network_offset(skb));
2204 		}
2205 	}
2206 
2207 	snaplen = skb->len;
2208 
2209 	res = run_filter(skb, sk, snaplen);
2210 	if (!res)
2211 		goto drop_n_restore;
2212 
2213 	/* If we are flooded, just give up */
2214 	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2215 		atomic_inc(&po->tp_drops);
2216 		goto drop_n_restore;
2217 	}
2218 
2219 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2220 		status |= TP_STATUS_CSUMNOTREADY;
2221 	else if (skb->pkt_type != PACKET_OUTGOING &&
2222 		 (skb->ip_summed == CHECKSUM_COMPLETE ||
2223 		  skb_csum_unnecessary(skb)))
2224 		status |= TP_STATUS_CSUM_VALID;
2225 
2226 	if (snaplen > res)
2227 		snaplen = res;
2228 
2229 	if (sk->sk_type == SOCK_DGRAM) {
2230 		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2231 				  po->tp_reserve;
2232 	} else {
2233 		unsigned int maclen = skb_network_offset(skb);
2234 		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2235 				       (maclen < 16 ? 16 : maclen)) +
2236 				       po->tp_reserve;
2237 		if (po->has_vnet_hdr) {
2238 			netoff += sizeof(struct virtio_net_hdr);
2239 			do_vnet = true;
2240 		}
2241 		macoff = netoff - maclen;
2242 	}
2243 	if (netoff > USHRT_MAX) {
2244 		atomic_inc(&po->tp_drops);
2245 		goto drop_n_restore;
2246 	}
2247 	if (po->tp_version <= TPACKET_V2) {
2248 		if (macoff + snaplen > po->rx_ring.frame_size) {
2249 			if (po->copy_thresh &&
2250 			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2251 				if (skb_shared(skb)) {
2252 					copy_skb = skb_clone(skb, GFP_ATOMIC);
2253 				} else {
2254 					copy_skb = skb_get(skb);
2255 					skb_head = skb->data;
2256 				}
2257 				if (copy_skb)
2258 					skb_set_owner_r(copy_skb, sk);
2259 			}
2260 			snaplen = po->rx_ring.frame_size - macoff;
2261 			if ((int)snaplen < 0) {
2262 				snaplen = 0;
2263 				do_vnet = false;
2264 			}
2265 		}
2266 	} else if (unlikely(macoff + snaplen >
2267 			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2268 		u32 nval;
2269 
2270 		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2271 		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2272 			    snaplen, nval, macoff);
2273 		snaplen = nval;
2274 		if (unlikely((int)snaplen < 0)) {
2275 			snaplen = 0;
2276 			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2277 			do_vnet = false;
2278 		}
2279 	}
2280 	spin_lock(&sk->sk_receive_queue.lock);
2281 	h.raw = packet_current_rx_frame(po, skb,
2282 					TP_STATUS_KERNEL, (macoff+snaplen));
2283 	if (!h.raw)
2284 		goto drop_n_account;
2285 
2286 	if (po->tp_version <= TPACKET_V2) {
2287 		slot_id = po->rx_ring.head;
2288 		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2289 			goto drop_n_account;
2290 		__set_bit(slot_id, po->rx_ring.rx_owner_map);
2291 	}
2292 
2293 	if (do_vnet &&
2294 	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
2295 				    sizeof(struct virtio_net_hdr),
2296 				    vio_le(), true, 0)) {
2297 		if (po->tp_version == TPACKET_V3)
2298 			prb_clear_blk_fill_status(&po->rx_ring);
2299 		goto drop_n_account;
2300 	}
2301 
2302 	if (po->tp_version <= TPACKET_V2) {
2303 		packet_increment_rx_head(po, &po->rx_ring);
2304 	/*
2305 	 * LOSING will be reported until you read the stats,
2306 	 * because the counter is COR - Clear On Read.
2307 	 * Anyway, do this for V1/V2 only, as V3 doesn't need it
2308 	 * at the packet level.
2309 	 */
2310 		if (atomic_read(&po->tp_drops))
2311 			status |= TP_STATUS_LOSING;
2312 	}
2313 
2314 	po->stats.stats1.tp_packets++;
2315 	if (copy_skb) {
2316 		status |= TP_STATUS_COPY;
2317 		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2318 	}
2319 	spin_unlock(&sk->sk_receive_queue.lock);
2320 
2321 	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2322 
2323 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2324 		ktime_get_real_ts64(&ts);
2325 
2326 	status |= ts_status;
2327 
2328 	switch (po->tp_version) {
2329 	case TPACKET_V1:
2330 		h.h1->tp_len = skb->len;
2331 		h.h1->tp_snaplen = snaplen;
2332 		h.h1->tp_mac = macoff;
2333 		h.h1->tp_net = netoff;
2334 		h.h1->tp_sec = ts.tv_sec;
2335 		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2336 		hdrlen = sizeof(*h.h1);
2337 		break;
2338 	case TPACKET_V2:
2339 		h.h2->tp_len = skb->len;
2340 		h.h2->tp_snaplen = snaplen;
2341 		h.h2->tp_mac = macoff;
2342 		h.h2->tp_net = netoff;
2343 		h.h2->tp_sec = ts.tv_sec;
2344 		h.h2->tp_nsec = ts.tv_nsec;
2345 		if (skb_vlan_tag_present(skb)) {
2346 			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2347 			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2348 			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2349 		} else {
2350 			h.h2->tp_vlan_tci = 0;
2351 			h.h2->tp_vlan_tpid = 0;
2352 		}
2353 		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2354 		hdrlen = sizeof(*h.h2);
2355 		break;
2356 	case TPACKET_V3:
2357 		/* tp_next_offset and the vlan fields are already populated
2358 		 * above, so DON'T clear them here.
2359 		 */
2360 		h.h3->tp_status |= status;
2361 		h.h3->tp_len = skb->len;
2362 		h.h3->tp_snaplen = snaplen;
2363 		h.h3->tp_mac = macoff;
2364 		h.h3->tp_net = netoff;
2365 		h.h3->tp_sec  = ts.tv_sec;
2366 		h.h3->tp_nsec = ts.tv_nsec;
2367 		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2368 		hdrlen = sizeof(*h.h3);
2369 		break;
2370 	default:
2371 		BUG();
2372 	}
2373 
2374 	sll = h.raw + TPACKET_ALIGN(hdrlen);
2375 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2376 	sll->sll_family = AF_PACKET;
2377 	sll->sll_hatype = dev->type;
2378 	sll->sll_protocol = skb->protocol;
2379 	sll->sll_pkttype = skb->pkt_type;
2380 	if (unlikely(po->origdev))
2381 		sll->sll_ifindex = orig_dev->ifindex;
2382 	else
2383 		sll->sll_ifindex = dev->ifindex;
2384 
2385 	smp_mb();
2386 
2387 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2388 	if (po->tp_version <= TPACKET_V2) {
2389 		u8 *start, *end;
2390 
2391 		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2392 					macoff + snaplen);
2393 
2394 		for (start = h.raw; start < end; start += PAGE_SIZE)
2395 			flush_dcache_page(pgv_to_page(start));
2396 	}
2397 	smp_wmb();
2398 #endif
2399 
2400 	if (po->tp_version <= TPACKET_V2) {
2401 		spin_lock(&sk->sk_receive_queue.lock);
2402 		__packet_set_status(po, h.raw, status);
2403 		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
2404 		spin_unlock(&sk->sk_receive_queue.lock);
2405 		sk->sk_data_ready(sk);
2406 	} else if (po->tp_version == TPACKET_V3) {
2407 		prb_clear_blk_fill_status(&po->rx_ring);
2408 	}
2409 
2410 drop_n_restore:
2411 	if (skb_head != skb->data && skb_shared(skb)) {
2412 		skb->data = skb_head;
2413 		skb->len = skb_len;
2414 	}
2415 drop:
2416 	if (!is_drop_n_account)
2417 		consume_skb(skb);
2418 	else
2419 		kfree_skb(skb);
2420 	return 0;
2421 
2422 drop_n_account:
2423 	spin_unlock(&sk->sk_receive_queue.lock);
2424 	atomic_inc(&po->tp_drops);
2425 	is_drop_n_account = true;
2426 
2427 	sk->sk_data_ready(sk);
2428 	kfree_skb(copy_skb);
2429 	goto drop_n_restore;
2430 }
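/*
 * tpacket_rcv() fills frames of a ring that userspace mapped with
 * mmap(). A condensed TPACKET_V2 sketch (sizes are illustrative; the
 * authoritative contract is Documentation/networking/packet_mmap.rst):
 *
 *	int ver = TPACKET_V2;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096, .tp_block_nr = 64,
 *		.tp_frame_size = 2048, .tp_frame_nr  = 128,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL,
 *			  (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct tpacket2_hdr *hdr = ring;	// first frame
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		// wait for the kernel
 *	// packet data starts at (char *)hdr + hdr->tp_mac
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back
 */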
2431 
2432 static void tpacket_destruct_skb(struct sk_buff *skb)
2433 {
2434 	struct packet_sock *po = pkt_sk(skb->sk);
2435 
2436 	if (likely(po->tx_ring.pg_vec)) {
2437 		void *ph;
2438 		__u32 ts;
2439 
2440 		ph = skb_zcopy_get_nouarg(skb);
2441 		packet_dec_pending(&po->tx_ring);
2442 
2443 		ts = __packet_set_timestamp(po, ph, skb);
2444 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2445 
2446 		if (!packet_read_pending(&po->tx_ring))
2447 			complete(&po->skb_completion);
2448 	}
2449 
2450 	sock_wfree(skb);
2451 }
2452 
2453 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2454 {
2455 	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2456 	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2457 	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2458 	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2459 		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2460 			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2461 			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2462 
2463 	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2464 		return -EINVAL;
2465 
2466 	return 0;
2467 }
2468 
2469 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2470 				 struct virtio_net_hdr *vnet_hdr)
2471 {
2472 	if (*len < sizeof(*vnet_hdr))
2473 		return -EINVAL;
2474 	*len -= sizeof(*vnet_hdr);
2475 
2476 	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2477 		return -EFAULT;
2478 
2479 	return __packet_snd_vnet_parse(vnet_hdr, *len);
2480 }
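/*
 * With PACKET_VNET_HDR enabled, the parse helpers above expect every
 * outgoing packet to start with a struct virtio_net_hdr. A hedged
 * sender sketch with all offloads disabled (the zeroed header passes
 * __packet_snd_vnet_parse() trivially; `frame` is assumed bound data):
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));
 *	struct virtio_net_hdr vh = { 0 };	// no csum/GSO requested
 *	struct iovec iov[2] = {
 *		{ &vh, sizeof(vh) },		// header first ...
 *		{ frame, frame_len },		// ... then the frame
 *	};
 *	writev(fd, iov, 2);			// on a bound socket
 */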
2481 
2482 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2483 		void *frame, struct net_device *dev, void *data, int tp_len,
2484 		__be16 proto, unsigned char *addr, int hlen, int copylen,
2485 		const struct sockcm_cookie *sockc)
2486 {
2487 	union tpacket_uhdr ph;
2488 	int to_write, offset, len, nr_frags, len_max;
2489 	struct socket *sock = po->sk.sk_socket;
2490 	struct page *page;
2491 	int err;
2492 
2493 	ph.raw = frame;
2494 
2495 	skb->protocol = proto;
2496 	skb->dev = dev;
2497 	skb->priority = po->sk.sk_priority;
2498 	skb->mark = po->sk.sk_mark;
2499 	skb->tstamp = sockc->transmit_time;
2500 	skb_setup_tx_timestamp(skb, sockc->tsflags);
2501 	skb_zcopy_set_nouarg(skb, ph.raw);
2502 
2503 	skb_reserve(skb, hlen);
2504 	skb_reset_network_header(skb);
2505 
2506 	to_write = tp_len;
2507 
2508 	if (sock->type == SOCK_DGRAM) {
2509 		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2510 				NULL, tp_len);
2511 		if (unlikely(err < 0))
2512 			return -EINVAL;
2513 	} else if (copylen) {
2514 		int hdrlen = min_t(int, copylen, tp_len);
2515 
2516 		skb_push(skb, dev->hard_header_len);
2517 		skb_put(skb, copylen - dev->hard_header_len);
2518 		err = skb_store_bits(skb, 0, data, hdrlen);
2519 		if (unlikely(err))
2520 			return err;
2521 		if (!dev_validate_header(dev, skb->data, hdrlen))
2522 			return -EINVAL;
2523 
2524 		data += hdrlen;
2525 		to_write -= hdrlen;
2526 	}
2527 
2528 	offset = offset_in_page(data);
2529 	len_max = PAGE_SIZE - offset;
2530 	len = ((to_write > len_max) ? len_max : to_write);
2531 
2532 	skb->data_len = to_write;
2533 	skb->len += to_write;
2534 	skb->truesize += to_write;
2535 	refcount_add(to_write, &po->sk.sk_wmem_alloc);
2536 
2537 	while (likely(to_write)) {
2538 		nr_frags = skb_shinfo(skb)->nr_frags;
2539 
2540 		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2541 			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2542 			       MAX_SKB_FRAGS);
2543 			return -EFAULT;
2544 		}
2545 
2546 		page = pgv_to_page(data);
2547 		data += len;
2548 		flush_dcache_page(page);
2549 		get_page(page);
2550 		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2551 		to_write -= len;
2552 		offset = 0;
2553 		len_max = PAGE_SIZE;
2554 		len = ((to_write > len_max) ? len_max : to_write);
2555 	}
2556 
2557 	packet_parse_headers(skb, sock);
2558 
2559 	return tp_len;
2560 }
2561 
2562 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2563 				int size_max, void **data)
2564 {
2565 	union tpacket_uhdr ph;
2566 	int tp_len, off;
2567 
2568 	ph.raw = frame;
2569 
2570 	switch (po->tp_version) {
2571 	case TPACKET_V3:
2572 		if (ph.h3->tp_next_offset != 0) {
2573 			pr_warn_once("variable sized slot not supported\n");
2574 			return -EINVAL;
2575 		}
2576 		tp_len = ph.h3->tp_len;
2577 		break;
2578 	case TPACKET_V2:
2579 		tp_len = ph.h2->tp_len;
2580 		break;
2581 	default:
2582 		tp_len = ph.h1->tp_len;
2583 		break;
2584 	}
2585 	if (unlikely(tp_len > size_max)) {
2586 		pr_err("packet is too long (%d > %d)\n", tp_len, size_max);
2587 		return -EMSGSIZE;
2588 	}
2589 
2590 	if (unlikely(po->tp_tx_has_off)) {
2591 		int off_min, off_max;
2592 
2593 		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2594 		off_max = po->tx_ring.frame_size - tp_len;
2595 		if (po->sk.sk_type == SOCK_DGRAM) {
2596 			switch (po->tp_version) {
2597 			case TPACKET_V3:
2598 				off = ph.h3->tp_net;
2599 				break;
2600 			case TPACKET_V2:
2601 				off = ph.h2->tp_net;
2602 				break;
2603 			default:
2604 				off = ph.h1->tp_net;
2605 				break;
2606 			}
2607 		} else {
2608 			switch (po->tp_version) {
2609 			case TPACKET_V3:
2610 				off = ph.h3->tp_mac;
2611 				break;
2612 			case TPACKET_V2:
2613 				off = ph.h2->tp_mac;
2614 				break;
2615 			default:
2616 				off = ph.h1->tp_mac;
2617 				break;
2618 			}
2619 		}
2620 		if (unlikely((off < off_min) || (off_max < off)))
2621 			return -EINVAL;
2622 	} else {
2623 		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2624 	}
2625 
2626 	*data = frame + off;
2627 	return tp_len;
2628 }
2629 
2630 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2631 {
2632 	struct sk_buff *skb = NULL;
2633 	struct net_device *dev;
2634 	struct virtio_net_hdr *vnet_hdr = NULL;
2635 	struct sockcm_cookie sockc;
2636 	__be16 proto;
2637 	int err, reserve = 0;
2638 	void *ph;
2639 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2640 	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2641 	unsigned char *addr = NULL;
2642 	int tp_len, size_max;
2643 	void *data;
2644 	int len_sum = 0;
2645 	int status = TP_STATUS_AVAILABLE;
2646 	int hlen, tlen, copylen = 0;
2647 	long timeo = 0;
2648 
2649 	mutex_lock(&po->pg_vec_lock);
2650 
2651 	/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2652 	 * we need to confirm it under protection of pg_vec_lock.
2653 	 */
2654 	if (unlikely(!po->tx_ring.pg_vec)) {
2655 		err = -EBUSY;
2656 		goto out;
2657 	}
2658 	if (likely(saddr == NULL)) {
2659 		dev	= packet_cached_dev_get(po);
2660 		proto	= po->num;
2661 	} else {
2662 		err = -EINVAL;
2663 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2664 			goto out;
2665 		if (msg->msg_namelen < (saddr->sll_halen
2666 					+ offsetof(struct sockaddr_ll,
2667 						sll_addr)))
2668 			goto out;
2669 		proto	= saddr->sll_protocol;
2670 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2671 		if (po->sk.sk_socket->type == SOCK_DGRAM) {
2672 			if (dev && msg->msg_namelen < dev->addr_len +
2673 				   offsetof(struct sockaddr_ll, sll_addr))
2674 				goto out_put;
2675 			addr = saddr->sll_addr;
2676 		}
2677 	}
2678 
2679 	err = -ENXIO;
2680 	if (unlikely(dev == NULL))
2681 		goto out;
2682 	err = -ENETDOWN;
2683 	if (unlikely(!(dev->flags & IFF_UP)))
2684 		goto out_put;
2685 
2686 	sockcm_init(&sockc, &po->sk);
2687 	if (msg->msg_controllen) {
2688 		err = sock_cmsg_send(&po->sk, msg, &sockc);
2689 		if (unlikely(err))
2690 			goto out_put;
2691 	}
2692 
2693 	if (po->sk.sk_socket->type == SOCK_RAW)
2694 		reserve = dev->hard_header_len;
2695 	size_max = po->tx_ring.frame_size
2696 		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2697 
2698 	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2699 		size_max = dev->mtu + reserve + VLAN_HLEN;
2700 
2701 	reinit_completion(&po->skb_completion);
2702 
2703 	do {
2704 		ph = packet_current_frame(po, &po->tx_ring,
2705 					  TP_STATUS_SEND_REQUEST);
2706 		if (unlikely(ph == NULL)) {
2707 			if (need_wait && skb) {
2708 				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2709 				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2710 				if (timeo <= 0) {
2711 					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2712 					goto out_put;
2713 				}
2714 			}
2715 			/* check for additional frames */
2716 			continue;
2717 		}
2718 
2719 		skb = NULL;
2720 		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2721 		if (tp_len < 0)
2722 			goto tpacket_error;
2723 
2724 		status = TP_STATUS_SEND_REQUEST;
2725 		hlen = LL_RESERVED_SPACE(dev);
2726 		tlen = dev->needed_tailroom;
2727 		if (po->has_vnet_hdr) {
2728 			vnet_hdr = data;
2729 			data += sizeof(*vnet_hdr);
2730 			tp_len -= sizeof(*vnet_hdr);
2731 			if (tp_len < 0 ||
2732 			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2733 				tp_len = -EINVAL;
2734 				goto tpacket_error;
2735 			}
2736 			copylen = __virtio16_to_cpu(vio_le(),
2737 						    vnet_hdr->hdr_len);
2738 		}
2739 		copylen = max_t(int, copylen, dev->hard_header_len);
2740 		skb = sock_alloc_send_skb(&po->sk,
2741 				hlen + tlen + sizeof(struct sockaddr_ll) +
2742 				(copylen - dev->hard_header_len),
2743 				!need_wait, &err);
2744 
2745 		if (unlikely(skb == NULL)) {
2746 			/* we assume the socket was initially writable ... */
2747 			if (likely(len_sum > 0))
2748 				err = len_sum;
2749 			goto out_status;
2750 		}
2751 		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2752 					  addr, hlen, copylen, &sockc);
2753 		if (likely(tp_len >= 0) &&
2754 		    tp_len > dev->mtu + reserve &&
2755 		    !po->has_vnet_hdr &&
2756 		    !packet_extra_vlan_len_allowed(dev, skb))
2757 			tp_len = -EMSGSIZE;
2758 
2759 		if (unlikely(tp_len < 0)) {
2760 tpacket_error:
2761 			if (po->tp_loss) {
2762 				__packet_set_status(po, ph,
2763 						TP_STATUS_AVAILABLE);
2764 				packet_increment_head(&po->tx_ring);
2765 				kfree_skb(skb);
2766 				continue;
2767 			} else {
2768 				status = TP_STATUS_WRONG_FORMAT;
2769 				err = tp_len;
2770 				goto out_status;
2771 			}
2772 		}
2773 
2774 		if (po->has_vnet_hdr) {
2775 			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2776 				tp_len = -EINVAL;
2777 				goto tpacket_error;
2778 			}
2779 			virtio_net_hdr_set_proto(skb, vnet_hdr);
2780 		}
2781 
2782 		skb->destructor = tpacket_destruct_skb;
2783 		__packet_set_status(po, ph, TP_STATUS_SENDING);
2784 		packet_inc_pending(&po->tx_ring);
2785 
2786 		status = TP_STATUS_SEND_REQUEST;
2787 		err = po->xmit(skb);
2788 		if (unlikely(err > 0)) {
2789 			err = net_xmit_errno(err);
2790 			if (err && __packet_get_status(po, ph) ==
2791 				   TP_STATUS_AVAILABLE) {
2792 				/* skb was destructed already */
2793 				skb = NULL;
2794 				goto out_status;
2795 			}
2796 			/*
2797 			 * skb was dropped but not destructed yet;
2798 			 * let's treat it like congestion or err < 0
2799 			 */
2800 			err = 0;
2801 		}
2802 		packet_increment_head(&po->tx_ring);
2803 		len_sum += tp_len;
2804 	} while (likely((ph != NULL) ||
2805 		/* Note: packet_read_pending() might be slow if we have
2806 		 * to call it, as it's a per-cpu variable, but in the
2807 		 * fast path we already short-circuit the loop with the
2808 		 * first condition and luckily don't have to go down
2809 		 * that path anyway.
2810 		 */
2811 		 (need_wait && packet_read_pending(&po->tx_ring))));
2812 
2813 	err = len_sum;
2814 	goto out_put;
2815 
2816 out_status:
2817 	__packet_set_status(po, ph, status);
2818 	kfree_skb(skb);
2819 out_put:
2820 	dev_put(dev);
2821 out:
2822 	mutex_unlock(&po->pg_vec_lock);
2823 	return err;
2824 }
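/*
 * Userspace counterpart of tpacket_snd(), sketched for TPACKET_V2 and
 * assuming a PACKET_TX_RING was set up and mmap()ed like the RX ring;
 * next_tx_frame() is a hypothetical helper returning the next slot
 * whose status is TP_STATUS_AVAILABLE:
 *
 *	struct tpacket2_hdr *hdr = next_tx_frame(ring);
 *	char *data = (char *)hdr + TPACKET2_HDRLEN -
 *		     sizeof(struct sockaddr_ll);
 *	memcpy(data, frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);		// kick the transmit loop above
 */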
2825 
2826 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2827 				        size_t reserve, size_t len,
2828 				        size_t linear, int noblock,
2829 				        int *err)
2830 {
2831 	struct sk_buff *skb;
2832 
2833 	/* Under a page?  Don't bother with paged skb. */
2834 	if (prepad + len < PAGE_SIZE || !linear)
2835 		linear = len;
2836 
2837 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2838 				   err, 0);
2839 	if (!skb)
2840 		return NULL;
2841 
2842 	skb_reserve(skb, reserve);
2843 	skb_put(skb, linear);
2844 	skb->data_len = len - linear;
2845 	skb->len += len - linear;
2846 
2847 	return skb;
2848 }
2849 
2850 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2851 {
2852 	struct sock *sk = sock->sk;
2853 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2854 	struct sk_buff *skb;
2855 	struct net_device *dev;
2856 	__be16 proto;
2857 	unsigned char *addr = NULL;
2858 	int err, reserve = 0;
2859 	struct sockcm_cookie sockc;
2860 	struct virtio_net_hdr vnet_hdr = { 0 };
2861 	int offset = 0;
2862 	struct packet_sock *po = pkt_sk(sk);
2863 	bool has_vnet_hdr = false;
2864 	int hlen, tlen, linear;
2865 	int extra_len = 0;
2866 
2867 	/*
2868 	 *	Get and verify the address.
2869 	 */
2870 
2871 	if (likely(saddr == NULL)) {
2872 		dev	= packet_cached_dev_get(po);
2873 		proto	= po->num;
2874 	} else {
2875 		err = -EINVAL;
2876 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2877 			goto out;
2878 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2879 			goto out;
2880 		proto	= saddr->sll_protocol;
2881 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2882 		if (sock->type == SOCK_DGRAM) {
2883 			if (dev && msg->msg_namelen < dev->addr_len +
2884 				   offsetof(struct sockaddr_ll, sll_addr))
2885 				goto out_unlock;
2886 			addr = saddr->sll_addr;
2887 		}
2888 	}
2889 
2890 	err = -ENXIO;
2891 	if (unlikely(dev == NULL))
2892 		goto out_unlock;
2893 	err = -ENETDOWN;
2894 	if (unlikely(!(dev->flags & IFF_UP)))
2895 		goto out_unlock;
2896 
2897 	sockcm_init(&sockc, sk);
2898 	sockc.mark = sk->sk_mark;
2899 	if (msg->msg_controllen) {
2900 		err = sock_cmsg_send(sk, msg, &sockc);
2901 		if (unlikely(err))
2902 			goto out_unlock;
2903 	}
2904 
2905 	if (sock->type == SOCK_RAW)
2906 		reserve = dev->hard_header_len;
2907 	if (po->has_vnet_hdr) {
2908 		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2909 		if (err)
2910 			goto out_unlock;
2911 		has_vnet_hdr = true;
2912 	}
2913 
2914 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2915 		if (!netif_supports_nofcs(dev)) {
2916 			err = -EPROTONOSUPPORT;
2917 			goto out_unlock;
2918 		}
2919 		extra_len = 4; /* We're doing our own CRC */
2920 	}
2921 
2922 	err = -EMSGSIZE;
2923 	if (!vnet_hdr.gso_type &&
2924 	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2925 		goto out_unlock;
2926 
2927 	err = -ENOBUFS;
2928 	hlen = LL_RESERVED_SPACE(dev);
2929 	tlen = dev->needed_tailroom;
2930 	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2931 	linear = max(linear, min_t(int, len, dev->hard_header_len));
2932 	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2933 			       msg->msg_flags & MSG_DONTWAIT, &err);
2934 	if (skb == NULL)
2935 		goto out_unlock;
2936 
2937 	skb_reset_network_header(skb);
2938 
2939 	err = -EINVAL;
2940 	if (sock->type == SOCK_DGRAM) {
2941 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2942 		if (unlikely(offset < 0))
2943 			goto out_free;
2944 	} else if (reserve) {
2945 		skb_reserve(skb, -reserve);
2946 		if (len < reserve + sizeof(struct ipv6hdr) &&
2947 		    dev->min_header_len != dev->hard_header_len)
2948 			skb_reset_network_header(skb);
2949 	}
2950 
2951 	/* Returns -EFAULT on error */
2952 	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2953 	if (err)
2954 		goto out_free;
2955 
2956 	if (sock->type == SOCK_RAW &&
2957 	    !dev_validate_header(dev, skb->data, len)) {
2958 		err = -EINVAL;
2959 		goto out_free;
2960 	}
2961 
2962 	skb_setup_tx_timestamp(skb, sockc.tsflags);
2963 
2964 	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2965 	    !packet_extra_vlan_len_allowed(dev, skb)) {
2966 		err = -EMSGSIZE;
2967 		goto out_free;
2968 	}
2969 
2970 	skb->protocol = proto;
2971 	skb->dev = dev;
2972 	skb->priority = sk->sk_priority;
2973 	skb->mark = sockc.mark;
2974 	skb->tstamp = sockc.transmit_time;
2975 
2976 	if (has_vnet_hdr) {
2977 		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2978 		if (err)
2979 			goto out_free;
2980 		len += sizeof(vnet_hdr);
2981 		virtio_net_hdr_set_proto(skb, &vnet_hdr);
2982 	}
2983 
2984 	packet_parse_headers(skb, sock);
2985 
2986 	if (unlikely(extra_len == 4))
2987 		skb->no_fcs = 1;
2988 
2989 	err = po->xmit(skb);
2990 	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2991 		goto out_unlock;
2992 
2993 	dev_put(dev);
2994 
2995 	return len;
2996 
2997 out_free:
2998 	kfree_skb(skb);
2999 out_unlock:
3000 	if (dev)
3001 		dev_put(dev);
3002 out:
3003 	return err;
3004 }
3005 
3006 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3007 {
3008 	struct sock *sk = sock->sk;
3009 	struct packet_sock *po = pkt_sk(sk);
3010 
3011 	if (po->tx_ring.pg_vec)
3012 		return tpacket_snd(po, msg);
3013 	else
3014 		return packet_snd(sock, msg, len);
3015 }
3016 
3017 /*
3018  *	Close a PACKET socket. This is fairly simple. We immediately go
3019  *	to 'closed' state and remove our protocol entry in the device list.
3020  */
3021 
3022 static int packet_release(struct socket *sock)
3023 {
3024 	struct sock *sk = sock->sk;
3025 	struct packet_sock *po;
3026 	struct packet_fanout *f;
3027 	struct net *net;
3028 	union tpacket_req_u req_u;
3029 
3030 	if (!sk)
3031 		return 0;
3032 
3033 	net = sock_net(sk);
3034 	po = pkt_sk(sk);
3035 
3036 	mutex_lock(&net->packet.sklist_lock);
3037 	sk_del_node_init_rcu(sk);
3038 	mutex_unlock(&net->packet.sklist_lock);
3039 
3040 	preempt_disable();
3041 	sock_prot_inuse_add(net, sk->sk_prot, -1);
3042 	preempt_enable();
3043 
3044 	spin_lock(&po->bind_lock);
3045 	unregister_prot_hook(sk, false);
3046 	packet_cached_dev_reset(po);
3047 
3048 	if (po->prot_hook.dev) {
3049 		dev_put(po->prot_hook.dev);
3050 		po->prot_hook.dev = NULL;
3051 	}
3052 	spin_unlock(&po->bind_lock);
3053 
3054 	packet_flush_mclist(sk);
3055 
3056 	lock_sock(sk);
3057 	if (po->rx_ring.pg_vec) {
3058 		memset(&req_u, 0, sizeof(req_u));
3059 		packet_set_ring(sk, &req_u, 1, 0);
3060 	}
3061 
3062 	if (po->tx_ring.pg_vec) {
3063 		memset(&req_u, 0, sizeof(req_u));
3064 		packet_set_ring(sk, &req_u, 1, 1);
3065 	}
3066 	release_sock(sk);
3067 
3068 	f = fanout_release(sk);
3069 
3070 	synchronize_net();
3071 
3072 	kfree(po->rollover);
3073 	if (f) {
3074 		fanout_release_data(f);
3075 		kfree(f);
3076 	}
3077 	/*
3078 	 *	Now the socket is dead. No more input will appear.
3079 	 */
3080 	sock_orphan(sk);
3081 	sock->sk = NULL;
3082 
3083 	/* Purge queues */
3084 
3085 	skb_queue_purge(&sk->sk_receive_queue);
3086 	packet_free_pending(po);
3087 	sk_refcnt_debug_release(sk);
3088 
3089 	sock_put(sk);
3090 	return 0;
3091 }
3092 
3093 /*
3094  *	Attach a packet hook.
3095  */
3096 
3097 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3098 			  __be16 proto)
3099 {
3100 	struct packet_sock *po = pkt_sk(sk);
3101 	struct net_device *dev_curr;
3102 	__be16 proto_curr;
3103 	bool need_rehook;
3104 	struct net_device *dev = NULL;
3105 	int ret = 0;
3106 	bool unlisted = false;
3107 
3108 	lock_sock(sk);
3109 	spin_lock(&po->bind_lock);
3110 	rcu_read_lock();
3111 
3112 	if (po->fanout) {
3113 		ret = -EINVAL;
3114 		goto out_unlock;
3115 	}
3116 
3117 	if (name) {
3118 		dev = dev_get_by_name_rcu(sock_net(sk), name);
3119 		if (!dev) {
3120 			ret = -ENODEV;
3121 			goto out_unlock;
3122 		}
3123 	} else if (ifindex) {
3124 		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3125 		if (!dev) {
3126 			ret = -ENODEV;
3127 			goto out_unlock;
3128 		}
3129 	}
3130 
3131 	if (dev)
3132 		dev_hold(dev);
3133 
3134 	proto_curr = po->prot_hook.type;
3135 	dev_curr = po->prot_hook.dev;
3136 
3137 	need_rehook = proto_curr != proto || dev_curr != dev;
3138 
3139 	if (need_rehook) {
3140 		if (po->running) {
3141 			rcu_read_unlock();
3142 			/* prevents packet_notifier() from calling
3143 			 * register_prot_hook()
3144 			 */
3145 			po->num = 0;
3146 			__unregister_prot_hook(sk, true);
3147 			rcu_read_lock();
3148 			dev_curr = po->prot_hook.dev;
3149 			if (dev)
3150 				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3151 								 dev->ifindex);
3152 		}
3153 
3154 		BUG_ON(po->running);
3155 		po->num = proto;
3156 		po->prot_hook.type = proto;
3157 
3158 		if (unlikely(unlisted)) {
3159 			dev_put(dev);
3160 			po->prot_hook.dev = NULL;
3161 			po->ifindex = -1;
3162 			packet_cached_dev_reset(po);
3163 		} else {
3164 			po->prot_hook.dev = dev;
3165 			po->ifindex = dev ? dev->ifindex : 0;
3166 			packet_cached_dev_assign(po, dev);
3167 		}
3168 	}
3169 	if (dev_curr)
3170 		dev_put(dev_curr);
3171 
3172 	if (proto == 0 || !need_rehook)
3173 		goto out_unlock;
3174 
3175 	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3176 		register_prot_hook(sk);
3177 	} else {
3178 		sk->sk_err = ENETDOWN;
3179 		if (!sock_flag(sk, SOCK_DEAD))
3180 			sk->sk_error_report(sk);
3181 	}
3182 
3183 out_unlock:
3184 	rcu_read_unlock();
3185 	spin_unlock(&po->bind_lock);
3186 	release_sock(sk);
3187 	return ret;
3188 }
3189 
3190 /*
3191  *	Bind a packet socket to a device
3192  */
3193 
3194 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3195 			    int addr_len)
3196 {
3197 	struct sock *sk = sock->sk;
3198 	char name[sizeof(uaddr->sa_data) + 1];
3199 
3200 	/*
3201 	 *	Check legality
3202 	 */
3203 
3204 	if (addr_len != sizeof(struct sockaddr))
3205 		return -EINVAL;
3206 	/* uaddr->sa_data comes from userspace; it's not guaranteed to be
3207 	 * zero-terminated.
3208 	 */
3209 	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3210 	name[sizeof(uaddr->sa_data)] = 0;
3211 
3212 	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3213 }
3214 
3215 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3216 {
3217 	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3218 	struct sock *sk = sock->sk;
3219 
3220 	/*
3221 	 *	Check legality
3222 	 */
3223 
3224 	if (addr_len < sizeof(struct sockaddr_ll))
3225 		return -EINVAL;
3226 	if (sll->sll_family != AF_PACKET)
3227 		return -EINVAL;
3228 
3229 	return packet_do_bind(sk, NULL, sll->sll_ifindex,
3230 			      sll->sll_protocol ? : pkt_sk(sk)->num);
3231 }
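/*
 * Both bind flavours end up in packet_do_bind(). A minimal userspace
 * sketch binding a raw packet socket to one interface (illustrative;
 * error handling omitted):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */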
3232 
3233 static struct proto packet_proto = {
3234 	.name	  = "PACKET",
3235 	.owner	  = THIS_MODULE,
3236 	.obj_size = sizeof(struct packet_sock),
3237 };
3238 
3239 /*
3240  *	Create a packet socket (SOCK_RAW, SOCK_DGRAM or SOCK_PACKET).
3241  */
3242 
3243 static int packet_create(struct net *net, struct socket *sock, int protocol,
3244 			 int kern)
3245 {
3246 	struct sock *sk;
3247 	struct packet_sock *po;
3248 	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3249 	int err;
3250 
3251 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3252 		return -EPERM;
3253 	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3254 	    sock->type != SOCK_PACKET)
3255 		return -ESOCKTNOSUPPORT;
3256 
3257 	sock->state = SS_UNCONNECTED;
3258 
3259 	err = -ENOBUFS;
3260 	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3261 	if (sk == NULL)
3262 		goto out;
3263 
3264 	sock->ops = &packet_ops;
3265 	if (sock->type == SOCK_PACKET)
3266 		sock->ops = &packet_ops_spkt;
3267 
3268 	sock_init_data(sock, sk);
3269 
3270 	po = pkt_sk(sk);
3271 	init_completion(&po->skb_completion);
3272 	sk->sk_family = PF_PACKET;
3273 	po->num = proto;
3274 	po->xmit = dev_queue_xmit;
3275 
3276 	err = packet_alloc_pending(po);
3277 	if (err)
3278 		goto out2;
3279 
3280 	packet_cached_dev_reset(po);
3281 
3282 	sk->sk_destruct = packet_sock_destruct;
3283 	sk_refcnt_debug_inc(sk);
3284 
3285 	/*
3286 	 *	Attach a protocol block
3287 	 */
3288 
3289 	spin_lock_init(&po->bind_lock);
3290 	mutex_init(&po->pg_vec_lock);
3291 	po->rollover = NULL;
3292 	po->prot_hook.func = packet_rcv;
3293 
3294 	if (sock->type == SOCK_PACKET)
3295 		po->prot_hook.func = packet_rcv_spkt;
3296 
3297 	po->prot_hook.af_packet_priv = sk;
3298 
3299 	if (proto) {
3300 		po->prot_hook.type = proto;
3301 		__register_prot_hook(sk);
3302 	}
3303 
3304 	mutex_lock(&net->packet.sklist_lock);
3305 	sk_add_node_tail_rcu(sk, &net->packet.sklist);
3306 	mutex_unlock(&net->packet.sklist_lock);
3307 
3308 	preempt_disable();
3309 	sock_prot_inuse_add(net, &packet_proto, 1);
3310 	preempt_enable();
3311 
3312 	return 0;
3313 out2:
3314 	sk_free(sk);
3315 out:
3316 	return err;
3317 }
3318 
3319 /*
3320  *	Pull a packet from our receive queue and hand it to the user.
3321  *	If necessary we block.
3322  */
3323 
3324 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3325 			  int flags)
3326 {
3327 	struct sock *sk = sock->sk;
3328 	struct sk_buff *skb;
3329 	int copied, err;
3330 	int vnet_hdr_len = 0;
3331 	unsigned int origlen = 0;
3332 
3333 	err = -EINVAL;
3334 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3335 		goto out;
3336 
3337 #if 0
3338 	/* What error should we return now? EUNATTACH? */
3339 	if (pkt_sk(sk)->ifindex < 0)
3340 		return -ENODEV;
3341 #endif
3342 
3343 	if (flags & MSG_ERRQUEUE) {
3344 		err = sock_recv_errqueue(sk, msg, len,
3345 					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3346 		goto out;
3347 	}
3348 
3349 	/*
3350 	 *	Call the generic datagram receiver. This handles all sorts
3351 	 *	of horrible races and re-entrancy so we can forget about it
3352 	 *	in the protocol layers.
3353 	 *
3354 	 *	Now it will return ENETDOWN if the device has just gone down,
3355 	 *	but then it will block.
3356 	 */
3357 
3358 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3359 
3360 	/*
3361 	 *	An error occurred, so return it. Because skb_recv_datagram()
3362 	 *	handles the blocking, we don't need to see or worry about
3363 	 *	blocking retries.
3364 	 */
3365 
3366 	if (skb == NULL)
3367 		goto out;
3368 
3369 	packet_rcv_try_clear_pressure(pkt_sk(sk));
3370 
3371 	if (pkt_sk(sk)->has_vnet_hdr) {
3372 		err = packet_rcv_vnet(msg, skb, &len);
3373 		if (err)
3374 			goto out_free;
3375 		vnet_hdr_len = sizeof(struct virtio_net_hdr);
3376 	}
3377 
3378 	/* You lose any data beyond the buffer you gave. If this worries
3379 	 * a user program, it can ask the device for its MTU
3380 	 * anyway.
3381 	 */
3382 	copied = skb->len;
3383 	if (copied > len) {
3384 		copied = len;
3385 		msg->msg_flags |= MSG_TRUNC;
3386 	}
3387 
3388 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3389 	if (err)
3390 		goto out_free;
3391 
3392 	if (sock->type != SOCK_PACKET) {
3393 		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3394 
3395 		/* Original length was stored in sockaddr_ll fields */
3396 		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3397 		sll->sll_family = AF_PACKET;
3398 		sll->sll_protocol = skb->protocol;
3399 	}
3400 
3401 	sock_recv_ts_and_drops(msg, sk, skb);
3402 
3403 	if (msg->msg_name) {
3404 		int copy_len;
3405 
3406 		/* If the address length field is there to be filled
3407 		 * in, we fill it in now.
3408 		 */
3409 		if (sock->type == SOCK_PACKET) {
3410 			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3411 			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3412 			copy_len = msg->msg_namelen;
3413 		} else {
3414 			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3415 
3416 			msg->msg_namelen = sll->sll_halen +
3417 				offsetof(struct sockaddr_ll, sll_addr);
3418 			copy_len = msg->msg_namelen;
3419 			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3420 				memset(msg->msg_name +
3421 				       offsetof(struct sockaddr_ll, sll_addr),
3422 				       0, sizeof(sll->sll_addr));
3423 				msg->msg_namelen = sizeof(struct sockaddr_ll);
3424 			}
3425 		}
3426 		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3427 	}
3428 
3429 	if (pkt_sk(sk)->auxdata) {
3430 		struct tpacket_auxdata aux;
3431 
3432 		aux.tp_status = TP_STATUS_USER;
3433 		if (skb->ip_summed == CHECKSUM_PARTIAL)
3434 			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3435 		else if (skb->pkt_type != PACKET_OUTGOING &&
3436 			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3437 			  skb_csum_unnecessary(skb)))
3438 			aux.tp_status |= TP_STATUS_CSUM_VALID;
3439 
3440 		aux.tp_len = origlen;
3441 		aux.tp_snaplen = skb->len;
3442 		aux.tp_mac = 0;
3443 		aux.tp_net = skb_network_offset(skb);
3444 		if (skb_vlan_tag_present(skb)) {
3445 			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3446 			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3447 			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3448 		} else {
3449 			aux.tp_vlan_tci = 0;
3450 			aux.tp_vlan_tpid = 0;
3451 		}
3452 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3453 	}
3454 
3455 	/*
3456 	 *	Free or return the buffer as appropriate. Again this
3457 	 *	hides all the races and re-entrancy issues from us.
3458 	 */
3459 	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3460 
3461 out_free:
3462 	skb_free_datagram(sk, skb);
3463 out:
3464 	return err;
3465 }
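/*
 * The PACKET_AUXDATA block above arrives as ancillary data. A hedged
 * consumer sketch (assumes PACKET_AUXDATA was enabled via setsockopt
 * and `iov` already points at a data buffer):
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	recvmsg(fd, &mh, 0);
 *	struct cmsghdr *c;
 *	for (c = CMSG_FIRSTHDR(&mh); c; c = CMSG_NXTHDR(&mh, c)) {
 *		if (c->cmsg_level == SOL_PACKET &&
 *		    c->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux =
 *				(struct tpacket_auxdata *)CMSG_DATA(c);
 *			// aux->tp_len: original wire length;
 *			// aux->tp_vlan_tci: stripped VLAN tag, valid
 *			// when TP_STATUS_VLAN_VALID is set.
 *		}
 *	}
 */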
3466 
3467 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3468 			       int peer)
3469 {
3470 	struct net_device *dev;
3471 	struct sock *sk	= sock->sk;
3472 
3473 	if (peer)
3474 		return -EOPNOTSUPP;
3475 
3476 	uaddr->sa_family = AF_PACKET;
3477 	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3478 	rcu_read_lock();
3479 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3480 	if (dev)
3481 		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3482 	rcu_read_unlock();
3483 
3484 	return sizeof(*uaddr);
3485 }
3486 
3487 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3488 			  int peer)
3489 {
3490 	struct net_device *dev;
3491 	struct sock *sk = sock->sk;
3492 	struct packet_sock *po = pkt_sk(sk);
3493 	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3494 
3495 	if (peer)
3496 		return -EOPNOTSUPP;
3497 
3498 	sll->sll_family = AF_PACKET;
3499 	sll->sll_ifindex = po->ifindex;
3500 	sll->sll_protocol = po->num;
3501 	sll->sll_pkttype = 0;
3502 	rcu_read_lock();
3503 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3504 	if (dev) {
3505 		sll->sll_hatype = dev->type;
3506 		sll->sll_halen = dev->addr_len;
3507 		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3508 	} else {
3509 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3510 		sll->sll_halen = 0;
3511 	}
3512 	rcu_read_unlock();
3513 
3514 	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3515 }
3516 
3517 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3518 			 int what)
3519 {
3520 	switch (i->type) {
3521 	case PACKET_MR_MULTICAST:
3522 		if (i->alen != dev->addr_len)
3523 			return -EINVAL;
3524 		if (what > 0)
3525 			return dev_mc_add(dev, i->addr);
3526 		else
3527 			return dev_mc_del(dev, i->addr);
3528 		break;
3529 	case PACKET_MR_PROMISC:
3530 		return dev_set_promiscuity(dev, what);
3531 	case PACKET_MR_ALLMULTI:
3532 		return dev_set_allmulti(dev, what);
3533 	case PACKET_MR_UNICAST:
3534 		if (i->alen != dev->addr_len)
3535 			return -EINVAL;
3536 		if (what > 0)
3537 			return dev_uc_add(dev, i->addr);
3538 		else
3539 			return dev_uc_del(dev, i->addr);
3540 		break;
3541 	default:
3542 		break;
3543 	}
3544 	return 0;
3545 }
3546 
3547 static void packet_dev_mclist_delete(struct net_device *dev,
3548 				     struct packet_mclist **mlp)
3549 {
3550 	struct packet_mclist *ml;
3551 
3552 	while ((ml = *mlp) != NULL) {
3553 		if (ml->ifindex == dev->ifindex) {
3554 			packet_dev_mc(dev, ml, -1);
3555 			*mlp = ml->next;
3556 			kfree(ml);
3557 		} else
3558 			mlp = &ml->next;
3559 	}
3560 }
3561 
3562 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3563 {
3564 	struct packet_sock *po = pkt_sk(sk);
3565 	struct packet_mclist *ml, *i;
3566 	struct net_device *dev;
3567 	int err;
3568 
3569 	rtnl_lock();
3570 
3571 	err = -ENODEV;
3572 	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3573 	if (!dev)
3574 		goto done;
3575 
3576 	err = -EINVAL;
3577 	if (mreq->mr_alen > dev->addr_len)
3578 		goto done;
3579 
3580 	err = -ENOBUFS;
3581 	i = kmalloc(sizeof(*i), GFP_KERNEL);
3582 	if (i == NULL)
3583 		goto done;
3584 
3585 	err = 0;
3586 	for (ml = po->mclist; ml; ml = ml->next) {
3587 		if (ml->ifindex == mreq->mr_ifindex &&
3588 		    ml->type == mreq->mr_type &&
3589 		    ml->alen == mreq->mr_alen &&
3590 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3591 			ml->count++;
3592 			/* Free the new element ... */
3593 			kfree(i);
3594 			goto done;
3595 		}
3596 	}
3597 
3598 	i->type = mreq->mr_type;
3599 	i->ifindex = mreq->mr_ifindex;
3600 	i->alen = mreq->mr_alen;
3601 	memcpy(i->addr, mreq->mr_address, i->alen);
3602 	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3603 	i->count = 1;
3604 	i->next = po->mclist;
3605 	po->mclist = i;
3606 	err = packet_dev_mc(dev, i, 1);
3607 	if (err) {
3608 		po->mclist = i->next;
3609 		kfree(i);
3610 	}
3611 
3612 done:
3613 	rtnl_unlock();
3614 	return err;
3615 }
3616 
3617 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3618 {
3619 	struct packet_mclist *ml, **mlp;
3620 
3621 	rtnl_lock();
3622 
3623 	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3624 		if (ml->ifindex == mreq->mr_ifindex &&
3625 		    ml->type == mreq->mr_type &&
3626 		    ml->alen == mreq->mr_alen &&
3627 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3628 			if (--ml->count == 0) {
3629 				struct net_device *dev;
3630 				*mlp = ml->next;
3631 				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3632 				if (dev)
3633 					packet_dev_mc(dev, ml, -1);
3634 				kfree(ml);
3635 			}
3636 			break;
3637 		}
3638 	}
3639 	rtnl_unlock();
3640 	return 0;
3641 }
3642 
3643 static void packet_flush_mclist(struct sock *sk)
3644 {
3645 	struct packet_sock *po = pkt_sk(sk);
3646 	struct packet_mclist *ml;
3647 
3648 	if (!po->mclist)
3649 		return;
3650 
3651 	rtnl_lock();
3652 	while ((ml = po->mclist) != NULL) {
3653 		struct net_device *dev;
3654 
3655 		po->mclist = ml->next;
3656 		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3657 		if (dev != NULL)
3658 			packet_dev_mc(dev, ml, -1);
3659 		kfree(ml);
3660 	}
3661 	rtnl_unlock();
3662 }
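/*
 * packet_mc_add()/packet_mc_drop() back the membership options. A
 * minimal sketch enabling promiscuous mode through the refcounted
 * per-socket interface instead of toggling IFF_PROMISC directly:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 *	// ... later, PACKET_DROP_MEMBERSHIP with the same mreq undoes it.
 */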
3663 
3664 static int
3665 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3666 		  unsigned int optlen)
3667 {
3668 	struct sock *sk = sock->sk;
3669 	struct packet_sock *po = pkt_sk(sk);
3670 	int ret;
3671 
3672 	if (level != SOL_PACKET)
3673 		return -ENOPROTOOPT;
3674 
3675 	switch (optname) {
3676 	case PACKET_ADD_MEMBERSHIP:
3677 	case PACKET_DROP_MEMBERSHIP:
3678 	{
3679 		struct packet_mreq_max mreq;
3680 		int len = optlen;
3681 		memset(&mreq, 0, sizeof(mreq));
3682 		if (len < sizeof(struct packet_mreq))
3683 			return -EINVAL;
3684 		if (len > sizeof(mreq))
3685 			len = sizeof(mreq);
3686 		if (copy_from_sockptr(&mreq, optval, len))
3687 			return -EFAULT;
3688 		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3689 			return -EINVAL;
3690 		if (optname == PACKET_ADD_MEMBERSHIP)
3691 			ret = packet_mc_add(sk, &mreq);
3692 		else
3693 			ret = packet_mc_drop(sk, &mreq);
3694 		return ret;
3695 	}
3696 
3697 	case PACKET_RX_RING:
3698 	case PACKET_TX_RING:
3699 	{
3700 		union tpacket_req_u req_u;
3701 		int len;
3702 
3703 		lock_sock(sk);
3704 		switch (po->tp_version) {
3705 		case TPACKET_V1:
3706 		case TPACKET_V2:
3707 			len = sizeof(req_u.req);
3708 			break;
3709 		case TPACKET_V3:
3710 		default:
3711 			len = sizeof(req_u.req3);
3712 			break;
3713 		}
3714 		if (optlen < len) {
3715 			ret = -EINVAL;
3716 		} else {
3717 			if (copy_from_sockptr(&req_u.req, optval, len))
3718 				ret = -EFAULT;
3719 			else
3720 				ret = packet_set_ring(sk, &req_u, 0,
3721 						    optname == PACKET_TX_RING);
3722 		}
3723 		release_sock(sk);
3724 		return ret;
3725 	}
3726 	case PACKET_COPY_THRESH:
3727 	{
3728 		int val;
3729 
3730 		if (optlen != sizeof(val))
3731 			return -EINVAL;
3732 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3733 			return -EFAULT;
3734 
3735 		pkt_sk(sk)->copy_thresh = val;
3736 		return 0;
3737 	}
3738 	case PACKET_VERSION:
3739 	{
3740 		int val;
3741 
3742 		if (optlen != sizeof(val))
3743 			return -EINVAL;
3744 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3745 			return -EFAULT;
3746 		switch (val) {
3747 		case TPACKET_V1:
3748 		case TPACKET_V2:
3749 		case TPACKET_V3:
3750 			break;
3751 		default:
3752 			return -EINVAL;
3753 		}
3754 		lock_sock(sk);
3755 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3756 			ret = -EBUSY;
3757 		} else {
3758 			po->tp_version = val;
3759 			ret = 0;
3760 		}
3761 		release_sock(sk);
3762 		return ret;
3763 	}
3764 	case PACKET_RESERVE:
3765 	{
3766 		unsigned int val;
3767 
3768 		if (optlen != sizeof(val))
3769 			return -EINVAL;
3770 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3771 			return -EFAULT;
3772 		if (val > INT_MAX)
3773 			return -EINVAL;
3774 		lock_sock(sk);
3775 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3776 			ret = -EBUSY;
3777 		} else {
3778 			po->tp_reserve = val;
3779 			ret = 0;
3780 		}
3781 		release_sock(sk);
3782 		return ret;
3783 	}
3784 	case PACKET_LOSS:
3785 	{
3786 		unsigned int val;
3787 
3788 		if (optlen != sizeof(val))
3789 			return -EINVAL;
3790 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3791 			return -EFAULT;
3792 
3793 		lock_sock(sk);
3794 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3795 			ret = -EBUSY;
3796 		} else {
3797 			po->tp_loss = !!val;
3798 			ret = 0;
3799 		}
3800 		release_sock(sk);
3801 		return ret;
3802 	}
3803 	case PACKET_AUXDATA:
3804 	{
3805 		int val;
3806 
3807 		if (optlen < sizeof(val))
3808 			return -EINVAL;
3809 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3810 			return -EFAULT;
3811 
3812 		lock_sock(sk);
3813 		po->auxdata = !!val;
3814 		release_sock(sk);
3815 		return 0;
3816 	}
3817 	case PACKET_ORIGDEV:
3818 	{
3819 		int val;
3820 
3821 		if (optlen < sizeof(val))
3822 			return -EINVAL;
3823 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3824 			return -EFAULT;
3825 
3826 		lock_sock(sk);
3827 		po->origdev = !!val;
3828 		release_sock(sk);
3829 		return 0;
3830 	}
3831 	case PACKET_VNET_HDR:
3832 	{
3833 		int val;
3834 
3835 		if (sock->type != SOCK_RAW)
3836 			return -EINVAL;
3837 		if (optlen < sizeof(val))
3838 			return -EINVAL;
3839 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3840 			return -EFAULT;
3841 
3842 		lock_sock(sk);
3843 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3844 			ret = -EBUSY;
3845 		} else {
3846 			po->has_vnet_hdr = !!val;
3847 			ret = 0;
3848 		}
3849 		release_sock(sk);
3850 		return ret;
3851 	}
3852 	case PACKET_TIMESTAMP:
3853 	{
3854 		int val;
3855 
3856 		if (optlen != sizeof(val))
3857 			return -EINVAL;
3858 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3859 			return -EFAULT;
3860 
3861 		po->tp_tstamp = val;
3862 		return 0;
3863 	}
3864 	case PACKET_FANOUT:
3865 	{
3866 		int val;
3867 
3868 		if (optlen != sizeof(val))
3869 			return -EINVAL;
3870 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3871 			return -EFAULT;
3872 
3873 		return fanout_add(sk, val & 0xffff, val >> 16);
3874 	}
3875 	case PACKET_FANOUT_DATA:
3876 	{
3877 		if (!po->fanout)
3878 			return -EINVAL;
3879 
3880 		return fanout_set_data(po, optval, optlen);
3881 	}
3882 	case PACKET_IGNORE_OUTGOING:
3883 	{
3884 		int val;
3885 
3886 		if (optlen != sizeof(val))
3887 			return -EINVAL;
3888 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3889 			return -EFAULT;
3890 		if (val < 0 || val > 1)
3891 			return -EINVAL;
3892 
3893 		po->prot_hook.ignore_outgoing = !!val;
3894 		return 0;
3895 	}
3896 	case PACKET_TX_HAS_OFF:
3897 	{
3898 		unsigned int val;
3899 
3900 		if (optlen != sizeof(val))
3901 			return -EINVAL;
3902 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3903 			return -EFAULT;
3904 
3905 		lock_sock(sk);
3906 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3907 			ret = -EBUSY;
3908 		} else {
3909 			po->tp_tx_has_off = !!val;
3910 			ret = 0;
3911 		}
3912 		release_sock(sk);
3913 		return ret;
3914 	}
3915 	case PACKET_QDISC_BYPASS:
3916 	{
3917 		int val;
3918 
3919 		if (optlen != sizeof(val))
3920 			return -EINVAL;
3921 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3922 			return -EFAULT;
3923 
3924 		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3925 		return 0;
3926 	}
3927 	default:
3928 		return -ENOPROTOOPT;
3929 	}
3930 }
3931 
3932 static int packet_getsockopt(struct socket *sock, int level, int optname,
3933 			     char __user *optval, int __user *optlen)
3934 {
3935 	int len;
3936 	int val, lv = sizeof(val);
3937 	struct sock *sk = sock->sk;
3938 	struct packet_sock *po = pkt_sk(sk);
3939 	void *data = &val;
3940 	union tpacket_stats_u st;
3941 	struct tpacket_rollover_stats rstats;
3942 	int drops;
3943 
3944 	if (level != SOL_PACKET)
3945 		return -ENOPROTOOPT;
3946 
3947 	if (get_user(len, optlen))
3948 		return -EFAULT;
3949 
3950 	if (len < 0)
3951 		return -EINVAL;
3952 
3953 	switch (optname) {
3954 	case PACKET_STATISTICS:
3955 		spin_lock_bh(&sk->sk_receive_queue.lock);
3956 		memcpy(&st, &po->stats, sizeof(st));
3957 		memset(&po->stats, 0, sizeof(po->stats));
3958 		spin_unlock_bh(&sk->sk_receive_queue.lock);
3959 		drops = atomic_xchg(&po->tp_drops, 0);
3960 
3961 		if (po->tp_version == TPACKET_V3) {
3962 			lv = sizeof(struct tpacket_stats_v3);
3963 			st.stats3.tp_drops = drops;
3964 			st.stats3.tp_packets += drops;
3965 			data = &st.stats3;
3966 		} else {
3967 			lv = sizeof(struct tpacket_stats);
3968 			st.stats1.tp_drops = drops;
3969 			st.stats1.tp_packets += drops;
3970 			data = &st.stats1;
3971 		}
3972 
3973 		break;
3974 	case PACKET_AUXDATA:
3975 		val = po->auxdata;
3976 		break;
3977 	case PACKET_ORIGDEV:
3978 		val = po->origdev;
3979 		break;
3980 	case PACKET_VNET_HDR:
3981 		val = po->has_vnet_hdr;
3982 		break;
3983 	case PACKET_VERSION:
3984 		val = po->tp_version;
3985 		break;
3986 	case PACKET_HDRLEN:
3987 		if (len > sizeof(int))
3988 			len = sizeof(int);
3989 		if (len < sizeof(int))
3990 			return -EINVAL;
3991 		if (copy_from_user(&val, optval, len))
3992 			return -EFAULT;
3993 		switch (val) {
3994 		case TPACKET_V1:
3995 			val = sizeof(struct tpacket_hdr);
3996 			break;
3997 		case TPACKET_V2:
3998 			val = sizeof(struct tpacket2_hdr);
3999 			break;
4000 		case TPACKET_V3:
4001 			val = sizeof(struct tpacket3_hdr);
4002 			break;
4003 		default:
4004 			return -EINVAL;
4005 		}
4006 		break;
4007 	case PACKET_RESERVE:
4008 		val = po->tp_reserve;
4009 		break;
4010 	case PACKET_LOSS:
4011 		val = po->tp_loss;
4012 		break;
4013 	case PACKET_TIMESTAMP:
4014 		val = po->tp_tstamp;
4015 		break;
4016 	case PACKET_FANOUT:
4017 		val = (po->fanout ?
4018 		       ((u32)po->fanout->id |
4019 			((u32)po->fanout->type << 16) |
4020 			((u32)po->fanout->flags << 24)) :
4021 		       0);
4022 		break;
4023 	case PACKET_IGNORE_OUTGOING:
4024 		val = po->prot_hook.ignore_outgoing;
4025 		break;
4026 	case PACKET_ROLLOVER_STATS:
4027 		if (!po->rollover)
4028 			return -EINVAL;
4029 		rstats.tp_all = atomic_long_read(&po->rollover->num);
4030 		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4031 		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4032 		data = &rstats;
4033 		lv = sizeof(rstats);
4034 		break;
4035 	case PACKET_TX_HAS_OFF:
4036 		val = po->tp_tx_has_off;
4037 		break;
4038 	case PACKET_QDISC_BYPASS:
4039 		val = packet_use_direct_xmit(po);
4040 		break;
4041 	default:
4042 		return -ENOPROTOOPT;
4043 	}
4044 
4045 	if (len > lv)
4046 		len = lv;
4047 	if (put_user(len, optlen))
4048 		return -EFAULT;
4049 	if (copy_to_user(optval, data, len))
4050 		return -EFAULT;
4051 	return 0;
4052 }
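/*
 * Illustrative userspace sketch (assumes an open AF_PACKET socket "fd"):
 * reading the receive counters.  The kernel resets them on every read, so
 * each call reports a delta.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("%u packets, %u drops\n", st.tp_packets, st.tp_drops);
 */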
4053 
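/*
 * Netdevice notifier: keeps packet sockets consistent with device state.
 * NETDEV_UNREGISTER drops the socket's multicast entries and cached device
 * reference; both UNREGISTER and DOWN unhook a bound, running socket and
 * raise ENETDOWN; NETDEV_UP re-registers the protocol hook if the socket
 * still has a protocol number.
 */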
4054 static int packet_notifier(struct notifier_block *this,
4055 			   unsigned long msg, void *ptr)
4056 {
4057 	struct sock *sk;
4058 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4059 	struct net *net = dev_net(dev);
4060 
4061 	rcu_read_lock();
4062 	sk_for_each_rcu(sk, &net->packet.sklist) {
4063 		struct packet_sock *po = pkt_sk(sk);
4064 
4065 		switch (msg) {
4066 		case NETDEV_UNREGISTER:
4067 			if (po->mclist)
4068 				packet_dev_mclist_delete(dev, &po->mclist);
4069 			fallthrough;
4070 
4071 		case NETDEV_DOWN:
4072 			if (dev->ifindex == po->ifindex) {
4073 				spin_lock(&po->bind_lock);
4074 				if (po->running) {
4075 					__unregister_prot_hook(sk, false);
4076 					sk->sk_err = ENETDOWN;
4077 					if (!sock_flag(sk, SOCK_DEAD))
4078 						sk->sk_error_report(sk);
4079 				}
4080 				if (msg == NETDEV_UNREGISTER) {
4081 					packet_cached_dev_reset(po);
4082 					po->ifindex = -1;
4083 					if (po->prot_hook.dev)
4084 						dev_put(po->prot_hook.dev);
4085 					po->prot_hook.dev = NULL;
4086 				}
4087 				spin_unlock(&po->bind_lock);
4088 			}
4089 			break;
4090 		case NETDEV_UP:
4091 			if (dev->ifindex == po->ifindex) {
4092 				spin_lock(&po->bind_lock);
4093 				if (po->num)
4094 					register_prot_hook(sk);
4095 				spin_unlock(&po->bind_lock);
4096 			}
4097 			break;
4098 		}
4099 	}
4100 	rcu_read_unlock();
4101 	return NOTIFY_DONE;
4102 }
4103 
4104 
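/*
 * SIOCOUTQ reports the bytes committed to the send queue.  SIOCINQ reports
 * the size of the *next* queued packet, not the total receive backlog.
 */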
4105 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4106 			unsigned long arg)
4107 {
4108 	struct sock *sk = sock->sk;
4109 
4110 	switch (cmd) {
4111 	case SIOCOUTQ:
4112 	{
4113 		int amount = sk_wmem_alloc_get(sk);
4114 
4115 		return put_user(amount, (int __user *)arg);
4116 	}
4117 	case SIOCINQ:
4118 	{
4119 		struct sk_buff *skb;
4120 		int amount = 0;
4121 
4122 		spin_lock_bh(&sk->sk_receive_queue.lock);
4123 		skb = skb_peek(&sk->sk_receive_queue);
4124 		if (skb)
4125 			amount = skb->len;
4126 		spin_unlock_bh(&sk->sk_receive_queue.lock);
4127 		return put_user(amount, (int __user *)arg);
4128 	}
4129 #ifdef CONFIG_INET
4130 	case SIOCADDRT:
4131 	case SIOCDELRT:
4132 	case SIOCDARP:
4133 	case SIOCGARP:
4134 	case SIOCSARP:
4135 	case SIOCGIFADDR:
4136 	case SIOCSIFADDR:
4137 	case SIOCGIFBRDADDR:
4138 	case SIOCSIFBRDADDR:
4139 	case SIOCGIFNETMASK:
4140 	case SIOCSIFNETMASK:
4141 	case SIOCGIFDSTADDR:
4142 	case SIOCSIFDSTADDR:
4143 	case SIOCSIFFLAGS:
4144 		return inet_dgram_ops.ioctl(sock, cmd, arg);
4145 #endif
4146 
4147 	default:
4148 		return -ENOIOCTLCMD;
4149 	}
4150 	return 0;
4151 }
4152 
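/*
 * Ring-aware poll: in addition to normal datagram readiness, EPOLLIN is
 * signalled while the previous rx frame is still owned by user space (data
 * has been delivered into the ring) and EPOLLOUT while the current tx
 * frame is free.  A typical userspace wait (illustrative):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLERR };
 *
 *	poll(&pfd, 1, -1);
 *
 * followed by checking tp_status of the head frame in the mapped ring.
 */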
4153 static __poll_t packet_poll(struct file *file, struct socket *sock,
4154 				poll_table *wait)
4155 {
4156 	struct sock *sk = sock->sk;
4157 	struct packet_sock *po = pkt_sk(sk);
4158 	__poll_t mask = datagram_poll(file, sock, wait);
4159 
4160 	spin_lock_bh(&sk->sk_receive_queue.lock);
4161 	if (po->rx_ring.pg_vec) {
4162 		if (!packet_previous_rx_frame(po, &po->rx_ring,
4163 			TP_STATUS_KERNEL))
4164 			mask |= EPOLLIN | EPOLLRDNORM;
4165 	}
4166 	packet_rcv_try_clear_pressure(po);
4167 	spin_unlock_bh(&sk->sk_receive_queue.lock);
4168 	spin_lock_bh(&sk->sk_write_queue.lock);
4169 	if (po->tx_ring.pg_vec) {
4170 		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4171 			mask |= EPOLLOUT | EPOLLWRNORM;
4172 	}
4173 	spin_unlock_bh(&sk->sk_write_queue.lock);
4174 	return mask;
4175 }
4176 
4177 
4178 /* Dirty? Well, I still have not found a better way to account
4179  * for user mmaps.
4180  */
4181 
4182 static void packet_mm_open(struct vm_area_struct *vma)
4183 {
4184 	struct file *file = vma->vm_file;
4185 	struct socket *sock = file->private_data;
4186 	struct sock *sk = sock->sk;
4187 
4188 	if (sk)
4189 		atomic_inc(&pkt_sk(sk)->mapped);
4190 }
4191 
4192 static void packet_mm_close(struct vm_area_struct *vma)
4193 {
4194 	struct file *file = vma->vm_file;
4195 	struct socket *sock = file->private_data;
4196 	struct sock *sk = sock->sk;
4197 
4198 	if (sk)
4199 		atomic_dec(&pkt_sk(sk)->mapped);
4200 }
4201 
4202 static const struct vm_operations_struct packet_mmap_ops = {
4203 	.open	=	packet_mm_open,
4204 	.close	=	packet_mm_close,
4205 };
4206 
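/*
 * A ring block is either a physically contiguous page allocation of the
 * recorded order or a vmalloc fallback; is_vmalloc_addr() picks the
 * matching release path for each buffer.
 */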
4207 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4208 			unsigned int len)
4209 {
4210 	int i;
4211 
4212 	for (i = 0; i < len; i++) {
4213 		if (likely(pg_vec[i].buffer)) {
4214 			if (is_vmalloc_addr(pg_vec[i].buffer))
4215 				vfree(pg_vec[i].buffer);
4216 			else
4217 				free_pages((unsigned long)pg_vec[i].buffer,
4218 					   order);
4219 			pg_vec[i].buffer = NULL;
4220 		}
4221 	}
4222 	kfree(pg_vec);
4223 }
4224 
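/*
 * Allocate one ring block, cheapest strategy first: contiguous pages
 * without retry or reclaim, then vmalloc, and finally contiguous pages
 * again with __GFP_NORETRY cleared so reclaim and swap may be used.
 */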
4225 static char *alloc_one_pg_vec_page(unsigned long order)
4226 {
4227 	char *buffer;
4228 	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4229 			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4230 
4231 	buffer = (char *) __get_free_pages(gfp_flags, order);
4232 	if (buffer)
4233 		return buffer;
4234 
4235 	/* __get_free_pages failed, fall back to vmalloc */
4236 	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4237 	if (buffer)
4238 		return buffer;
4239 
4240 	/* vmalloc failed, let's dig into swap here */
4241 	gfp_flags &= ~__GFP_NORETRY;
4242 	buffer = (char *) __get_free_pages(gfp_flags, order);
4243 	if (buffer)
4244 		return buffer;
4245 
4246 	/* complete and utter failure */
4247 	return NULL;
4248 }
4249 
4250 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4251 {
4252 	unsigned int block_nr = req->tp_block_nr;
4253 	struct pgv *pg_vec;
4254 	int i;
4255 
4256 	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4257 	if (unlikely(!pg_vec))
4258 		goto out;
4259 
4260 	for (i = 0; i < block_nr; i++) {
4261 		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4262 		if (unlikely(!pg_vec[i].buffer))
4263 			goto out_free_pgvec;
4264 	}
4265 
4266 out:
4267 	return pg_vec;
4268 
4269 out_free_pgvec:
4270 	free_pg_vec(pg_vec, order, block_nr);
4271 	pg_vec = NULL;
4272 	goto out;
4273 }
4274 
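/*
 * Install (tp_block_nr != 0) or tear down (tp_block_nr == 0) a ring:
 * validate the requested geometry, allocate the block vector, quiesce the
 * socket by unregistering its prot hook across an RCU grace period, swap
 * the vectors under the queue spinlock, then rebind.  Fails with -EBUSY
 * while the rings are mmapped or tx frames are still pending.
 */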
4275 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4276 		int closing, int tx_ring)
4277 {
4278 	struct pgv *pg_vec = NULL;
4279 	struct packet_sock *po = pkt_sk(sk);
4280 	unsigned long *rx_owner_map = NULL;
4281 	int was_running, order = 0;
4282 	struct packet_ring_buffer *rb;
4283 	struct sk_buff_head *rb_queue;
4284 	__be16 num;
4285 	int err;
4286 	/* Alias for the V1/V2 part of the union, to keep code churn minimal */
4287 	struct tpacket_req *req = &req_u->req;
4288 
4289 	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4290 	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4291 
4292 	err = -EBUSY;
4293 	if (!closing) {
4294 		if (atomic_read(&po->mapped))
4295 			goto out;
4296 		if (packet_read_pending(rb))
4297 			goto out;
4298 	}
4299 
4300 	if (req->tp_block_nr) {
4301 		unsigned int min_frame_size;
4302 
4303 		/* Sanity tests and some calculations */
4304 		err = -EBUSY;
4305 		if (unlikely(rb->pg_vec))
4306 			goto out;
4307 
4308 		switch (po->tp_version) {
4309 		case TPACKET_V1:
4310 			po->tp_hdrlen = TPACKET_HDRLEN;
4311 			break;
4312 		case TPACKET_V2:
4313 			po->tp_hdrlen = TPACKET2_HDRLEN;
4314 			break;
4315 		case TPACKET_V3:
4316 			po->tp_hdrlen = TPACKET3_HDRLEN;
4317 			break;
4318 		}
4319 
4320 		err = -EINVAL;
4321 		if (unlikely((int)req->tp_block_size <= 0))
4322 			goto out;
4323 		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4324 			goto out;
4325 		min_frame_size = po->tp_hdrlen + po->tp_reserve;
4326 		if (po->tp_version >= TPACKET_V3 &&
4327 		    req->tp_block_size <
4328 		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4329 			goto out;
4330 		if (unlikely(req->tp_frame_size < min_frame_size))
4331 			goto out;
4332 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4333 			goto out;
4334 
4335 		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4336 		if (unlikely(rb->frames_per_block == 0))
4337 			goto out;
4338 		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4339 			goto out;
4340 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4341 					req->tp_frame_nr))
4342 			goto out;
4343 
4344 		err = -ENOMEM;
4345 		order = get_order(req->tp_block_size);
4346 		pg_vec = alloc_pg_vec(req, order);
4347 		if (unlikely(!pg_vec))
4348 			goto out;
4349 		switch (po->tp_version) {
4350 		case TPACKET_V3:
4351 			/* Block transmit is not supported yet */
4352 			if (!tx_ring) {
4353 				init_prb_bdqc(po, rb, pg_vec, req_u);
4354 			} else {
4355 				struct tpacket_req3 *req3 = &req_u->req3;
4356 
4357 				if (req3->tp_retire_blk_tov ||
4358 				    req3->tp_sizeof_priv ||
4359 				    req3->tp_feature_req_word) {
4360 					err = -EINVAL;
4361 					goto out_free_pg_vec;
4362 				}
4363 			}
4364 			break;
4365 		default:
4366 			if (!tx_ring) {
4367 				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4368 					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4369 				if (!rx_owner_map)
4370 					goto out_free_pg_vec;
4371 			}
4372 			break;
4373 		}
4374 	}
4375 	/* tp_block_nr == 0: no blocks requested, i.e. tear the ring down */
4376 	else {
4377 		err = -EINVAL;
4378 		if (unlikely(req->tp_frame_nr))
4379 			goto out;
4380 	}
4381 
4382 
4383 	/* Detach socket from network */
4384 	spin_lock(&po->bind_lock);
4385 	was_running = po->running;
4386 	num = po->num;
4387 	if (was_running) {
4388 		po->num = 0;
4389 		__unregister_prot_hook(sk, false);
4390 	}
4391 	spin_unlock(&po->bind_lock);
4392 
4393 	synchronize_net();
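	/*
	 * Wait out an RCU grace period: receive paths run the prot hook
	 * under rcu_read_lock(), so after this no CPU can still be
	 * delivering into the ring that is swapped out below.
	 */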
4394 
4395 	err = -EBUSY;
4396 	mutex_lock(&po->pg_vec_lock);
4397 	if (closing || atomic_read(&po->mapped) == 0) {
4398 		err = 0;
4399 		spin_lock_bh(&rb_queue->lock);
4400 		swap(rb->pg_vec, pg_vec);
4401 		if (po->tp_version <= TPACKET_V2)
4402 			swap(rb->rx_owner_map, rx_owner_map);
4403 		rb->frame_max = (req->tp_frame_nr - 1);
4404 		rb->head = 0;
4405 		rb->frame_size = req->tp_frame_size;
4406 		spin_unlock_bh(&rb_queue->lock);
4407 
4408 		swap(rb->pg_vec_order, order);
4409 		swap(rb->pg_vec_len, req->tp_block_nr);
4410 
4411 		rb->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
4412 		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4413 						tpacket_rcv : packet_rcv;
4414 		skb_queue_purge(rb_queue);
4415 		if (atomic_read(&po->mapped))
4416 			pr_err("packet_mmap: vma is busy: %d\n",
4417 			       atomic_read(&po->mapped));
4418 	}
4419 	mutex_unlock(&po->pg_vec_lock);
4420 
4421 	spin_lock(&po->bind_lock);
4422 	if (was_running) {
4423 		po->num = num;
4424 		register_prot_hook(sk);
4425 	}
4426 	spin_unlock(&po->bind_lock);
4427 	if (pg_vec && (po->tp_version > TPACKET_V2)) {
4428 		/* Only the rx ring runs the V3 retire-block timer; block tx is unsupported */
4429 		if (!tx_ring)
4430 			prb_shutdown_retire_blk_timer(po, rb_queue);
4431 	}
4432 
4433 out_free_pg_vec:
4434 	bitmap_free(rx_owner_map);
4435 	if (pg_vec)
4436 		free_pg_vec(pg_vec, order, req->tp_block_nr);
4437 out:
4438 	return err;
4439 }
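/*
 * Illustrative userspace sketch (hypothetical sizes; assumes an open
 * AF_PACKET socket "fd"): setting up and mapping a TPACKET_V2 rx ring.
 * The geometry must satisfy the checks in packet_set_ring() above:
 * page-aligned block size, frame size a multiple of TPACKET_ALIGNMENT,
 * and tp_frame_nr == frames-per-block * tp_block_nr.
 *
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size	= 1 << 12,
 *		.tp_block_nr	= 64,
 *		.tp_frame_size	= 1 << 11,
 *		.tp_frame_nr	= 64 * 2,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */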
4440 
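/*
 * Map the rx and tx rings (in that order) as one contiguous area.  The
 * caller must map the exact combined size at offset 0; each block is
 * inserted page by page, and po->mapped pins the geometry so the rings
 * cannot be resized while mapped.
 */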
4441 static int packet_mmap(struct file *file, struct socket *sock,
4442 		struct vm_area_struct *vma)
4443 {
4444 	struct sock *sk = sock->sk;
4445 	struct packet_sock *po = pkt_sk(sk);
4446 	unsigned long size, expected_size;
4447 	struct packet_ring_buffer *rb;
4448 	unsigned long start;
4449 	int err = -EINVAL;
4450 	int i;
4451 
4452 	if (vma->vm_pgoff)
4453 		return -EINVAL;
4454 
4455 	mutex_lock(&po->pg_vec_lock);
4456 
4457 	expected_size = 0;
4458 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4459 		if (rb->pg_vec) {
4460 			expected_size += rb->pg_vec_len
4461 						* rb->pg_vec_pages
4462 						* PAGE_SIZE;
4463 		}
4464 	}
4465 
4466 	if (expected_size == 0)
4467 		goto out;
4468 
4469 	size = vma->vm_end - vma->vm_start;
4470 	if (size != expected_size)
4471 		goto out;
4472 
4473 	start = vma->vm_start;
4474 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4475 		if (rb->pg_vec == NULL)
4476 			continue;
4477 
4478 		for (i = 0; i < rb->pg_vec_len; i++) {
4479 			struct page *page;
4480 			void *kaddr = rb->pg_vec[i].buffer;
4481 			int pg_num;
4482 
4483 			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4484 				page = pgv_to_page(kaddr);
4485 				err = vm_insert_page(vma, start, page);
4486 				if (unlikely(err))
4487 					goto out;
4488 				start += PAGE_SIZE;
4489 				kaddr += PAGE_SIZE;
4490 			}
4491 		}
4492 	}
4493 
4494 	atomic_inc(&po->mapped);
4495 	vma->vm_ops = &packet_mmap_ops;
4496 	err = 0;
4497 
4498 out:
4499 	mutex_unlock(&po->pg_vec_lock);
4500 	return err;
4501 }
4502 
4503 static const struct proto_ops packet_ops_spkt = {
4504 	.family =	PF_PACKET,
4505 	.owner =	THIS_MODULE,
4506 	.release =	packet_release,
4507 	.bind =		packet_bind_spkt,
4508 	.connect =	sock_no_connect,
4509 	.socketpair =	sock_no_socketpair,
4510 	.accept =	sock_no_accept,
4511 	.getname =	packet_getname_spkt,
4512 	.poll =		datagram_poll,
4513 	.ioctl =	packet_ioctl,
4514 	.gettstamp =	sock_gettstamp,
4515 	.listen =	sock_no_listen,
4516 	.shutdown =	sock_no_shutdown,
4517 	.sendmsg =	packet_sendmsg_spkt,
4518 	.recvmsg =	packet_recvmsg,
4519 	.mmap =		sock_no_mmap,
4520 	.sendpage =	sock_no_sendpage,
4521 };
4522 
4523 static const struct proto_ops packet_ops = {
4524 	.family =	PF_PACKET,
4525 	.owner =	THIS_MODULE,
4526 	.release =	packet_release,
4527 	.bind =		packet_bind,
4528 	.connect =	sock_no_connect,
4529 	.socketpair =	sock_no_socketpair,
4530 	.accept =	sock_no_accept,
4531 	.getname =	packet_getname,
4532 	.poll =		packet_poll,
4533 	.ioctl =	packet_ioctl,
4534 	.gettstamp =	sock_gettstamp,
4535 	.listen =	sock_no_listen,
4536 	.shutdown =	sock_no_shutdown,
4537 	.setsockopt =	packet_setsockopt,
4538 	.getsockopt =	packet_getsockopt,
4539 	.sendmsg =	packet_sendmsg,
4540 	.recvmsg =	packet_recvmsg,
4541 	.mmap =		packet_mmap,
4542 	.sendpage =	sock_no_sendpage,
4543 };
4544 
4545 static const struct net_proto_family packet_family_ops = {
4546 	.family =	PF_PACKET,
4547 	.create =	packet_create,
4548 	.owner	=	THIS_MODULE,
4549 };
4550 
4551 static struct notifier_block packet_netdev_notifier = {
4552 	.notifier_call =	packet_notifier,
4553 };
4554 
4555 #ifdef CONFIG_PROC_FS
4556 
4557 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4558 	__acquires(RCU)
4559 {
4560 	struct net *net = seq_file_net(seq);
4561 
4562 	rcu_read_lock();
4563 	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4564 }
4565 
4566 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4567 {
4568 	struct net *net = seq_file_net(seq);
4569 	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4570 }
4571 
4572 static void packet_seq_stop(struct seq_file *seq, void *v)
4573 	__releases(RCU)
4574 {
4575 	rcu_read_unlock();
4576 }
4577 
4578 static int packet_seq_show(struct seq_file *seq, void *v)
4579 {
4580 	if (v == SEQ_START_TOKEN)
4581 		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4582 	else {
4583 		struct sock *s = sk_entry(v);
4584 		const struct packet_sock *po = pkt_sk(s);
4585 
4586 		seq_printf(seq,
4587 			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4588 			   s,
4589 			   refcount_read(&s->sk_refcnt),
4590 			   s->sk_type,
4591 			   ntohs(po->num),
4592 			   po->ifindex,
4593 			   po->running,
4594 			   atomic_read(&s->sk_rmem_alloc),
4595 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4596 			   sock_i_ino(s));
4597 	}
4598 
4599 	return 0;
4600 }
4601 
4602 static const struct seq_operations packet_seq_ops = {
4603 	.start	= packet_seq_start,
4604 	.next	= packet_seq_next,
4605 	.stop	= packet_seq_stop,
4606 	.show	= packet_seq_show,
4607 };
4608 #endif
4609 
4610 static int __net_init packet_net_init(struct net *net)
4611 {
4612 	mutex_init(&net->packet.sklist_lock);
4613 	INIT_HLIST_HEAD(&net->packet.sklist);
4614 
4615 	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4616 			sizeof(struct seq_net_private)))
4617 		return -ENOMEM;
4618 
4619 	return 0;
4620 }
4621 
4622 static void __net_exit packet_net_exit(struct net *net)
4623 {
4624 	remove_proc_entry("packet", net->proc_net);
4625 	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4626 }
4627 
4628 static struct pernet_operations packet_net_ops = {
4629 	.init = packet_net_init,
4630 	.exit = packet_net_exit,
4631 };
4632 
4633 
4634 static void __exit packet_exit(void)
4635 {
4636 	unregister_netdevice_notifier(&packet_netdev_notifier);
4637 	unregister_pernet_subsys(&packet_net_ops);
4638 	sock_unregister(PF_PACKET);
4639 	proto_unregister(&packet_proto);
4640 }
4641 
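/*
 * Registration order matters: the proto and socket family come first, the
 * pernet subsystem must be in place before the netdevice notifier can
 * fire, and packet_exit() unwinds in exactly the reverse order.
 */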
4642 static int __init packet_init(void)
4643 {
4644 	int rc;
4645 
4646 	rc = proto_register(&packet_proto, 0);
4647 	if (rc)
4648 		goto out;
4649 	rc = sock_register(&packet_family_ops);
4650 	if (rc)
4651 		goto out_proto;
4652 	rc = register_pernet_subsys(&packet_net_ops);
4653 	if (rc)
4654 		goto out_sock;
4655 	rc = register_netdevice_notifier(&packet_netdev_notifier);
4656 	if (rc)
4657 		goto out_pernet;
4658 
4659 	return 0;
4660 
4661 out_pernet:
4662 	unregister_pernet_subsys(&packet_net_ops);
4663 out_sock:
4664 	sock_unregister(PF_PACKET);
4665 out_proto:
4666 	proto_unregister(&packet_proto);
4667 out:
4668 	return rc;
4669 }
4670 
4671 module_init(packet_init);
4672 module_exit(packet_exit);
4673 MODULE_LICENSE("GPL");
4674 MODULE_ALIAS_NETPROTO(PF_PACKET);
4675