// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>
#include <linux/netfilter_netdev.h>

#include "internal.h"

/*
   Assumptions:
   - If the device has no dev->header_ops->create, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - A packet socket receives packets with the LL header already pulled,
     so SOCK_RAW must push it back on.

On receive:
-----------

Incoming, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> data

Outgoing, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

Incoming, dev_has_header(dev) == false
   mac_header -> data
     However, drivers often make it point to the ll header.
     This is incorrect because the ll header should be invisible to us.
   data       -> data

Outgoing, dev_has_header(dev) == false
   mac_header -> data. ll header is invisible to us.
   data       -> data

In summary:
   If dev_has_header(dev) == false we are unable to restore the ll header,
   because it is invisible to us.


On transmit:
------------

dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

dev_has_header(dev) == false (ll header is invisible to us)
   mac_header -> data
   data       -> data

   We must set network_header on output to the correct position;
   the packet classifier depends on it.
 */
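
/*
 * To make the rules above concrete, a minimal user-space sketch (assumed
 * usage, not code from this file): on a SOCK_RAW packet socket the frame
 * handed back by recv() starts at the link-layer header, while a
 * SOCK_DGRAM packet socket on the same device starts at the network
 * header. Error handling is elided.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *	#include <arpa/inet.h>
 *
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	char buf[2048];
 *	ssize_t n = recv(raw, buf, sizeof(buf), 0);
 *	// On an ARPHRD_ETHER device, buf now begins with the 14-byte
 *	// Ethernet header; with 'dgram' it would begin at the IP header.
 */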

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
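
/*
 * For illustration only (assumed user-space usage): the shorter
 * user-visible struct packet_mreq is what applications normally pass,
 * e.g. to put the bound interface into promiscuous mode:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,		// e.g. from if_nametoindex()
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 *
 * packet_mreq_max merely leaves room for hardware addresses longer than
 * the 8 bytes of the legacy ABI.
 */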

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};
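
/*
 * Which member of the union is live follows the ring version the user
 * selected. A hedged user-space sketch (standard if_packet.h API, set-up
 * code assumed): choosing TPACKET_V3 before mapping a ring makes the
 * kernel fill tpacket3_hdr frames:
 *
 *	int version = TPACKET_V3;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION,
 *		   &version, sizeof(version));
 */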

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
			   int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
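
/*
 * A small worked example of the layout arithmetic (numbers purely
 * illustrative): with tp_sizeof_priv = 13, the private area is padded up
 * to ALIGN(13, 8) = 16 bytes, so the first frame of every block starts at
 *
 *	BLK_PLUS_PRIV(13) = BLK_HDR_LEN + 16
 *
 * bytes from the start of the block, whatever BLK_HDR_LEN works out to
 * on a given ABI.
 */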

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

#ifdef CONFIG_NETFILTER_EGRESS
static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
{
	struct sk_buff *next, *head = NULL, *tail;
	int rc;

	rcu_read_lock();
	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb_mark_not_on_list(skb);

		if (!nf_hook_egress(skb, &rc, skb->dev))
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;

		tail = skb;
	}
	rcu_read_unlock();

	return head;
}
#endif

static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb)
{
	if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS))
		return dev_queue_xmit(skb);

#ifdef CONFIG_NETFILTER_EGRESS
	if (nf_hook_egress_active()) {
		skb = nf_hook_direct_egress(skb);
		if (!skb)
			return NET_XMIT_DROP;
	}
#endif
	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}
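
/*
 * The bypass path above is opt-in. A hedged user-space sketch (standard
 * sockopt, surrounding set-up assumed): once PACKET_QDISC_BYPASS is set,
 * frames sent on this socket skip the qdisc layer and go straight to the
 * driver via dev_direct_xmit():
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
 *		   &one, sizeof(one));
 *
 * The trade-off is that no queueing discipline (shaping, prioritization)
 * is applied to such packets.
 */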

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	int cpu = raw_smp_processor_id();
	u16 queue_index;

#ifdef CONFIG_XPS
	skb->sender_cpu = cpu + 1;
#endif
	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = netdev_pick_tx(dev, skb, NULL);
	}

	return queue_index;
}

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 1);
	}
}

static void register_prot_hook(struct sock *sk)
{
	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
	__register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook. If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	lockdep_assert_held_once(&po->bind_lock);

	packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 0);

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (packet_sock_flag(po, PACKET_SOCK_RUNNING))
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	/* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		WRITE_ONCE(h.h1->tp_status, status);
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		WRITE_ONCE(h.h2->tp_status, status);
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		WRITE_ONCE(h.h3->tp_status, status);
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	/* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return READ_ONCE(h.h1->tp_status);
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return READ_ONCE(h.h2->tp_status);
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return READ_ONCE(h.h3->tp_status);
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
	    ktime_to_timespec64_cond(skb_tstamp(skb), ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}
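
/*
 * The flags checked above come straight from the PACKET_TIMESTAMP socket
 * option. A hedged user-space sketch (standard net_tstamp.h constants,
 * surrounding set-up assumed): requesting software timestamps so that
 * ring frames carry TP_STATUS_TS_SOFTWARE:
 *
 *	int req = SOF_TIMESTAMPING_SOFTWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP,
 *		   &req, sizeof(req));
 */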

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec64 ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp))))
		return 0;

	h.raw = frame;
	/*
	 * versions 1 through 3 overflow the timestamps in y2106, since they
	 * all store the seconds in a 32-bit unsigned integer.
	 * If we create a version 4, that should have a 64-bit timestamp,
	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
	 * nanoseconds.
	 */
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(const struct packet_sock *po,
				 const struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
					  struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				   int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits, div;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (err)
		return DEFAULT_PRB_RETIRE_TOV;

	/* If the link speed is that slow, you don't really
	 * need to worry about perf anyway.
	 */
	if (ecmd.base.speed < SPEED_1000 ||
	    ecmd.base.speed == SPEED_UNKNOWN)
		return DEFAULT_PRB_RETIRE_TOV;

	div = ecmd.base.speed / 1000;
	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	if (div)
		return mbits + 1;
	return mbits;
}
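
/*
 * A worked example of the math above (illustrative numbers): with a 1 MiB
 * block on a 1 Gb/s link, mbits = (1048576 * 8) / (1024 * 1024) = 8 and
 * div = 1000 / 1000 = 1, so the computed retire timeout is 8 + 1 = 9 ms,
 * i.e. roughly the time to fill one block plus slack. On a 10 Gb/s link,
 * div = 10 and the same block yields 8 / 10 = 0, so the timeout floors
 * at 1 ms.
 */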

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			    union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			  struct packet_ring_buffer *rb,
			  struct pgv *pg_vec,
			  union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start = pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks = req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
	rwlock_init(&p1->blk_fill_in_prog_lock);

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
		  jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		/* Waiting for skb_copy_bits to finish... */
		write_lock(&pkc->blk_fill_in_prog_lock);
		write_unlock(&pkc->blk_fill_in_prog_lock);
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose.
 *	Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1,
			    struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (atomic_read(&po->tp_drops))
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec64 ts;
		ktime_get_real_ts64(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
			   struct tpacket_block_desc *pbd1)
{
	struct timespec64 ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	ktime_get_real_ts64(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			     struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}
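
/*
 * tp_freeze_q_cnt is exported to user space. A hedged sketch (standard
 * PACKET_STATISTICS API for a TPACKET_V3 socket, set-up code assumed)
 * showing how an application can watch for freezes, i.e. for lagging
 * too far behind the kernel:
 *
 *	struct tpacket_stats_v3 st;
 *	socklen_t len = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *	// st.tp_freeze_q_cnt counts how often the ring froze;
 *	// note the counters are reset on every read.
 */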

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
				     struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			/* Waiting for skb_copy_bits to finish... */
			write_lock(&pkc->blk_fill_in_prog_lock);
			write_unlock(&pkc->blk_fill_in_prog_lock);
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
	__releases(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);

	read_unlock(&pkc->blk_fill_in_prog_lock);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			    struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			     struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			       struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			       struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
	__acquires(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	read_lock(&pkc->blk_fill_in_prog_lock);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr + TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
				     struct sk_buff *skb,
				     int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					   po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(const struct packet_sock *po,
			      const struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num - 1;
	else
		prev = rb->prb_bdqc.knum_blocks - 1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
				      struct packet_ring_buffer *rb,
				      int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
				     struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
				   struct packet_ring_buffer *rb,
				   int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head + 1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.frame_max) + 1;
	idx = READ_ONCE(po->rx_ring.head);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(const struct packet_sock *po,
				 const struct sk_buff *skb)
{
	const struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
				   - (skb ? skb->truesize : 0);

		if (avail > (rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	bool pressure;
	int ret;

	ret = __packet_rcv_has_room(po, skb);
	pressure = ret != ROOM_NORMAL;

	if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) != pressure)
		packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, pressure);

	return ret;
}

static void packet_rcv_try_clear_pressure(struct packet_sock *po)
{
	if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) &&
	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
		packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, false);
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 *history = po->rollover->history;
	u32 victim, rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (READ_ONCE(history[i]) == rxhash)
			count++;

	victim = get_random_u32_below(ROLLOVER_HLEN);

	/* Avoid dirtying the cache line if possible */
	if (READ_ONCE(history[victim]) != rxhash)
		WRITE_ONCE(history[victim], rxhash);

	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return get_random_u32_below(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(rcu_dereference(f->arr[idx]));

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(rcu_dereference(f->arr[i]));
		if (po_next != po_skip &&
		    !packet_sock_flag(po_next, PACKET_SOCK_PRESSURE) &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(rcu_dereference(f->arr[idx]));
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
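
/*
 * Joining a fanout group from user space is a single setsockopt. A hedged
 * sketch (standard PACKET_FANOUT ABI; 'group_id' is an arbitrary value
 * chosen by the application): the low 16 bits carry the group id, the
 * high 16 bits the type and flags that packet_rcv_fanout() demuxes on:
 *
 *	int group_id = 42;
 *	int val = group_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 *
 * Every socket that joins with the same id, type, and flags becomes one
 * member of the group and receives its share of the traffic.
 */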

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static u16 fanout_next_id;

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	rcu_assign_pointer(f->arr[f->num_members], sk);
	smp_wmb();
	f->num_members++;
	if (f->num_members == 1)
		dev_add_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (rcu_dereference_protected(f->arr[i],
					      lockdep_is_held(&f->lock)) == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	rcu_assign_pointer(f->arr[i],
			   rcu_dereference_protected(f->arr[f->num_members - 1],
						     lockdep_is_held(&f->lock)));
	f->num_members--;
	if (f->num_members == 0)
		__dev_remove_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	ret = copy_bpf_fprog_from_user(&fprog, data, len);
	if (ret)
		return ret;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_sockptr(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(new))
		return PTR_ERR(new);

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data(struct packet_sock *po, sockptr_t data,
			   unsigned int len)
{
	switch (po->fanout->type) {
	case PACKET_FANOUT_CBPF:
		return fanout_set_data_cbpf(po, data, len);
	case PACKET_FANOUT_EBPF:
		return fanout_set_data_ebpf(po, data, len);
	default:
		return -EINVAL;
	}
}

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
	}
}
1652
__fanout_id_is_free(struct sock * sk,u16 candidate_id)1653 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1654 {
1655 struct packet_fanout *f;
1656
1657 list_for_each_entry(f, &fanout_list, list) {
1658 if (f->id == candidate_id &&
1659 read_pnet(&f->net) == sock_net(sk)) {
1660 return false;
1661 }
1662 }
1663 return true;
1664 }
1665
fanout_find_new_id(struct sock * sk,u16 * new_id)1666 static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1667 {
1668 u16 id = fanout_next_id;
1669
1670 do {
1671 if (__fanout_id_is_free(sk, id)) {
1672 *new_id = id;
1673 fanout_next_id = id + 1;
1674 return true;
1675 }
1676
1677 id++;
1678 } while (id != fanout_next_id);
1679
1680 return false;
1681 }
1682
fanout_add(struct sock * sk,struct fanout_args * args)1683 static int fanout_add(struct sock *sk, struct fanout_args *args)
1684 {
1685 struct packet_rollover *rollover = NULL;
1686 struct packet_sock *po = pkt_sk(sk);
1687 u16 type_flags = args->type_flags;
1688 struct packet_fanout *f, *match;
1689 u8 type = type_flags & 0xff;
1690 u8 flags = type_flags >> 8;
1691 u16 id = args->id;
1692 int err;
1693
1694 switch (type) {
1695 case PACKET_FANOUT_ROLLOVER:
1696 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1697 return -EINVAL;
1698 break;
1699 case PACKET_FANOUT_HASH:
1700 case PACKET_FANOUT_LB:
1701 case PACKET_FANOUT_CPU:
1702 case PACKET_FANOUT_RND:
1703 case PACKET_FANOUT_QM:
1704 case PACKET_FANOUT_CBPF:
1705 case PACKET_FANOUT_EBPF:
1706 break;
1707 default:
1708 return -EINVAL;
1709 }
1710
1711 mutex_lock(&fanout_mutex);
1712
1713 err = -EALREADY;
1714 if (po->fanout)
1715 goto out;
1716
1717 if (type == PACKET_FANOUT_ROLLOVER ||
1718 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1719 err = -ENOMEM;
1720 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1721 if (!rollover)
1722 goto out;
1723 atomic_long_set(&rollover->num, 0);
1724 atomic_long_set(&rollover->num_huge, 0);
1725 atomic_long_set(&rollover->num_failed, 0);
1726 }
1727
1728 if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1729 if (id != 0) {
1730 err = -EINVAL;
1731 goto out;
1732 }
1733 if (!fanout_find_new_id(sk, &id)) {
1734 err = -ENOMEM;
1735 goto out;
1736 }
1737 /* ephemeral flag for the first socket in the group: drop it */
1738 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1739 }
1740
1741 match = NULL;
1742 list_for_each_entry(f, &fanout_list, list) {
1743 if (f->id == id &&
1744 read_pnet(&f->net) == sock_net(sk)) {
1745 match = f;
1746 break;
1747 }
1748 }
1749 err = -EINVAL;
1750 if (match) {
1751 if (match->flags != flags)
1752 goto out;
1753 if (args->max_num_members &&
1754 args->max_num_members != match->max_num_members)
1755 goto out;
1756 } else {
1757 if (args->max_num_members > PACKET_FANOUT_MAX)
1758 goto out;
1759 if (!args->max_num_members)
1760 /* legacy PACKET_FANOUT_MAX */
1761 args->max_num_members = 256;
1762 err = -ENOMEM;
1763 match = kvzalloc(struct_size(match, arr, args->max_num_members),
1764 GFP_KERNEL);
1765 if (!match)
1766 goto out;
1767 write_pnet(&match->net, sock_net(sk));
1768 match->id = id;
1769 match->type = type;
1770 match->flags = flags;
1771 INIT_LIST_HEAD(&match->list);
1772 spin_lock_init(&match->lock);
1773 refcount_set(&match->sk_ref, 0);
1774 fanout_init_data(match);
1775 match->prot_hook.type = po->prot_hook.type;
1776 match->prot_hook.dev = po->prot_hook.dev;
1777 match->prot_hook.func = packet_rcv_fanout;
1778 match->prot_hook.af_packet_priv = match;
1779 match->prot_hook.af_packet_net = read_pnet(&match->net);
1780 match->prot_hook.id_match = match_fanout_group;
1781 match->max_num_members = args->max_num_members;
1782 match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING;
1783 list_add(&match->list, &fanout_list);
1784 }
1785 err = -EINVAL;
1786
1787 spin_lock(&po->bind_lock);
1788 if (packet_sock_flag(po, PACKET_SOCK_RUNNING) &&
1789 match->type == type &&
1790 match->prot_hook.type == po->prot_hook.type &&
1791 match->prot_hook.dev == po->prot_hook.dev) {
1792 err = -ENOSPC;
1793 if (refcount_read(&match->sk_ref) < match->max_num_members) {
1794 __dev_remove_pack(&po->prot_hook);
1795
1796 /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
1797 WRITE_ONCE(po->fanout, match);
1798
1799 po->rollover = rollover;
1800 rollover = NULL;
1801 refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1802 __fanout_link(sk, po);
1803 err = 0;
1804 }
1805 }
1806 spin_unlock(&po->bind_lock);
1807
1808 if (err && !refcount_read(&match->sk_ref)) {
1809 list_del(&match->list);
1810 kvfree(match);
1811 }
1812
1813 out:
1814 kfree(rollover);
1815 mutex_unlock(&fanout_mutex);
1816 return err;
1817 }
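/*
 * Example (userspace sketch, not part of this file): joining fanout
 * group 42 with hash-based load balancing. The group id, protocol and
 * error handling are illustrative assumptions; see packet(7).
 *
 *	int fd  = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int arg = 42 | (PACKET_FANOUT_HASH << 16);
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg)) < 0)
 *		perror("PACKET_FANOUT");
 *
 * Every socket that sets the same id in the same network namespace joins
 * the group created above, up to max_num_members.
 */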
1818
1819 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1820 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1821 * It is the responsibility of the caller to call fanout_release_data() and
1822 * free the returned packet_fanout (after synchronize_net())
1823 */
1824 static struct packet_fanout *fanout_release(struct sock *sk)
1825 {
1826 struct packet_sock *po = pkt_sk(sk);
1827 struct packet_fanout *f;
1828
1829 mutex_lock(&fanout_mutex);
1830 f = po->fanout;
1831 if (f) {
1832 po->fanout = NULL;
1833
1834 if (refcount_dec_and_test(&f->sk_ref))
1835 list_del(&f->list);
1836 else
1837 f = NULL;
1838 }
1839 mutex_unlock(&fanout_mutex);
1840
1841 return f;
1842 }
1843
1844 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1845 struct sk_buff *skb)
1846 {
1847 /* Earlier code assumed this would be a VLAN pkt, double-check
1848 * this now that we have the actual packet in hand. We can only
1849 * do this check on Ethernet devices.
1850 */
1851 if (unlikely(dev->type != ARPHRD_ETHER))
1852 return false;
1853
1854 skb_reset_mac_header(skb);
1855 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1856 }
1857
1858 static const struct proto_ops packet_ops;
1859
1860 static const struct proto_ops packet_ops_spkt;
1861
1862 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1863 struct packet_type *pt, struct net_device *orig_dev)
1864 {
1865 struct sock *sk;
1866 struct sockaddr_pkt *spkt;
1867
1868 /*
1869 * When we registered the protocol we saved the socket in the data
1870 * field for just this event.
1871 */
1872
1873 sk = pt->af_packet_priv;
1874
1875 /*
1876 * Yank back the headers [hope the device set this
1877 * right or kerboom...]
1878 *
1879 * Incoming packets have ll header pulled,
1880 * push it back.
1881 *
1882 * For outgoing packets skb->data == skb_mac_header(skb),
1883 * so this procedure is a no-op.
1884 */
1885
1886 if (skb->pkt_type == PACKET_LOOPBACK)
1887 goto out;
1888
1889 if (!net_eq(dev_net(dev), sock_net(sk)))
1890 goto out;
1891
1892 skb = skb_share_check(skb, GFP_ATOMIC);
1893 if (skb == NULL)
1894 goto oom;
1895
1896 /* drop any routing info */
1897 skb_dst_drop(skb);
1898
1899 /* drop conntrack reference */
1900 nf_reset_ct(skb);
1901
1902 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1903
1904 skb_push(skb, skb->data - skb_mac_header(skb));
1905
1906 /*
1907 * The SOCK_PACKET socket receives _all_ frames.
1908 */
1909
1910 spkt->spkt_family = dev->type;
1911 strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1912 spkt->spkt_protocol = skb->protocol;
1913
1914 /*
1915 * Charge the memory to the socket. This is done specifically
1916 * to prevent sockets from using up all the memory.
1917 */
1918
1919 if (sock_queue_rcv_skb(sk, skb) == 0)
1920 return 0;
1921
1922 out:
1923 kfree_skb(skb);
1924 oom:
1925 return 0;
1926 }
1927
1928 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1929 {
1930 int depth;
1931
1932 if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1933 sock->type == SOCK_RAW) {
1934 skb_reset_mac_header(skb);
1935 skb->protocol = dev_parse_header_protocol(skb);
1936 }
1937
1938 /* Move network header to the right position for VLAN tagged packets */
1939 if (likely(skb->dev->type == ARPHRD_ETHER) &&
1940 eth_type_vlan(skb->protocol) &&
1941 vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
1942 skb_set_network_header(skb, depth);
1943
1944 skb_probe_transport_header(skb);
1945 }
1946
1947 /*
1948 * Output a raw packet to a device layer. This bypasses all the other
1949 * protocol layers and you must therefore supply it with a complete frame
1950 */
1951
1952 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1953 size_t len)
1954 {
1955 struct sock *sk = sock->sk;
1956 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1957 struct sk_buff *skb = NULL;
1958 struct net_device *dev;
1959 struct sockcm_cookie sockc;
1960 __be16 proto = 0;
1961 int err;
1962 int extra_len = 0;
1963
1964 /*
1965 * Get and verify the address.
1966 */
1967
1968 if (saddr) {
1969 if (msg->msg_namelen < sizeof(struct sockaddr))
1970 return -EINVAL;
1971 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1972 proto = saddr->spkt_protocol;
1973 } else
1974 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
1975
1976 /*
1977 * Find the device first to size check it
1978 */
1979
1980 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1981 retry:
1982 rcu_read_lock();
1983 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1984 err = -ENODEV;
1985 if (dev == NULL)
1986 goto out_unlock;
1987
1988 err = -ENETDOWN;
1989 if (!(dev->flags & IFF_UP))
1990 goto out_unlock;
1991
1992 /*
1993 * You may not queue a frame bigger than the mtu. This is the lowest level
1994 * raw protocol and you must do your own fragmentation at this level.
1995 */
1996
1997 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1998 if (!netif_supports_nofcs(dev)) {
1999 err = -EPROTONOSUPPORT;
2000 goto out_unlock;
2001 }
2002 extra_len = 4; /* We're doing our own CRC */
2003 }
2004
2005 err = -EMSGSIZE;
2006 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
2007 goto out_unlock;
2008
2009 if (!skb) {
2010 size_t reserved = LL_RESERVED_SPACE(dev);
2011 int tlen = dev->needed_tailroom;
2012 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
2013
2014 rcu_read_unlock();
2015 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
2016 if (skb == NULL)
2017 return -ENOBUFS;
2018 /* FIXME: Save some space for broken drivers that write a hard
2019 * header at transmission time by themselves. PPP is the notable
2020 * one here. This should really be fixed at the driver level.
2021 */
2022 skb_reserve(skb, reserved);
2023 skb_reset_network_header(skb);
2024
2025 /* Try to align data part correctly */
2026 if (hhlen) {
2027 skb->data -= hhlen;
2028 skb->tail -= hhlen;
2029 if (len < hhlen)
2030 skb_reset_network_header(skb);
2031 }
2032 err = memcpy_from_msg(skb_put(skb, len), msg, len);
2033 if (err)
2034 goto out_free;
2035 goto retry;
2036 }
2037
2038 if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
2039 err = -EINVAL;
2040 goto out_unlock;
2041 }
2042 if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
2043 !packet_extra_vlan_len_allowed(dev, skb)) {
2044 err = -EMSGSIZE;
2045 goto out_unlock;
2046 }
2047
2048 sockcm_init(&sockc, sk);
2049 if (msg->msg_controllen) {
2050 err = sock_cmsg_send(sk, msg, &sockc);
2051 if (unlikely(err))
2052 goto out_unlock;
2053 }
2054
2055 skb->protocol = proto;
2056 skb->dev = dev;
2057 skb->priority = READ_ONCE(sk->sk_priority);
2058 skb->mark = READ_ONCE(sk->sk_mark);
2059 skb->tstamp = sockc.transmit_time;
2060
2061 skb_setup_tx_timestamp(skb, sockc.tsflags);
2062
2063 if (unlikely(extra_len == 4))
2064 skb->no_fcs = 1;
2065
2066 packet_parse_headers(skb, sock);
2067
2068 dev_queue_xmit(skb);
2069 rcu_read_unlock();
2070 return len;
2071
2072 out_unlock:
2073 rcu_read_unlock();
2074 out_free:
2075 kfree_skb(skb);
2076 return err;
2077 }
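/*
 * Example (userspace sketch, device name assumed): SOCK_PACKET sends
 * require a complete link-layer frame and a sockaddr_pkt naming the
 * output device, matching the address checks at the top of this
 * function. "frame"/"frame_len" stand for a prebuilt Ethernet frame.
 *
 *	struct sockaddr_pkt spkt = {
 *		.spkt_family   = AF_PACKET,
 *		.spkt_protocol = htons(ETH_P_ALL),
 *	};
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */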
2078
2079 static unsigned int run_filter(struct sk_buff *skb,
2080 const struct sock *sk,
2081 unsigned int res)
2082 {
2083 struct sk_filter *filter;
2084
2085 rcu_read_lock();
2086 filter = rcu_dereference(sk->sk_filter);
2087 if (filter != NULL)
2088 res = bpf_prog_run_clear_cb(filter->prog, skb);
2089 rcu_read_unlock();
2090
2091 return res;
2092 }
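/*
 * Example (userspace sketch): the filter consulted above is installed
 * with SO_ATTACH_FILTER. This single-instruction classic BPF program
 * accepts every packet but caps the snapshot at 96 bytes, which
 * run_filter()'s return value then uses to trim the queued copy.
 *
 *	struct sock_filter ins[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 96),
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = ins };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */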
2093
2094 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2095 size_t *len, int vnet_hdr_sz)
2096 {
2097 struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 };
2098
2099 if (*len < vnet_hdr_sz)
2100 return -EINVAL;
2101 *len -= vnet_hdr_sz;
2102
2103 if (virtio_net_hdr_from_skb(skb, (struct virtio_net_hdr *)&vnet_hdr, vio_le(), true, 0))
2104 return -EINVAL;
2105
2106 return memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz);
2107 }
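/*
 * Example (userspace sketch): once PACKET_VNET_HDR is enabled, every
 * read is prefixed by the virtio_net_hdr copied out above; the buffer
 * size is an illustrative assumption.
 *
 *	int one = 1;
 *	char buf[4096];
 *	struct virtio_net_hdr *vh = (struct virtio_net_hdr *)buf;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &one, sizeof(one));
 *	recv(fd, buf, sizeof(buf), 0);	/* vh describes csum/GSO state */
 */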
2108
2109 /*
2110 * This function performs lazy skb cloning in the hope that most
2111 * packets are discarded by BPF.
2112 *
2113 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
2114 * and skb->cb are mangled. It works because (and until) packets
2115 * falling here are owned by current CPU. Output packets are cloned
2116 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2117 * sequentially, so that if we return skb to original state on exit,
2118 * we will not harm anyone.
2119 */
2120
2121 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2122 struct packet_type *pt, struct net_device *orig_dev)
2123 {
2124 struct sock *sk;
2125 struct sockaddr_ll *sll;
2126 struct packet_sock *po;
2127 u8 *skb_head = skb->data;
2128 int skb_len = skb->len;
2129 unsigned int snaplen, res;
2130 bool is_drop_n_account = false;
2131
2132 if (skb->pkt_type == PACKET_LOOPBACK)
2133 goto drop;
2134
2135 sk = pt->af_packet_priv;
2136 po = pkt_sk(sk);
2137
2138 if (!net_eq(dev_net(dev), sock_net(sk)))
2139 goto drop;
2140
2141 skb->dev = dev;
2142
2143 if (dev_has_header(dev)) {
2144 /* The device has an explicit notion of ll header,
2145 * exported to higher levels.
2146 *
2147 * Otherwise, the device hides details of its frame
2148 * structure, so that corresponding packet head is
2149 * never delivered to user.
2150 */
2151 if (sk->sk_type != SOCK_DGRAM)
2152 skb_push(skb, skb->data - skb_mac_header(skb));
2153 else if (skb->pkt_type == PACKET_OUTGOING) {
2154 /* Special case: outgoing packets have ll header at head */
2155 skb_pull(skb, skb_network_offset(skb));
2156 }
2157 }
2158
2159 snaplen = skb->len;
2160
2161 res = run_filter(skb, sk, snaplen);
2162 if (!res)
2163 goto drop_n_restore;
2164 if (snaplen > res)
2165 snaplen = res;
2166
2167 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2168 goto drop_n_acct;
2169
2170 if (skb_shared(skb)) {
2171 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2172 if (nskb == NULL)
2173 goto drop_n_acct;
2174
2175 if (skb_head != skb->data) {
2176 skb->data = skb_head;
2177 skb->len = skb_len;
2178 }
2179 consume_skb(skb);
2180 skb = nskb;
2181 }
2182
2183 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2184
2185 sll = &PACKET_SKB_CB(skb)->sa.ll;
2186 sll->sll_hatype = dev->type;
2187 sll->sll_pkttype = skb->pkt_type;
2188 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2189 sll->sll_ifindex = orig_dev->ifindex;
2190 else
2191 sll->sll_ifindex = dev->ifindex;
2192
2193 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2194
2195 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2196 * Use their space for storing the original skb length.
2197 */
2198 PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2199
2200 if (pskb_trim(skb, snaplen))
2201 goto drop_n_acct;
2202
2203 skb_set_owner_r(skb, sk);
2204 skb->dev = NULL;
2205 skb_dst_drop(skb);
2206
2207 /* drop conntrack reference */
2208 nf_reset_ct(skb);
2209
2210 spin_lock(&sk->sk_receive_queue.lock);
2211 po->stats.stats1.tp_packets++;
2212 sock_skb_set_dropcount(sk, skb);
2213 skb_clear_delivery_time(skb);
2214 __skb_queue_tail(&sk->sk_receive_queue, skb);
2215 spin_unlock(&sk->sk_receive_queue.lock);
2216 sk->sk_data_ready(sk);
2217 return 0;
2218
2219 drop_n_acct:
2220 is_drop_n_account = true;
2221 atomic_inc(&po->tp_drops);
2222 atomic_inc(&sk->sk_drops);
2223
2224 drop_n_restore:
2225 if (skb_head != skb->data && skb_shared(skb)) {
2226 skb->data = skb_head;
2227 skb->len = skb_len;
2228 }
2229 drop:
2230 if (!is_drop_n_account)
2231 consume_skb(skb);
2232 else
2233 kfree_skb(skb);
2234 return 0;
2235 }
2236
2237 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2238 struct packet_type *pt, struct net_device *orig_dev)
2239 {
2240 struct sock *sk;
2241 struct packet_sock *po;
2242 struct sockaddr_ll *sll;
2243 union tpacket_uhdr h;
2244 u8 *skb_head = skb->data;
2245 int skb_len = skb->len;
2246 unsigned int snaplen, res;
2247 unsigned long status = TP_STATUS_USER;
2248 unsigned short macoff, hdrlen;
2249 unsigned int netoff;
2250 struct sk_buff *copy_skb = NULL;
2251 struct timespec64 ts;
2252 __u32 ts_status;
2253 bool is_drop_n_account = false;
2254 unsigned int slot_id = 0;
2255 int vnet_hdr_sz = 0;
2256
2257 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2258 * We may add members to them up to the current aligned size without
2259 * forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2260 */
2261 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2262 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2263
2264 if (skb->pkt_type == PACKET_LOOPBACK)
2265 goto drop;
2266
2267 sk = pt->af_packet_priv;
2268 po = pkt_sk(sk);
2269
2270 if (!net_eq(dev_net(dev), sock_net(sk)))
2271 goto drop;
2272
2273 if (dev_has_header(dev)) {
2274 if (sk->sk_type != SOCK_DGRAM)
2275 skb_push(skb, skb->data - skb_mac_header(skb));
2276 else if (skb->pkt_type == PACKET_OUTGOING) {
2277 /* Special case: outgoing packets have ll header at head */
2278 skb_pull(skb, skb_network_offset(skb));
2279 }
2280 }
2281
2282 snaplen = skb->len;
2283
2284 res = run_filter(skb, sk, snaplen);
2285 if (!res)
2286 goto drop_n_restore;
2287
2288 /* If we are flooded, just give up */
2289 if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2290 atomic_inc(&po->tp_drops);
2291 goto drop_n_restore;
2292 }
2293
2294 if (skb->ip_summed == CHECKSUM_PARTIAL)
2295 status |= TP_STATUS_CSUMNOTREADY;
2296 else if (skb->pkt_type != PACKET_OUTGOING &&
2297 skb_csum_unnecessary(skb))
2298 status |= TP_STATUS_CSUM_VALID;
2299 if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
2300 status |= TP_STATUS_GSO_TCP;
2301
2302 if (snaplen > res)
2303 snaplen = res;
2304
2305 if (sk->sk_type == SOCK_DGRAM) {
2306 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2307 po->tp_reserve;
2308 } else {
2309 unsigned int maclen = skb_network_offset(skb);
2310 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2311 (maclen < 16 ? 16 : maclen)) +
2312 po->tp_reserve;
2313 vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2314 if (vnet_hdr_sz)
2315 netoff += vnet_hdr_sz;
2316 macoff = netoff - maclen;
2317 }
2318 if (netoff > USHRT_MAX) {
2319 atomic_inc(&po->tp_drops);
2320 goto drop_n_restore;
2321 }
2322 if (po->tp_version <= TPACKET_V2) {
2323 if (macoff + snaplen > po->rx_ring.frame_size) {
2324 if (po->copy_thresh &&
2325 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2326 if (skb_shared(skb)) {
2327 copy_skb = skb_clone(skb, GFP_ATOMIC);
2328 } else {
2329 copy_skb = skb_get(skb);
2330 skb_head = skb->data;
2331 }
2332 if (copy_skb) {
2333 memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
2334 sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
2335 skb_set_owner_r(copy_skb, sk);
2336 }
2337 }
2338 snaplen = po->rx_ring.frame_size - macoff;
2339 if ((int)snaplen < 0) {
2340 snaplen = 0;
2341 vnet_hdr_sz = 0;
2342 }
2343 }
2344 } else if (unlikely(macoff + snaplen >
2345 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2346 u32 nval;
2347
2348 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2349 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2350 snaplen, nval, macoff);
2351 snaplen = nval;
2352 if (unlikely((int)snaplen < 0)) {
2353 snaplen = 0;
2354 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2355 vnet_hdr_sz = 0;
2356 }
2357 }
2358 spin_lock(&sk->sk_receive_queue.lock);
2359 h.raw = packet_current_rx_frame(po, skb,
2360 TP_STATUS_KERNEL, (macoff+snaplen));
2361 if (!h.raw)
2362 goto drop_n_account;
2363
2364 if (po->tp_version <= TPACKET_V2) {
2365 slot_id = po->rx_ring.head;
2366 if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2367 goto drop_n_account;
2368 __set_bit(slot_id, po->rx_ring.rx_owner_map);
2369 }
2370
2371 if (vnet_hdr_sz &&
2372 virtio_net_hdr_from_skb(skb, h.raw + macoff -
2373 sizeof(struct virtio_net_hdr),
2374 vio_le(), true, 0)) {
2375 if (po->tp_version == TPACKET_V3)
2376 prb_clear_blk_fill_status(&po->rx_ring);
2377 goto drop_n_account;
2378 }
2379
2380 if (po->tp_version <= TPACKET_V2) {
2381 packet_increment_rx_head(po, &po->rx_ring);
2382 /*
2383 * LOSING will be reported until you read the stats,
2384 * because it's COR - Clear On Read.
2385 * Anyway, move it for V1/V2 only, as V3 doesn't need this
2386 * at the packet level.
2387 */
2388 if (atomic_read(&po->tp_drops))
2389 status |= TP_STATUS_LOSING;
2390 }
2391
2392 po->stats.stats1.tp_packets++;
2393 if (copy_skb) {
2394 status |= TP_STATUS_COPY;
2395 skb_clear_delivery_time(copy_skb);
2396 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2397 }
2398 spin_unlock(&sk->sk_receive_queue.lock);
2399
2400 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2401
2402 /* Always timestamp; prefer an existing software timestamp taken
2403 * closer to the time of capture.
2404 */
2405 ts_status = tpacket_get_timestamp(skb, &ts,
2406 READ_ONCE(po->tp_tstamp) |
2407 SOF_TIMESTAMPING_SOFTWARE);
2408 if (!ts_status)
2409 ktime_get_real_ts64(&ts);
2410
2411 status |= ts_status;
2412
2413 switch (po->tp_version) {
2414 case TPACKET_V1:
2415 h.h1->tp_len = skb->len;
2416 h.h1->tp_snaplen = snaplen;
2417 h.h1->tp_mac = macoff;
2418 h.h1->tp_net = netoff;
2419 h.h1->tp_sec = ts.tv_sec;
2420 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2421 hdrlen = sizeof(*h.h1);
2422 break;
2423 case TPACKET_V2:
2424 h.h2->tp_len = skb->len;
2425 h.h2->tp_snaplen = snaplen;
2426 h.h2->tp_mac = macoff;
2427 h.h2->tp_net = netoff;
2428 h.h2->tp_sec = ts.tv_sec;
2429 h.h2->tp_nsec = ts.tv_nsec;
2430 if (skb_vlan_tag_present(skb)) {
2431 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2432 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2433 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2434 } else {
2435 h.h2->tp_vlan_tci = 0;
2436 h.h2->tp_vlan_tpid = 0;
2437 }
2438 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2439 hdrlen = sizeof(*h.h2);
2440 break;
2441 case TPACKET_V3:
2442 /* tp_next_offset and the vlan fields are already populated
2443 * above, so DON'T clear them here.
2444 */
2445 h.h3->tp_status |= status;
2446 h.h3->tp_len = skb->len;
2447 h.h3->tp_snaplen = snaplen;
2448 h.h3->tp_mac = macoff;
2449 h.h3->tp_net = netoff;
2450 h.h3->tp_sec = ts.tv_sec;
2451 h.h3->tp_nsec = ts.tv_nsec;
2452 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2453 hdrlen = sizeof(*h.h3);
2454 break;
2455 default:
2456 BUG();
2457 }
2458
2459 sll = h.raw + TPACKET_ALIGN(hdrlen);
2460 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2461 sll->sll_family = AF_PACKET;
2462 sll->sll_hatype = dev->type;
2463 sll->sll_protocol = skb->protocol;
2464 sll->sll_pkttype = skb->pkt_type;
2465 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2466 sll->sll_ifindex = orig_dev->ifindex;
2467 else
2468 sll->sll_ifindex = dev->ifindex;
2469
2470 smp_mb();
2471
2472 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2473 if (po->tp_version <= TPACKET_V2) {
2474 u8 *start, *end;
2475
2476 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2477 macoff + snaplen);
2478
2479 for (start = h.raw; start < end; start += PAGE_SIZE)
2480 flush_dcache_page(pgv_to_page(start));
2481 }
2482 smp_wmb();
2483 #endif
2484
2485 if (po->tp_version <= TPACKET_V2) {
2486 spin_lock(&sk->sk_receive_queue.lock);
2487 __packet_set_status(po, h.raw, status);
2488 __clear_bit(slot_id, po->rx_ring.rx_owner_map);
2489 spin_unlock(&sk->sk_receive_queue.lock);
2490 sk->sk_data_ready(sk);
2491 } else if (po->tp_version == TPACKET_V3) {
2492 prb_clear_blk_fill_status(&po->rx_ring);
2493 }
2494
2495 drop_n_restore:
2496 if (skb_head != skb->data && skb_shared(skb)) {
2497 skb->data = skb_head;
2498 skb->len = skb_len;
2499 }
2500 drop:
2501 if (!is_drop_n_account)
2502 consume_skb(skb);
2503 else
2504 kfree_skb(skb);
2505 return 0;
2506
2507 drop_n_account:
2508 spin_unlock(&sk->sk_receive_queue.lock);
2509 atomic_inc(&po->tp_drops);
2510 is_drop_n_account = true;
2511
2512 sk->sk_data_ready(sk);
2513 kfree_skb(copy_skb);
2514 goto drop_n_restore;
2515 }
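/*
 * Example (userspace sketch, geometry assumed): setting up the mmap'ed
 * RX ring that tpacket_rcv() fills. 64 blocks of 4 KiB hold two 2 KiB
 * frames each, so tp_frame_nr must be 128.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 128,
 *	};
 *	int ver = TPACKET_V2;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct tpacket2_hdr *hdr = ring;	/* first frame */
 *
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		/* consume the frame, then hand the slot back: */
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *	}
 */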
2516
2517 static void tpacket_destruct_skb(struct sk_buff *skb)
2518 {
2519 struct packet_sock *po = pkt_sk(skb->sk);
2520
2521 if (likely(po->tx_ring.pg_vec)) {
2522 void *ph;
2523 __u32 ts;
2524
2525 ph = skb_zcopy_get_nouarg(skb);
2526 packet_dec_pending(&po->tx_ring);
2527
2528 ts = __packet_set_timestamp(po, ph, skb);
2529 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2530
2531 complete(&po->skb_completion);
2532 }
2533
2534 sock_wfree(skb);
2535 }
2536
2537 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2538 {
2539 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2540 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2541 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2542 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2543 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2544 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2545 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2546
2547 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2548 return -EINVAL;
2549
2550 return 0;
2551 }
2552
2553 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2554 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
2555 {
2556 int ret;
2557
2558 if (*len < vnet_hdr_sz)
2559 return -EINVAL;
2560 *len -= vnet_hdr_sz;
2561
2562 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2563 return -EFAULT;
2564
2565 ret = __packet_snd_vnet_parse(vnet_hdr, *len);
2566 if (ret)
2567 return ret;
2568
2569 /* move iter to point to the start of mac header */
2570 if (vnet_hdr_sz != sizeof(struct virtio_net_hdr))
2571 iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
2572
2573 return 0;
2574 }
2575
2576 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2577 void *frame, struct net_device *dev, void *data, int tp_len,
2578 __be16 proto, unsigned char *addr, int hlen, int copylen,
2579 const struct sockcm_cookie *sockc)
2580 {
2581 union tpacket_uhdr ph;
2582 int to_write, offset, len, nr_frags, len_max;
2583 struct socket *sock = po->sk.sk_socket;
2584 struct page *page;
2585 int err;
2586
2587 ph.raw = frame;
2588
2589 skb->protocol = proto;
2590 skb->dev = dev;
2591 skb->priority = READ_ONCE(po->sk.sk_priority);
2592 skb->mark = READ_ONCE(po->sk.sk_mark);
2593 skb->tstamp = sockc->transmit_time;
2594 skb_setup_tx_timestamp(skb, sockc->tsflags);
2595 skb_zcopy_set_nouarg(skb, ph.raw);
2596
2597 skb_reserve(skb, hlen);
2598 skb_reset_network_header(skb);
2599
2600 to_write = tp_len;
2601
2602 if (sock->type == SOCK_DGRAM) {
2603 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2604 NULL, tp_len);
2605 if (unlikely(err < 0))
2606 return -EINVAL;
2607 } else if (copylen) {
2608 int hdrlen = min_t(int, copylen, tp_len);
2609
2610 skb_push(skb, dev->hard_header_len);
2611 skb_put(skb, copylen - dev->hard_header_len);
2612 err = skb_store_bits(skb, 0, data, hdrlen);
2613 if (unlikely(err))
2614 return err;
2615 if (!dev_validate_header(dev, skb->data, hdrlen))
2616 return -EINVAL;
2617
2618 data += hdrlen;
2619 to_write -= hdrlen;
2620 }
2621
2622 offset = offset_in_page(data);
2623 len_max = PAGE_SIZE - offset;
2624 len = ((to_write > len_max) ? len_max : to_write);
2625
2626 skb->data_len = to_write;
2627 skb->len += to_write;
2628 skb->truesize += to_write;
2629 refcount_add(to_write, &po->sk.sk_wmem_alloc);
2630
2631 while (likely(to_write)) {
2632 nr_frags = skb_shinfo(skb)->nr_frags;
2633
2634 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2635 pr_err("Packet exceed the number of skb frags(%u)\n",
2636 (unsigned int)MAX_SKB_FRAGS);
2637 return -EFAULT;
2638 }
2639
2640 page = pgv_to_page(data);
2641 data += len;
2642 flush_dcache_page(page);
2643 get_page(page);
2644 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2645 to_write -= len;
2646 offset = 0;
2647 len_max = PAGE_SIZE;
2648 len = ((to_write > len_max) ? len_max : to_write);
2649 }
2650
2651 packet_parse_headers(skb, sock);
2652
2653 return tp_len;
2654 }
2655
2656 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2657 int size_max, void **data)
2658 {
2659 union tpacket_uhdr ph;
2660 int tp_len, off;
2661
2662 ph.raw = frame;
2663
2664 switch (po->tp_version) {
2665 case TPACKET_V3:
2666 if (ph.h3->tp_next_offset != 0) {
2667 pr_warn_once("variable sized slot not supported");
2668 return -EINVAL;
2669 }
2670 tp_len = ph.h3->tp_len;
2671 break;
2672 case TPACKET_V2:
2673 tp_len = ph.h2->tp_len;
2674 break;
2675 default:
2676 tp_len = ph.h1->tp_len;
2677 break;
2678 }
2679 if (unlikely(tp_len > size_max)) {
2680 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2681 return -EMSGSIZE;
2682 }
2683
2684 if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) {
2685 int off_min, off_max;
2686
2687 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2688 off_max = po->tx_ring.frame_size - tp_len;
2689 if (po->sk.sk_type == SOCK_DGRAM) {
2690 switch (po->tp_version) {
2691 case TPACKET_V3:
2692 off = ph.h3->tp_net;
2693 break;
2694 case TPACKET_V2:
2695 off = ph.h2->tp_net;
2696 break;
2697 default:
2698 off = ph.h1->tp_net;
2699 break;
2700 }
2701 } else {
2702 switch (po->tp_version) {
2703 case TPACKET_V3:
2704 off = ph.h3->tp_mac;
2705 break;
2706 case TPACKET_V2:
2707 off = ph.h2->tp_mac;
2708 break;
2709 default:
2710 off = ph.h1->tp_mac;
2711 break;
2712 }
2713 }
2714 if (unlikely((off < off_min) || (off_max < off)))
2715 return -EINVAL;
2716 } else {
2717 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2718 }
2719
2720 *data = frame + off;
2721 return tp_len;
2722 }
2723
2724 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2725 {
2726 struct sk_buff *skb = NULL;
2727 struct net_device *dev;
2728 struct virtio_net_hdr *vnet_hdr = NULL;
2729 struct sockcm_cookie sockc;
2730 __be16 proto;
2731 int err, reserve = 0;
2732 void *ph;
2733 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2734 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2735 int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2736 unsigned char *addr = NULL;
2737 int tp_len, size_max;
2738 void *data;
2739 int len_sum = 0;
2740 int status = TP_STATUS_AVAILABLE;
2741 int hlen, tlen, copylen = 0;
2742 long timeo = 0;
2743
2744 mutex_lock(&po->pg_vec_lock);
2745
2746 /* The packet_sendmsg() check on tx_ring.pg_vec was lockless;
2747 * we must re-check it under the protection of pg_vec_lock.
2748 */
2749 if (unlikely(!po->tx_ring.pg_vec)) {
2750 err = -EBUSY;
2751 goto out;
2752 }
2753 if (likely(saddr == NULL)) {
2754 dev = packet_cached_dev_get(po);
2755 proto = READ_ONCE(po->num);
2756 } else {
2757 err = -EINVAL;
2758 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2759 goto out;
2760 if (msg->msg_namelen < (saddr->sll_halen
2761 + offsetof(struct sockaddr_ll,
2762 sll_addr)))
2763 goto out;
2764 proto = saddr->sll_protocol;
2765 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2766 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2767 if (dev && msg->msg_namelen < dev->addr_len +
2768 offsetof(struct sockaddr_ll, sll_addr))
2769 goto out_put;
2770 addr = saddr->sll_addr;
2771 }
2772 }
2773
2774 err = -ENXIO;
2775 if (unlikely(dev == NULL))
2776 goto out;
2777 err = -ENETDOWN;
2778 if (unlikely(!(dev->flags & IFF_UP)))
2779 goto out_put;
2780
2781 sockcm_init(&sockc, &po->sk);
2782 if (msg->msg_controllen) {
2783 err = sock_cmsg_send(&po->sk, msg, &sockc);
2784 if (unlikely(err))
2785 goto out_put;
2786 }
2787
2788 if (po->sk.sk_socket->type == SOCK_RAW)
2789 reserve = dev->hard_header_len;
2790 size_max = po->tx_ring.frame_size
2791 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2792
2793 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz)
2794 size_max = dev->mtu + reserve + VLAN_HLEN;
2795
2796 reinit_completion(&po->skb_completion);
2797
2798 do {
2799 ph = packet_current_frame(po, &po->tx_ring,
2800 TP_STATUS_SEND_REQUEST);
2801 if (unlikely(ph == NULL)) {
2802 if (need_wait && skb) {
2803 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2804 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2805 if (timeo <= 0) {
2806 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2807 goto out_put;
2808 }
2809 }
2810 /* check for additional frames */
2811 continue;
2812 }
2813
2814 skb = NULL;
2815 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2816 if (tp_len < 0)
2817 goto tpacket_error;
2818
2819 status = TP_STATUS_SEND_REQUEST;
2820 hlen = LL_RESERVED_SPACE(dev);
2821 tlen = dev->needed_tailroom;
2822 if (vnet_hdr_sz) {
2823 vnet_hdr = data;
2824 data += vnet_hdr_sz;
2825 tp_len -= vnet_hdr_sz;
2826 if (tp_len < 0 ||
2827 __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2828 tp_len = -EINVAL;
2829 goto tpacket_error;
2830 }
2831 copylen = __virtio16_to_cpu(vio_le(),
2832 vnet_hdr->hdr_len);
2833 }
2834 copylen = max_t(int, copylen, dev->hard_header_len);
2835 skb = sock_alloc_send_skb(&po->sk,
2836 hlen + tlen + sizeof(struct sockaddr_ll) +
2837 (copylen - dev->hard_header_len),
2838 !need_wait, &err);
2839
2840 if (unlikely(skb == NULL)) {
2841 /* we assume the socket was initially writeable ... */
2842 if (likely(len_sum > 0))
2843 err = len_sum;
2844 goto out_status;
2845 }
2846 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2847 addr, hlen, copylen, &sockc);
2848 if (likely(tp_len >= 0) &&
2849 tp_len > dev->mtu + reserve &&
2850 !vnet_hdr_sz &&
2851 !packet_extra_vlan_len_allowed(dev, skb))
2852 tp_len = -EMSGSIZE;
2853
2854 if (unlikely(tp_len < 0)) {
2855 tpacket_error:
2856 if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) {
2857 __packet_set_status(po, ph,
2858 TP_STATUS_AVAILABLE);
2859 packet_increment_head(&po->tx_ring);
2860 kfree_skb(skb);
2861 continue;
2862 } else {
2863 status = TP_STATUS_WRONG_FORMAT;
2864 err = tp_len;
2865 goto out_status;
2866 }
2867 }
2868
2869 if (vnet_hdr_sz) {
2870 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2871 tp_len = -EINVAL;
2872 goto tpacket_error;
2873 }
2874 virtio_net_hdr_set_proto(skb, vnet_hdr);
2875 }
2876
2877 skb->destructor = tpacket_destruct_skb;
2878 __packet_set_status(po, ph, TP_STATUS_SENDING);
2879 packet_inc_pending(&po->tx_ring);
2880
2881 status = TP_STATUS_SEND_REQUEST;
2882 err = packet_xmit(po, skb);
2883 if (unlikely(err != 0)) {
2884 if (err > 0)
2885 err = net_xmit_errno(err);
2886 if (err && __packet_get_status(po, ph) ==
2887 TP_STATUS_AVAILABLE) {
2888 /* skb was destructed already */
2889 skb = NULL;
2890 goto out_status;
2891 }
2892 /*
2893 * skb was dropped but not destructed yet;
2894 * let's treat it like congestion or err < 0
2895 */
2896 err = 0;
2897 }
2898 packet_increment_head(&po->tx_ring);
2899 len_sum += tp_len;
2900 } while (likely((ph != NULL) ||
2901 /* Note: packet_read_pending() might be slow if we have
2902 * to call it, as it's a per-CPU variable, but on the
2903 * fast path we already short-circuit the loop with the
2904 * first condition and don't have to go down that path
2905 * anyway.
2906 */
2907 (need_wait && packet_read_pending(&po->tx_ring))));
2908
2909 err = len_sum;
2910 goto out_put;
2911
2912 out_status:
2913 __packet_set_status(po, ph, status);
2914 kfree_skb(skb);
2915 out_put:
2916 dev_put(dev);
2917 out:
2918 mutex_unlock(&po->pg_vec_lock);
2919 return err;
2920 }
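/*
 * Example (userspace sketch): the TX-side counterpart. After a ring is
 * created with PACKET_TX_RING and mmap'ed as in the RX example above,
 * userspace fills a slot, marks it TP_STATUS_SEND_REQUEST and kicks
 * this function with send(). "hdr" points at a TPACKET_V2 TX slot and
 * "frame"/"frame_len" are an assumed prebuilt frame.
 *
 *	char *data = (char *)hdr + TPACKET2_HDRLEN -
 *		     sizeof(struct sockaddr_ll);
 *
 *	memcpy(data, frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);	/* flush all pending slots */
 */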
2921
2922 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2923 size_t reserve, size_t len,
2924 size_t linear, int noblock,
2925 int *err)
2926 {
2927 struct sk_buff *skb;
2928
2929 /* Under a page? Don't bother with paged skb. */
2930 if (prepad + len < PAGE_SIZE || !linear)
2931 linear = len;
2932
2933 if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
2934 linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
2935 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2936 err, PAGE_ALLOC_COSTLY_ORDER);
2937 if (!skb)
2938 return NULL;
2939
2940 skb_reserve(skb, reserve);
2941 skb_put(skb, linear);
2942 skb->data_len = len - linear;
2943 skb->len += len - linear;
2944
2945 return skb;
2946 }
2947
2948 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2949 {
2950 struct sock *sk = sock->sk;
2951 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2952 struct sk_buff *skb;
2953 struct net_device *dev;
2954 __be16 proto;
2955 unsigned char *addr = NULL;
2956 int err, reserve = 0;
2957 struct sockcm_cookie sockc;
2958 struct virtio_net_hdr vnet_hdr = { 0 };
2959 int offset = 0;
2960 struct packet_sock *po = pkt_sk(sk);
2961 int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2962 int hlen, tlen, linear;
2963 int extra_len = 0;
2964
2965 /*
2966 * Get and verify the address.
2967 */
2968
2969 if (likely(saddr == NULL)) {
2970 dev = packet_cached_dev_get(po);
2971 proto = READ_ONCE(po->num);
2972 } else {
2973 err = -EINVAL;
2974 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2975 goto out;
2976 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2977 goto out;
2978 proto = saddr->sll_protocol;
2979 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2980 if (sock->type == SOCK_DGRAM) {
2981 if (dev && msg->msg_namelen < dev->addr_len +
2982 offsetof(struct sockaddr_ll, sll_addr))
2983 goto out_unlock;
2984 addr = saddr->sll_addr;
2985 }
2986 }
2987
2988 err = -ENXIO;
2989 if (unlikely(dev == NULL))
2990 goto out_unlock;
2991 err = -ENETDOWN;
2992 if (unlikely(!(dev->flags & IFF_UP)))
2993 goto out_unlock;
2994
2995 sockcm_init(&sockc, sk);
2996 sockc.mark = READ_ONCE(sk->sk_mark);
2997 if (msg->msg_controllen) {
2998 err = sock_cmsg_send(sk, msg, &sockc);
2999 if (unlikely(err))
3000 goto out_unlock;
3001 }
3002
3003 if (sock->type == SOCK_RAW)
3004 reserve = dev->hard_header_len;
3005 if (vnet_hdr_sz) {
3006 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
3007 if (err)
3008 goto out_unlock;
3009 }
3010
3011 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
3012 if (!netif_supports_nofcs(dev)) {
3013 err = -EPROTONOSUPPORT;
3014 goto out_unlock;
3015 }
3016 extra_len = 4; /* We're doing our own CRC */
3017 }
3018
3019 err = -EMSGSIZE;
3020 if (!vnet_hdr.gso_type &&
3021 (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
3022 goto out_unlock;
3023
3024 err = -ENOBUFS;
3025 hlen = LL_RESERVED_SPACE(dev);
3026 tlen = dev->needed_tailroom;
3027 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
3028 linear = max(linear, min_t(int, len, dev->hard_header_len));
3029 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
3030 msg->msg_flags & MSG_DONTWAIT, &err);
3031 if (skb == NULL)
3032 goto out_unlock;
3033
3034 skb_reset_network_header(skb);
3035
3036 err = -EINVAL;
3037 if (sock->type == SOCK_DGRAM) {
3038 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
3039 if (unlikely(offset < 0))
3040 goto out_free;
3041 } else if (reserve) {
3042 skb_reserve(skb, -reserve);
3043 if (len < reserve + sizeof(struct ipv6hdr) &&
3044 dev->min_header_len != dev->hard_header_len)
3045 skb_reset_network_header(skb);
3046 }
3047
3048 /* Returns -EFAULT on error */
3049 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
3050 if (err)
3051 goto out_free;
3052
3053 if ((sock->type == SOCK_RAW &&
3054 !dev_validate_header(dev, skb->data, len)) || !skb->len) {
3055 err = -EINVAL;
3056 goto out_free;
3057 }
3058
3059 skb_setup_tx_timestamp(skb, sockc.tsflags);
3060
3061 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
3062 !packet_extra_vlan_len_allowed(dev, skb)) {
3063 err = -EMSGSIZE;
3064 goto out_free;
3065 }
3066
3067 skb->protocol = proto;
3068 skb->dev = dev;
3069 skb->priority = READ_ONCE(sk->sk_priority);
3070 skb->mark = sockc.mark;
3071 skb->tstamp = sockc.transmit_time;
3072
3073 if (unlikely(extra_len == 4))
3074 skb->no_fcs = 1;
3075
3076 packet_parse_headers(skb, sock);
3077
3078 if (vnet_hdr_sz) {
3079 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3080 if (err)
3081 goto out_free;
3082 len += vnet_hdr_sz;
3083 virtio_net_hdr_set_proto(skb, &vnet_hdr);
3084 }
3085
3086 err = packet_xmit(po, skb);
3087
3088 if (unlikely(err != 0)) {
3089 if (err > 0)
3090 err = net_xmit_errno(err);
3091 if (err)
3092 goto out_unlock;
3093 }
3094
3095 dev_put(dev);
3096
3097 return len;
3098
3099 out_free:
3100 kfree_skb(skb);
3101 out_unlock:
3102 dev_put(dev);
3103 out:
3104 return err;
3105 }
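/*
 * Example (userspace sketch, interface and destination MAC assumed): on
 * SOCK_DGRAM sockets the kernel builds the link-layer header via
 * dev_hard_header() above, so only the payload and a sockaddr_ll are
 * supplied.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *		.sll_halen    = ETH_ALEN,
 *		.sll_addr     = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	sendto(fd, payload, payload_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */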
3106
3107 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3108 {
3109 struct sock *sk = sock->sk;
3110 struct packet_sock *po = pkt_sk(sk);
3111
3112 /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3113 * tpacket_snd() will redo the check safely.
3114 */
3115 if (data_race(po->tx_ring.pg_vec))
3116 return tpacket_snd(po, msg);
3117
3118 return packet_snd(sock, msg, len);
3119 }
3120
3121 /*
3122 * Close a PACKET socket. This is fairly simple. We immediately go
3123 * to 'closed' state and remove our protocol entry in the device list.
3124 */
3125
3126 static int packet_release(struct socket *sock)
3127 {
3128 struct sock *sk = sock->sk;
3129 struct packet_sock *po;
3130 struct packet_fanout *f;
3131 struct net *net;
3132 union tpacket_req_u req_u;
3133
3134 if (!sk)
3135 return 0;
3136
3137 net = sock_net(sk);
3138 po = pkt_sk(sk);
3139
3140 mutex_lock(&net->packet.sklist_lock);
3141 sk_del_node_init_rcu(sk);
3142 mutex_unlock(&net->packet.sklist_lock);
3143
3144 sock_prot_inuse_add(net, sk->sk_prot, -1);
3145
3146 spin_lock(&po->bind_lock);
3147 unregister_prot_hook(sk, false);
3148 packet_cached_dev_reset(po);
3149
3150 if (po->prot_hook.dev) {
3151 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3152 po->prot_hook.dev = NULL;
3153 }
3154 spin_unlock(&po->bind_lock);
3155
3156 packet_flush_mclist(sk);
3157
3158 lock_sock(sk);
3159 if (po->rx_ring.pg_vec) {
3160 memset(&req_u, 0, sizeof(req_u));
3161 packet_set_ring(sk, &req_u, 1, 0);
3162 }
3163
3164 if (po->tx_ring.pg_vec) {
3165 memset(&req_u, 0, sizeof(req_u));
3166 packet_set_ring(sk, &req_u, 1, 1);
3167 }
3168 release_sock(sk);
3169
3170 f = fanout_release(sk);
3171
3172 synchronize_net();
3173
3174 kfree(po->rollover);
3175 if (f) {
3176 fanout_release_data(f);
3177 kvfree(f);
3178 }
3179 /*
3180 * Now the socket is dead. No more input will appear.
3181 */
3182 sock_orphan(sk);
3183 sock->sk = NULL;
3184
3185 /* Purge queues */
3186
3187 skb_queue_purge(&sk->sk_receive_queue);
3188 packet_free_pending(po);
3189
3190 sock_put(sk);
3191 return 0;
3192 }
3193
3194 /*
3195 * Attach a packet hook.
3196 */
3197
3198 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3199 __be16 proto)
3200 {
3201 struct packet_sock *po = pkt_sk(sk);
3202 struct net_device *dev = NULL;
3203 bool unlisted = false;
3204 bool need_rehook;
3205 int ret = 0;
3206
3207 lock_sock(sk);
3208 spin_lock(&po->bind_lock);
3209 if (!proto)
3210 proto = po->num;
3211
3212 rcu_read_lock();
3213
3214 if (po->fanout) {
3215 ret = -EINVAL;
3216 goto out_unlock;
3217 }
3218
3219 if (name) {
3220 dev = dev_get_by_name_rcu(sock_net(sk), name);
3221 if (!dev) {
3222 ret = -ENODEV;
3223 goto out_unlock;
3224 }
3225 } else if (ifindex) {
3226 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3227 if (!dev) {
3228 ret = -ENODEV;
3229 goto out_unlock;
3230 }
3231 }
3232
3233 need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev;
3234
3235 if (need_rehook) {
3236 dev_hold(dev);
3237 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
3238 rcu_read_unlock();
3239 /* prevents packet_notifier() from calling
3240 * register_prot_hook()
3241 */
3242 WRITE_ONCE(po->num, 0);
3243 __unregister_prot_hook(sk, true);
3244 rcu_read_lock();
3245 if (dev)
3246 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3247 dev->ifindex);
3248 }
3249
3250 BUG_ON(packet_sock_flag(po, PACKET_SOCK_RUNNING));
3251 WRITE_ONCE(po->num, proto);
3252 po->prot_hook.type = proto;
3253
3254 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3255
3256 if (unlikely(unlisted)) {
3257 po->prot_hook.dev = NULL;
3258 WRITE_ONCE(po->ifindex, -1);
3259 packet_cached_dev_reset(po);
3260 } else {
3261 netdev_hold(dev, &po->prot_hook.dev_tracker,
3262 GFP_ATOMIC);
3263 po->prot_hook.dev = dev;
3264 WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3265 packet_cached_dev_assign(po, dev);
3266 }
3267 dev_put(dev);
3268 }
3269
3270 if (proto == 0 || !need_rehook)
3271 goto out_unlock;
3272
3273 if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3274 register_prot_hook(sk);
3275 } else {
3276 sk->sk_err = ENETDOWN;
3277 if (!sock_flag(sk, SOCK_DEAD))
3278 sk_error_report(sk);
3279 }
3280
3281 out_unlock:
3282 rcu_read_unlock();
3283 spin_unlock(&po->bind_lock);
3284 release_sock(sk);
3285 return ret;
3286 }
3287
3288 /*
3289 * Bind a packet socket to a device
3290 */
3291
3292 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3293 int addr_len)
3294 {
3295 struct sock *sk = sock->sk;
3296 char name[sizeof(uaddr->sa_data_min) + 1];
3297
3298 /*
3299 * Check legality
3300 */
3301
3302 if (addr_len != sizeof(struct sockaddr))
3303 return -EINVAL;
3304 /* uaddr->sa_data comes from the userspace, it's not guaranteed to be
3305 * zero-terminated.
3306 */
3307 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min));
3308 name[sizeof(uaddr->sa_data_min)] = 0;
3309
3310 return packet_do_bind(sk, name, 0, 0);
3311 }
3312
3313 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3314 {
3315 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3316 struct sock *sk = sock->sk;
3317
3318 /*
3319 * Check legality
3320 */
3321
3322 if (addr_len < sizeof(struct sockaddr_ll))
3323 return -EINVAL;
3324 if (sll->sll_family != AF_PACKET)
3325 return -EINVAL;
3326
3327 return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol);
3328 }
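/*
 * Example (userspace sketch, device name assumed): binding restricts
 * delivery to one interface and re-registers the protocol hook via
 * packet_do_bind() above.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */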
3329
3330 static struct proto packet_proto = {
3331 .name = "PACKET",
3332 .owner = THIS_MODULE,
3333 .obj_size = sizeof(struct packet_sock),
3334 };
3335
3336 /*
3337 * Create a packet of type SOCK_PACKET.
3338 */
3339
3340 static int packet_create(struct net *net, struct socket *sock, int protocol,
3341 int kern)
3342 {
3343 struct sock *sk;
3344 struct packet_sock *po;
3345 __be16 proto = (__force __be16)protocol; /* weird, but documented */
3346 int err;
3347
3348 if (!ns_capable(net->user_ns, CAP_NET_RAW))
3349 return -EPERM;
3350 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3351 sock->type != SOCK_PACKET)
3352 return -ESOCKTNOSUPPORT;
3353
3354 sock->state = SS_UNCONNECTED;
3355
3356 err = -ENOBUFS;
3357 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3358 if (sk == NULL)
3359 goto out;
3360
3361 sock->ops = &packet_ops;
3362 if (sock->type == SOCK_PACKET)
3363 sock->ops = &packet_ops_spkt;
3364
3365 sock_init_data(sock, sk);
3366
3367 po = pkt_sk(sk);
3368 init_completion(&po->skb_completion);
3369 sk->sk_family = PF_PACKET;
3370 po->num = proto;
3371
3372 err = packet_alloc_pending(po);
3373 if (err)
3374 goto out2;
3375
3376 packet_cached_dev_reset(po);
3377
3378 sk->sk_destruct = packet_sock_destruct;
3379
3380 /*
3381 * Attach a protocol block
3382 */
3383
3384 spin_lock_init(&po->bind_lock);
3385 mutex_init(&po->pg_vec_lock);
3386 po->rollover = NULL;
3387 po->prot_hook.func = packet_rcv;
3388
3389 if (sock->type == SOCK_PACKET)
3390 po->prot_hook.func = packet_rcv_spkt;
3391
3392 po->prot_hook.af_packet_priv = sk;
3393 po->prot_hook.af_packet_net = sock_net(sk);
3394
3395 if (proto) {
3396 po->prot_hook.type = proto;
3397 __register_prot_hook(sk);
3398 }
3399
3400 mutex_lock(&net->packet.sklist_lock);
3401 sk_add_node_tail_rcu(sk, &net->packet.sklist);
3402 mutex_unlock(&net->packet.sklist_lock);
3403
3404 sock_prot_inuse_add(net, &packet_proto, 1);
3405
3406 return 0;
3407 out2:
3408 sk_free(sk);
3409 out:
3410 return err;
3411 }
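/*
 * Example (userspace sketch): the three socket types accepted above.
 * All require CAP_NET_RAW in the socket's user namespace.
 *
 *	int raw  = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *				/* frames include the link-layer header */
 *	int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 *				/* link-layer header stripped/generated */
 *	int spkt = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *				/* obsolete SOCK_PACKET interface */
 */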
3412
3413 /*
3414 * Pull a packet from our receive queue and hand it to the user.
3415 * If necessary we block.
3416 */
3417
3418 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3419 int flags)
3420 {
3421 struct sock *sk = sock->sk;
3422 struct sk_buff *skb;
3423 int copied, err;
3424 int vnet_hdr_len = READ_ONCE(pkt_sk(sk)->vnet_hdr_sz);
3425 unsigned int origlen = 0;
3426
3427 err = -EINVAL;
3428 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3429 goto out;
3430
3431 #if 0
3432 /* What error should we return now? EUNATTACH? */
3433 if (pkt_sk(sk)->ifindex < 0)
3434 return -ENODEV;
3435 #endif
3436
3437 if (flags & MSG_ERRQUEUE) {
3438 err = sock_recv_errqueue(sk, msg, len,
3439 SOL_PACKET, PACKET_TX_TIMESTAMP);
3440 goto out;
3441 }
3442
3443 /*
3444 * Call the generic datagram receiver. This handles all sorts
3445 * of horrible races and re-entrancy so we can forget about it
3446 * in the protocol layers.
3447 *
3448 * Now it will return ENETDOWN, if the device has just gone down,
3449 * but then it will block.
3450 */
3451
3452 skb = skb_recv_datagram(sk, flags, &err);
3453
3454 /*
3455 * An error occurred, so return it. Because skb_recv_datagram()
3456 * handles the blocking, we don't need to see or worry about
3457 * blocking retries.
3458 */
3459
3460 if (skb == NULL)
3461 goto out;
3462
3463 packet_rcv_try_clear_pressure(pkt_sk(sk));
3464
3465 if (vnet_hdr_len) {
3466 err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
3467 if (err)
3468 goto out_free;
3469 }
3470
3471 /* You lose any data beyond the buffer you gave. If this worries
3472 * a user program, it can ask the device for its MTU
3473 * anyway.
3474 */
3475 copied = skb->len;
3476 if (copied > len) {
3477 copied = len;
3478 msg->msg_flags |= MSG_TRUNC;
3479 }
3480
3481 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3482 if (err)
3483 goto out_free;
3484
3485 if (sock->type != SOCK_PACKET) {
3486 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3487
3488 /* Original length was stored in sockaddr_ll fields */
3489 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3490 sll->sll_family = AF_PACKET;
3491 sll->sll_protocol = skb->protocol;
3492 }
3493
3494 sock_recv_cmsgs(msg, sk, skb);
3495
3496 if (msg->msg_name) {
3497 const size_t max_len = min(sizeof(skb->cb),
3498 sizeof(struct sockaddr_storage));
3499 int copy_len;
3500
3501 /* If the address length field is there to be filled
3502 * in, we fill it in now.
3503 */
3504 if (sock->type == SOCK_PACKET) {
3505 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3506 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3507 copy_len = msg->msg_namelen;
3508 } else {
3509 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3510
3511 msg->msg_namelen = sll->sll_halen +
3512 offsetof(struct sockaddr_ll, sll_addr);
3513 copy_len = msg->msg_namelen;
3514 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3515 memset(msg->msg_name +
3516 offsetof(struct sockaddr_ll, sll_addr),
3517 0, sizeof(sll->sll_addr));
3518 msg->msg_namelen = sizeof(struct sockaddr_ll);
3519 }
3520 }
3521 if (WARN_ON_ONCE(copy_len > max_len)) {
3522 copy_len = max_len;
3523 msg->msg_namelen = copy_len;
3524 }
3525 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3526 }
3527
3528 if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
3529 struct tpacket_auxdata aux;
3530
3531 aux.tp_status = TP_STATUS_USER;
3532 if (skb->ip_summed == CHECKSUM_PARTIAL)
3533 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3534 else if (skb->pkt_type != PACKET_OUTGOING &&
3535 skb_csum_unnecessary(skb))
3536 aux.tp_status |= TP_STATUS_CSUM_VALID;
3537 if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
3538 aux.tp_status |= TP_STATUS_GSO_TCP;
3539
3540 aux.tp_len = origlen;
3541 aux.tp_snaplen = skb->len;
3542 aux.tp_mac = 0;
3543 aux.tp_net = skb_network_offset(skb);
3544 if (skb_vlan_tag_present(skb)) {
3545 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3546 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3547 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3548 } else {
3549 aux.tp_vlan_tci = 0;
3550 aux.tp_vlan_tpid = 0;
3551 }
3552 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3553 }
3554
3555 /*
3556 * Free or return the buffer as appropriate. Again this
3557 * hides all the races and re-entrancy issues from us.
3558 */
3559 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3560
3561 out_free:
3562 skb_free_datagram(sk, skb);
3563 out:
3564 return err;
3565 }
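/*
 * Example (userspace sketch): reading the PACKET_AUXDATA control
 * message emitted above, after enabling it with setsockopt(fd,
 * SOL_PACKET, PACKET_AUXDATA, ...) and calling recvmsg() with
 * msg_control set on "msg".
 *
 *	struct cmsghdr *cmsg;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux =
 *				(struct tpacket_auxdata *)CMSG_DATA(cmsg);
 *			/* aux->tp_len, aux->tp_vlan_tci, ... */
 *		}
 *	}
 */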
3566
3567 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3568 int peer)
3569 {
3570 struct net_device *dev;
3571 struct sock *sk = sock->sk;
3572
3573 if (peer)
3574 return -EOPNOTSUPP;
3575
3576 uaddr->sa_family = AF_PACKET;
3577 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min));
3578 rcu_read_lock();
3579 dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3580 if (dev)
3581 strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min));
3582 rcu_read_unlock();
3583
3584 return sizeof(*uaddr);
3585 }
3586
3587 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3588 int peer)
3589 {
3590 struct net_device *dev;
3591 struct sock *sk = sock->sk;
3592 struct packet_sock *po = pkt_sk(sk);
3593 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3594 int ifindex;
3595
3596 if (peer)
3597 return -EOPNOTSUPP;
3598
3599 ifindex = READ_ONCE(po->ifindex);
3600 sll->sll_family = AF_PACKET;
3601 sll->sll_ifindex = ifindex;
3602 sll->sll_protocol = READ_ONCE(po->num);
3603 sll->sll_pkttype = 0;
3604 rcu_read_lock();
3605 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3606 if (dev) {
3607 sll->sll_hatype = dev->type;
3608 sll->sll_halen = dev->addr_len;
3609
3610 /* Let __fortify_memcpy_chk() know the actual buffer size. */
3611 memcpy(((struct sockaddr_storage *)sll)->__data +
3612 offsetof(struct sockaddr_ll, sll_addr) -
3613 offsetofend(struct sockaddr_ll, sll_family),
3614 dev->dev_addr, dev->addr_len);
3615 } else {
3616 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3617 sll->sll_halen = 0;
3618 }
3619 rcu_read_unlock();
3620
3621 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3622 }
3623
3624 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3625 int what)
3626 {
3627 switch (i->type) {
3628 case PACKET_MR_MULTICAST:
3629 if (i->alen != dev->addr_len)
3630 return -EINVAL;
3631 if (what > 0)
3632 return dev_mc_add(dev, i->addr);
3633 else
3634 return dev_mc_del(dev, i->addr);
3635 break;
3636 case PACKET_MR_PROMISC:
3637 return dev_set_promiscuity(dev, what);
3638 case PACKET_MR_ALLMULTI:
3639 return dev_set_allmulti(dev, what);
3640 case PACKET_MR_UNICAST:
3641 if (i->alen != dev->addr_len)
3642 return -EINVAL;
3643 if (what > 0)
3644 return dev_uc_add(dev, i->addr);
3645 else
3646 return dev_uc_del(dev, i->addr);
3647 break;
3648 default:
3649 break;
3650 }
3651 return 0;
3652 }
3653
3654 static void packet_dev_mclist_delete(struct net_device *dev,
3655 struct packet_mclist **mlp)
3656 {
3657 struct packet_mclist *ml;
3658
3659 while ((ml = *mlp) != NULL) {
3660 if (ml->ifindex == dev->ifindex) {
3661 packet_dev_mc(dev, ml, -1);
3662 *mlp = ml->next;
3663 kfree(ml);
3664 } else
3665 mlp = &ml->next;
3666 }
3667 }
3668
3669 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3670 {
3671 struct packet_sock *po = pkt_sk(sk);
3672 struct packet_mclist *ml, *i;
3673 struct net_device *dev;
3674 int err;
3675
3676 rtnl_lock();
3677
3678 err = -ENODEV;
3679 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3680 if (!dev)
3681 goto done;
3682
3683 err = -EINVAL;
3684 if (mreq->mr_alen > dev->addr_len)
3685 goto done;
3686
3687 err = -ENOBUFS;
3688 i = kmalloc(sizeof(*i), GFP_KERNEL);
3689 if (i == NULL)
3690 goto done;
3691
3692 err = 0;
3693 for (ml = po->mclist; ml; ml = ml->next) {
3694 if (ml->ifindex == mreq->mr_ifindex &&
3695 ml->type == mreq->mr_type &&
3696 ml->alen == mreq->mr_alen &&
3697 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3698 ml->count++;
3699 /* Free the new element ... */
3700 kfree(i);
3701 goto done;
3702 }
3703 }
3704
3705 i->type = mreq->mr_type;
3706 i->ifindex = mreq->mr_ifindex;
3707 i->alen = mreq->mr_alen;
3708 memcpy(i->addr, mreq->mr_address, i->alen);
3709 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3710 i->count = 1;
3711 i->next = po->mclist;
3712 po->mclist = i;
3713 err = packet_dev_mc(dev, i, 1);
3714 if (err) {
3715 po->mclist = i->next;
3716 kfree(i);
3717 }
3718
3719 done:
3720 rtnl_unlock();
3721 return err;
3722 }
3723
3724 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3725 {
3726 struct packet_mclist *ml, **mlp;
3727
3728 rtnl_lock();
3729
3730 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3731 if (ml->ifindex == mreq->mr_ifindex &&
3732 ml->type == mreq->mr_type &&
3733 ml->alen == mreq->mr_alen &&
3734 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3735 if (--ml->count == 0) {
3736 struct net_device *dev;
3737 *mlp = ml->next;
3738 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3739 if (dev)
3740 packet_dev_mc(dev, ml, -1);
3741 kfree(ml);
3742 }
3743 break;
3744 }
3745 }
3746 rtnl_unlock();
3747 return 0;
3748 }
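/*
 * Example (userspace sketch, device name assumed): reference-counted
 * promiscuous mode through the membership API above; the reference is
 * dropped again by PACKET_DROP_MEMBERSHIP or implicitly on close().
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */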
3749
3750 static void packet_flush_mclist(struct sock *sk)
3751 {
3752 struct packet_sock *po = pkt_sk(sk);
3753 struct packet_mclist *ml;
3754
3755 if (!po->mclist)
3756 return;
3757
3758 rtnl_lock();
3759 while ((ml = po->mclist) != NULL) {
3760 struct net_device *dev;
3761
3762 po->mclist = ml->next;
3763 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3764 if (dev != NULL)
3765 packet_dev_mc(dev, ml, -1);
3766 kfree(ml);
3767 }
3768 rtnl_unlock();
3769 }
3770
3771 static int
3772 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3773 unsigned int optlen)
3774 {
3775 struct sock *sk = sock->sk;
3776 struct packet_sock *po = pkt_sk(sk);
3777 int ret;
3778
3779 if (level != SOL_PACKET)
3780 return -ENOPROTOOPT;
3781
3782 switch (optname) {
3783 case PACKET_ADD_MEMBERSHIP:
3784 case PACKET_DROP_MEMBERSHIP:
3785 {
3786 struct packet_mreq_max mreq;
3787 int len = optlen;
3788 memset(&mreq, 0, sizeof(mreq));
3789 if (len < sizeof(struct packet_mreq))
3790 return -EINVAL;
3791 if (len > sizeof(mreq))
3792 len = sizeof(mreq);
3793 if (copy_from_sockptr(&mreq, optval, len))
3794 return -EFAULT;
3795 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3796 return -EINVAL;
3797 if (optname == PACKET_ADD_MEMBERSHIP)
3798 ret = packet_mc_add(sk, &mreq);
3799 else
3800 ret = packet_mc_drop(sk, &mreq);
3801 return ret;
3802 }
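
/*
 * Illustrative userspace sketch (not part of this file), assuming fd is
 * an AF_PACKET socket and ifindex came from if_nametoindex(): putting
 * the interface into promiscuous mode via the membership path handled
 * above.  PACKET_MR_PROMISC needs no hardware address, so mr_alen
 * stays zero.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */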
3803
3804 case PACKET_RX_RING:
3805 case PACKET_TX_RING:
3806 {
3807 union tpacket_req_u req_u;
3808
3809 ret = -EINVAL;
3810 lock_sock(sk);
3811 switch (po->tp_version) {
3812 case TPACKET_V1:
3813 case TPACKET_V2:
3814 if (optlen < sizeof(req_u.req))
3815 break;
3816 ret = copy_from_sockptr(&req_u.req, optval,
3817 sizeof(req_u.req)) ?
3818 -EINVAL : 0;
3819 break;
3820 case TPACKET_V3:
3821 default:
3822 if (optlen < sizeof(req_u.req3))
3823 break;
3824 ret = copy_from_sockptr(&req_u.req3, optval,
3825 sizeof(req_u.req3)) ?
3826 -EINVAL : 0;
3827 break;
3828 }
3829 if (!ret)
3830 ret = packet_set_ring(sk, &req_u, 0,
3831 optname == PACKET_TX_RING);
3832 release_sock(sk);
3833 return ret;
3834 }
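
/*
 * Illustrative userspace sketch (not part of this file), assuming fd is
 * an AF_PACKET socket on a 4 KiB page system: configuring a TPACKET_V2
 * receive ring.  packet_set_ring() below demands a page-aligned block
 * size, a frame size that is a multiple of TPACKET_ALIGNMENT, and
 * tp_frame_nr == (tp_block_size / tp_frame_size) * tp_block_nr; here
 * 64 blocks of two 2 KiB frames give exactly 128 frames.
 *
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_frame_size = 2048,
 *		.tp_block_nr   = 64,
 *		.tp_frame_nr   = 128,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */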
3835 case PACKET_COPY_THRESH:
3836 {
3837 int val;
3838
3839 if (optlen != sizeof(val))
3840 return -EINVAL;
3841 if (copy_from_sockptr(&val, optval, sizeof(val)))
3842 return -EFAULT;
3843
3844 pkt_sk(sk)->copy_thresh = val;
3845 return 0;
3846 }
3847 case PACKET_VERSION:
3848 {
3849 int val;
3850
3851 if (optlen != sizeof(val))
3852 return -EINVAL;
3853 if (copy_from_sockptr(&val, optval, sizeof(val)))
3854 return -EFAULT;
3855 switch (val) {
3856 case TPACKET_V1:
3857 case TPACKET_V2:
3858 case TPACKET_V3:
3859 break;
3860 default:
3861 return -EINVAL;
3862 }
3863 lock_sock(sk);
3864 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3865 ret = -EBUSY;
3866 } else {
3867 po->tp_version = val;
3868 ret = 0;
3869 }
3870 release_sock(sk);
3871 return ret;
3872 }
3873 case PACKET_RESERVE:
3874 {
3875 unsigned int val;
3876
3877 if (optlen != sizeof(val))
3878 return -EINVAL;
3879 if (copy_from_sockptr(&val, optval, sizeof(val)))
3880 return -EFAULT;
3881 if (val > INT_MAX)
3882 return -EINVAL;
3883 lock_sock(sk);
3884 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3885 ret = -EBUSY;
3886 } else {
3887 po->tp_reserve = val;
3888 ret = 0;
3889 }
3890 release_sock(sk);
3891 return ret;
3892 }
3893 case PACKET_LOSS:
3894 {
3895 unsigned int val;
3896
3897 if (optlen != sizeof(val))
3898 return -EINVAL;
3899 if (copy_from_sockptr(&val, optval, sizeof(val)))
3900 return -EFAULT;
3901
3902 lock_sock(sk);
3903 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3904 ret = -EBUSY;
3905 } else {
3906 packet_sock_flag_set(po, PACKET_SOCK_TP_LOSS, val);
3907 ret = 0;
3908 }
3909 release_sock(sk);
3910 return ret;
3911 }
3912 case PACKET_AUXDATA:
3913 {
3914 int val;
3915
3916 if (optlen < sizeof(val))
3917 return -EINVAL;
3918 if (copy_from_sockptr(&val, optval, sizeof(val)))
3919 return -EFAULT;
3920
3921 packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
3922 return 0;
3923 }
3924 case PACKET_ORIGDEV:
3925 {
3926 int val;
3927
3928 if (optlen < sizeof(val))
3929 return -EINVAL;
3930 if (copy_from_sockptr(&val, optval, sizeof(val)))
3931 return -EFAULT;
3932
3933 packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
3934 return 0;
3935 }
3936 case PACKET_VNET_HDR:
3937 case PACKET_VNET_HDR_SZ:
3938 {
3939 int val, hdr_len;
3940
3941 if (sock->type != SOCK_RAW)
3942 return -EINVAL;
3943 if (optlen < sizeof(val))
3944 return -EINVAL;
3945 if (copy_from_sockptr(&val, optval, sizeof(val)))
3946 return -EFAULT;
3947
3948 if (optname == PACKET_VNET_HDR_SZ) {
3949 if (val && val != sizeof(struct virtio_net_hdr) &&
3950 val != sizeof(struct virtio_net_hdr_mrg_rxbuf))
3951 return -EINVAL;
3952 hdr_len = val;
3953 } else {
3954 hdr_len = val ? sizeof(struct virtio_net_hdr) : 0;
3955 }
3956 lock_sock(sk);
3957 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3958 ret = -EBUSY;
3959 } else {
3960 WRITE_ONCE(po->vnet_hdr_sz, hdr_len);
3961 ret = 0;
3962 }
3963 release_sock(sk);
3964 return ret;
3965 }
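
/*
 * Illustrative userspace sketch (not part of this file): asking for a
 * virtio_net_hdr in front of each packet so offload metadata travels
 * with the payload.  As enforced above, this only works on SOCK_RAW
 * sockets and must happen before any ring is mapped.
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));
 */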
3966 case PACKET_TIMESTAMP:
3967 {
3968 int val;
3969
3970 if (optlen != sizeof(val))
3971 return -EINVAL;
3972 if (copy_from_sockptr(&val, optval, sizeof(val)))
3973 return -EFAULT;
3974
3975 WRITE_ONCE(po->tp_tstamp, val);
3976 return 0;
3977 }
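
/*
 * Illustrative userspace sketch (not part of this file): selecting
 * hardware timestamps for ring frames.  The value is a mask of
 * SOF_TIMESTAMPING_* flags from <linux/net_tstamp.h>.
 *
 *	int val = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &val, sizeof(val));
 */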
3978 case PACKET_FANOUT:
3979 {
3980 struct fanout_args args = { 0 };
3981
3982 if (optlen != sizeof(int) && optlen != sizeof(args))
3983 return -EINVAL;
3984 if (copy_from_sockptr(&args, optval, optlen))
3985 return -EFAULT;
3986
3987 return fanout_add(sk, &args);
3988 }
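
/*
 * Illustrative userspace sketch (not part of this file): joining fanout
 * group 42 with flow-hash load balancing.  The low 16 bits of the
 * argument carry the group id and the high 16 bits the mode, matching
 * the encoding PACKET_FANOUT reports via getsockopt() below.
 *
 *	int arg = 42 | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */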
3989 case PACKET_FANOUT_DATA:
3990 {
3991 /* Paired with the WRITE_ONCE() in fanout_add() */
3992 if (!READ_ONCE(po->fanout))
3993 return -EINVAL;
3994
3995 return fanout_set_data(po, optval, optlen);
3996 }
3997 case PACKET_IGNORE_OUTGOING:
3998 {
3999 int val;
4000
4001 if (optlen != sizeof(val))
4002 return -EINVAL;
4003 if (copy_from_sockptr(&val, optval, sizeof(val)))
4004 return -EFAULT;
4005 if (val < 0 || val > 1)
4006 return -EINVAL;
4007
4008 WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
4009 return 0;
4010 }
4011 case PACKET_TX_HAS_OFF:
4012 {
4013 unsigned int val;
4014
4015 if (optlen != sizeof(val))
4016 return -EINVAL;
4017 if (copy_from_sockptr(&val, optval, sizeof(val)))
4018 return -EFAULT;
4019
4020 lock_sock(sk);
4021 if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
4022 packet_sock_flag_set(po, PACKET_SOCK_TX_HAS_OFF, val);
4023
4024 release_sock(sk);
4025 return 0;
4026 }
4027 case PACKET_QDISC_BYPASS:
4028 {
4029 int val;
4030
4031 if (optlen != sizeof(val))
4032 return -EINVAL;
4033 if (copy_from_sockptr(&val, optval, sizeof(val)))
4034 return -EFAULT;
4035
4036 packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, val);
4037 return 0;
4038 }
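
/*
 * Illustrative userspace sketch (not part of this file): transmitting
 * straight through the driver, skipping the qdisc layer; packets may
 * then be dropped silently when the device queue is full.
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 */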
4039 default:
4040 return -ENOPROTOOPT;
4041 }
4042 }
4043
4044 static int packet_getsockopt(struct socket *sock, int level, int optname,
4045 char __user *optval, int __user *optlen)
4046 {
4047 int len;
4048 int val, lv = sizeof(val);
4049 struct sock *sk = sock->sk;
4050 struct packet_sock *po = pkt_sk(sk);
4051 void *data = &val;
4052 union tpacket_stats_u st;
4053 struct tpacket_rollover_stats rstats;
4054 int drops;
4055
4056 if (level != SOL_PACKET)
4057 return -ENOPROTOOPT;
4058
4059 if (get_user(len, optlen))
4060 return -EFAULT;
4061
4062 if (len < 0)
4063 return -EINVAL;
4064
4065 switch (optname) {
4066 case PACKET_STATISTICS:
4067 spin_lock_bh(&sk->sk_receive_queue.lock);
4068 memcpy(&st, &po->stats, sizeof(st));
4069 memset(&po->stats, 0, sizeof(po->stats));
4070 spin_unlock_bh(&sk->sk_receive_queue.lock);
4071 drops = atomic_xchg(&po->tp_drops, 0);
4072
4073 if (po->tp_version == TPACKET_V3) {
4074 lv = sizeof(struct tpacket_stats_v3);
4075 st.stats3.tp_drops = drops;
4076 st.stats3.tp_packets += drops;
4077 data = &st.stats3;
4078 } else {
4079 lv = sizeof(struct tpacket_stats);
4080 st.stats1.tp_drops = drops;
4081 st.stats1.tp_packets += drops;
4082 data = &st.stats1;
4083 }
4084
4085 break;
4086 case PACKET_AUXDATA:
4087 val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
4088 break;
4089 case PACKET_ORIGDEV:
4090 val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
4091 break;
4092 case PACKET_VNET_HDR:
4093 val = !!READ_ONCE(po->vnet_hdr_sz);
4094 break;
4095 case PACKET_VNET_HDR_SZ:
4096 val = READ_ONCE(po->vnet_hdr_sz);
4097 break;
4098 case PACKET_VERSION:
4099 val = po->tp_version;
4100 break;
4101 case PACKET_HDRLEN:
4102 if (len > sizeof(int))
4103 len = sizeof(int);
4104 if (len < sizeof(int))
4105 return -EINVAL;
4106 if (copy_from_user(&val, optval, len))
4107 return -EFAULT;
4108 switch (val) {
4109 case TPACKET_V1:
4110 val = sizeof(struct tpacket_hdr);
4111 break;
4112 case TPACKET_V2:
4113 val = sizeof(struct tpacket2_hdr);
4114 break;
4115 case TPACKET_V3:
4116 val = sizeof(struct tpacket3_hdr);
4117 break;
4118 default:
4119 return -EINVAL;
4120 }
4121 break;
4122 case PACKET_RESERVE:
4123 val = po->tp_reserve;
4124 break;
4125 case PACKET_LOSS:
4126 val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS);
4127 break;
4128 case PACKET_TIMESTAMP:
4129 val = READ_ONCE(po->tp_tstamp);
4130 break;
4131 case PACKET_FANOUT:
4132 val = (po->fanout ?
4133 ((u32)po->fanout->id |
4134 ((u32)po->fanout->type << 16) |
4135 ((u32)po->fanout->flags << 24)) :
4136 0);
4137 break;
4138 case PACKET_IGNORE_OUTGOING:
4139 val = READ_ONCE(po->prot_hook.ignore_outgoing);
4140 break;
4141 case PACKET_ROLLOVER_STATS:
4142 if (!po->rollover)
4143 return -EINVAL;
4144 rstats.tp_all = atomic_long_read(&po->rollover->num);
4145 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4146 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4147 data = &rstats;
4148 lv = sizeof(rstats);
4149 break;
4150 case PACKET_TX_HAS_OFF:
4151 val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF);
4152 break;
4153 case PACKET_QDISC_BYPASS:
4154 val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS);
4155 break;
4156 default:
4157 return -ENOPROTOOPT;
4158 }
4159
4160 if (len > lv)
4161 len = lv;
4162 if (put_user(len, optlen))
4163 return -EFAULT;
4164 if (copy_to_user(optval, data, len))
4165 return -EFAULT;
4166 return 0;
4167 }
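
/*
 * Illustrative userspace sketch (not part of this file): draining the
 * counters kept by PACKET_STATISTICS above.  The read is destructive,
 * so each call reports the deltas since the previous one, and
 * tp_packets includes the dropped frames.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *	printf("received %u, dropped %u\n", st.tp_packets, st.tp_drops);
 */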
4168
4169 static int packet_notifier(struct notifier_block *this,
4170 unsigned long msg, void *ptr)
4171 {
4172 struct sock *sk;
4173 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4174 struct net *net = dev_net(dev);
4175
4176 rcu_read_lock();
4177 sk_for_each_rcu(sk, &net->packet.sklist) {
4178 struct packet_sock *po = pkt_sk(sk);
4179
4180 switch (msg) {
4181 case NETDEV_UNREGISTER:
4182 if (po->mclist)
4183 packet_dev_mclist_delete(dev, &po->mclist);
4184 fallthrough;
4185
4186 case NETDEV_DOWN:
4187 if (dev->ifindex == po->ifindex) {
4188 spin_lock(&po->bind_lock);
4189 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
4190 __unregister_prot_hook(sk, false);
4191 sk->sk_err = ENETDOWN;
4192 if (!sock_flag(sk, SOCK_DEAD))
4193 sk_error_report(sk);
4194 }
4195 if (msg == NETDEV_UNREGISTER) {
4196 packet_cached_dev_reset(po);
4197 WRITE_ONCE(po->ifindex, -1);
4198 netdev_put(po->prot_hook.dev,
4199 &po->prot_hook.dev_tracker);
4200 po->prot_hook.dev = NULL;
4201 }
4202 spin_unlock(&po->bind_lock);
4203 }
4204 break;
4205 case NETDEV_UP:
4206 if (dev->ifindex == po->ifindex) {
4207 spin_lock(&po->bind_lock);
4208 if (po->num)
4209 register_prot_hook(sk);
4210 spin_unlock(&po->bind_lock);
4211 }
4212 break;
4213 }
4214 }
4215 rcu_read_unlock();
4216 return NOTIFY_DONE;
4217 }
4218
4219
4220 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4221 unsigned long arg)
4222 {
4223 struct sock *sk = sock->sk;
4224
4225 switch (cmd) {
4226 case SIOCOUTQ:
4227 {
4228 int amount = sk_wmem_alloc_get(sk);
4229
4230 return put_user(amount, (int __user *)arg);
4231 }
4232 case SIOCINQ:
4233 {
4234 struct sk_buff *skb;
4235 int amount = 0;
4236
4237 spin_lock_bh(&sk->sk_receive_queue.lock);
4238 skb = skb_peek(&sk->sk_receive_queue);
4239 if (skb)
4240 amount = skb->len;
4241 spin_unlock_bh(&sk->sk_receive_queue.lock);
4242 return put_user(amount, (int __user *)arg);
4243 }
4244 #ifdef CONFIG_INET
4245 case SIOCADDRT:
4246 case SIOCDELRT:
4247 case SIOCDARP:
4248 case SIOCGARP:
4249 case SIOCSARP:
4250 case SIOCGIFADDR:
4251 case SIOCSIFADDR:
4252 case SIOCGIFBRDADDR:
4253 case SIOCSIFBRDADDR:
4254 case SIOCGIFNETMASK:
4255 case SIOCSIFNETMASK:
4256 case SIOCGIFDSTADDR:
4257 case SIOCSIFDSTADDR:
4258 case SIOCSIFFLAGS:
4259 return inet_dgram_ops.ioctl(sock, cmd, arg);
4260 #endif
4261
4262 default:
4263 return -ENOIOCTLCMD;
4264 }
4265 return 0;
4266 }
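
/*
 * Illustrative userspace sketch (not part of this file): note that
 * SIOCINQ reports the length of the *next* queued packet, not the
 * total backlog.
 *
 *	int next_len = 0;
 *	ioctl(fd, SIOCINQ, &next_len);
 */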
4267
4268 static __poll_t packet_poll(struct file *file, struct socket *sock,
4269 poll_table *wait)
4270 {
4271 struct sock *sk = sock->sk;
4272 struct packet_sock *po = pkt_sk(sk);
4273 __poll_t mask = datagram_poll(file, sock, wait);
4274
4275 spin_lock_bh(&sk->sk_receive_queue.lock);
4276 if (po->rx_ring.pg_vec) {
4277 if (!packet_previous_rx_frame(po, &po->rx_ring,
4278 TP_STATUS_KERNEL))
4279 mask |= EPOLLIN | EPOLLRDNORM;
4280 }
4281 packet_rcv_try_clear_pressure(po);
4282 spin_unlock_bh(&sk->sk_receive_queue.lock);
4283 spin_lock_bh(&sk->sk_write_queue.lock);
4284 if (po->tx_ring.pg_vec) {
4285 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4286 mask |= EPOLLOUT | EPOLLWRNORM;
4287 }
4288 spin_unlock_bh(&sk->sk_write_queue.lock);
4289 return mask;
4290 }
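
/*
 * Illustrative userspace sketch (not part of this file) of the loop
 * that pairs with packet_poll() above; "frame" is assumed to point at
 * the current slot in the mapped RX ring.  The kernel marks a slot
 * TP_STATUS_USER when it is ready and userspace returns it by storing
 * TP_STATUS_KERNEL.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)frame;
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		// consume hdr->tp_len bytes at frame + hdr->tp_mac
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *	}
 */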
4291
4292
4293 /* Dirty? Well, I still have not found a better way to account
4294 * for user mmaps.
4295 */
4296
4297 static void packet_mm_open(struct vm_area_struct *vma)
4298 {
4299 struct file *file = vma->vm_file;
4300 struct socket *sock = file->private_data;
4301 struct sock *sk = sock->sk;
4302
4303 if (sk)
4304 atomic_long_inc(&pkt_sk(sk)->mapped);
4305 }
4306
4307 static void packet_mm_close(struct vm_area_struct *vma)
4308 {
4309 struct file *file = vma->vm_file;
4310 struct socket *sock = file->private_data;
4311 struct sock *sk = sock->sk;
4312
4313 if (sk)
4314 atomic_long_dec(&pkt_sk(sk)->mapped);
4315 }
4316
4317 static const struct vm_operations_struct packet_mmap_ops = {
4318 .open = packet_mm_open,
4319 .close = packet_mm_close,
4320 };
4321
4322 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4323 unsigned int len)
4324 {
4325 int i;
4326
4327 for (i = 0; i < len; i++) {
4328 if (likely(pg_vec[i].buffer)) {
4329 if (is_vmalloc_addr(pg_vec[i].buffer))
4330 vfree(pg_vec[i].buffer);
4331 else
4332 free_pages((unsigned long)pg_vec[i].buffer,
4333 order);
4334 pg_vec[i].buffer = NULL;
4335 }
4336 }
4337 kfree(pg_vec);
4338 }
4339
4340 static char *alloc_one_pg_vec_page(unsigned long order)
4341 {
4342 char *buffer;
4343 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4344 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4345
4346 buffer = (char *) __get_free_pages(gfp_flags, order);
4347 if (buffer)
4348 return buffer;
4349
4350 /* __get_free_pages failed, fall back to vmalloc */
4351 buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4352 if (buffer)
4353 return buffer;
4354
4355 /* vmalloc failed, let's dig into swap here */
4356 gfp_flags &= ~__GFP_NORETRY;
4357 buffer = (char *) __get_free_pages(gfp_flags, order);
4358 if (buffer)
4359 return buffer;
4360
4361 /* complete and utter failure */
4362 return NULL;
4363 }
4364
4365 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4366 {
4367 unsigned int block_nr = req->tp_block_nr;
4368 struct pgv *pg_vec;
4369 int i;
4370
4371 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4372 if (unlikely(!pg_vec))
4373 goto out;
4374
4375 for (i = 0; i < block_nr; i++) {
4376 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4377 if (unlikely(!pg_vec[i].buffer))
4378 goto out_free_pgvec;
4379 }
4380
4381 out:
4382 return pg_vec;
4383
4384 out_free_pgvec:
4385 free_pg_vec(pg_vec, order, block_nr);
4386 pg_vec = NULL;
4387 goto out;
4388 }
4389
4390 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4391 int closing, int tx_ring)
4392 {
4393 struct pgv *pg_vec = NULL;
4394 struct packet_sock *po = pkt_sk(sk);
4395 unsigned long *rx_owner_map = NULL;
4396 int was_running, order = 0;
4397 struct packet_ring_buffer *rb;
4398 struct sk_buff_head *rb_queue;
4399 __be16 num;
4400 int err;
4401 /* Local alias kept to minimize code churn */
4402 struct tpacket_req *req = &req_u->req;
4403
4404 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4405 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4406
4407 err = -EBUSY;
4408 if (!closing) {
4409 if (atomic_long_read(&po->mapped))
4410 goto out;
4411 if (packet_read_pending(rb))
4412 goto out;
4413 }
4414
4415 if (req->tp_block_nr) {
4416 unsigned int min_frame_size;
4417
4418 /* Sanity tests and some calculations */
4419 err = -EBUSY;
4420 if (unlikely(rb->pg_vec))
4421 goto out;
4422
4423 switch (po->tp_version) {
4424 case TPACKET_V1:
4425 po->tp_hdrlen = TPACKET_HDRLEN;
4426 break;
4427 case TPACKET_V2:
4428 po->tp_hdrlen = TPACKET2_HDRLEN;
4429 break;
4430 case TPACKET_V3:
4431 po->tp_hdrlen = TPACKET3_HDRLEN;
4432 break;
4433 }
4434
4435 err = -EINVAL;
4436 if (unlikely((int)req->tp_block_size <= 0))
4437 goto out;
4438 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4439 goto out;
4440 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4441 if (po->tp_version >= TPACKET_V3 &&
4442 req->tp_block_size <
4443 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4444 goto out;
4445 if (unlikely(req->tp_frame_size < min_frame_size))
4446 goto out;
4447 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4448 goto out;
4449
4450 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4451 if (unlikely(rb->frames_per_block == 0))
4452 goto out;
4453 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4454 goto out;
4455 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4456 req->tp_frame_nr))
4457 goto out;
4458
4459 err = -ENOMEM;
4460 order = get_order(req->tp_block_size);
4461 pg_vec = alloc_pg_vec(req, order);
4462 if (unlikely(!pg_vec))
4463 goto out;
4464 switch (po->tp_version) {
4465 case TPACKET_V3:
4466 /* Block transmit is not supported yet */
4467 if (!tx_ring) {
4468 init_prb_bdqc(po, rb, pg_vec, req_u);
4469 } else {
4470 struct tpacket_req3 *req3 = &req_u->req3;
4471
4472 if (req3->tp_retire_blk_tov ||
4473 req3->tp_sizeof_priv ||
4474 req3->tp_feature_req_word) {
4475 err = -EINVAL;
4476 goto out_free_pg_vec;
4477 }
4478 }
4479 break;
4480 default:
4481 if (!tx_ring) {
4482 rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4483 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4484 if (!rx_owner_map)
4485 goto out_free_pg_vec;
4486 }
4487 break;
4488 }
4489 }
4490 /* Done */
4491 else {
4492 err = -EINVAL;
4493 if (unlikely(req->tp_frame_nr))
4494 goto out;
4495 }
4496
4497
4498 /* Detach socket from network */
4499 spin_lock(&po->bind_lock);
4500 was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
4501 num = po->num;
4502 if (was_running) {
4503 WRITE_ONCE(po->num, 0);
4504 __unregister_prot_hook(sk, false);
4505 }
4506 spin_unlock(&po->bind_lock);
4507
4508 synchronize_net();
4509
4510 err = -EBUSY;
4511 mutex_lock(&po->pg_vec_lock);
4512 if (closing || atomic_long_read(&po->mapped) == 0) {
4513 err = 0;
4514 spin_lock_bh(&rb_queue->lock);
4515 swap(rb->pg_vec, pg_vec);
4516 if (po->tp_version <= TPACKET_V2)
4517 swap(rb->rx_owner_map, rx_owner_map);
4518 rb->frame_max = (req->tp_frame_nr - 1);
4519 rb->head = 0;
4520 rb->frame_size = req->tp_frame_size;
4521 spin_unlock_bh(&rb_queue->lock);
4522
4523 swap(rb->pg_vec_order, order);
4524 swap(rb->pg_vec_len, req->tp_block_nr);
4525
4526 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4527 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4528 tpacket_rcv : packet_rcv;
4529 skb_queue_purge(rb_queue);
4530 if (atomic_long_read(&po->mapped))
4531 pr_err("packet_mmap: vma is busy: %ld\n",
4532 atomic_long_read(&po->mapped));
4533 }
4534 mutex_unlock(&po->pg_vec_lock);
4535
4536 spin_lock(&po->bind_lock);
4537 if (was_running) {
4538 WRITE_ONCE(po->num, num);
4539 register_prot_hook(sk);
4540 }
4541 spin_unlock(&po->bind_lock);
4542 if (pg_vec && (po->tp_version > TPACKET_V2)) {
4543 /* Because we don't support block-based V3 on tx-ring */
4544 if (!tx_ring)
4545 prb_shutdown_retire_blk_timer(po, rb_queue);
4546 }
4547
4548 out_free_pg_vec:
4549 if (pg_vec) {
4550 bitmap_free(rx_owner_map);
4551 free_pg_vec(pg_vec, order, req->tp_block_nr);
4552 }
4553 out:
4554 return err;
4555 }
4556
4557 static int packet_mmap(struct file *file, struct socket *sock,
4558 struct vm_area_struct *vma)
4559 {
4560 struct sock *sk = sock->sk;
4561 struct packet_sock *po = pkt_sk(sk);
4562 unsigned long size, expected_size;
4563 struct packet_ring_buffer *rb;
4564 unsigned long start;
4565 int err = -EINVAL;
4566 int i;
4567
4568 if (vma->vm_pgoff)
4569 return -EINVAL;
4570
4571 mutex_lock(&po->pg_vec_lock);
4572
4573 expected_size = 0;
4574 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4575 if (rb->pg_vec) {
4576 expected_size += rb->pg_vec_len
4577 * rb->pg_vec_pages
4578 * PAGE_SIZE;
4579 }
4580 }
4581
4582 if (expected_size == 0)
4583 goto out;
4584
4585 size = vma->vm_end - vma->vm_start;
4586 if (size != expected_size)
4587 goto out;
4588
4589 start = vma->vm_start;
4590 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4591 if (rb->pg_vec == NULL)
4592 continue;
4593
4594 for (i = 0; i < rb->pg_vec_len; i++) {
4595 struct page *page;
4596 void *kaddr = rb->pg_vec[i].buffer;
4597 int pg_num;
4598
4599 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4600 page = pgv_to_page(kaddr);
4601 err = vm_insert_page(vma, start, page);
4602 if (unlikely(err))
4603 goto out;
4604 start += PAGE_SIZE;
4605 kaddr += PAGE_SIZE;
4606 }
4607 }
4608 }
4609
4610 atomic_long_inc(&po->mapped);
4611 vma->vm_ops = &packet_mmap_ops;
4612 err = 0;
4613
4614 out:
4615 mutex_unlock(&po->pg_vec_lock);
4616 return err;
4617 }
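
/*
 * Illustrative userspace sketch (not part of this file), where req_rx
 * and req_tx are the requests passed to PACKET_RX_RING/PACKET_TX_RING
 * earlier: packet_mmap() above insists on offset 0 and a length that
 * exactly covers the RX ring followed by the TX ring, so both rings
 * are obtained with a single call.
 *
 *	size_t len = req_rx.tp_block_size * req_rx.tp_block_nr +
 *		     req_tx.tp_block_size * req_tx.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */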
4618
4619 static const struct proto_ops packet_ops_spkt = {
4620 .family = PF_PACKET,
4621 .owner = THIS_MODULE,
4622 .release = packet_release,
4623 .bind = packet_bind_spkt,
4624 .connect = sock_no_connect,
4625 .socketpair = sock_no_socketpair,
4626 .accept = sock_no_accept,
4627 .getname = packet_getname_spkt,
4628 .poll = datagram_poll,
4629 .ioctl = packet_ioctl,
4630 .gettstamp = sock_gettstamp,
4631 .listen = sock_no_listen,
4632 .shutdown = sock_no_shutdown,
4633 .sendmsg = packet_sendmsg_spkt,
4634 .recvmsg = packet_recvmsg,
4635 .mmap = sock_no_mmap,
4636 };
4637
4638 static const struct proto_ops packet_ops = {
4639 .family = PF_PACKET,
4640 .owner = THIS_MODULE,
4641 .release = packet_release,
4642 .bind = packet_bind,
4643 .connect = sock_no_connect,
4644 .socketpair = sock_no_socketpair,
4645 .accept = sock_no_accept,
4646 .getname = packet_getname,
4647 .poll = packet_poll,
4648 .ioctl = packet_ioctl,
4649 .gettstamp = sock_gettstamp,
4650 .listen = sock_no_listen,
4651 .shutdown = sock_no_shutdown,
4652 .setsockopt = packet_setsockopt,
4653 .getsockopt = packet_getsockopt,
4654 .sendmsg = packet_sendmsg,
4655 .recvmsg = packet_recvmsg,
4656 .mmap = packet_mmap,
4657 };
4658
4659 static const struct net_proto_family packet_family_ops = {
4660 .family = PF_PACKET,
4661 .create = packet_create,
4662 .owner = THIS_MODULE,
4663 };
4664
4665 static struct notifier_block packet_netdev_notifier = {
4666 .notifier_call = packet_notifier,
4667 };
4668
4669 #ifdef CONFIG_PROC_FS
4670
4671 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4672 __acquires(RCU)
4673 {
4674 struct net *net = seq_file_net(seq);
4675
4676 rcu_read_lock();
4677 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4678 }
4679
4680 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4681 {
4682 struct net *net = seq_file_net(seq);
4683 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4684 }
4685
4686 static void packet_seq_stop(struct seq_file *seq, void *v)
4687 __releases(RCU)
4688 {
4689 rcu_read_unlock();
4690 }
4691
4692 static int packet_seq_show(struct seq_file *seq, void *v)
4693 {
4694 if (v == SEQ_START_TOKEN)
4695 seq_printf(seq,
4696 "%*sRefCnt Type Proto Iface R Rmem User Inode\n",
4697 IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
4698 else {
4699 struct sock *s = sk_entry(v);
4700 const struct packet_sock *po = pkt_sk(s);
4701
4702 seq_printf(seq,
4703 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
4704 s,
4705 refcount_read(&s->sk_refcnt),
4706 s->sk_type,
4707 ntohs(READ_ONCE(po->num)),
4708 READ_ONCE(po->ifindex),
4709 packet_sock_flag(po, PACKET_SOCK_RUNNING),
4710 atomic_read(&s->sk_rmem_alloc),
4711 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4712 sock_i_ino(s));
4713 }
4714
4715 return 0;
4716 }
4717
4718 static const struct seq_operations packet_seq_ops = {
4719 .start = packet_seq_start,
4720 .next = packet_seq_next,
4721 .stop = packet_seq_stop,
4722 .show = packet_seq_show,
4723 };
4724 #endif
4725
4726 static int __net_init packet_net_init(struct net *net)
4727 {
4728 mutex_init(&net->packet.sklist_lock);
4729 INIT_HLIST_HEAD(&net->packet.sklist);
4730
4731 #ifdef CONFIG_PROC_FS
4732 if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4733 sizeof(struct seq_net_private)))
4734 return -ENOMEM;
4735 #endif /* CONFIG_PROC_FS */
4736
4737 return 0;
4738 }
4739
4740 static void __net_exit packet_net_exit(struct net *net)
4741 {
4742 remove_proc_entry("packet", net->proc_net);
4743 WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4744 }
4745
4746 static struct pernet_operations packet_net_ops = {
4747 .init = packet_net_init,
4748 .exit = packet_net_exit,
4749 };
4750
4751
4752 static void __exit packet_exit(void)
4753 {
4754 sock_unregister(PF_PACKET);
4755 proto_unregister(&packet_proto);
4756 unregister_netdevice_notifier(&packet_netdev_notifier);
4757 unregister_pernet_subsys(&packet_net_ops);
4758 }
4759
4760 static int __init packet_init(void)
4761 {
4762 int rc;
4763
4764 rc = register_pernet_subsys(&packet_net_ops);
4765 if (rc)
4766 goto out;
4767 rc = register_netdevice_notifier(&packet_netdev_notifier);
4768 if (rc)
4769 goto out_pernet;
4770 rc = proto_register(&packet_proto, 0);
4771 if (rc)
4772 goto out_notifier;
4773 rc = sock_register(&packet_family_ops);
4774 if (rc)
4775 goto out_proto;
4776
4777 return 0;
4778
4779 out_proto:
4780 proto_unregister(&packet_proto);
4781 out_notifier:
4782 unregister_netdevice_notifier(&packet_netdev_notifier);
4783 out_pernet:
4784 unregister_pernet_subsys(&packet_net_ops);
4785 out:
4786 return rc;
4787 }
4788
4789 module_init(packet_init);
4790 module_exit(packet_exit);
4791 MODULE_LICENSE("GPL");
4792 MODULE_ALIAS_NETPROTO(PF_PACKET);
4793