// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
#include <linux/kconfig.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE                                    \
        (sizeof(struct ethhdr) +                        \
         sizeof(struct iphdr) +                         \
         sizeof(struct udphdr) +                        \
         MAX_UDP_CHUNK)

static void zap_completion_queue(void);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)                           \
        pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)                            \
        pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)                         \
        pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

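/* Hand one skb directly to the driver's ndo_start_xmit() on @txq.  If the
 * device cannot hardware-accelerate the VLAN tag, push the tag into the
 * payload first so the driver sees a self-contained frame.
 */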
static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev,
                                      struct netdev_queue *txq)
{
        netdev_tx_t status = NETDEV_TX_OK;
        netdev_features_t features;

        features = netif_skb_features(skb);

        if (skb_vlan_tag_present(skb) &&
            !vlan_hw_offload_capable(features, skb->vlan_proto)) {
                skb = __vlan_hwaccel_push_inside(skb);
                if (unlikely(!skb)) {
                        /* This is actually a packet drop, but we
                         * don't want the code that calls this
                         * function to try and operate on a NULL skb.
                         */
                        goto out;
                }
        }

        status = netdev_start_xmit(skb, dev, txq, false);

out:
        return status;
}

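/* Worker for npinfo->tx_work: drain the deferred-transmit queue that
 * __netpoll_send_skb() falls back to when it cannot take the tx lock or
 * the queue is stopped.  If a queue is still congested, requeue the skb
 * and retry roughly a tenth of a second later.
 */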
static void queue_process(struct work_struct *work)
{
        struct netpoll_info *npinfo =
                container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
                struct netdev_queue *txq;
                unsigned int q_index;

                if (!netif_device_present(dev) || !netif_running(dev)) {
                        kfree_skb(skb);
                        continue;
                }

                local_irq_save(flags);
                /* check if skb->queue_mapping is still valid */
                q_index = skb_get_queue_mapping(skb);
                if (unlikely(q_index >= dev->real_num_tx_queues)) {
                        q_index = q_index % dev->real_num_tx_queues;
                        skb_set_queue_mapping(skb, q_index);
                }
                txq = netdev_get_tx_queue(dev, q_index);
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (netif_xmit_frozen_or_stopped(txq) ||
                    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
                        skb_queue_head(&npinfo->txq, skb);
                        HARD_TX_UNLOCK(dev, txq);
                        local_irq_restore(flags);

                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
                HARD_TX_UNLOCK(dev, txq);
                local_irq_restore(flags);
        }
}

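/* Return 1 if the current CPU already holds the xmit lock of any tx queue
 * on @dev, i.e. we interrupted a transmit on this device and must not
 * poll it, since drivers may take the same locks in poll and xmit.
 */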
static int netif_local_xmit_active(struct net_device *dev)
{
        int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
                        return 1;
        }

        return 0;
}

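/* Run a single NAPI poll cycle with a zero budget, which services the Tx
 * completion path only.  NAPI_STATE_NPSVC marks the instance as being
 * serviced by netpoll; finding it already set means napi is disabled.
 */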
static void poll_one_napi(struct napi_struct *napi)
{
        int work;

        /* If we set this bit but see that it has already been set,
         * that indicates that napi has been disabled and we need
         * to abort this operation
         */
        if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
                return;

        /* We explicitly pass the polling call a budget of 0 to
         * indicate that we are clearing the Tx path only.
         */
        work = napi->poll(napi, 0);
        WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
        trace_napi_poll(napi, work, 0);

        clear_bit(NAPI_STATE_NPSVC, &napi->state);
}

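/* Poll every NAPI instance on @dev whose poll_owner this CPU can claim;
 * skipping instances another CPU is already polling avoids recursion and
 * contention on the poll routine.
 */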
static void poll_napi(struct net_device *dev)
{
        struct napi_struct *napi;
        int cpu = smp_processor_id();

        list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
                if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
                        poll_one_napi(napi);
                        smp_store_release(&napi->poll_owner, -1);
                }
        }
}

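/* Poll @dev for pending tx completions without relying on interrupts: let
 * the driver's ndo_poll_controller() schedule its cleanup work, then
 * service the device's NAPI instances directly.  Bails out if the device
 * is being opened/closed (dev_lock held) or this CPU is mid-transmit.
 */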
void netpoll_poll_dev(struct net_device *dev)
{
        struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
        const struct net_device_ops *ops;

        /* Don't do any rx activity if the dev_lock mutex is held;
         * the dev_open/close paths use this to block netpoll activity
         * while changing device state.
         */
        if (!ni || down_trylock(&ni->dev_lock))
                return;

        /* Some drivers will take the same locks in poll and xmit;
         * we can't poll if the local CPU is already in xmit.
         */
        if (!netif_running(dev) || netif_local_xmit_active(dev)) {
                up(&ni->dev_lock);
                return;
        }

        ops = dev->netdev_ops;
        if (ops->ndo_poll_controller)
                ops->ndo_poll_controller(dev);

        poll_napi(dev);

        up(&ni->dev_lock);

        zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);

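/* Block netpoll activity on @dev while its state changes: take dev_lock
 * so netpoll_poll_dev() backs off until netpoll_poll_enable() releases it.
 */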
void netpoll_poll_disable(struct net_device *dev)
{
        struct netpoll_info *ni;
        int idx;

        might_sleep();
        idx = srcu_read_lock(&netpoll_srcu);
        ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
        if (ni)
                down(&ni->dev_lock);
        srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

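/* Re-allow netpoll activity on @dev after netpoll_poll_disable(). */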
void netpoll_poll_enable(struct net_device *dev)
{
        struct netpoll_info *ni;

        rcu_read_lock();
        ni = rcu_dereference(dev->npinfo);
        if (ni)
                up(&ni->dev_lock);
        rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);

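/* Top up the preallocated skb pool to MAX_SKBS so messages can still go
 * out when regular allocation fails under memory pressure.
 */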
static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_pool.lock, flags);
        while (skb_pool.qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                __skb_queue_tail(&skb_pool, skb);
        }
        spin_unlock_irqrestore(&skb_pool.lock, flags);
}

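/* Free the skbs sitting on this CPU's softnet completion queue.  Netpoll
 * runs with IRQs disabled, so the NET_TX softirq that would normally free
 * them may never fire; anything not freeable from hard-IRQ context is
 * handed back through dev_kfree_skb_any() instead.
 */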
static void zap_completion_queue(void)
{
        unsigned long flags;
        struct softnet_data *sd = &get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_save(flags);
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_restore(flags);

                while (clist != NULL) {
                        struct sk_buff *skb = clist;

                        clist = clist->next;
                        if (!skb_irq_freeable(skb)) {
                                refcount_set(&skb->users, 1);
                                dev_kfree_skb_any(skb); /* put this one back */
                        } else {
                                __kfree_skb(skb);
                        }
                }
        }

        put_cpu_var(softnet_data);
}

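/* Get an skb of @len bytes for a netpoll message: try a fresh atomic
 * allocation, fall back to the preallocated pool, and as a last resort
 * poll the device a few times in the hope that memory gets freed.
 */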
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int count = 0;
        struct sk_buff *skb;

        zap_completion_queue();
        refill_skbs();
repeat:

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                skb = skb_dequeue(&skb_pool);

        if (!skb) {
                if (++count < 10) {
                        netpoll_poll_dev(np->dev);
                        goto repeat;
                }
                return NULL;
        }

        refcount_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}

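/* Return 1 if the current CPU is already polling one of @dev's NAPI
 * instances, in which case transmitting from here would recurse.
 */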
static int netpoll_owner_active(struct net_device *dev)
{
        struct napi_struct *napi;

        list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
                if (READ_ONCE(napi->poll_owner) == smp_processor_id())
                        return 1;
        }
        return 0;
}

/* call with IRQ disabled */
static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        netdev_tx_t status = NETDEV_TX_BUSY;
        struct net_device *dev;
        unsigned long tries;
        /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo;

        lockdep_assert_irqs_disabled();

        dev = np->dev;
        npinfo = rcu_dereference_bh(dev->npinfo);

        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                dev_kfree_skb_irq(skb);
                return NET_XMIT_DROP;
        }

        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;

                txq = netdev_core_pick_tx(dev, skb, NULL);

                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (HARD_TX_TRYLOCK(dev, txq)) {
                                if (!netif_xmit_stopped(txq))
                                        status = netpoll_start_xmit(skb, dev, txq);

                                HARD_TX_UNLOCK(dev, txq);

                                if (dev_xmit_complete(status))
                                        break;
                        }

                        /* tickle the device; maybe there is some cleanup */
                        netpoll_poll_dev(np->dev);

                        udelay(USEC_PER_POLL);
                }

                WARN_ONCE(!irqs_disabled(),
                          "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
                          dev->name, dev->netdev_ops->ndo_start_xmit);
        }

        if (!dev_xmit_complete(status)) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work, 0);
        }
        return NETDEV_TX_OK;
}

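/* IRQ-safe wrapper around __netpoll_send_skb(): disables local interrupts
 * for the duration of the send, and drops the skb if @np is NULL.
 */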
netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        unsigned long flags;
        netdev_tx_t ret;

        if (unlikely(!np)) {
                dev_kfree_skb_irq(skb);
                ret = NET_XMIT_DROP;
        } else {
                local_irq_save(flags);
                ret = __netpoll_send_skb(np, skb);
                local_irq_restore(flags);
        }
        return ret;
}
EXPORT_SYMBOL(netpoll_send_skb);

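/* Send @len bytes of @msg as a single UDP datagram over @np, building the
 * UDP, IPv4/IPv6 and Ethernet headers by hand so no socket or routing
 * state is needed.  Usable from contexts where the stack proper cannot
 * run, e.g. with interrupts disabled.
 */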
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;
        static atomic_t ip_ident;
        struct ipv6hdr *ip6h;

        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                WARN_ON_ONCE(!irqs_disabled());

        udp_len = len + sizeof(*udph);
        if (np->ipv6)
                ip_len = udp_len + sizeof(*ip6h);
        else
                ip_len = udp_len + sizeof(*iph);

        total_len = ip_len + LL_RESERVED_SPACE(np->dev);

        skb = find_skb(np, total_len + np->dev->needed_tailroom,
                       total_len - len);
        if (!skb)
                return;

        skb_copy_to_linear_data(skb, msg, len);
        skb_put(skb, len);

        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
        udph = udp_hdr(skb);
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);

        if (np->ipv6) {
                udph->check = 0;
                udph->check = csum_ipv6_magic(&np->local_ip.in6,
                                              &np->remote_ip.in6,
                                              udp_len, IPPROTO_UDP,
                                              csum_partial(udph, udp_len, 0));
                if (udph->check == 0)
                        udph->check = CSUM_MANGLED_0;

                skb_push(skb, sizeof(*ip6h));
                skb_reset_network_header(skb);
                ip6h = ipv6_hdr(skb);

                /* ip6h->version = 6; ip6h->priority = 0; */
                *(unsigned char *)ip6h = 0x60;
                ip6h->flow_lbl[0] = 0;
                ip6h->flow_lbl[1] = 0;
                ip6h->flow_lbl[2] = 0;

                ip6h->payload_len = htons(sizeof(struct udphdr) + len);
                ip6h->nexthdr = IPPROTO_UDP;
                ip6h->hop_limit = 32;
                ip6h->saddr = np->local_ip.in6;
                ip6h->daddr = np->remote_ip.in6;

                eth = skb_push(skb, ETH_HLEN);
                skb_reset_mac_header(skb);
                skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
        } else {
                udph->check = 0;
                udph->check = csum_tcpudp_magic(np->local_ip.ip,
                                                np->remote_ip.ip,
                                                udp_len, IPPROTO_UDP,
                                                csum_partial(udph, udp_len, 0));
                if (udph->check == 0)
                        udph->check = CSUM_MANGLED_0;

                skb_push(skb, sizeof(*iph));
                skb_reset_network_header(skb);
                iph = ip_hdr(skb);

                /* iph->version = 4; iph->ihl = 5; */
                *(unsigned char *)iph = 0x45;
                iph->tos = 0;
                put_unaligned(htons(ip_len), &(iph->tot_len));
                iph->id = htons(atomic_inc_return(&ip_ident));
                iph->frag_off = 0;
                iph->ttl = 64;
                iph->protocol = IPPROTO_UDP;
                iph->check = 0;
                put_unaligned(np->local_ip.ip, &(iph->saddr));
                put_unaligned(np->remote_ip.ip, &(iph->daddr));
                iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

                eth = skb_push(skb, ETH_HLEN);
                skb_reset_mac_header(skb);
                skb->protocol = eth->h_proto = htons(ETH_P_IP);
        }

        ether_addr_copy(eth->h_source, np->dev->dev_addr);
        ether_addr_copy(eth->h_dest, np->remote_mac);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

void netpoll_print_options(struct netpoll *np)
{
        np_info(np, "local port %d\n", np->local_port);
        if (np->ipv6)
                np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
        else
                np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
        np_info(np, "interface '%s'\n", np->dev_name);
        np_info(np, "remote port %d\n", np->remote_port);
        if (np->ipv6)
                np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
        else
                np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
        np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

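/* Parse @str into @addr.  Returns 0 for IPv4, 1 for IPv6, -1 on failure
 * (including IPv6 input on a kernel built without CONFIG_IPV6).
 */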
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
        const char *end;

        if (!strchr(str, ':') &&
            in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
                if (!*end)
                        return 0;
        }
        if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
                if (!*end)
                        return 1;
#else
                return -1;
#endif
        }
        return -1;
}

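/* Parse a netpoll configuration string of the form
 *
 *   [local_port]@[local_ip]/[dev_name],[remote_port]@<remote_ip>/[remote_mac]
 *
 * e.g. "6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55".
 * Every field before the remote IP may be left empty to keep its default;
 * the IP version is inferred from the addresses and must match on both
 * sides.
 */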
int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur = opt, *delim;
        int ipv6;
        bool ipversion_set = false;

        if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                if (kstrtou16(cur, 10, &np->local_port))
                        goto parse_failed;
                cur = delim;
        }
        cur++;

        if (*cur != '/') {
                ipversion_set = true;
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim = 0;
                ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
                if (ipv6 < 0)
                        goto parse_failed;
                else
                        np->ipv6 = (bool)ipv6;
                cur = delim;
        }
        cur++;

        if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim = 0;
                strscpy(np->dev_name, cur, sizeof(np->dev_name));
                cur = delim;
        }
        cur++;

        if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                if (*cur == ' ' || *cur == '\t')
                        np_info(np, "warning: whitespace is not allowed\n");
                if (kstrtou16(cur, 10, &np->remote_port))
                        goto parse_failed;
                cur = delim;
        }
        cur++;

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim = 0;
        ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
        if (ipv6 < 0)
                goto parse_failed;
        else if (ipversion_set && np->ipv6 != (bool)ipv6)
                goto parse_failed;
        else
                np->ipv6 = (bool)ipv6;
        cur = delim + 1;

        if (*cur != 0) {
                /* MAC address */
                if (!mac_pton(cur, np->remote_mac))
                        goto parse_failed;
        }

        netpoll_print_options(np);

        return 0;

parse_failed:
        np_info(np, "couldn't parse config at '%s'!\n", cur);
        return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

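/* Attach @np to @ndev.  Allocates and links the shared netpoll_info on
 * first use (taking an extra reference on subsequent uses) and gives the
 * driver a chance to prepare via ndo_netpoll_setup.  Caller holds RTNL.
 */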
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
        struct netpoll_info *npinfo;
        const struct net_device_ops *ops;
        int err;

        if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
                np_err(np, "%s doesn't support polling, aborting\n",
                       ndev->name);
                err = -ENOTSUPP;
                goto out;
        }

        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo) {
                        err = -ENOMEM;
                        goto out;
                }

                sema_init(&npinfo->dev_lock, 1);
                skb_queue_head_init(&npinfo->txq);
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

                refcount_set(&npinfo->refcnt, 1);

                ops = ndev->netdev_ops;
                if (ops->ndo_netpoll_setup) {
                        err = ops->ndo_netpoll_setup(ndev, npinfo);
                        if (err)
                                goto free_npinfo;
                }
        } else {
                npinfo = rtnl_dereference(ndev->npinfo);
                refcount_inc(&npinfo->refcnt);
        }

        np->dev = ndev;
        strscpy(np->dev_name, ndev->name, IFNAMSIZ);
        npinfo->netpoll = np;

        /* last thing to do is link it to the net device structure */
        rcu_assign_pointer(ndev->npinfo, npinfo);

        return 0;

free_npinfo:
        kfree(npinfo);
out:
        return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

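/* Full setup path: resolve np->dev_name to a device, bring the device up
 * if necessary and wait up to carrier_timeout seconds for carrier, pick a
 * local IP address if none was given, then attach via __netpoll_setup().
 */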
int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        bool ip_overwritten = false;
        struct in_device *in_dev;
        int err;

        rtnl_lock();
        if (np->dev_name[0]) {
                struct net *net = current->nsproxy->net_ns;

                ndev = __dev_get_by_name(net, np->dev_name);
        }
        if (!ndev) {
                np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
                err = -ENODEV;
                goto unlock;
        }
        netdev_hold(ndev, &np->dev_tracker, GFP_KERNEL);

        if (netdev_master_upper_dev_get(ndev)) {
                np_err(np, "%s is a slave device, aborting\n", np->dev_name);
                err = -EBUSY;
                goto put;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost;

                np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

                err = dev_open(ndev, NULL);
                if (err) {
                        np_err(np, "failed to open %s\n", ndev->name);
                        goto put;
                }

                rtnl_unlock();
                atmost = jiffies + carrier_timeout * HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                np_notice(np, "timeout waiting for carrier\n");
                                break;
                        }
                        msleep(1);
                }

                rtnl_lock();
        }

        if (!np->local_ip.ip) {
                if (!np->ipv6) {
                        const struct in_ifaddr *ifa;

                        in_dev = __in_dev_get_rtnl(ndev);
                        if (!in_dev)
                                goto put_noaddr;

                        ifa = rtnl_dereference(in_dev->ifa_list);
                        if (!ifa) {
put_noaddr:
                                np_err(np, "no IP address for %s, aborting\n",
                                       np->dev_name);
                                err = -EDESTADDRREQ;
                                goto put;
                        }

                        np->local_ip.ip = ifa->ifa_local;
                        ip_overwritten = true;
                        np_info(np, "local IP %pI4\n", &np->local_ip.ip);
                } else {
#if IS_ENABLED(CONFIG_IPV6)
                        struct inet6_dev *idev;

                        err = -EDESTADDRREQ;
                        idev = __in6_dev_get(ndev);
                        if (idev) {
                                struct inet6_ifaddr *ifp;

                                read_lock_bh(&idev->lock);
                                list_for_each_entry(ifp, &idev->addr_list, if_list) {
                                        if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
                                            !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
                                                continue;
                                        np->local_ip.in6 = ifp->addr;
                                        ip_overwritten = true;
                                        err = 0;
                                        break;
                                }
                                read_unlock_bh(&idev->lock);
                        }
                        if (err) {
                                np_err(np, "no IPv6 address for %s, aborting\n",
                                       np->dev_name);
                                goto put;
                        } else
                                np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
                        np_err(np, "IPv6 is not supported %s, aborting\n",
                               np->dev_name);
                        err = -EINVAL;
                        goto put;
#endif
                }
        }

        /* fill up the skb queue */
        refill_skbs();

        err = __netpoll_setup(np, ndev);
        if (err)
                goto put;
        rtnl_unlock();
        return 0;

put:
        DEBUG_NET_WARN_ON_ONCE(np->dev);
        if (ip_overwritten)
                memset(&np->local_ip, 0, sizeof(np->local_ip));
        netdev_put(ndev, &np->dev_tracker);
unlock:
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
        skb_queue_head_init(&skb_pool);
        return 0;
}
core_initcall(netpoll_init);

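/* RCU callback that tears down a netpoll_info once no reader can still
 * hold a reference.  Runs in softirq context, so cancel_delayed_work_sync()
 * is off-limits; instead purge the queue, cancel the work, and repeat in
 * case an in-flight run of queue_process() requeued anything in between.
 */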
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
        struct netpoll_info *npinfo =
                container_of(rcu_head, struct netpoll_info, rcu);

        skb_queue_purge(&npinfo->txq);

        /* we can't call cancel_delayed_work_sync here, as we are in softirq */
        cancel_delayed_work(&npinfo->tx_work);

        /* clean after last, unfinished work */
        __skb_queue_purge(&npinfo->txq);
        /* now cancel it again */
        cancel_delayed_work(&npinfo->tx_work);
        kfree(npinfo);
}

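/* Detach @np from its device.  Drops the netpoll_info reference and, on
 * the last one, lets the driver clean up and frees the structure after an
 * RCU grace period.  Caller holds RTNL.
 */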
void __netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;

        npinfo = rtnl_dereference(np->dev->npinfo);
        if (!npinfo)
                return;

        synchronize_srcu(&netpoll_srcu);

        if (refcount_dec_and_test(&npinfo->refcnt)) {
                const struct net_device_ops *ops;

                ops = np->dev->netdev_ops;
                if (ops->ndo_netpoll_cleanup)
                        ops->ndo_netpoll_cleanup(np->dev);

                RCU_INIT_POINTER(np->dev->npinfo, NULL);
                call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
        } else
                RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

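/* Clean up and free a dynamically allocated netpoll, waiting for any
 * in-flight netpoll transmitters first.  Caller holds RTNL.
 */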
void __netpoll_free(struct netpoll *np)
{
        ASSERT_RTNL();

        /* Wait for transmitting packets to finish before freeing. */
        synchronize_rcu();
        __netpoll_cleanup(np);
        kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);

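/* Like __netpoll_cleanup() but takes RTNL itself and also releases the
 * device reference acquired by netpoll_setup().
 */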
void netpoll_cleanup(struct netpoll *np)
{
        rtnl_lock();
        if (!np->dev)
                goto out;
        __netpoll_cleanup(np);
        netdev_put(np->dev, &np->dev_tracker);
        np->dev = NULL;
out:
        rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);