/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)
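/* For the untagged Ethernet/IPv4 case this works out to 14 (ethhdr) +
 * 20 (iphdr) + 8 (udphdr) + 1460 (MAX_UDP_CHUNK) = 1502 bytes per pooled skb.
 */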

static void zap_completion_queue(void);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

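/* Push one skb out through @txq on @dev. If the skb carries a VLAN tag that
 * the device cannot insert in hardware, the tag is folded back into the
 * packet data first. Returns the driver's NETDEV_TX_* status, or NETDEV_TX_OK
 * if the skb had to be dropped while inserting the VLAN header.
 */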
static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
			      struct netdev_queue *txq)
{
	int status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}

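/* Delayed-work handler that drains the netpoll TX backlog in npinfo->txq.
 * If the chosen TX queue is frozen/stopped or the driver refuses the skb,
 * the skb goes back to the head of the queue and the work is rescheduled
 * roughly a tenth of a second later.
 */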
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}

static void poll_one_napi(struct napi_struct *napi)
{
	int work;

	/* If we set this bit but see that it has already been set,
	 * that indicates that NAPI has been disabled and we need
	 * to abort this operation.
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}

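/* Poll every NAPI context registered on @dev, but only after claiming
 * ownership: poll_owner is flipped from -1 to the current CPU with cmpxchg()
 * so we never race with a softirq poller running on another CPU.
 */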
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int cpu = smp_processor_id();

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
			poll_one_napi(napi);
			smp_store_release(&napi->poll_owner, -1);
		}
	}
}

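/* Service the device on behalf of netpoll: run the driver's
 * ndo_poll_controller (if it has one), poll its NAPI contexts with a zero
 * budget to reap TX completions, and free anything parked on this CPU's
 * completion queue. The whole poll is skipped while dev_lock is held by the
 * dev_open/close paths.
 */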
void netpoll_poll_dev(struct net_device *dev)
{
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	const struct net_device_ops *ops;

	/* Don't do any rx activity if the dev_lock semaphore is held;
	 * the dev_open/close paths use it to block netpoll activity
	 * while changing device state.
	 */
	if (!ni || down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);

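/* netpoll_poll_disable()/netpoll_poll_enable() bracket device state changes
 * such as dev_open/dev_close: holding dev_lock here makes netpoll_poll_dev()
 * back off until the matching netpoll_poll_enable() releases it.
 */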
void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;
	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;
	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				refcount_set(&skb->users, 1);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

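/* Allocate an skb for netpoll TX with @reserve bytes of headroom. Falls back
 * to the emergency pool when a fresh atomic allocation fails and, as a last
 * resort, polls the device a few times in the hope that completions free up
 * memory before giving up.
 */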
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	refcount_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle the device; maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			dev->name, dev->netdev_ops->ndo_start_xmit);

	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

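/* Build an Ethernet + IPv4/IPv6 + UDP frame around @msg, addressed according
 * to @np, and hand it to netpoll_send_skb(). Must be called with interrupts
 * disabled; callers such as netconsole keep @len within MAX_UDP_CHUNK so the
 * emergency skb pool is always large enough.
 */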
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

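/* Illustrative sketch of a typical caller (not part of this file); the field
 * and function names are the ones used above, everything else is made up:
 *
 *	static struct netpoll np = {
 *		.name       = "example",
 *		.dev_name   = "eth0",
 *		.remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	if (!netpoll_parse_options(&np, "6665@10.0.0.1/eth0,6666@10.0.0.2/") &&
 *	    !netpoll_setup(&np)) {
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		netpoll_send_udp(&np, msg, strlen(msg));
 *		local_irq_restore(flags);
 *	}
 *	...
 *	netpoll_cleanup(&np);
 */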
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

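/* Parse a netpoll configuration string of the form
 *
 *	[src_port]@[src_ip]/[device],[dst_port]@<dst_ip>/[dst_mac]
 *
 * e.g. "6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55".
 * Omitted fields leave the corresponding @np members untouched; only the
 * destination IP is mandatory. Returns 0 on success, -1 on a parse error.
 */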
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

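/* Attach @np to @ndev: allocate and initialise the device's netpoll_info on
 * first use (or take another reference to an existing one), give the driver a
 * chance to prepare via ndo_netpoll_setup, and finally publish the info
 * through ndev->npinfo. Caller must hold RTNL.
 */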
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		refcount_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		refcount_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

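/* Full setup path used by clients such as netconsole: look up the device by
 * name, bring it up and wait for carrier if necessary, pick a local IPv4/IPv6
 * address when none was configured, pre-fill the skb pool, and hand over to
 * __netpoll_setup(). Takes and releases RTNL internally.
 */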
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported on %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

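/* Detach @np from its device: drop the netpoll_info reference and, when it
 * was the last one, let the driver clean up via ndo_netpoll_cleanup and free
 * the info after an RCU grace period. Caller must hold RTNL.
 */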
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (refcount_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void __netpoll_free(struct netpoll *np)
{
	ASSERT_RTNL();

	/* Wait for in-flight packets to finish transmitting before freeing. */
	synchronize_rcu_bh();
	__netpoll_cleanup(np);
	kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);
833