/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL		50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE \
	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
	 sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		netif_tx_lock(dev);
		if ((netif_queue_stopped(dev) ||
		     netif_subqueue_stopped(dev, skb->queue_mapping)) ||
		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			netif_tx_unlock(dev);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		netif_tx_unlock(dev);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
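 *
 * While we hold a device's poll_lock here we also raise NETPOLL_RX_DROP
 * and the trapped count, so that __netpoll_rx() consumes whatever the
 * poll pulls in (queueing ARP requests and handing matching UDP to the
 * rx_hook) instead of letting it reach the normal receive path.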
 */
static void poll_napi(struct netpoll *np)
{
	struct netpoll_info *npinfo = np->dev->npinfo;
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
		if (test_bit(NAPI_STATE_SCHED, &napi->state) &&
		    napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			npinfo->rx_flags |= NETPOLL_RX_DROP;
			atomic_inc(&trapped);

			napi->poll(napi, budget);

			atomic_dec(&trapped);
			npinfo->rx_flags &= ~NETPOLL_RX_DROP;
			spin_unlock(&napi->poll_lock);
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	struct sk_buff *skb;

	if (unlikely(!npi))
		return;

	skb = skb_dequeue(&npi->arp_tx);

	while (skb != NULL) {
		arp_reply(skb);
		skb = skb_dequeue(&npi->arp_tx);
	}
}

void netpoll_poll(struct netpoll *np)
{
	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
		return;

	/* Process pending work on NIC */
	np->dev->poll_controller(np->dev);
	if (!list_empty(&np->dev->napi_list))
		poll_napi(np);

	service_arp_queue(np->dev->npinfo);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor)
				dev_kfree_skb_any(skb); /* put this one back */
			else
				__kfree_skb(skb);
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
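/*
 * Transmit an skb from netpoll context.  When nothing else is queued
 * and we are not being called recursively from this device's own poll
 * handler, try to send directly, retrying for up to one clock tick;
 * otherwise (or if the device stays busy) defer the skb to the txq so
 * queue_process() can retry it from a workqueue.
 */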
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		unsigned long flags;

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (netif_tx_trylock(dev)) {
				if (!netif_queue_stopped(dev) &&
				    !netif_subqueue_stopped(dev, skb->queue_mapping))
					status = dev->hard_start_xmit(skb, dev);
				netif_tx_unlock(dev);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle the device, maybe there is some cleanup */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(htonl(np->local_ip),
					htonl(np->remote_ip),
					udp_len, IPPROTO_UDP,
					csum_partial((unsigned char *)udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id = 0;
	iph->frag_off = 0;
	iph->ttl = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->local_mac, 6);
	memcpy(eth->h_dest, np->remote_mac, 6);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}

static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * skb->dev->addr_len) +
				 (2 * sizeof(u32)))))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* if we actually cared about dst hw addr, it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
		return;

	size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	skb_reset_network_header(send_skb);
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */
	if (dev_hard_header(send_skb, skb->dev, ptype,
			    sha, np->local_mac,
			    send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, sha, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}

int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;
	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
		goto out;
	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	DECLARE_MAC_BUF(mac);
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->local_ip));
	printk(KERN_INFO "%s: interface %s\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->remote_ip));
	printk(KERN_INFO "%s: remote ethernet address %s\n",
	       np->name, print_mac(mac, np->remote_mac));
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = ntohl(in_aton(cur));
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = ntohl(in_aton(cur));
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}
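/*
 * Bind a netpoll instance to its device: look up dev_name, allocate or
 * reuse the per-device netpoll_info, bring the interface up and wait
 * for carrier if needed, and fill in any local addresses that were
 * left unset.
 */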
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
		memcpy(np->local_mac, ndev->dev_addr, 6);

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo) {
			if (npinfo->rx_np == np) {
				spin_lock_irqsave(&npinfo->rx_lock, flags);
				npinfo->rx_np = NULL;
				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
			}

			if (atomic_dec_and_test(&npinfo->refcnt)) {
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);

				/* clean after last, unfinished work */
				if (!skb_queue_empty(&npinfo->txq)) {
					struct sk_buff *skb;
					skb = __skb_dequeue(&npinfo->txq);
					kfree_skb(skb);
				}
				kfree(npinfo);
				np->dev->npinfo = NULL;
			}
		}

		dev_put(np->dev);
	}

	np->dev = NULL;
}

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
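
/*
 * Usage sketch (illustrative only, values are made up): a client such as
 * netconsole keeps a struct netpoll around and drives the exported API
 * roughly like this, with the option string in the form
 * [src-port]@[src-ip]/[dev],[tgt-port]@[tgt-ip]/[tgt-mac]; the string must
 * be writable because netpoll_parse_options() splits it in place:
 *
 *	static char config[] = "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55";
 *	static struct netpoll np = { .name = "myclient" };
 *
 *	netpoll_parse_options(&np, config);
 *	netpoll_setup(&np);
 *	...
 *	netpoll_send_udp(&np, msg, len);	// msg/len supplied by the caller
 *	...
 *	netpoll_cleanup(&np);
 */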