/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 * Licensed under the GPL.
 */

#include <linux/bootmem.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <init.h>
#include <irq_kern.h>
#include <irq_user.h>
#include "mconsole_kern.h"
#include <net_kern.h>
#include <net_user.h>

#define DRIVER_NAME "uml-netdev"

static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);

/*
 * drop_skb is used when we can't allocate an skb.  The packet is read
 * into drop_skb in order to get the data off the connection to the
 * host.  It is reallocated whenever the largest packet size seen so far
 * increases.  update_drop_skb is called from eth_configure when a new
 * interface is added.
 */
static DEFINE_SPINLOCK(drop_lock);
static struct sk_buff *drop_skb;
static int drop_max;

static int update_drop_skb(int max)
{
        struct sk_buff *new;
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&drop_lock, flags);

        if (max <= drop_max)
                goto out;

        err = -ENOMEM;
        new = dev_alloc_skb(max);
        if (new == NULL)
                goto out;

        skb_put(new, max);

        kfree_skb(drop_skb);
        drop_skb = new;
        drop_max = max;
        err = 0;
out:
        spin_unlock_irqrestore(&drop_lock, flags);

        return err;
}

static int uml_net_rx(struct net_device *dev)
{
        struct uml_net_private *lp = netdev_priv(dev);
        int pkt_len;
        struct sk_buff *skb;

        /* If we can't allocate memory, try again next round. */
        skb = dev_alloc_skb(lp->max_packet);
        if (skb == NULL) {
                drop_skb->dev = dev;
                /* Read a packet into drop_skb and don't do anything with it. */
                (*lp->read)(lp->fd, drop_skb, lp);
                dev->stats.rx_dropped++;
                return 0;
        }

        skb->dev = dev;
        skb_put(skb, lp->max_packet);
        skb_reset_mac_header(skb);
        pkt_len = (*lp->read)(lp->fd, skb, lp);

        if (pkt_len > 0) {
                skb_trim(skb, pkt_len);
                skb->protocol = (*lp->protocol)(skb);

                dev->stats.rx_bytes += skb->len;
                dev->stats.rx_packets++;
                netif_rx(skb);
                return pkt_len;
        }

        kfree_skb(skb);
        return pkt_len;
}

static void uml_dev_close(struct work_struct *work)
{
        struct uml_net_private *lp =
                container_of(work, struct uml_net_private, work);
        dev_close(lp->dev);
}

static irqreturn_t uml_net_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct uml_net_private *lp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return IRQ_NONE;

        spin_lock(&lp->lock);
        while ((err = uml_net_rx(dev)) > 0) ;
        if (err < 0) {
                printk(KERN_ERR
                       "Device '%s' read returned %d, shutting it down\n",
                       dev->name, err);
                /*
                 * dev_close() can't be called in interrupt context, and
                 * it takes lp->lock again.  dev_close() can be safely
                 * called multiple times on the same device, since it
                 * tests for (dev->flags & IFF_UP), so there's no harm in
                 * delaying the device shutdown.  Furthermore, the
                 * workqueue will not re-enqueue an already enqueued work
                 * item.
                 */
                schedule_work(&lp->work);
                goto out;
        }
        reactivate_fd(lp->fd, UM_ETH_IRQ);

out:
        spin_unlock(&lp->lock);
        return IRQ_HANDLED;
}
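
/*
 * Open/close lifecycle: uml_net_open() asks the transport to open its
 * host-side file descriptor and wires it up to UM_ETH_IRQ;
 * uml_net_close() undoes both.  lp->fd < 0 is the "not open" marker
 * throughout.
 */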

static int uml_net_open(struct net_device *dev)
{
        struct uml_net_private *lp = netdev_priv(dev);
        int err;

        if (lp->fd >= 0) {
                err = -ENXIO;
                goto out;
        }

        lp->fd = (*lp->open)(&lp->user);
        if (lp->fd < 0) {
                err = lp->fd;
                goto out;
        }

        err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
                             IRQF_SHARED, dev->name, dev);
        if (err != 0) {
                printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
                err = -ENETUNREACH;
                goto out_close;
        }

        lp->tl.data = (unsigned long) &lp->user;
        netif_start_queue(dev);

        /*
         * Clear the buffer - it can happen that the host side of the
         * interface is full when we get here.  In this case, new data is
         * never queued, SIGIOs never arrive, and the net never works.
         */
        while ((err = uml_net_rx(dev)) > 0) ;

        spin_lock(&opened_lock);
        list_add(&lp->list, &opened);
        spin_unlock(&opened_lock);

        return 0;
out_close:
        if (lp->close != NULL)
                (*lp->close)(lp->fd, &lp->user);
        lp->fd = -1;
out:
        return err;
}

static int uml_net_close(struct net_device *dev)
{
        struct uml_net_private *lp = netdev_priv(dev);

        netif_stop_queue(dev);

        um_free_irq(dev->irq, dev);
        if (lp->close != NULL)
                (*lp->close)(lp->fd, &lp->user);
        lp->fd = -1;

        spin_lock(&opened_lock);
        list_del(&lp->list);
        spin_unlock(&opened_lock);

        return 0;
}

static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct uml_net_private *lp = netdev_priv(dev);
        unsigned long flags;
        int len;

        netif_stop_queue(dev);

        spin_lock_irqsave(&lp->lock, flags);

        len = (*lp->write)(lp->fd, skb, lp);
        skb_tx_timestamp(skb);

        if (len == skb->len) {
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += skb->len;
                netif_trans_update(dev);
                netif_start_queue(dev);

                /* this is normally done in the interrupt when tx finishes */
                netif_wake_queue(dev);
        }
        else if (len == 0) {
                netif_start_queue(dev);
                dev->stats.tx_dropped++;
        }
        else {
                netif_start_queue(dev);
                printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
        }

        spin_unlock_irqrestore(&lp->lock, flags);

        dev_consume_skb_any(skb);

        return NETDEV_TX_OK;
}

static void uml_net_set_multicast_list(struct net_device *dev)
{
        return;
}

static void uml_net_tx_timeout(struct net_device *dev)
{
        netif_trans_update(dev);
        netif_wake_queue(dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void uml_net_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        uml_net_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void uml_net_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
        strlcpy(info->version, "42", sizeof(info->version));
}

static const struct ethtool_ops uml_net_ethtool_ops = {
        .get_drvinfo    = uml_net_get_drvinfo,
        .get_link       = ethtool_op_get_link,
        .get_ts_info    = ethtool_op_get_ts_info,
};
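
/*
 * This exists only to give lp->tl a valid callback; the body is
 * compiled out (#ifdef undef), so timer expiry is a no-op.
 */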
static void uml_net_user_timer_expire(unsigned long _conn)
{
#ifdef undef
        struct connection *conn = (struct connection *)_conn;

        dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn);
        do_connect(conn);
#endif
}

static void setup_etheraddr(struct net_device *dev, char *str)
{
        unsigned char *addr = dev->dev_addr;
        char *end;
        int i;

        if (str == NULL)
                goto random;

        for (i = 0; i < 6; i++) {
                addr[i] = simple_strtoul(str, &end, 16);
                if ((end == str) ||
                    ((*end != ':') && (*end != ',') && (*end != '\0'))) {
                        printk(KERN_ERR
                               "setup_etheraddr: failed to parse '%s' "
                               "as an ethernet address\n", str);
                        goto random;
                }
                str = end + 1;
        }
        if (is_multicast_ether_addr(addr)) {
                printk(KERN_ERR
                       "Attempt to assign a multicast ethernet address to a "
                       "device disallowed\n");
                goto random;
        }
        if (!is_valid_ether_addr(addr)) {
                printk(KERN_ERR
                       "Attempt to assign an invalid ethernet address to a "
                       "device disallowed\n");
                goto random;
        }
        if (!is_local_ether_addr(addr)) {
                printk(KERN_WARNING
                       "Warning: Assigning a globally valid ethernet "
                       "address to a device\n");
                printk(KERN_WARNING "You should set the 2nd rightmost bit in "
                       "the first byte of the MAC,\n");
                printk(KERN_WARNING "i.e. %02x:%02x:%02x:%02x:%02x:%02x\n",
                       addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4],
                       addr[5]);
        }
        return;

random:
        printk(KERN_INFO
               "Choosing a random ethernet address for device %s\n",
               dev->name);
        eth_hw_addr_random(dev);
}
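
/*
 * setup_etheraddr() above accepts six base-16 fields separated by ':'
 * or ',', e.g. "fe:fd:00:00:00:01" or "fe,fd,0,0,0,1"; anything else
 * falls back to a random address.
 */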

static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);

static struct platform_driver uml_net_driver = {
        .driver = {
                .name = DRIVER_NAME,
        },
};

static void net_device_release(struct device *dev)
{
        struct uml_net *device = dev_get_drvdata(dev);
        struct net_device *netdev = device->dev;
        struct uml_net_private *lp = netdev_priv(netdev);

        if (lp->remove != NULL)
                (*lp->remove)(&lp->user);
        list_del(&device->list);
        kfree(device);
        free_netdev(netdev);
}

static const struct net_device_ops uml_netdev_ops = {
        .ndo_open               = uml_net_open,
        .ndo_stop               = uml_net_close,
        .ndo_start_xmit         = uml_net_start_xmit,
        .ndo_set_rx_mode        = uml_net_set_multicast_list,
        .ndo_tx_timeout         = uml_net_tx_timeout,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = uml_net_poll_controller,
#endif
};

/*
 * Ensures that platform_driver_register() is called only once by
 * eth_configure(), the first time a device is configured.
 */
static int driver_registered;

static void eth_configure(int n, void *init, char *mac,
                          struct transport *transport, gfp_t gfp_mask)
{
        struct uml_net *device;
        struct net_device *dev;
        struct uml_net_private *lp;
        int err, size;

        size = transport->private_size + sizeof(struct uml_net_private);

        device = kzalloc(sizeof(*device), gfp_mask);
        if (device == NULL) {
                printk(KERN_ERR "eth_configure failed to allocate struct "
                       "uml_net\n");
                return;
        }

        dev = alloc_etherdev(size);
        if (dev == NULL) {
                printk(KERN_ERR "eth_configure: failed to allocate struct "
                       "net_device for eth%d\n", n);
                goto out_free_device;
        }

        INIT_LIST_HEAD(&device->list);
        device->index = n;

        /*
         * If this name ends up conflicting with an existing registered
         * netdevice, that is OK, register_netdev{,ice}() will notice this
         * and fail.
         */
        snprintf(dev->name, sizeof(dev->name), "eth%d", n);

        setup_etheraddr(dev, mac);

        printk(KERN_INFO "Netdevice %d (%pM) : ", n, dev->dev_addr);

        lp = netdev_priv(dev);
        /*
         * This points to the transport private data.  It's still clear,
         * but we must memset it to 0 *now*.  Let's help the drivers.
         */
        memset(lp, 0, size);
        INIT_WORK(&lp->work, uml_dev_close);

        /* sysfs register */
        if (!driver_registered) {
                platform_driver_register(&uml_net_driver);
                driver_registered = 1;
        }
        device->pdev.id = n;
        device->pdev.name = DRIVER_NAME;
        device->pdev.dev.release = net_device_release;
        dev_set_drvdata(&device->pdev.dev, device);
        if (platform_device_register(&device->pdev))
                goto out_free_netdev;
        SET_NETDEV_DEV(dev, &device->pdev.dev);

        device->dev = dev;

        /*
         * These just fill in a data structure, so there's no failure
         * to be worried about.
         */
        (*transport->kern->init)(dev, init);

        *lp = ((struct uml_net_private)
                { .list                 = LIST_HEAD_INIT(lp->list),
                  .dev                  = dev,
                  .fd                   = -1,
                  .mac                  = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
                  .max_packet           = transport->user->max_packet,
                  .protocol             = transport->kern->protocol,
                  .open                 = transport->user->open,
                  .close                = transport->user->close,
                  .remove               = transport->user->remove,
                  .read                 = transport->kern->read,
                  .write                = transport->kern->write,
                  .add_address          = transport->user->add_address,
                  .delete_address       = transport->user->delete_address });

        init_timer(&lp->tl);
        spin_lock_init(&lp->lock);
        lp->tl.function = uml_net_user_timer_expire;
        memcpy(lp->mac, dev->dev_addr, sizeof(lp->mac));

        if ((transport->user->init != NULL) &&
            ((*transport->user->init)(&lp->user, dev) != 0))
                goto out_unregister;

        dev->mtu = transport->user->mtu;
        dev->netdev_ops = &uml_netdev_ops;
        dev->ethtool_ops = &uml_net_ethtool_ops;
        dev->watchdog_timeo = (HZ >> 1);
        dev->irq = UM_ETH_IRQ;

        err = update_drop_skb(lp->max_packet);
        if (err)
                goto out_undo_user_init;

        rtnl_lock();
        err = register_netdevice(dev);
        rtnl_unlock();
        if (err)
                goto out_undo_user_init;

        spin_lock(&devices_lock);
        list_add(&device->list, &devices);
        spin_unlock(&devices_lock);

        return;

out_undo_user_init:
        if (transport->user->remove != NULL)
                (*transport->user->remove)(&lp->user);
out_unregister:
        platform_device_unregister(&device->pdev);
        return; /* platform_device_unregister frees dev and device */
out_free_netdev:
        free_netdev(dev);
out_free_device:
        kfree(device);
}
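
/*
 * Note the asymmetric cleanup in eth_configure(): once
 * platform_device_register() has succeeded, unregistering the platform
 * device is enough - net_device_release() then frees both dev and
 * device.
 */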

static struct uml_net *find_device(int n)
{
        struct uml_net *device;
        struct list_head *ele;

        spin_lock(&devices_lock);
        list_for_each(ele, &devices) {
                device = list_entry(ele, struct uml_net, list);
                if (device->index == n)
                        goto out;
        }
        device = NULL;
out:
        spin_unlock(&devices_lock);
        return device;
}

static int eth_parse(char *str, int *index_out, char **str_out,
                     char **error_out)
{
        char *end;
        int n, err = -EINVAL;

        n = simple_strtoul(str, &end, 0);
        if (end == str) {
                *error_out = "Bad device number";
                return err;
        }

        str = end;
        if (*str != '=') {
                *error_out = "Expected '=' after device number";
                return err;
        }

        str++;
        if (find_device(n)) {
                *error_out = "Device already configured";
                return err;
        }

        *index_out = n;
        *str_out = str;
        return 0;
}

struct eth_init {
        struct list_head list;
        char *init;
        int index;
};

static DEFINE_SPINLOCK(transports_lock);
static LIST_HEAD(transports);

/* Filled in during early boot */
static LIST_HEAD(eth_cmd_line);

static int check_transport(struct transport *transport, char *eth, int n,
                           void **init_out, char **mac_out, gfp_t gfp_mask)
{
        int len;

        len = strlen(transport->name);
        if (strncmp(eth, transport->name, len))
                return 0;

        eth += len;
        if (*eth == ',')
                eth++;
        else if (*eth != '\0')
                return 0;

        *init_out = kmalloc(transport->setup_size, gfp_mask);
        if (*init_out == NULL)
                return 1;

        if (!transport->setup(eth, mac_out, *init_out)) {
                kfree(*init_out);
                *init_out = NULL;
        }
        return 1;
}
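
/*
 * Transports and command-line devices can show up in either order:
 * register_transport() below replays any queued eth_cmd_line entries
 * against a newly registered transport, while eth_setup_common() tries
 * a new device string against every transport already registered.
 */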
void register_transport(struct transport *new)
{
        struct list_head *ele, *next;
        struct eth_init *eth;
        void *init;
        char *mac = NULL;
        int match;

        spin_lock(&transports_lock);
        BUG_ON(!list_empty(&new->list));
        list_add(&new->list, &transports);
        spin_unlock(&transports_lock);

        list_for_each_safe(ele, next, &eth_cmd_line) {
                eth = list_entry(ele, struct eth_init, list);
                match = check_transport(new, eth->init, eth->index, &init,
                                        &mac, GFP_KERNEL);
                if (!match)
                        continue;
                else if (init != NULL) {
                        eth_configure(eth->index, init, mac, new, GFP_KERNEL);
                        kfree(init);
                }
                list_del(&eth->list);
        }
}

static int eth_setup_common(char *str, int index)
{
        struct list_head *ele;
        struct transport *transport;
        void *init;
        char *mac = NULL;
        int found = 0;

        spin_lock(&transports_lock);
        list_for_each(ele, &transports) {
                transport = list_entry(ele, struct transport, list);
                if (!check_transport(transport, str, index, &init,
                                     &mac, GFP_ATOMIC))
                        continue;
                if (init != NULL) {
                        eth_configure(index, init, mac, transport, GFP_ATOMIC);
                        kfree(init);
                }
                found = 1;
                break;
        }

        spin_unlock(&transports_lock);
        return found;
}

static int __init eth_setup(char *str)
{
        struct eth_init *new;
        char *error;
        int n, err;

        err = eth_parse(str, &n, &str, &error);
        if (err) {
                printk(KERN_ERR "eth_setup - Couldn't parse '%s' : %s\n",
                       str, error);
                return 1;
        }

        new = alloc_bootmem(sizeof(*new));

        INIT_LIST_HEAD(&new->list);
        new->index = n;
        new->init = str;

        list_add_tail(&new->list, &eth_cmd_line);
        return 1;
}

__setup("eth", eth_setup);
__uml_help(eth_setup,
"eth[0-9]+=<transport>,<options>\n"
"    Configure a network device.\n\n"
);
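
/*
 * Example (assuming the tuntap transport is compiled in):
 *
 *     eth0=tuntap,tap0
 *
 * on the kernel command line attaches eth0 to the host's tap0 device.
 * The same "eth<n>=<transport>,<options>" string is accepted at runtime
 * through the mconsole "config" command, handled by net_config() below.
 */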

static int net_config(char *str, char **error_out)
{
        int n, err;

        err = eth_parse(str, &n, &str, error_out);
        if (err)
                return err;

        /*
         * This string is broken up and the pieces are used by the
         * underlying driver, so it is freed only if eth_setup_common
         * fails.
         */
        str = kstrdup(str, GFP_KERNEL);
        if (str == NULL) {
                *error_out = "net_config failed to strdup string";
                return -ENOMEM;
        }
        err = !eth_setup_common(str, n);
        if (err)
                kfree(str);
        return err;
}

static int net_id(char **str, int *start_out, int *end_out)
{
        char *end;
        int n;

        n = simple_strtoul(*str, &end, 0);
        if ((*end != '\0') || (end == *str))
                return -1;

        *start_out = n;
        *end_out = n;
        *str = end;
        return n;
}

static int net_remove(int n, char **error_out)
{
        struct uml_net *device;
        struct net_device *dev;
        struct uml_net_private *lp;

        device = find_device(n);
        if (device == NULL)
                return -ENODEV;

        dev = device->dev;
        lp = netdev_priv(dev);
        if (lp->fd > 0)
                return -EBUSY;
        unregister_netdev(dev);
        platform_device_unregister(&device->pdev);

        return 0;
}

static struct mc_device net_mc = {
        .list           = LIST_HEAD_INIT(net_mc.list),
        .name           = "eth",
        .config         = net_config,
        .get_config     = NULL,
        .id             = net_id,
        .remove         = net_remove,
};

#ifdef CONFIG_INET
static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
                              void *ptr)
{
        struct in_ifaddr *ifa = ptr;
        struct net_device *dev = ifa->ifa_dev->dev;
        struct uml_net_private *lp;
        void (*proc)(unsigned char *, unsigned char *, void *);
        unsigned char addr_buf[4], netmask_buf[4];

        if (dev->netdev_ops->ndo_open != uml_net_open)
                return NOTIFY_DONE;

        lp = netdev_priv(dev);

        proc = NULL;
        switch (event) {
        case NETDEV_UP:
                proc = lp->add_address;
                break;
        case NETDEV_DOWN:
                proc = lp->delete_address;
                break;
        }
        if (proc != NULL) {
                memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
                memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
                (*proc)(addr_buf, netmask_buf, &lp->user);
        }
        return NOTIFY_DONE;
}

/* uml_net_init shouldn't be called twice on two CPUs at the same time */
static struct notifier_block uml_inetaddr_notifier = {
        .notifier_call          = uml_inetaddr_event,
};

static void inet_register(void)
{
        struct list_head *ele;
        struct uml_net_private *lp;
        struct in_device *ip;
        struct in_ifaddr *in;

        register_inetaddr_notifier(&uml_inetaddr_notifier);

        /*
         * Devices may have been opened already, so the
         * uml_inetaddr_notifier didn't get a chance to run for them.
         * This fakes it so that addresses which have already been set up
         * get handled properly.
         */
        spin_lock(&opened_lock);
        list_for_each(ele, &opened) {
                lp = list_entry(ele, struct uml_net_private, list);
                ip = lp->dev->ip_ptr;
                if (ip == NULL)
                        continue;
                in = ip->ifa_list;
                while (in != NULL) {
                        uml_inetaddr_event(NULL, NETDEV_UP, in);
                        in = in->ifa_next;
                }
        }
        spin_unlock(&opened_lock);
}
#else
static inline void inet_register(void)
{
}
#endif

static int uml_net_init(void)
{
        mconsole_register_dev(&net_mc);
        inet_register();
        return 0;
}

__initcall(uml_net_init);

static void close_devices(void)
{
        struct list_head *ele;
        struct uml_net_private *lp;

        spin_lock(&opened_lock);
        list_for_each(ele, &opened) {
                lp = list_entry(ele, struct uml_net_private, list);
                um_free_irq(lp->dev->irq, lp->dev);
                if ((lp->close != NULL) && (lp->fd >= 0))
                        (*lp->close)(lp->fd, &lp->user);
                if (lp->remove != NULL)
                        (*lp->remove)(&lp->user);
        }
        spin_unlock(&opened_lock);
}

__uml_exitcall(close_devices);
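
/*
 * Exported helpers for the rest of the UML network code:
 * iter_addresses() and dev_netmask() expose a device's IPv4 addresses
 * and first netmask, and get_output_buffer()/free_output_buffer() hand
 * out a single page for building output.
 */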
void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
                                        void *),
                    void *arg)
{
        struct net_device *dev = d;
        struct in_device *ip = dev->ip_ptr;
        struct in_ifaddr *in;
        unsigned char address[4], netmask[4];

        if (ip == NULL)
                return;
        in = ip->ifa_list;
        while (in != NULL) {
                memcpy(address, &in->ifa_address, sizeof(address));
                memcpy(netmask, &in->ifa_mask, sizeof(netmask));
                (*cb)(address, netmask, arg);
                in = in->ifa_next;
        }
}

int dev_netmask(void *d, void *m)
{
        struct net_device *dev = d;
        struct in_device *ip = dev->ip_ptr;
        struct in_ifaddr *in;
        __be32 *mask_out = m;

        if (ip == NULL)
                return 1;

        in = ip->ifa_list;
        if (in == NULL)
                return 1;

        *mask_out = in->ifa_mask;
        return 0;
}

void *get_output_buffer(int *len_out)
{
        void *ret;

        ret = (void *) __get_free_pages(GFP_KERNEL, 0);
        if (ret)
                *len_out = PAGE_SIZE;
        else
                *len_out = 0;
        return ret;
}

void free_output_buffer(void *buffer)
{
        free_pages((unsigned long) buffer, 0);
}

int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
                     char **gate_addr)
{
        char *remain;

        remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
        if (remain != NULL) {
                printk(KERN_ERR "tap_setup_common - Extra garbage on "
                       "specification : '%s'\n", remain);
                return 1;
        }

        return 0;
}

unsigned short eth_protocol(struct sk_buff *skb)
{
        return eth_type_trans(skb, skb->dev);
}