net/core/dev.c: diff between commit 398007f863a4af2b4a5a07219c5a617f1a098115 (before) and commit a2835763e130c343ace5320c20d33c281e7097b7 (after)
1/*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *

--- 1434 unchanged lines hidden ---

1443 skb_orphan(skb);
1444
1445 if (!(dev->flags & IFF_UP))
1446 return NET_RX_DROP;
1447
1448 if (skb->len > (dev->mtu + dev->hard_header_len))
1449 return NET_RX_DROP;
1450
1451 skb_dst_drop(skb);
1451 skb_set_dev(skb, dev);
1452 skb->tstamp.tv64 = 0;
1453 skb->pkt_type = PACKET_HOST;
1454 skb->protocol = eth_type_trans(skb, dev);
1455 skb->mark = 0;
1456 secpath_reset(skb);
1457 nf_reset(skb);
1458 return netif_rx(skb);
1459}
1460EXPORT_SYMBOL_GPL(dev_forward_skb);
1461
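The new version folds dev_forward_skb's open-coded scrubbing (skb_dst_drop, clearing skb->mark, secpath_reset, nf_reset) into skb_set_dev, which scrubs only when the packet crosses a network namespace boundary; the new function is added further down. For context, a minimal sketch of a caller, modelled on a veth-style pair device (pair_xmit and get_peer are hypothetical names, not part of this diff):

static netdev_tx_t pair_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* Hand the frame to the peer's RX path; dev_forward_skb() now
         * calls skb_set_dev(), which scrubs namespace-private state
         * only when the peer lives in a different netns. */
        struct net_device *peer = get_peer(dev);        /* hypothetical */

        if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
                dev->stats.tx_packets++;
        else
                dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}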
1462/*
1463 * Support routine. Sends outgoing frames to any network
1464 * taps currently in use.
1465 */

--- 143 unchanged lines hidden ---

1609 if (can_checksum_protocol(dev->features & dev->vlan_features,
1610 veh->h_vlan_encapsulated_proto))
1611 return true;
1612 }
1613
1614 return false;
1615}
1616
1614/**
 1615 * skb_set_dev - assign a new device to a buffer
1616 * @skb: buffer for the new device
1617 * @dev: network device
1618 *
 1619 * If an skb is already owned by a device, we have to reset
 1620 * all data private to the namespace that device belongs to
 1621 * before assigning it to a new device.
1622 */
1623#ifdef CONFIG_NET_NS
1624void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1625{
1626 skb_dst_drop(skb);
1627 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1628 secpath_reset(skb);
1629 nf_reset(skb);
1630 skb_init_secmark(skb);
1631 skb->mark = 0;
1632 skb->priority = 0;
1633 skb->nf_trace = 0;
1634 skb->ipvs_property = 0;
1635#ifdef CONFIG_NET_SCHED
1636 skb->tc_index = 0;
1637#endif
1638 }
1639 skb->dev = dev;
1640}
1641EXPORT_SYMBOL(skb_set_dev);
1642#endif /* CONFIG_NET_NS */
1643
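skb_set_dev is only built for CONFIG_NET_NS; with a single namespace there is nothing namespace-private to scrub, so the header presumably pairs this with a trivial inline. A sketch under that assumption:

#ifndef CONFIG_NET_NS
/* One namespace only: assigning the device needs no scrubbing. */
static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
        skb->dev = dev;
}
#endif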
1617/*
1618 * Invalidate hardware checksum when packet is to be mangled, and
1619 * complete checksum manually on outgoing path.
1620 */
1621int skb_checksum_help(struct sk_buff *skb)
1622{
1623 __wsum csum;
1624 int ret = 0, offset;

--- 223 unchanged lines hidden ---

1848 }
1849
1850gso:
1851 do {
1852 struct sk_buff *nskb = skb->next;
1853
1854 skb->next = nskb->next;
1855 nskb->next = NULL;
1883
1884 /*
 1885 * If the device doesn't need nskb->dst, release it right now while
 1886 * it's hot in this CPU's cache
1887 */
1888 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1889 skb_dst_drop(nskb);
1890
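alloc_netdev (later in this diff) sets IFF_XMIT_DST_RELEASE by default, so most devices get this early dst release on the GSO transmit path as well. A driver that still needs the route after ndo_start_xmit opts out in its setup function; a hedged sketch (hypothetical driver):

static void dst_keeping_setup(struct net_device *dev)
{
        ether_setup(dev);
        /* Keep skb_dst() valid across ndo_start_xmit(): suppress the
         * early release done in the GSO loop above. */
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}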
1856 rc = ops->ndo_start_xmit(nskb, dev);
1857 if (unlikely(rc != NETDEV_TX_OK)) {
1858 if (rc & ~NETDEV_TX_MASK)
1859 goto out_kfree_gso_skb;
1860 nskb->next = skb->next;
1861 skb->next = nskb;
1862 return rc;
1863 }

--- 105 unchanged lines hidden ---

1969 rc = qdisc_enqueue_root(skb, q);
1970 qdisc_run(q);
1971 }
1972 spin_unlock(root_lock);
1973
1974 return rc;
1975}
1976
2012/*
2013 * Returns true if either:
2014 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2015 * 2. skb is fragmented and the device does not support SG, or if
 2016 * at least one of the fragments is in highmem and the device
 2017 * does not support DMA from it.
2018 */
2019static inline int skb_needs_linearize(struct sk_buff *skb,
2020 struct net_device *dev)
2021{
2022 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
2023 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
2024 illegal_highdma(dev, skb)));
2025}
2026
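skb_needs_linearize consolidates the two linearization checks that dev_queue_xmit open-coded before this change (the removed blocks appear below). Its highmem test relies on illegal_highdma; for reference, a sketch of that helper as it read elsewhere in this file at the time, reproduced from memory, so treat it as an approximation:

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
        int i;

        if (dev->features & NETIF_F_HIGHDMA)
                return 0;

        /* Any fragment in highmem is off limits for a non-HIGHDMA device. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (PageHighMem(skb_shinfo(skb)->frags[i].page))
                        return 1;
#endif
        return 0;
}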
1977/**
1978 * dev_queue_xmit - transmit a buffer
1979 * @skb: buffer to transmit
1980 *
1981 * Queue a buffer for transmission to a network device. The caller must
1982 * have set the device and priority and built the buffer before calling
1983 * this function. The function can be called from an interrupt.
1984 *

--- 20 unchanged lines hidden ---

2005 struct netdev_queue *txq;
2006 struct Qdisc *q;
2007 int rc = -ENOMEM;
2008
2009 /* GSO will handle the following emulations directly. */
2010 if (netif_needs_gso(dev, skb))
2011 goto gso;
2012
2013 if (skb_has_frags(skb) &&
2014 !(dev->features & NETIF_F_FRAGLIST) &&
2015 __skb_linearize(skb))
2063 /* Convert a paged skb to linear, if required */
2064 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
2016 goto out_kfree_skb;
2017
2018 /* Fragmented skb is linearized if device does not support SG,
2019 * or if at least one of fragments is in highmem and device
2020 * does not support DMA from it.
2021 */
2022 if (skb_shinfo(skb)->nr_frags &&
2023 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
2024 __skb_linearize(skb))
2025 goto out_kfree_skb;
2026
2027 /* If packet is not checksummed and device does not support
2028 * checksumming for this protocol, complete checksumming here.
2029 */
2030 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2031 skb_set_transport_header(skb, skb->csum_start -
2032 skb_headroom(skb));
2033 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
2034 goto out_kfree_skb;
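CHECKSUM_PARTIAL means the stack left the checksum for the device to finish; csum_start locates the transport header relative to skb->head, which is why the code above rebuilds the transport header from csum_start - skb_headroom(skb). A hypothetical helper illustrating the invariant (csum_field is not a kernel function):

/* Where skb_checksum_help() will store the folded checksum:
 * csum_start is an offset from skb->head to the transport header,
 * csum_offset locates the checksum field within that header. */
static __sum16 *csum_field(struct sk_buff *skb)
{
        return (__sum16 *)(skb->head + skb->csum_start + skb->csum_offset);
}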

--- 382 unchanged lines hidden ---

2417 * NET_RX_SUCCESS: no congestion
2418 * NET_RX_DROP: packet was dropped
2419 */
2420int netif_receive_skb(struct sk_buff *skb)
2421{
2422 struct packet_type *ptype, *pt_prev;
2423 struct net_device *orig_dev;
2424 struct net_device *null_or_orig;
2465 struct net_device *null_or_bond;
2425 int ret = NET_RX_DROP;
2426 __be16 type;
2427
2428 if (!skb->tstamp.tv64)
2429 net_timestamp(skb);
2430
2431 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
2432 return NET_RX_SUCCESS;

--- 49 unchanged lines hidden ---

2482
2483 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2484 if (!skb)
2485 goto out;
2486 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2487 if (!skb)
2488 goto out;
2489
2531 /*
2532 * Make sure frames received on VLAN interfaces stacked on
2533 * bonding interfaces still make their way to any base bonding
2534 * device that may have registered for a specific ptype. The
2535 * handler may have to adjust skb->dev and orig_dev.
2536 */
2537 null_or_bond = NULL;
2538 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2539 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2540 null_or_bond = vlan_dev_real_dev(skb->dev);
2541 }
2542
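The new null_or_bond case matters for stacks like bond0.100, a VLAN device whose real device is a bonding master: a packet_type handler registered on bond0 now also sees frames that arrive with skb->dev pointing at the VLAN device. A hedged sketch of such a registration (bond_tap_rcv and the resolved bond_dev are hypothetical):

static int bond_tap_rcv(struct sk_buff *skb, struct net_device *dev,
                        struct packet_type *pt, struct net_device *orig_dev);

static struct packet_type bond_tap __read_mostly = {
        .type = cpu_to_be16(ETH_P_ARP),
        .func = bond_tap_rcv,
};

static void register_bond_tap(struct net_device *bond_dev)
{
        bond_tap.dev = bond_dev;        /* deliver only for this device... */
        dev_add_pack(&bond_tap);        /* ...and now its stacked VLANs too */
}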
2490 type = skb->protocol;
2491 list_for_each_entry_rcu(ptype,
2492 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2493 if (ptype->type == type &&
2494 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2495 ptype->dev == orig_dev)) {
2546 if (ptype->type == type && (ptype->dev == null_or_orig ||
2547 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2548 ptype->dev == null_or_bond)) {
2496 if (pt_prev)
2497 ret = deliver_skb(skb, pt_prev, orig_dev);
2498 pt_prev = ptype;
2499 }
2500 }
2501
2502 if (pt_prev) {
2503 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);

--- 52 unchanged lines hidden (view full) ---

2556 kfree_skb(skb);
2557 return NET_RX_SUCCESS;
2558 }
2559
2560out:
2561 return netif_receive_skb(skb);
2562}
2563
2564void napi_gro_flush(struct napi_struct *napi)
2617static void napi_gro_flush(struct napi_struct *napi)
2565{
2566 struct sk_buff *skb, *next;
2567
2568 for (skb = napi->gro_list; skb; skb = next) {
2569 next = skb->next;
2570 skb->next = NULL;
2571 napi_gro_complete(skb);
2572 }
2573
2574 napi->gro_count = 0;
2575 napi->gro_list = NULL;
2576}
2577EXPORT_SYMBOL(napi_gro_flush);
2578
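With the EXPORT_SYMBOL removed, drivers no longer flush GRO by hand; napi_complete() already calls napi_gro_flush() before clearing NAPI_STATE_SCHED. A sketch of the usual poll routine this relies on (my_poll and my_rx are hypothetical):

static int my_poll(struct napi_struct *napi, int budget)
{
        int work = my_rx(napi, budget);         /* hypothetical RX loop */

        if (work < budget)
                napi_complete(napi);    /* flushes napi->gro_list internally */
        return work;
}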
2579enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2580{
2581 struct sk_buff **pp = NULL;
2582 struct packet_type *ptype;
2583 __be16 type = skb->protocol;
2584 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2585 int same_flow;

--- 375 unchanged lines hidden ---

2961
2962 local_irq_enable();
2963
2964 /* Even though interrupts have been re-enabled, this
2965 * access is safe because interrupts can only add new
2966 * entries to the tail of this list, and only ->poll()
2967 * calls can remove this head entry from the list.
2968 */
2969 n = list_entry(list->next, struct napi_struct, poll_list);
3021 n = list_first_entry(list, struct napi_struct, poll_list);
2970
2971 have = netpoll_poll_lock(n);
2972
2973 weight = n->weight;
2974
2975 /* This NAPI_STATE_SCHED test is for avoiding a race
2976 * with netpoll's poll_napi(). Only the entity which
2977 * obtains the lock and sees NAPI_STATE_SCHED set will

--- 202 unchanged lines hidden ---
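The list_entry(list->next, ...) to list_first_entry(list, ...) conversion above, and the matching ones below at new lines 4889 and 5277, are purely cosmetic; from include/linux/list.h:

#define list_first_entry(ptr, type, member) \
        list_entry((ptr)->next, type, member)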

3180{
3181 rcu_read_unlock();
3182}
3183
3184static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3185{
3186 const struct net_device_stats *stats = dev_get_stats(dev);
3187
3188 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3240 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3189 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3190 dev->name, stats->rx_bytes, stats->rx_packets,
3191 stats->rx_errors,
3192 stats->rx_dropped + stats->rx_missed_errors,
3193 stats->rx_fifo_errors,
3194 stats->rx_length_errors + stats->rx_over_errors +
3195 stats->rx_crc_errors + stats->rx_frame_errors,
3196 stats->rx_compressed, stats->multicast,

--- 438 unchanged lines hidden ---
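The dev_seq_printf_stats change above swaps "%6s:%8lu" for "%6s: %7lu": once the first counter reaches eight digits, the old format printed the interface name, the colon, and rx_bytes with no separating whitespace, which broke naive field-splitting readers of /proc/net/dev. Illustrative output (made-up values):

        old:    eth0:123456789 1234 ...
        new:    eth0: 123456789 1234 ...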

3635 return;
3636
3637 if (ops->ndo_set_rx_mode)
3638 ops->ndo_set_rx_mode(dev);
3639 else {
3640 /* Unicast addresses changes may only happen under the rtnl,
3641 * therefore calling __dev_set_promiscuity here is safe.
3642 */
3241 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3242 dev->name, stats->rx_bytes, stats->rx_packets,
3243 stats->rx_errors,
3244 stats->rx_dropped + stats->rx_missed_errors,
3245 stats->rx_fifo_errors,
3246 stats->rx_length_errors + stats->rx_over_errors +
3247 stats->rx_crc_errors + stats->rx_frame_errors,
3248 stats->rx_compressed, stats->multicast,

--- 438 unchanged lines hidden (view full) ---

3687 return;
3688
3689 if (ops->ndo_set_rx_mode)
3690 ops->ndo_set_rx_mode(dev);
3691 else {
3692 /* Unicast addresses changes may only happen under the rtnl,
3693 * therefore calling __dev_set_promiscuity here is safe.
3694 */
3643 if (dev->uc.count > 0 && !dev->uc_promisc) {
3695 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
3644 __dev_set_promiscuity(dev, 1);
3645 dev->uc_promisc = 1;
3646 } else if (dev->uc.count == 0 && dev->uc_promisc) {
3698 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
3647 __dev_set_promiscuity(dev, -1);
3648 dev->uc_promisc = 0;
3649 }
3650
3651 if (ops->ndo_set_multicast_list)
3652 ops->ndo_set_multicast_list(dev);
3653 }
3654}
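netdev_uc_empty replaces direct pokes at dev->uc.count; at this point the helpers are thin macros over the same field. A sketch of their definitions in include/linux/netdevice.h of this era, from memory:

#define netdev_uc_count(dev) ((dev)->uc.count)
#define netdev_uc_empty(dev) ((dev)->uc.count == 0)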

--- 551 unchanged lines hidden (view full) ---

4206 }
4207}
4208
4209static void dev_addr_discard(struct net_device *dev)
4210{
4211 netif_addr_lock_bh(dev);
4212
4213 __dev_addr_discard(&dev->mc_list);
4214 dev->mc_count = 0;
4266 netdev_mc_count(dev) = 0;
4215
4216 netif_addr_unlock_bh(dev);
4217}
4218
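netdev_mc_count(dev) = 0 looks odd for a "function", but the helper is a macro expanding to the mc_count field, so it is a valid lvalue. A sketch of the era's definitions, from memory:

#define netdev_mc_count(dev) ((dev)->mc_count)
#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)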
4219/**
4220 * dev_get_flags - get flags reported to userspace
4221 * @dev: device
4222 *

--- 585 unchanged lines hidden ---

4808 dev_shutdown(dev);
4809
4810
 4811 /* Notify protocols that we are about to destroy
 4812 this device. They should clean all the things.
4813 */
4814 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4815
4868 if (!dev->rtnl_link_ops ||
4869 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4870 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4871
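This guard skips the RTM_DELLINK broadcast (and, in register_netdevice below, RTM_NEWLINK) for devices created through rtnl_link_ops whose setup is still in flight; for those, the rtnetlink core sends the notification itself once the link is fully configured. A sketch of the state field consulted here, as assumed from the companion rtnetlink patch (exact form not shown in this diff):

/* In struct net_device: */
enum {
        RTNL_LINK_INITIALIZED,  /* default: notify from (un)register paths */
        RTNL_LINK_INITIALIZING, /* rtnl_link setup in flight: rtnetlink
                                 * core sends the notification itself */
} rtnl_link_state:16;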
4816 /*
4817 * Flush the unicast and multicast chains
4818 */
4819 dev_unicast_flush(dev);
4820 dev_addr_discard(dev);
4821
4822 if (dev->netdev_ops->ndo_uninit)
4823 dev->netdev_ops->ndo_uninit(dev);
4824
4825 /* Notifier chain MUST detach us from master device. */
4826 WARN_ON(dev->master);
4827
4828 /* Remove entries from kobject tree */
4829 netdev_unregister_kobject(dev);
4830 }
4831
4832 /* Process any work delayed until the end of the batch */
4833 dev = list_entry(head->next, struct net_device, unreg_list);
4889 dev = list_first_entry(head, struct net_device, unreg_list);
4834 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4835
4836 synchronize_net();
4837
4838 list_for_each_entry(dev, head, unreg_list)
4839 dev_put(dev);
4840}
4841

--- 192 unchanged lines hidden ---

5034 if (ret) {
5035 rollback_registered(dev);
5036 dev->reg_state = NETREG_UNREGISTERED;
5037 }
5038 /*
5039 * Prevent userspace races by waiting until the network
5040 * device is fully setup before sending notifications.
5041 */
5042 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5098 if (!dev->rtnl_link_ops ||
5099 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5100 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5043
5044out:
5045 return ret;
5046
5047err_uninit:
5048 if (dev->netdev_ops->ndo_uninit)
5049 dev->netdev_ops->ndo_uninit(dev);
5050 goto out;

--- 160 unchanged lines hidden ---

5211
5212 /* Snapshot list, allow later requests */
5213 list_replace_init(&net_todo_list, &list);
5214
5215 __rtnl_unlock();
5216
5217 while (!list_empty(&list)) {
5218 struct net_device *dev
5219 = list_entry(list.next, struct net_device, todo_list);
5277 = list_first_entry(&list, struct net_device, todo_list);
5220 list_del(&dev->todo_list);
5221
5222 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5223 printk(KERN_ERR "network todo '%s' but state %d\n",
5224 dev->name, dev->reg_state);
5225 dump_stack();
5226 continue;
5227 }

--- 134 unchanged lines hidden ---

5362 dev->_tx = tx;
5363 dev->num_tx_queues = queue_count;
5364 dev->real_num_tx_queues = queue_count;
5365
5366 dev->gso_max_size = GSO_MAX_SIZE;
5367
5368 netdev_init_queues(dev);
5369
5428 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5429 dev->ethtool_ntuple_list.count = 0;
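The new ethtool_ntuple_list caches n-tuple RX filter rules (the ethtool -u/-U interface) per device; a sketch of the structure assumed here, from the companion ethtool change:

struct ethtool_rx_ntuple_list {
        struct list_head        list;   /* cached flow-spec rules */
        unsigned int            count;  /* number of cached rules */
};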
5370 INIT_LIST_HEAD(&dev->napi_list);
5371 INIT_LIST_HEAD(&dev->unreg_list);
5372 INIT_LIST_HEAD(&dev->link_watch_list);
5373 dev->priv_flags = IFF_XMIT_DST_RELEASE;
5374 setup(dev);
5375 strcpy(dev->name, name);
5376 return dev;
5377

--- 20 unchanged lines hidden ---

5398
5399 release_net(dev_net(dev));
5400
5401 kfree(dev->_tx);
5402
5403 /* Flush device addresses */
5404 dev_addr_flush(dev);
5405
5466 /* Clear ethtool n-tuple list */
5467 ethtool_ntuple_flush(dev);
5468
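free_netdev must drain the cached rules or they would leak with the device; a plausible shape of ethtool_ntuple_flush, an assumption based on the list initialized above rather than code from this diff (the container type name is likewise assumed):

void ethtool_ntuple_flush(struct net_device *dev)
{
        struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;

        /* Unlink and free every cached n-tuple rule. */
        list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
                list_del(&fsc->list);
                kfree(fsc);
        }
        dev->ethtool_ntuple_list.count = 0;
}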
5406 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5407 netif_napi_del(p);
5408
5409 /* Compatibility with error handling in drivers */
5410 if (dev->reg_state == NETREG_UNINITIALIZED) {
5411 kfree((char *)dev - dev->padded);
5412 return;
5413 }

--- 510 unchanged lines hidden ---