dev.c — net/core/dev.c, diff between f04565ddf52e401880f8ba51de0dff8ba51c99fd (new) and d2237d35748e7f448a9c2d9dc6a85ef637466e24 (old); in each hunk the new version's lines appear first, then the old version's
1/*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *

--- 119 unchanged lines hidden ---

128#include <linux/jhash.h>
129#include <linux/random.h>
130#include <trace/events/napi.h>
131#include <trace/events/net.h>
132#include <trace/events/skb.h>
133#include <linux/pci.h>
134#include <linux/inetdevice.h>
135#include <linux/cpu_rmap.h>
136#include <linux/if_tunnel.h>
137#include <linux/if_pppox.h>
138#include <linux/ppp_defs.h>
139#include <linux/net_tstamp.h>
140
141#include "net-sysfs.h"
142
143/* Instead of increasing this, you should create a hash table. */
144#define MAX_GRO_SKBS 8
145
146/* This should be increased if a protocol with a bigger head is added. */
147#define GRO_MAX_HEAD (MAX_HEADER + 128)

--- 1325 unchanged lines hidden ---

1473}
1474
1475static inline void net_timestamp_check(struct sk_buff *skb)
1476{
1477 if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
1478 __net_timestamp(skb);
1479}
1480
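net_timestamp_check() above stamps an incoming skb only while some socket has asked for receive timestamps: netstamp_needed is raised through net_enable_timestamp() when a socket enables SO_TIMESTAMP or SO_TIMESTAMPNS. A minimal userspace sketch of the requesting side follows — the port number is illustrative, not taken from this diff:

/* Sketch: enable SO_TIMESTAMPNS so the kernel timestamps received
 * datagrams; this is what keeps netstamp_needed non-zero above. */
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <time.h>

int main(void)
{
	int one = 1;
	char buf[2048], ctrl[512];
	struct sockaddr_in sa = { .sin_family = AF_INET,
				  .sin_port = htons(5555) }; /* illustrative port */
	struct iovec iov = { buf, sizeof(buf) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = ctrl, .msg_controllen = sizeof(ctrl) };
	struct cmsghdr *c;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &one, sizeof(one));
	if (recvmsg(fd, &msg, 0) < 0)
		return 1;
	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
		if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_TIMESTAMPNS) {
			struct timespec *ts = (struct timespec *)CMSG_DATA(c);
			printf("rx at %ld.%09ld\n", (long)ts->tv_sec, ts->tv_nsec);
		}
	return 0;
}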
1481static int net_hwtstamp_validate(struct ifreq *ifr)
1482{
1483 struct hwtstamp_config cfg;
1484 enum hwtstamp_tx_types tx_type;
1485 enum hwtstamp_rx_filters rx_filter;
1486 int tx_type_valid = 0;
1487 int rx_filter_valid = 0;
1488
1489 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1490 return -EFAULT;
1491
1492 if (cfg.flags) /* reserved for future extensions */
1493 return -EINVAL;
1494
1495 tx_type = cfg.tx_type;
1496 rx_filter = cfg.rx_filter;
1497
1498 switch (tx_type) {
1499 case HWTSTAMP_TX_OFF:
1500 case HWTSTAMP_TX_ON:
1501 case HWTSTAMP_TX_ONESTEP_SYNC:
1502 tx_type_valid = 1;
1503 break;
1504 }
1505
1506 switch (rx_filter) {
1507 case HWTSTAMP_FILTER_NONE:
1508 case HWTSTAMP_FILTER_ALL:
1509 case HWTSTAMP_FILTER_SOME:
1510 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1511 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1512 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1513 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1514 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1515 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1516 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1517 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1518 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1519 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1520 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1521 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1522 rx_filter_valid = 1;
1523 break;
1524 }
1525
1526 if (!tx_type_valid || !rx_filter_valid)
1527 return -ERANGE;
1528
1529 return 0;
1530}
1531
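net_hwtstamp_validate() (new side, lines 1481-1530) rejects malformed SIOCSHWTSTAMP requests before they ever reach a driver. For reference, a hedged userspace sketch of the ioctl it guards — the interface name is illustrative:

/* Sketch: request hardware TX/RX timestamping on "eth0" via SIOCSHWTSTAMP.
 * cfg.flags must stay 0 (reserved); an invalid tx_type or rx_filter now
 * fails with -ERANGE in net_hwtstamp_validate() before the driver runs. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* illustrative name */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	return 0;
}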
1532static inline bool is_skb_forwardable(struct net_device *dev,
1533 struct sk_buff *skb)
1534{
1535 unsigned int len;
1536
1537 if (!(dev->flags & IFF_UP))
1538 return false;
1539

--- 465 unchanged lines hidden ---

2005 * 2. No high memory really exists on this machine.
2006 */
2007
2008static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2009{
2010#ifdef CONFIG_HIGHMEM
2011 int i;
2012 if (!(dev->features & NETIF_F_HIGHDMA)) {
2013		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2014			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2015			if (PageHighMem(skb_frag_page(frag)))
2016				return 1;
2017		}
1958		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1959			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1960				return 1;
2018	}
2019
2020	if (PCI_DMA_BUS_IS_PHYS) {
2021		struct device *pdev = dev->dev.parent;
2022
2023		if (!pdev)
2024			return 0;
2025		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2026			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2027			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
1969			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
2028			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2029				return 1;
2030		}
2031	}
2032#endif
2033	return 0;
2034}
2035

--- 544 unchanged lines hidden ---

2580				     struct napi_struct *napi)
2581{
2582	list_add_tail(&napi->poll_list, &sd->poll_list);
2583	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
2584}
2585
2586/*
2587 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2588 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
2589 * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
2590 * if hash is a canonical 4-tuple hash over transport ports.
2591 */
2592void __skb_get_rxhash(struct sk_buff *skb)
2593{
2594	int nhoff, hash = 0, poff;
2595	const struct ipv6hdr *ip6;
2596	const struct iphdr *ip;
2597	const struct vlan_hdr *vlan;
2598	u8 ip_proto;
2599	u32 addr1, addr2;
2600	u16 proto;
2601	union {
2602		u32 v32;
2603		u16 v16[2];
2604	} ports;
2605
2606	nhoff = skb_network_offset(skb);
2607	proto = skb->protocol;
2608
2609again:
2610	switch (proto) {
2611	case __constant_htons(ETH_P_IP):
2612ip:
2613		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2614			goto done;
2615
2616		ip = (const struct iphdr *) (skb->data + nhoff);
2617		if (ip_is_fragment(ip))
2618			ip_proto = 0;
2619		else
2620			ip_proto = ip->protocol;
2621		addr1 = (__force u32) ip->saddr;
2622		addr2 = (__force u32) ip->daddr;
2623		nhoff += ip->ihl * 4;
2624		break;
2625	case __constant_htons(ETH_P_IPV6):
2626ipv6:
2627		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2628			goto done;
2629
2630		ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
2631		ip_proto = ip6->nexthdr;
2632		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2633		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
2634		nhoff += 40;
2635		break;
2636	case __constant_htons(ETH_P_8021Q):
2637		if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
2638			goto done;
2639		vlan = (const struct vlan_hdr *) (skb->data + nhoff);
2640		proto = vlan->h_vlan_encapsulated_proto;
2641		nhoff += sizeof(*vlan);
2642		goto again;
2643	case __constant_htons(ETH_P_PPP_SES):
2644		if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
2645			goto done;
2646		proto = *((__be16 *) (skb->data + nhoff +
2647				      sizeof(struct pppoe_hdr)));
2648		nhoff += PPPOE_SES_HLEN;
2649		switch (proto) {
2650		case __constant_htons(PPP_IP):
2651			goto ip;
2652		case __constant_htons(PPP_IPV6):
2653			goto ipv6;
2654		default:
2655			goto done;
2656		}
2657	default:
2658		goto done;
2659	}
2660
2661	switch (ip_proto) {
2662	case IPPROTO_GRE:
2663		if (pskb_may_pull(skb, nhoff + 16)) {
2664			u8 *h = skb->data + nhoff;
2665			__be16 flags = *(__be16 *)h;
2666
2667			/*
2668			 * Only look inside GRE if version zero and no
2669			 * routing
2670			 */
2671			if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
2672				proto = *(__be16 *)(h + 2);
2673				nhoff += 4;
2674				if (flags & GRE_CSUM)
2675					nhoff += 4;
2676				if (flags & GRE_KEY)
2677					nhoff += 4;
2678				if (flags & GRE_SEQ)
2679					nhoff += 4;
2680				goto again;
2681			}
2682		}
2683		break;
2684	case IPPROTO_IPIP:
2685		goto again;
2686	default:
2687		break;
2688	}
2689
2690	ports.v32 = 0;
2691	poff = proto_ports_offset(ip_proto);
2692	if (poff >= 0) {
2693		nhoff += poff;
2694		if (pskb_may_pull(skb, nhoff + 4)) {
2695			ports.v32 = * (__force u32 *) (skb->data + nhoff);
2696			if (ports.v16[1] < ports.v16[0])
2697				swap(ports.v16[0], ports.v16[1]);
2698			skb->l4_rxhash = 1;
2699		}
2700	}
2701
2702	/* get a consistent hash (same value on both flow directions) */
2703	if (addr2 < addr1)
2704		swap(addr1, addr2);
2705
2706	hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2707	if (!hash)
2708		hash = 1;
2709
2710done:
2711	skb->rxhash = hash;
2712}
2713EXPORT_SYMBOL(__skb_get_rxhash);
2714
2528/*
2529 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2530 * and src/dst port numbers. Returns a non-zero hash number on success
2531 * and 0 on failure.
2532 */
2533__u32 __skb_get_rxhash(struct sk_buff *skb)
2534{
2535	int nhoff, hash = 0, poff;
2536	const struct ipv6hdr *ip6;
2537	const struct iphdr *ip;
2538	u8 ip_proto;
2539	u32 addr1, addr2, ihl;
2540	union {
2541		u32 v32;
2542		u16 v16[2];
2543	} ports;
2544
2545	nhoff = skb_network_offset(skb);
2546
2547	switch (skb->protocol) {
2548	case __constant_htons(ETH_P_IP):
2549		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2550			goto done;
2551
2552		ip = (const struct iphdr *) (skb->data + nhoff);
2553		if (ip_is_fragment(ip))
2554			ip_proto = 0;
2555		else
2556			ip_proto = ip->protocol;
2557		addr1 = (__force u32) ip->saddr;
2558		addr2 = (__force u32) ip->daddr;
2559		ihl = ip->ihl;
2560		break;
2561	case __constant_htons(ETH_P_IPV6):
2562		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2563			goto done;
2564
2565		ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
2566		ip_proto = ip6->nexthdr;
2567		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2568		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
2569		ihl = (40 >> 2);
2570		break;
2571	default:
2572		goto done;
2573	}
2574
2575	ports.v32 = 0;
2576	poff = proto_ports_offset(ip_proto);
2577	if (poff >= 0) {
2578		nhoff += ihl * 4 + poff;
2579		if (pskb_may_pull(skb, nhoff + 4)) {
2580			ports.v32 = * (__force u32 *) (skb->data + nhoff);
2581			if (ports.v16[1] < ports.v16[0])
2582				swap(ports.v16[0], ports.v16[1]);
2583		}
2584	}
2585
2586	/* get a consistent hash (same value on both flow directions) */
2587	if (addr2 < addr1)
2588		swap(addr1, addr2);
2589
2590	hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2591	if (!hash)
2592		hash = 1;
2593
2594done:
2595	return hash;
2596}
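Both versions canonicalize the tuple before mixing — addresses and ports are swapped so that both directions of a flow produce the same hash, and 0 is reserved to mean "no valid hash". A standalone sketch of that canonicalization; the kernel mixes with jhash_3words() seeded by hashrnd, and packs the ports through a byte-order-sensitive union, so the mixer and port packing below are only illustrative:

/* Sketch: direction-independent flow key, as in __skb_get_rxhash(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t mix32(uint32_t a, uint32_t b, uint32_t c)
{
	uint32_t h = a * 0x9e3779b1u; /* stand-in mixer, not jhash_3words() */
	h ^= b; h *= 0x85ebca6bu;
	h ^= c; h *= 0xc2b2ae35u;
	return h ^ (h >> 16);
}

static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
			  uint16_t sport, uint16_t dport)
{
	uint16_t lo = sport < dport ? sport : dport;
	uint16_t hi = sport < dport ? dport : sport;
	uint32_t ports = ((uint32_t)hi << 16) | lo;
	uint32_t h;

	if (daddr < saddr) { /* same direction-normalizing swap as the kernel */
		uint32_t t = saddr; saddr = daddr; daddr = t;
	}
	h = mix32(saddr, daddr, ports);
	return h ? h : 1; /* 0 is reserved to mean "no valid hash" */
}

int main(void)
{
	/* both directions of the flow print the same value */
	printf("%u\n", flow_hash(0x0a000001, 0x0a000002, 1234, 80));
	printf("%u\n", flow_hash(0x0a000002, 0x0a000001, 80, 1234));
	return 0;
}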
2715#ifdef CONFIG_RPS
2716
2717/* One global table that all flow-based protocols share. */
2718struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2719EXPORT_SYMBOL(rps_sock_flow_table);
2720
2721static struct rps_dev_flow *
2722set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2723 struct rps_dev_flow *rflow, u16 next_cpu)
2724{
2725	if (next_cpu != RPS_NO_CPU) {
2609	u16 tcpu;
2610
2611	tcpu = rflow->cpu = next_cpu;
2612	if (tcpu != RPS_NO_CPU) {
2726#ifdef CONFIG_RFS_ACCEL
2727		struct netdev_rx_queue *rxqueue;
2728		struct rps_dev_flow_table *flow_table;
2729		struct rps_dev_flow *old_rflow;
2730		u32 flow_id;
2731		u16 rxq_index;
2732		int rc;
2733

--- 11 unchanged lines hidden ---

2745			goto out;
2746		flow_id = skb->rxhash & flow_table->mask;
2747		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2748							rxq_index, flow_id);
2749		if (rc < 0)
2750			goto out;
2751		old_rflow = rflow;
2752		rflow = &flow_table->flows[flow_id];
2640		rflow->cpu = next_cpu;
2753		rflow->filter = rc;
2754		if (old_rflow->filter == rflow->filter)
2755			old_rflow->filter = RPS_NO_FILTER;
2756	out:
2757#endif
2758		rflow->last_qtail =
2759			per_cpu(softnet_data, next_cpu).input_queue_head;
2647			per_cpu(softnet_data, tcpu).input_queue_head;
2760	}
2761
2762	rflow->cpu = next_cpu;
2763	return rflow;
2764}
2765
2766/*
2767 * get_rps_cpu is called from netif_receive_skb and returns the target
2768 * CPU from the RPS map of the receiving queue for a given skb.
2769 * rcu_read_lock must be held on entry.
2770 */

--- 18 unchanged lines hidden ---

2789		}
2790		rxqueue = dev->_rx + index;
2791	} else
2792		rxqueue = dev->_rx;
2793
2794	map = rcu_dereference(rxqueue->rps_map);
2795	if (map) {
2796		if (map->len == 1 &&
2797		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
2684		    !rcu_dereference_raw(rxqueue->rps_flow_table)) {
2798			tcpu = map->cpus[0];
2799			if (cpu_online(tcpu))
2800				cpu = tcpu;
2801			goto done;
2802		}
2803	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2690	} else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
2804		goto done;
2805	}
2806
2807	skb_reset_network_header(skb);
2808	if (!skb_get_rxhash(skb))
2809		goto done;
2810
2811	flow_table = rcu_dereference(rxqueue->rps_flow_table);

--- 398 unchanged lines hidden ---

3210 *	Unregister a receive handler from a device.
3211 *
3212 *	The caller must hold the rtnl_mutex.
3213 */
3214void netdev_rx_handler_unregister(struct net_device *dev)
3215{
3216
3217	ASSERT_RTNL();
3218	RCU_INIT_POINTER(dev->rx_handler, NULL);
3219	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3105	rcu_assign_pointer(dev->rx_handler, NULL);
3106	rcu_assign_pointer(dev->rx_handler_data, NULL);
3220}
3221EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3222
3223static int __netif_receive_skb(struct sk_buff *skb)
3224{
3225 struct packet_type *ptype, *pt_prev;
3226 rx_handler_func_t *rx_handler;
3227 struct net_device *orig_dev;

--- 50 unchanged lines hidden ---

3278
3279#ifdef CONFIG_NET_CLS_ACT
3280 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3281 if (!skb)
3282 goto out;
3283ncls:
3284#endif
3285
3286	if (vlan_tx_tag_present(skb)) {
3287		if (pt_prev) {
3288			ret = deliver_skb(skb, pt_prev, orig_dev);
3289			pt_prev = NULL;
3290		}
3291		if (vlan_do_receive(&skb))
3292			goto another_round;
3293		else if (unlikely(!skb))
3294			goto out;
3295	}
3296
3297	rx_handler = rcu_dereference(skb->dev->rx_handler);
3298	if (rx_handler) {
3299		if (pt_prev) {
3300			ret = deliver_skb(skb, pt_prev, orig_dev);
3301			pt_prev = NULL;
3302		}
3303		switch (rx_handler(&skb)) {
3304		case RX_HANDLER_CONSUMED:

--- 4 unchanged lines hidden ---

3309			deliver_exact = true;
3310		case RX_HANDLER_PASS:
3311			break;
3312		default:
3313			BUG();
3314		}
3315	}
3316
3193	if (vlan_tx_tag_present(skb)) {
3194		if (pt_prev) {
3195			ret = deliver_skb(skb, pt_prev, orig_dev);
3196			pt_prev = NULL;
3197		}
3198		if (vlan_do_receive(&skb)) {
3199			ret = __netif_receive_skb(skb);
3200			goto out;
3201		} else if (unlikely(!skb))
3202			goto out;
3203	}
3204
3317	/* deliver only exact match when indicated */
3318	null_or_dev = deliver_exact ? skb->dev : NULL;
3319
3320	type = skb->protocol;
3321	list_for_each_entry_rcu(ptype,
3322			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3323		if (ptype->type == type &&
3324		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||

--- 211 unchanged lines hidden ---

3536		BUG_ON(skb->end - skb->tail < grow);
3537
3538		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3539
3540		skb->tail += grow;
3541		skb->data_len -= grow;
3542
3543		skb_shinfo(skb)->frags[0].page_offset += grow;
3544		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3432		skb_shinfo(skb)->frags[0].size -= grow;
3545
3546		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3547			skb_frag_unref(skb, 0);
3434		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3435			put_page(skb_shinfo(skb)->frags[0].page);
3548			memmove(skb_shinfo(skb)->frags,
3549				skb_shinfo(skb)->frags + 1,
3550				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3551		}
3552	}
3553
3554ok:
3555	return ret;

--- 47 unchanged lines hidden ---

3603
3604void skb_gro_reset_offset(struct sk_buff *skb)
3605{
3606	NAPI_GRO_CB(skb)->data_offset = 0;
3607	NAPI_GRO_CB(skb)->frag0 = NULL;
3608	NAPI_GRO_CB(skb)->frag0_len = 0;
3609
3610	if (skb->mac_header == skb->tail &&
3611	    !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
3499	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
3612		NAPI_GRO_CB(skb)->frag0 =
3613			skb_frag_address(&skb_shinfo(skb)->frags[0]);
3614		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
3500		NAPI_GRO_CB(skb)->frag0 =
3501			page_address(skb_shinfo(skb)->frags[0].page) +
3502			skb_shinfo(skb)->frags[0].page_offset;
3503		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3615	}
3616}
3617EXPORT_SYMBOL(skb_gro_reset_offset);
3618
3619gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3620{
3621	skb_gro_reset_offset(skb);
3622

--- 465 unchanged lines hidden ---

4088
4089	/*
4090	 *	Both BSD and Solaris return 0 here, so we do too.
4091	 */
4092	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4093}
4094
4095#ifdef CONFIG_PROC_FS
4096
4097#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
4098
4099struct dev_iter_state {
4100	struct seq_net_private p;
4101	unsigned int pos; /* bucket << BUCKET_SPACE + offset */
4102};
4103
4104#define get_bucket(x) ((x) >> BUCKET_SPACE)
4105#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4106#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4107
4108static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
4109{
4110	struct dev_iter_state *state = seq->private;
4111	struct net *net = seq_file_net(seq);
4112	struct net_device *dev;
4113	struct hlist_node *p;
4114	struct hlist_head *h;
4115	unsigned int count, bucket, offset;
4116
4117	bucket = get_bucket(state->pos);
4118	offset = get_offset(state->pos);
4119	h = &net->dev_name_head[bucket];
4120	count = 0;
4121	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4122		if (count++ == offset) {
4123			state->pos = set_bucket_offset(bucket, count);
4124			return dev;
4125		}
4126	}
4127
4128	return NULL;
4129}
4130
4131static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
4132{
4133	struct dev_iter_state *state = seq->private;
4134	struct net_device *dev;
4135	unsigned int bucket;
4136
4137	bucket = get_bucket(state->pos);
4138	do {
4139		dev = dev_from_same_bucket(seq);
4140		if (dev)
4141			return dev;
4142
4143		bucket++;
4144		state->pos = set_bucket_offset(bucket, 0);
4145	} while (bucket < NETDEV_HASHENTRIES);
4146
4147	return NULL;
4148}
4149
4150/*
4151 *	This is invoked by the /proc filesystem handler to display a device
4152 *	in detail.
4153 */
4154void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4155	__acquires(RCU)
4156{
4157	struct dev_iter_state *state = seq->private;
4158
4159	rcu_read_lock();
4160	if (!*pos)
4161		return SEQ_START_TOKEN;
4162
4163	/* check for end of the hash */
4164	if (state->pos == 0 && *pos > 1)
4165		return NULL;
4166
4167	return dev_from_new_bucket(seq);
4168}
4169
4170void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4171{
4172	struct net_device *dev;
4173
4174	++*pos;
4175
4176	if (v == SEQ_START_TOKEN)
4177		return dev_from_new_bucket(seq);
4178
4179	dev = dev_from_same_bucket(seq);
4180	if (dev)
4181		return dev;
4182
4183	return dev_from_new_bucket(seq);
4184}
4185
4186void dev_seq_stop(struct seq_file *seq, void *v)
4187	__releases(RCU)
4188{
4189	rcu_read_unlock();
4190}
4191
3989void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3990	__acquires(RCU)
3991{
3992	struct net *net = seq_file_net(seq);
3993	loff_t off;
3994	struct net_device *dev;
3995
3996	rcu_read_lock();
3997	if (!*pos)
3998		return SEQ_START_TOKEN;
3999
4000	off = 1;
4001	for_each_netdev_rcu(net, dev)
4002		if (off++ == *pos)
4003			return dev;
4004
4005	return NULL;
4006}
4007
4008void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4009{
4010	struct net_device *dev = v;
4011
4012	if (v == SEQ_START_TOKEN)
4013		dev = first_net_device_rcu(seq_file_net(seq));
4014	else
4015		dev = next_net_device_rcu(dev);
4016
4017	++*pos;
4018	return dev;
4019}
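The new iterator packs a name-hash bucket index and an intra-bucket offset into a single unsigned int: bucket in the high bits, offset in the low BUCKET_SPACE bits. A quick standalone check of that packing, assuming NETDEV_HASHBITS is 8 (a 256-chain hash table, as in kernels of this era):

/* Sketch: the get_bucket()/get_offset()/set_bucket_offset() packing
 * used by dev_iter_state; NETDEV_HASHBITS = 8 is an assumption here. */
#include <assert.h>
#include <stdio.h>

#define NETDEV_HASHBITS 8
#define BUCKET_SPACE (32 - NETDEV_HASHBITS)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

int main(void)
{
	unsigned int pos = set_bucket_offset(5U, 42U);

	assert(get_bucket(pos) == 5);
	assert(get_offset(pos) == 42);
	printf("pos=0x%08x bucket=%u offset=%u\n",
	       pos, get_bucket(pos), get_offset(pos));
	return 0;
}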

--- 82 unchanged lines hidden ---

4274 .next = dev_seq_next,
4275 .stop = dev_seq_stop,
4276 .show = dev_seq_show,
4277};
4278
4279static int dev_seq_open(struct inode *inode, struct file *file)
4280{
4281 return seq_open_net(inode, file, &dev_seq_ops,
4282			    sizeof(struct dev_iter_state));
4117			    sizeof(struct seq_net_private));
4283}
4284
4285static const struct file_operations dev_seq_fops = {
4286 .owner = THIS_MODULE,
4287 .open = dev_seq_open,
4288 .read = seq_read,
4289 .llseek = seq_lseek,
4290 .release = seq_release_net,

--- 366 unchanged lines hidden ---

4657
4658 /* dev_open will call this function so the list will stay sane. */
4659 if (!(dev->flags&IFF_UP))
4660 return;
4661
4662 if (!netif_device_present(dev))
4663 return;
4664
4665	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4666		/* Unicast address changes may only happen under the rtnl,
4667		 * therefore calling __dev_set_promiscuity here is safe.
4668		 */
4669		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4670			__dev_set_promiscuity(dev, 1);
4671			dev->uc_promisc = true;
4672		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4673			__dev_set_promiscuity(dev, -1);
4674			dev->uc_promisc = false;
4675		}
4676	}
4677
4678	if (ops->ndo_set_rx_mode)
4679		ops->ndo_set_rx_mode(dev);
4500	if (ops->ndo_set_rx_mode)
4501		ops->ndo_set_rx_mode(dev);
4502	else {
4503		/* Unicast address changes may only happen under the rtnl,
4504		 * therefore calling __dev_set_promiscuity here is safe.
4505		 */
4506		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4507			__dev_set_promiscuity(dev, 1);
4508			dev->uc_promisc = true;
4509		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4510			__dev_set_promiscuity(dev, -1);
4511			dev->uc_promisc = false;
4512		}
4513
4514		if (ops->ndo_set_multicast_list)
4515			ops->ndo_set_multicast_list(dev);
4516	}
4680}
4681
4682void dev_set_rx_mode(struct net_device *dev)
4683{
4684 netif_addr_lock_bh(dev);
4685 __dev_set_rx_mode(dev);
4686 netif_addr_unlock_bh(dev);
4687}
4688
4526/**
4527 * dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
4528 * @dev: device
4529 * @cmd: memory area for ethtool_ops::get_settings() result
4530 *
4531 * The cmd arg is initialized properly (cleared and
4532 * ethtool_cmd::cmd field set to ETHTOOL_GSET).
4533 *
4534 * Return device's ethtool_ops::get_settings() result value or
4535 * -EOPNOTSUPP when device doesn't expose
4536 * ethtool_ops::get_settings() operation.
4537 */
4538int dev_ethtool_get_settings(struct net_device *dev,
4539 struct ethtool_cmd *cmd)
4540{
4541 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
4542 return -EOPNOTSUPP;
4543
4544 memset(cmd, 0, sizeof(struct ethtool_cmd));
4545 cmd->cmd = ETHTOOL_GSET;
4546 return dev->ethtool_ops->get_settings(dev, cmd);
4547}
4548EXPORT_SYMBOL(dev_ethtool_get_settings);
4549
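dev_ethtool_get_settings() exists only on the old side here; the link information it wraps is what userspace reads through the SIOCETHTOOL ioctl with ETHTOOL_GSET. A hedged sketch of that caller side (interface name illustrative):

/* Sketch: read link settings via ETHTOOL_GSET from userspace —
 * the same data dev_ethtool_get_settings() fetched in-kernel. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* illustrative name */
	ifr.ifr_data = (char *)&cmd;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("speed: %u Mb/s\n", ethtool_cmd_speed(&cmd));
	else
		perror("ETHTOOL_GSET");
	return 0;
}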
4689/**
4690 * dev_get_flags - get flags reported to userspace
4691 * @dev: device
4692 *
4693 * Get the combination of flag bits exported through APIs to userspace.
4694 */
4695unsigned dev_get_flags(const struct net_device *dev)
4696{
4697 unsigned flags;

--- 299 unchanged lines hidden ---

4997 if (ops->ndo_set_config) {
4998 if (!netif_device_present(dev))
4999 return -ENODEV;
5000 return ops->ndo_set_config(dev, &ifr->ifr_map);
5001 }
5002 return -EOPNOTSUPP;
5003
5004 case SIOCADDMULTI:
5005		if (!ops->ndo_set_rx_mode ||
4866		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
5006		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5007			return -EINVAL;
5008		if (!netif_device_present(dev))
5009			return -ENODEV;
5010		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
5011
5012	case SIOCDELMULTI:
5013		if (!ops->ndo_set_rx_mode ||
4874		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
5014		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5015			return -EINVAL;
5016		if (!netif_device_present(dev))
5017			return -ENODEV;
5018		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
5019
5020	case SIOCSIFTXQLEN:
5021		if (ifr->ifr_qlen < 0)
5022			return -EINVAL;
5023		dev->tx_queue_len = ifr->ifr_qlen;
5024		return 0;
5025
5026	case SIOCSIFNAME:
5027		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5028		return dev_change_name(dev, ifr->ifr_newname);
5029
5030 case SIOCSHWTSTAMP:
5031 err = net_hwtstamp_validate(ifr);
5032 if (err)
5033 return err;
5034 /* fall through */
5035
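On the new side SIOCADDMULTI/SIOCDELMULTI above only require ndo_set_rx_mode, since ndo_set_multicast_list is gone. The caller-side shape of those ioctls, sketched (group address and interface name are illustrative):

/* Sketch: join a link-layer multicast group via SIOCADDMULTI.
 * ifr_hwaddr.sa_family must be AF_UNSPEC, matching the check above. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

int main(void)
{
	static const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);      /* illustrative */
	ifr.ifr_hwaddr.sa_family = AF_UNSPEC;             /* required */
	memcpy(ifr.ifr_hwaddr.sa_data, mac, sizeof(mac)); /* e.g. the mDNS group */

	if (ioctl(fd, SIOCADDMULTI, &ifr) < 0)
		perror("SIOCADDMULTI");
	return 0;
}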
5036 /*
5037 * Unknown or private ioctl
5038 */
5039 default:
5040 if ((cmd >= SIOCDEVPRIVATE &&
5041 cmd <= SIOCDEVPRIVATE + 15) ||
5042 cmd == SIOCBONDENSLAVE ||
5043 cmd == SIOCBONDRELEASE ||

--- 298 unchanged lines hidden ---

5342 /* Remove entries from kobject tree */
5343 netdev_unregister_kobject(dev);
5344 }
5345
5346 /* Process any work delayed until the end of the batch */
5347 dev = list_first_entry(head, struct net_device, unreg_list);
5348 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5349
5350 synchronize_net();
5205 rcu_barrier();
5351
5352 list_for_each_entry(dev, head, unreg_list)
5353 dev_put(dev);
5354}
5355
5356static void rollback_registered(struct net_device *dev)
5357{
5358 LIST_HEAD(single);

--- 496 unchanged lines hidden ---

5855{
5856 struct list_head list;
5857
5858 /* Snapshot list, allow later requests */
5859 list_replace_init(&net_todo_list, &list);
5860
5861 __rtnl_unlock();
5862
5863 /* Wait for rcu callbacks to finish before attempting to drain
5864 * the device list. This usually avoids a 250ms wait.
5865 */
5866 if (!list_empty(&list))
5867 rcu_barrier();
5868
5869 while (!list_empty(&list)) {
5870 struct net_device *dev
5871 = list_first_entry(&list, struct net_device, todo_list);
5872 list_del(&dev->todo_list);
5873
5874 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5875 printk(KERN_ERR "network todo '%s' but state %d\n",
5876 dev->name, dev->reg_state);

--- 4 unchanged lines hidden ---

5881 dev->reg_state = NETREG_UNREGISTERED;
5882
5883 on_each_cpu(flush_backlog, dev, 1);
5884
5885 netdev_wait_allrefs(dev);
5886
5887 /* paranoia */
5888 BUG_ON(netdev_refcnt_read(dev));
5889 WARN_ON(rcu_access_pointer(dev->ip_ptr));
5890 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
5738 WARN_ON(rcu_dereference_raw(dev->ip_ptr));
5739 WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
5891 WARN_ON(dev->dn_ptr);
5892
5893 if (dev->destructor)
5894 dev->destructor(dev);
5895
5896 /* Free network device */
5897 kobject_put(&dev->dev.kobj);
5898 }

--- 187 unchanged lines hidden ---

6086
6087 release_net(dev_net(dev));
6088
6089 kfree(dev->_tx);
6090#ifdef CONFIG_RPS
6091 kfree(dev->_rx);
6092#endif
6093
6094 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
5943 kfree(rcu_dereference_raw(dev->ingress_queue));
6095
6096 /* Flush device addresses */
6097 dev_addr_flush(dev);
6098
6099 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6100 netif_napi_del(p);
6101
6102 free_percpu(dev->pcpu_refcnt);

--- 158 unchanged lines hidden ---

6261 this device. They should clean all the things.
6262
6263 Note that dev->reg_state stays at NETREG_REGISTERED.
6264 This is wanted because this way 8021q and macvlan know
6265 the device is just moving and can keep their slaves up.
6266 */
6267 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6268 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
6118 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6269
6270 /*
6271 * Flush the unicast and multicast chains
6272 */
6273 dev_uc_flush(dev);
6274 dev_mc_flush(dev);
6275
6276 /* Actually switch the network namespace */

--- 405 unchanged lines hidden ---