dev.c (old: 76771c938e95ce4106c6e8092f4f614d4d1e0ecc) -> dev.c (new: 02637fce3e0103ba086b9c33b6d529e69460e4b6)
1/*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *

--- 104 unchanged lines hidden (view full) ---

113#include <linux/audit.h>
114#include <linux/dmaengine.h>
115#include <linux/err.h>
116#include <linux/ctype.h>
117#include <linux/if_arp.h>
118#include <linux/if_vlan.h>
119#include <linux/ip.h>
120#include <net/ip.h>
1/*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *

--- 104 unchanged lines hidden (view full) ---

113#include <linux/audit.h>
114#include <linux/dmaengine.h>
115#include <linux/err.h>
116#include <linux/ctype.h>
117#include <linux/if_arp.h>
118#include <linux/if_vlan.h>
119#include <linux/ip.h>
120#include <net/ip.h>
121#include <net/mpls.h>
121#include <linux/ipv6.h>
122#include <linux/in.h>
123#include <linux/jhash.h>
124#include <linux/random.h>
125#include <trace/events/napi.h>
126#include <trace/events/net.h>
127#include <trace/events/skb.h>
128#include <linux/pci.h>
129#include <linux/inetdevice.h>
130#include <linux/cpu_rmap.h>
131#include <linux/static_key.h>
132#include <linux/hashtable.h>
133#include <linux/vmalloc.h>
134#include <linux/if_macvlan.h>
135#include <linux/errqueue.h>
122#include <linux/ipv6.h>
123#include <linux/in.h>
124#include <linux/jhash.h>
125#include <linux/random.h>
126#include <trace/events/napi.h>
127#include <trace/events/net.h>
128#include <trace/events/skb.h>
129#include <linux/pci.h>
130#include <linux/inetdevice.h>
131#include <linux/cpu_rmap.h>
132#include <linux/static_key.h>
133#include <linux/hashtable.h>
134#include <linux/vmalloc.h>
135#include <linux/if_macvlan.h>
136#include <linux/errqueue.h>
137#include <linux/hrtimer.h>
136
137#include "net-sysfs.h"
138
139/* Instead of increasing this, you should create a hash table. */
140#define MAX_GRO_SKBS 8
141
142/* This should be increased if a protocol with a bigger head is added. */
143#define GRO_MAX_HEAD (MAX_HEADER + 128)

--- 1286 unchanged lines hidden (view full) ---

1430 * @dev: device
1431 *
1432 * Disable Large Receive Offload (LRO) on a net device. Must be
1433 * called under RTNL. This is needed if received packets may be
1434 * forwarded to another interface.
1435 */
1436void dev_disable_lro(struct net_device *dev)
1437{
138
139#include "net-sysfs.h"
140
141/* Instead of increasing this, you should create a hash table. */
142#define MAX_GRO_SKBS 8
143
144/* This should be increased if a protocol with a bigger head is added. */
145#define GRO_MAX_HEAD (MAX_HEADER + 128)

--- 1286 unchanged lines hidden (view full) ---

1432 * @dev: device
1433 *
1434 * Disable Large Receive Offload (LRO) on a net device. Must be
1435 * called under RTNL. This is needed if received packets may be
1436 * forwarded to another interface.
1437 */
1438void dev_disable_lro(struct net_device *dev)
1439{
1438 /*
1439 * If we're trying to disable lro on a vlan device
1440 * use the underlying physical device instead
1441 */
1442 if (is_vlan_dev(dev))
1443 dev = vlan_dev_real_dev(dev);
1440 struct net_device *lower_dev;
1441 struct list_head *iter;
1444
1442
1445 /* the same for macvlan devices */
1446 if (netif_is_macvlan(dev))
1447 dev = macvlan_dev_real_dev(dev);
1448
1449 dev->wanted_features &= ~NETIF_F_LRO;
1450 netdev_update_features(dev);
1451
1452 if (unlikely(dev->features & NETIF_F_LRO))
1453 netdev_WARN(dev, "failed to disable LRO!\n");
1443 dev->wanted_features &= ~NETIF_F_LRO;
1444 netdev_update_features(dev);
1445
1446 if (unlikely(dev->features & NETIF_F_LRO))
1447 netdev_WARN(dev, "failed to disable LRO!\n");
1448
1449 netdev_for_each_lower_dev(dev, lower_dev, iter)
1450 dev_disable_lro(lower_dev);
1454}
1455EXPORT_SYMBOL(dev_disable_lro);
1456
1457static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1458 struct net_device *dev)
1459{
1460 struct netdev_notifier_info info;
1461

--- 1063 unchanged lines hidden (view full) ---

2525/* If this is an MPLS offload request, verify that we are testing hardware MPLS features
2526 * instead of standard features for the netdev.
2527 */
2528#ifdef CONFIG_NET_MPLS_GSO
2529static netdev_features_t net_mpls_features(struct sk_buff *skb,
2530 netdev_features_t features,
2531 __be16 type)
2532{
1451}
1452EXPORT_SYMBOL(dev_disable_lro);
1453
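In the new version above, dev_disable_lro() drops the explicit VLAN/macvlan unwrapping and instead clears NETIF_F_LRO on the device itself, then recurses into every directly stacked lower device via netdev_for_each_lower_dev(). A minimal sketch of that iterator pattern, using only the macro already shown in the hunk; count_lower_devs() is a hypothetical helper, not part of this diff:

#include <linux/netdevice.h>

/* Hypothetical helper: walk the directly stacked lower devices the same
 * way the new dev_disable_lro() does, here merely counting them. */
static int count_lower_devs(struct net_device *dev)
{
	struct net_device *lower;
	struct list_head *iter;
	int n = 0;

	netdev_for_each_lower_dev(dev, lower, iter)
		n++;
	return n;
}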
1454static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1455 struct net_device *dev)
1456{
1457 struct netdev_notifier_info info;
1458

--- 1063 unchanged lines hidden (view full) ---

2522/* If this is an MPLS offload request, verify that we are testing hardware MPLS features
2523 * instead of standard features for the netdev.
2524 */
2525#ifdef CONFIG_NET_MPLS_GSO
2526static netdev_features_t net_mpls_features(struct sk_buff *skb,
2527 netdev_features_t features,
2528 __be16 type)
2529{
2533 if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
2530 if (eth_p_mpls(type))
2534 features &= skb->dev->mpls_features;
2535
2536 return features;
2537}
2538#else
2539static netdev_features_t net_mpls_features(struct sk_buff *skb,
2540 netdev_features_t features,
2541 __be16 type)

--- 100 unchanged lines hidden (view full) ---

2642 *ret = rc;
2643 return skb;
2644}
2645
2646static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2647 netdev_features_t features)
2648{
2649 if (vlan_tx_tag_present(skb) &&
2531 features &= skb->dev->mpls_features;
2532
2533 return features;
2534}
2535#else
2536static netdev_features_t net_mpls_features(struct sk_buff *skb,
2537 netdev_features_t features,
2538 __be16 type)

--- 100 unchanged lines hidden (view full) ---
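In the net_mpls_features() hunk above, the open-coded ethertype comparison is replaced by the eth_p_mpls() helper pulled in through the new #include <net/mpls.h>. As a rough sketch (the authoritative definition lives in include/net/mpls.h), that helper is essentially:

static inline bool eth_p_mpls(__be16 eth_type)
{
	return eth_type == htons(ETH_P_MPLS_UC) ||
	       eth_type == htons(ETH_P_MPLS_MC);
}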

2639 *ret = rc;
2640 return skb;
2641}
2642
2643static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2644 netdev_features_t features)
2645{
2646 if (vlan_tx_tag_present(skb) &&
2650 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2651 skb = __vlan_put_tag(skb, skb->vlan_proto,
2652 vlan_tx_tag_get(skb));
2653 if (skb)
2654 skb->vlan_tci = 0;
2655 }
2647 !vlan_hw_offload_capable(features, skb->vlan_proto))
2648 skb = __vlan_hwaccel_push_inside(skb);
2656 return skb;
2657}
2658
2659static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2660{
2661 netdev_features_t features;
2662
2663 if (skb->next)

--- 1647 unchanged lines hidden (view full) ---

4311 &remsd->csd);
4312 remsd = next;
4313 }
4314 } else
4315#endif
4316 local_irq_enable();
4317}
4318
2649 return skb;
2650}
2651
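validate_xmit_vlan() now calls __vlan_hwaccel_push_inside() instead of open-coding __vlan_put_tag() followed by clearing skb->vlan_tci. A rough sketch of what that helper does, assuming the <linux/if_vlan.h> definition of this era (details may differ):

static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
	/* Move the hardware-accelerated tag into the packet payload ... */
	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
					vlan_tx_tag_get(skb));
	/* ... and clear the hw-accel tag; skb is NULL if insertion failed. */
	if (likely(skb))
		skb->vlan_tci = 0;
	return skb;
}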
2652static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2653{
2654 netdev_features_t features;
2655
2656 if (skb->next)

--- 1647 unchanged lines hidden (view full) ---

4304 &remsd->csd);
4305 remsd = next;
4306 }
4307 } else
4308#endif
4309 local_irq_enable();
4310}
4311
4312static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4313{
4314#ifdef CONFIG_RPS
4315 return sd->rps_ipi_list != NULL;
4316#else
4317 return false;
4318#endif
4319}
4320
4319static int process_backlog(struct napi_struct *napi, int quota)
4320{
4321 int work = 0;
4322 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4323
4321static int process_backlog(struct napi_struct *napi, int quota)
4322{
4323 int work = 0;
4324 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4325
4324#ifdef CONFIG_RPS
4325 /* Check if we have pending ipis; it's better to send them now,
4326 * rather than waiting for net_rx_action() to end.
4327 */
4326 /* Check if we have pending ipis; it's better to send them now,
4327 * rather than waiting for net_rx_action() to end.
4328 */
4328 if (sd->rps_ipi_list) {
4329 if (sd_has_rps_ipi_waiting(sd)) {
4329 local_irq_disable();
4330 net_rps_action_and_irq_enable(sd);
4331 }
4330 local_irq_disable();
4331 net_rps_action_and_irq_enable(sd);
4332 }
4332#endif
4333
4333 napi->weight = weight_p;
4334 local_irq_disable();
4335 while (1) {
4336 struct sk_buff *skb;
4337
4338 while ((skb = __skb_dequeue(&sd->process_queue))) {
4339 local_irq_enable();
4340 __netif_receive_skb(skb);

--- 10 unchanged lines hidden (view full) ---

4351 /*
4352 * Inline a custom version of __napi_complete().
4353 * Only the current cpu owns and manipulates this napi,
4354 * and NAPI_STATE_SCHED is the only possible flag set
4355 * on backlog.
4356 * We can use a plain write instead of clear_bit(),
4357 * and we don't need an smp_mb() memory barrier.
4358 */
4334 napi->weight = weight_p;
4335 local_irq_disable();
4336 while (1) {
4337 struct sk_buff *skb;
4338
4339 while ((skb = __skb_dequeue(&sd->process_queue))) {
4340 local_irq_enable();
4341 __netif_receive_skb(skb);

--- 10 unchanged lines hidden (view full) ---

4352 /*
4353 * Inline a custom version of __napi_complete().
4354 * Only the current cpu owns and manipulates this napi,
4355 * and NAPI_STATE_SCHED is the only possible flag set
4356 * on backlog.
4357 * We can use a plain write instead of clear_bit(),
4358 * and we don't need an smp_mb() memory barrier.
4359 */
4359 list_del(&napi->poll_list);
4360 napi->state = 0;
4361 rps_unlock(sd);
4362
4363 break;
4364 }
4365
4366 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4367 &sd->process_queue);
4368 rps_unlock(sd);
4369 }
4370 local_irq_enable();
4371
4372 return work;
4373}
4374
4375/**
4376 * __napi_schedule - schedule for receive
4377 * @n: entry to schedule
4378 *
4360 napi->state = 0;
4361 rps_unlock(sd);
4362
4363 break;
4364 }
4365
4366 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4367 &sd->process_queue);
4368 rps_unlock(sd);
4369 }
4370 local_irq_enable();
4371
4372 return work;
4373}
4374
4375/**
4376 * __napi_schedule - schedule for receive
4377 * @n: entry to schedule
4378 *
4379 * The entry's receive function will be scheduled to run
4379 * The entry's receive function will be scheduled to run.
4380 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
4380 */
4381void __napi_schedule(struct napi_struct *n)
4382{
4383 unsigned long flags;
4384
4385 local_irq_save(flags);
4386 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4387 local_irq_restore(flags);
4388}
4389EXPORT_SYMBOL(__napi_schedule);
4390
4381 */
4382void __napi_schedule(struct napi_struct *n)
4383{
4384 unsigned long flags;
4385
4386 local_irq_save(flags);
4387 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4388 local_irq_restore(flags);
4389}
4390EXPORT_SYMBOL(__napi_schedule);
4391
4392/**
4393 * __napi_schedule_irqoff - schedule for receive
4394 * @n: entry to schedule
4395 *
4396 * Variant of __napi_schedule() assuming hard irqs are masked
4397 */
4398void __napi_schedule_irqoff(struct napi_struct *n)
4399{
4400 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4401}
4402EXPORT_SYMBOL(__napi_schedule_irqoff);
4403
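__napi_schedule_irqoff() is new here: it skips the local_irq_save()/local_irq_restore() pair because the caller guarantees hard irqs are already masked, which is typically the case in a device interrupt handler. A minimal usage sketch under that assumption; my_priv and my_irq_handler are hypothetical driver names, not from this diff:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
};

/* Hypothetical MSI-X handler: hard irqs are masked here, so the irqoff
 * variant avoids a redundant save/restore of the interrupt flags. */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_priv *priv = data;

	if (napi_schedule_prep(&priv->napi))
		__napi_schedule_irqoff(&priv->napi);
	return IRQ_HANDLED;
}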
4391void __napi_complete(struct napi_struct *n)
4392{
4393 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4404void __napi_complete(struct napi_struct *n)
4405{
4406 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4394 BUG_ON(n->gro_list);
4395
4407
4396 list_del(&n->poll_list);
4408 list_del_init(&n->poll_list);
4397 smp_mb__before_atomic();
4398 clear_bit(NAPI_STATE_SCHED, &n->state);
4399}
4400EXPORT_SYMBOL(__napi_complete);
4401
4409 smp_mb__before_atomic();
4410 clear_bit(NAPI_STATE_SCHED, &n->state);
4411}
4412EXPORT_SYMBOL(__napi_complete);
4413
4402void napi_complete(struct napi_struct *n)
4414void napi_complete_done(struct napi_struct *n, int work_done)
4403{
4404 unsigned long flags;
4405
4406 /*
4407 * don't let napi dequeue from the cpu poll list
4408 * just in case it's running on a different cpu
4409 */
4410 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4411 return;
4412
4415{
4416 unsigned long flags;
4417
4418 /*
4419 * don't let napi dequeue from the cpu poll list
4420 * just in case it's running on a different cpu
4421 */
4422 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4423 return;
4424
4413 napi_gro_flush(n, false);
4414 local_irq_save(flags);
4415 __napi_complete(n);
4416 local_irq_restore(flags);
4425 if (n->gro_list) {
4426 unsigned long timeout = 0;
4427
4428 if (work_done)
4429 timeout = n->dev->gro_flush_timeout;
4430
4431 if (timeout)
4432 hrtimer_start(&n->timer, ns_to_ktime(timeout),
4433 HRTIMER_MODE_REL_PINNED);
4434 else
4435 napi_gro_flush(n, false);
4436 }
4437 if (likely(list_empty(&n->poll_list))) {
4438 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4439 } else {
4440 /* If n->poll_list is not empty, we need to mask irqs */
4441 local_irq_save(flags);
4442 __napi_complete(n);
4443 local_irq_restore(flags);
4444 }
4417}
4445}
4418EXPORT_SYMBOL(napi_complete);
4446EXPORT_SYMBOL(napi_complete_done);
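napi_complete() is reworked into napi_complete_done(), which takes the amount of work the poll actually did: with a non-zero dev->gro_flush_timeout and some work done, the GRO flush is deferred to the new hrtimer instead of happening immediately (napi_complete() itself appears to live on in netdevice.h as a thin wrapper passing work_done = 0). A sketch of how a driver's poll routine would feed it; my_poll() and my_clean_rx() are hypothetical:

#include <linux/netdevice.h>

/* Hypothetical RX cleanup, provided elsewhere by the driver; returns the
 * number of packets processed, at most "budget". */
static int my_clean_rx(struct napi_struct *napi, int budget);

static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_clean_rx(napi, budget);

	/* Only complete when the budget was not exhausted; passing the real
	 * work_done lets napi_complete_done() decide whether to flush GRO
	 * now or arm the gro_flush_timeout timer. */
	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}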
4419
4420/* must be called under rcu_read_lock(), as we dont take a reference */
4421struct napi_struct *napi_by_id(unsigned int napi_id)
4422{
4423 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4424 struct napi_struct *napi;
4425
4426 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)

--- 37 unchanged lines hidden (view full) ---

4464
4465 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4466 hlist_del_rcu(&napi->napi_hash_node);
4467
4468 spin_unlock(&napi_hash_lock);
4469}
4470EXPORT_SYMBOL_GPL(napi_hash_del);
4471
4447
4448/* must be called under rcu_read_lock(), as we dont take a reference */
4449struct napi_struct *napi_by_id(unsigned int napi_id)
4450{
4451 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4452 struct napi_struct *napi;
4453
4454 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)

--- 37 unchanged lines hidden (view full) ---

4492
4493 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4494 hlist_del_rcu(&napi->napi_hash_node);
4495
4496 spin_unlock(&napi_hash_lock);
4497}
4498EXPORT_SYMBOL_GPL(napi_hash_del);
4499
4500static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4501{
4502 struct napi_struct *napi;
4503
4504 napi = container_of(timer, struct napi_struct, timer);
4505 if (napi->gro_list)
4506 napi_schedule(napi);
4507
4508 return HRTIMER_NORESTART;
4509}
4510
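napi_watchdog() is the other half of the deferred flush: when napi_complete_done() armed napi->timer instead of flushing, the callback simply reschedules the NAPI instance so the held GRO packets go out on the next poll. The timeout is read from dev->gro_flush_timeout and passed through ns_to_ktime(), so it is expressed in nanoseconds; a minimal, hypothetical way a driver could opt in (upstream the field also appears to be exposed as a per-device sysfs attribute):

#include <linux/netdevice.h>
#include <linux/time.h>

/* Hypothetical: hold GRO packets for up to 20 microseconds after a busy
 * poll; 0, the default, keeps the old flush-on-completion behaviour. */
static void my_enable_gro_timeout(struct net_device *dev)
{
	dev->gro_flush_timeout = 20 * NSEC_PER_USEC;
}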
4472void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4473 int (*poll)(struct napi_struct *, int), int weight)
4474{
4475 INIT_LIST_HEAD(&napi->poll_list);
4511void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4512 int (*poll)(struct napi_struct *, int), int weight)
4513{
4514 INIT_LIST_HEAD(&napi->poll_list);
4515 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
4516 napi->timer.function = napi_watchdog;
4476 napi->gro_count = 0;
4477 napi->gro_list = NULL;
4478 napi->skb = NULL;
4479 napi->poll = poll;
4480 if (weight > NAPI_POLL_WEIGHT)
4481 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4482 weight, dev->name);
4483 napi->weight = weight;
4484 list_add(&napi->dev_list, &dev->napi_list);
4485 napi->dev = dev;
4486#ifdef CONFIG_NETPOLL
4487 spin_lock_init(&napi->poll_lock);
4488 napi->poll_owner = -1;
4489#endif
4490 set_bit(NAPI_STATE_SCHED, &napi->state);
4491}
4492EXPORT_SYMBOL(netif_napi_add);
4493
4517 napi->gro_count = 0;
4518 napi->gro_list = NULL;
4519 napi->skb = NULL;
4520 napi->poll = poll;
4521 if (weight > NAPI_POLL_WEIGHT)
4522 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4523 weight, dev->name);
4524 napi->weight = weight;
4525 list_add(&napi->dev_list, &dev->napi_list);
4526 napi->dev = dev;
4527#ifdef CONFIG_NETPOLL
4528 spin_lock_init(&napi->poll_lock);
4529 napi->poll_owner = -1;
4530#endif
4531 set_bit(NAPI_STATE_SCHED, &napi->state);
4532}
4533EXPORT_SYMBOL(netif_napi_add);
4534
4535void napi_disable(struct napi_struct *n)
4536{
4537 might_sleep();
4538 set_bit(NAPI_STATE_DISABLE, &n->state);
4539
4540 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4541 msleep(1);
4542
4543 hrtimer_cancel(&n->timer);
4544
4545 clear_bit(NAPI_STATE_DISABLE, &n->state);
4546}
4547EXPORT_SYMBOL(napi_disable);
4548
4494void netif_napi_del(struct napi_struct *napi)
4495{
4496 list_del_init(&napi->dev_list);
4497 napi_free_frags(napi);
4498
4499 kfree_skb_list(napi->gro_list);
4500 napi->gro_list = NULL;
4501 napi->gro_count = 0;
4502}
4503EXPORT_SYMBOL(netif_napi_del);
4504
4505static void net_rx_action(struct softirq_action *h)
4506{
4507 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4508 unsigned long time_limit = jiffies + 2;
4509 int budget = netdev_budget;
4549void netif_napi_del(struct napi_struct *napi)
4550{
4551 list_del_init(&napi->dev_list);
4552 napi_free_frags(napi);
4553
4554 kfree_skb_list(napi->gro_list);
4555 napi->gro_list = NULL;
4556 napi->gro_count = 0;
4557}
4558EXPORT_SYMBOL(netif_napi_del);
4559
4560static void net_rx_action(struct softirq_action *h)
4561{
4562 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4563 unsigned long time_limit = jiffies + 2;
4564 int budget = netdev_budget;
4565 LIST_HEAD(list);
4566 LIST_HEAD(repoll);
4510 void *have;
4511
4512 local_irq_disable();
4567 void *have;
4568
4569 local_irq_disable();
4570 list_splice_init(&sd->poll_list, &list);
4571 local_irq_enable();
4513
4572
4514 while (!list_empty(&sd->poll_list)) {
4573 while (!list_empty(&list)) {
4515 struct napi_struct *n;
4516 int work, weight;
4517
4574 struct napi_struct *n;
4575 int work, weight;
4576
4518 /* If softirq window is exhuasted then punt.
4577 /* If softirq window is exhausted then punt.
4519 * Allow this to run for 2 jiffies, which allows
4520 * an average latency of 1.5/HZ.
4521 */
4522 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
4523 goto softnet_break;
4524
4578 * Allow this to run for 2 jiffies, which allows
4579 * an average latency of 1.5/HZ.
4580 */
4581 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
4582 goto softnet_break;
4583
4525 local_irq_enable();
4526
4584
4527 /* Even though interrupts have been re-enabled, this
4528 * access is safe because interrupts can only add new
4529 * entries to the tail of this list, and only ->poll()
4530 * calls can remove this head entry from the list.
4531 */
4532 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
4585 n = list_first_entry(&list, struct napi_struct, poll_list);
4586 list_del_init(&n->poll_list);
4533
4534 have = netpoll_poll_lock(n);
4535
4536 weight = n->weight;
4537
4538 /* This NAPI_STATE_SCHED test is for avoiding a race
4539 * with netpoll's poll_napi(). Only the entity which
4540 * obtains the lock and sees NAPI_STATE_SCHED set will

--- 5 unchanged lines hidden (view full) ---

4546 work = n->poll(n, weight);
4547 trace_napi_poll(n);
4548 }
4549
4550 WARN_ON_ONCE(work > weight);
4551
4552 budget -= work;
4553
4587
4588 have = netpoll_poll_lock(n);
4589
4590 weight = n->weight;
4591
4592 /* This NAPI_STATE_SCHED test is for avoiding a race
4593 * with netpoll's poll_napi(). Only the entity which
4594 * obtains the lock and sees NAPI_STATE_SCHED set will

--- 5 unchanged lines hidden (view full) ---

4600 work = n->poll(n, weight);
4601 trace_napi_poll(n);
4602 }
4603
4604 WARN_ON_ONCE(work > weight);
4605
4606 budget -= work;
4607
4554 local_irq_disable();
4555
4556 /* Drivers must not modify the NAPI state if they
4557 * consume the entire weight. In such cases this code
4558 * still "owns" the NAPI instance and therefore can
4559 * move the instance around on the list at-will.
4560 */
4561 if (unlikely(work == weight)) {
4562 if (unlikely(napi_disable_pending(n))) {
4608 /* Drivers must not modify the NAPI state if they
4609 * consume the entire weight. In such cases this code
4610 * still "owns" the NAPI instance and therefore can
4611 * move the instance around on the list at-will.
4612 */
4613 if (unlikely(work == weight)) {
4614 if (unlikely(napi_disable_pending(n))) {
4563 local_irq_enable();
4564 napi_complete(n);
4615 napi_complete(n);
4565 local_irq_disable();
4566 } else {
4567 if (n->gro_list) {
4568 /* flush too old packets
4569 * If HZ < 1000, flush all packets.
4570 */
4616 } else {
4617 if (n->gro_list) {
4618 /* flush too old packets
4619 * If HZ < 1000, flush all packets.
4620 */
4571 local_irq_enable();
4572 napi_gro_flush(n, HZ >= 1000);
4621 napi_gro_flush(n, HZ >= 1000);
4573 local_irq_disable();
4574 }
4622 }
4575 list_move_tail(&n->poll_list, &sd->poll_list);
4623 list_add_tail(&n->poll_list, &repoll);
4576 }
4577 }
4578
4579 netpoll_poll_unlock(have);
4580 }
4624 }
4625 }
4626
4627 netpoll_poll_unlock(have);
4628 }
4629
4630 if (!sd_has_rps_ipi_waiting(sd) &&
4631 list_empty(&list) &&
4632 list_empty(&repoll))
4633 return;
4581out:
4634out:
4635 local_irq_disable();
4636
4637 list_splice_tail_init(&sd->poll_list, &list);
4638 list_splice_tail(&repoll, &list);
4639 list_splice(&list, &sd->poll_list);
4640 if (!list_empty(&sd->poll_list))
4641 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4642
4582 net_rps_action_and_irq_enable(sd);
4583
4584 return;
4585
4586softnet_break:
4587 sd->time_squeeze++;
4643 net_rps_action_and_irq_enable(sd);
4644
4645 return;
4646
4647softnet_break:
4648 sd->time_squeeze++;
4588 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4589 goto out;
4590}
4591
4592struct netdev_adjacent {
4593 struct net_device *dev;
4594
4595 /* upper master flag, there can only be one master device per list */
4596 bool master;

--- 1184 unchanged lines hidden (view full) ---

5781/**
5782 * dev_get_phys_port_id - Get device physical port ID
5783 * @dev: device
5784 * @ppid: port ID
5785 *
5786 * Get device physical port ID
5787 */
5788int dev_get_phys_port_id(struct net_device *dev,
4649 goto out;
4650}
4651
4652struct netdev_adjacent {
4653 struct net_device *dev;
4654
4655 /* upper master flag, there can only be one master device per list */
4656 bool master;

--- 1184 unchanged lines hidden (view full) ---

5841/**
5842 * dev_get_phys_port_id - Get device physical port ID
5843 * @dev: device
5844 * @ppid: port ID
5845 *
5846 * Get device physical port ID
5847 */
5848int dev_get_phys_port_id(struct net_device *dev,
5789 struct netdev_phys_port_id *ppid)
5849 struct netdev_phys_item_id *ppid)
5790{
5791 const struct net_device_ops *ops = dev->netdev_ops;
5792
5793 if (!ops->ndo_get_phys_port_id)
5794 return -EOPNOTSUPP;
5795 return ops->ndo_get_phys_port_id(dev, ppid);
5796}
5797EXPORT_SYMBOL(dev_get_phys_port_id);

--- 1547 unchanged lines hidden ---
5850{
5851 const struct net_device_ops *ops = dev->netdev_ops;
5852
5853 if (!ops->ndo_get_phys_port_id)
5854 return -EOPNOTSUPP;
5855 return ops->ndo_get_phys_port_id(dev, ppid);
5856}
5857EXPORT_SYMBOL(dev_get_phys_port_id);
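The only change to dev_get_phys_port_id() is the parameter type: struct netdev_phys_port_id becomes the more generic struct netdev_phys_item_id, which appears to be the point of the newer commit named in the header. For reference, a sketch of the renamed structure roughly as it reads in include/linux/netdevice.h:

#define MAX_PHYS_ITEM_ID_LEN 32

/* Opaque physical item identifier plus the number of valid bytes in it. */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};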

--- 1547 unchanged lines hidden ---