dev.c (old: 3a37471551cd3b287ce7f02ed25bcf8ec37a191d, new: bf74b20d00b13919db7ae5d1015636e76f56f6ae)
@@ -1,8 +1,8 @@
 /*
  * NET3 Protocol independent device support routines.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *

--- 1290 unchanged lines hidden ---

@@ -1299,16 +1299,17 @@
  * a device wants to inform the rest of the network about some sort of
  * reconfiguration such as a failover event or virtual machine
  * migration.
  */
 void netdev_notify_peers(struct net_device *dev)
 {
 	rtnl_lock();
 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
 	rtnl_unlock();
 }
 EXPORT_SYMBOL(netdev_notify_peers);

 static int __dev_open(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int ret;
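The added NETDEV_RESEND_IGMP notifier makes the multicast side match the existing gratuitous ARP/ND announcement: IGMP/MLD membership reports are resent so switches relearn multicast forwarding state along with unicast state. A minimal usage sketch, with a hypothetical driver callback name:

/* Hypothetical hook a paravirtual NIC driver might run once a
 * failover or live migration is detected; dev is its registered
 * net_device.
 */
static void example_port_migrated(struct net_device *dev)
{
	/* Fires NETDEV_NOTIFY_PEERS (gratuitous ARP/ND) and, with this
	 * change, NETDEV_RESEND_IGMP (re-announces multicast
	 * memberships). It takes rtnl_lock() itself, so the caller
	 * must not already hold RTNL.
	 */
	netdev_notify_peers(dev);
}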

--- 3739 unchanged lines hidden ---

@@ -5054,80 +5055,77 @@
 	netpoll_poll_unlock(have_poll_lock);
 	if (rc == BUSY_POLL_BUDGET)
 		__napi_schedule(napi);
 	local_bh_enable();
 	if (local_softirq_pending())
 		do_softirq();
 }

-bool sk_busy_loop(struct sock *sk, int nonblock)
+void napi_busy_loop(unsigned int napi_id,
+		    bool (*loop_end)(void *, unsigned long),
+		    void *loop_end_arg)
 {
-	unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
+	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
 	int (*napi_poll)(struct napi_struct *napi, int budget);
 	void *have_poll_lock = NULL;
 	struct napi_struct *napi;
-	int rc;

 restart:
-	rc = false;
 	napi_poll = NULL;

 	rcu_read_lock();

-	napi = napi_by_id(sk->sk_napi_id);
+	napi = napi_by_id(napi_id);
 	if (!napi)
 		goto out;

 	preempt_disable();
 	for (;;) {
-		rc = 0;
+		int work = 0;
+
 		local_bh_disable();
 		if (!napi_poll) {
 			unsigned long val = READ_ONCE(napi->state);

 			/* If multiple threads are competing for this napi,
 			 * we avoid dirtying napi->state as much as we can.
 			 */
 			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
 				   NAPIF_STATE_IN_BUSY_POLL))
 				goto count;
 			if (cmpxchg(&napi->state, val,
 				    val | NAPIF_STATE_IN_BUSY_POLL |
 					  NAPIF_STATE_SCHED) != val)
 				goto count;
 			have_poll_lock = netpoll_poll_lock(napi);
 			napi_poll = napi->poll;
 		}
-		rc = napi_poll(napi, BUSY_POLL_BUDGET);
-		trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
+		work = napi_poll(napi, BUSY_POLL_BUDGET);
+		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
 count:
-		if (rc > 0)
-			__NET_ADD_STATS(sock_net(sk),
-					LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+		if (work > 0)
+			__NET_ADD_STATS(dev_net(napi->dev),
+					LINUX_MIB_BUSYPOLLRXPACKETS, work);
 		local_bh_enable();

-		if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) ||
-		    busy_loop_timeout(end_time))
+		if (!loop_end || loop_end(loop_end_arg, start_time))
 			break;

 		if (unlikely(need_resched())) {
 			if (napi_poll)
 				busy_poll_stop(napi, have_poll_lock);
 			preempt_enable();
 			rcu_read_unlock();
 			cond_resched();
-			rc = !skb_queue_empty(&sk->sk_receive_queue);
-			if (rc || busy_loop_timeout(end_time))
-				return rc;
+			if (loop_end(loop_end_arg, start_time))
+				return;
 			goto restart;
 		}
 		cpu_relax();
 	}
 	if (napi_poll)
 		busy_poll_stop(napi, have_poll_lock);
 	preempt_enable();
-	rc = !skb_queue_empty(&sk->sk_receive_queue);
 out:
 	rcu_read_unlock();
-	return rc;
 }
-EXPORT_SYMBOL(sk_busy_loop);
+EXPORT_SYMBOL(napi_busy_loop);
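This refactor decouples busy polling from struct sock: the caller now supplies a napi_id plus a loop_end callback that decides when to stop. For context, a sketch of how the socket path can express the old sk_busy_loop() semantics on top of the new interface; the helper names (sk_busy_loop_end, sk_busy_loop_timeout) follow the companion busy_poll.h changes in the same series and should be read as assumptions here, not as part of this diff:

bool sk_busy_loop_end(void *p, unsigned long start_time)
{
	struct sock *sk = p;

	/* Stop once data is queued or the socket's busy-poll time
	 * budget has expired.
	 */
	return !skb_queue_empty(&sk->sk_receive_queue) ||
	       sk_busy_loop_timeout(sk, start_time);
}

static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

	/* A NULL loop_end makes napi_busy_loop() poll exactly once,
	 * preserving the old nonblocking behaviour.
	 */
	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end,
			       sk);
}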
@@ -5134,23 +5132,23 @@

 #endif /* CONFIG_NET_RX_BUSY_POLL */

 static void napi_hash_add(struct napi_struct *napi)
 {
 	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
 	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
 		return;

 	spin_lock(&napi_hash_lock);

-	/* 0..NR_CPUS+1 range is reserved for sender_cpu use */
+	/* 0..NR_CPUS range is reserved for sender_cpu use */
 	do {
-		if (unlikely(++napi_gen_id < NR_CPUS + 1))
-			napi_gen_id = NR_CPUS + 1;
+		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
+			napi_gen_id = MIN_NAPI_ID;
 	} while (napi_by_id(napi_gen_id));
 	napi->napi_id = napi_gen_id;

 	hlist_add_head_rcu(&napi->napi_hash_node,
 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

 	spin_unlock(&napi_hash_lock);
 }
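Replacing the open-coded NR_CPUS + 1 with MIN_NAPI_ID lets callers such as the sk_busy_loop() sketch above share the "is this a real NAPI id" threshold. A sketch of the assumed definition (expected to live in a shared header such as busy_poll.h; the validity helper is purely illustrative):

/* IDs 0..NR_CPUS are reserved for skb->sender_cpu, so genuine NAPI
 * ids start just above that range.
 */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))

/* Illustrative helper, not part of this diff. */
static inline bool example_napi_id_valid(unsigned int napi_id)
{
	return napi_id >= MIN_NAPI_ID;
}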

--- 3271 unchanged lines hidden ---