xref: /openbmc/linux/include/net/busy_poll.h (revision 8b80cda536ea9bceec0364e897868a30ee13b992)
1076bb0c8SEliezer Tamir /*
2*8b80cda5SEliezer Tamir  * net busy poll support
3076bb0c8SEliezer Tamir  * Copyright(c) 2013 Intel Corporation.
4076bb0c8SEliezer Tamir  *
5076bb0c8SEliezer Tamir  * This program is free software; you can redistribute it and/or modify it
6076bb0c8SEliezer Tamir  * under the terms and conditions of the GNU General Public License,
7076bb0c8SEliezer Tamir  * version 2, as published by the Free Software Foundation.
8076bb0c8SEliezer Tamir  *
9076bb0c8SEliezer Tamir  * This program is distributed in the hope it will be useful, but WITHOUT
10076bb0c8SEliezer Tamir  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11076bb0c8SEliezer Tamir  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12076bb0c8SEliezer Tamir  * more details.
13076bb0c8SEliezer Tamir  *
14076bb0c8SEliezer Tamir  * You should have received a copy of the GNU General Public License along with
15076bb0c8SEliezer Tamir  * this program; if not, write to the Free Software Foundation, Inc.,
16076bb0c8SEliezer Tamir  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17076bb0c8SEliezer Tamir  *
18076bb0c8SEliezer Tamir  * Author: Eliezer Tamir
19076bb0c8SEliezer Tamir  *
20076bb0c8SEliezer Tamir  * Contact Information:
21076bb0c8SEliezer Tamir  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22076bb0c8SEliezer Tamir  */
23076bb0c8SEliezer Tamir 
24*8b80cda5SEliezer Tamir #ifndef _LINUX_NET_BUSY_POLL_H
25*8b80cda5SEliezer Tamir #define _LINUX_NET_BUSY_POLL_H
26076bb0c8SEliezer Tamir 
27076bb0c8SEliezer Tamir #include <linux/netdevice.h>
28076bb0c8SEliezer Tamir #include <net/ip.h>
29076bb0c8SEliezer Tamir 
30076bb0c8SEliezer Tamir #ifdef CONFIG_NET_LL_RX_POLL
31076bb0c8SEliezer Tamir 
struct napi_struct;
/* Busy-poll time budgets, in microseconds (0 disables busy polling):
 * sysctl_net_ll_poll - global budget, used by poll/select via
 *                      busy_loop_end_time() below.
 * sysctl_net_ll_read - not referenced in this header; presumably the
 *                      per-read default budget -- confirm at definition site.
 */
extern unsigned int sysctl_net_ll_read __read_mostly;
extern unsigned int sysctl_net_ll_poll __read_mostly;

/* return values from the driver busy-poll callback (ndo_busy_poll) */
#define LL_FLUSH_FAILED		-1
#define LL_FLUSH_BUSY		-2
40076bb0c8SEliezer Tamir static inline bool net_busy_loop_on(void)
41076bb0c8SEliezer Tamir {
42076bb0c8SEliezer Tamir 	return sysctl_net_ll_poll;
43076bb0c8SEliezer Tamir }
44076bb0c8SEliezer Tamir 
45076bb0c8SEliezer Tamir /* a wrapper to make debug_smp_processor_id() happy
46076bb0c8SEliezer Tamir  * we can use sched_clock() because we don't care much about precision
47076bb0c8SEliezer Tamir  * we only care that the average is bounded
48076bb0c8SEliezer Tamir  */
#ifdef CONFIG_DEBUG_PREEMPT
/* Approximate microsecond clock: sched_clock() yields nanoseconds, and
 * ">> 10" divides by 1024 (~1000) -- close enough, per the precision note
 * above.  With CONFIG_DEBUG_PREEMPT, calling sched_clock() preemptibly
 * would trip debug_smp_processor_id(), so wrap it in notrace preempt
 * disable/enable (no resched: this is a hot, short critical section).
 */
static inline u64 busy_loop_us_clock(void)
{
	u64 rc;

	preempt_disable_notrace();
	rc = sched_clock();
	preempt_enable_no_resched_notrace();

	return rc >> 10;
}
#else /* CONFIG_DEBUG_PREEMPT */
/* Same approximate ns -> us conversion, no preemption wrapper needed. */
static inline u64 busy_loop_us_clock(void)
{
	return sched_clock() >> 10;
}
#endif /* CONFIG_DEBUG_PREEMPT */
66076bb0c8SEliezer Tamir 
67076bb0c8SEliezer Tamir static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
68076bb0c8SEliezer Tamir {
69076bb0c8SEliezer Tamir 	return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
70076bb0c8SEliezer Tamir }
71076bb0c8SEliezer Tamir 
72076bb0c8SEliezer Tamir /* in poll/select we use the global sysctl_net_ll_poll value */
73076bb0c8SEliezer Tamir static inline unsigned long busy_loop_end_time(void)
74076bb0c8SEliezer Tamir {
75076bb0c8SEliezer Tamir 	return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll);
76076bb0c8SEliezer Tamir }
77076bb0c8SEliezer Tamir 
78076bb0c8SEliezer Tamir static inline bool sk_can_busy_loop(struct sock *sk)
79076bb0c8SEliezer Tamir {
80076bb0c8SEliezer Tamir 	return sk->sk_ll_usec && sk->sk_napi_id &&
81076bb0c8SEliezer Tamir 	       !need_resched() && !signal_pending(current);
82076bb0c8SEliezer Tamir }
83076bb0c8SEliezer Tamir 
84076bb0c8SEliezer Tamir 
85076bb0c8SEliezer Tamir static inline bool busy_loop_timeout(unsigned long end_time)
86076bb0c8SEliezer Tamir {
87076bb0c8SEliezer Tamir 	unsigned long now = busy_loop_us_clock();
88076bb0c8SEliezer Tamir 
89076bb0c8SEliezer Tamir 	return time_after(now, end_time);
90076bb0c8SEliezer Tamir }
91076bb0c8SEliezer Tamir 
92076bb0c8SEliezer Tamir /* when used in sock_poll() nonblock is known at compile time to be true
93076bb0c8SEliezer Tamir  * so the loop and end_time will be optimized out
94076bb0c8SEliezer Tamir  */
/* when used in sock_poll() nonblock is known at compile time to be true
 * so the loop and end_time will be optimized out
 *
 * Busy-poll the NAPI context this socket last received on: repeatedly ask
 * the driver (ndo_busy_poll) to process rx until data lands on
 * sk->sk_receive_queue, the scheduler wants the CPU, the time budget
 * expires, or the driver reports a permanent failure.
 * Returns true iff the receive queue is non-empty on exit.
 */
static inline bool sk_busy_loop(struct sock *sk, int nonblock)
{
	/* nonblocking callers get no budget: they poll at most once */
	unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
	const struct net_device_ops *ops;
	struct napi_struct *napi;
	int rc = false;		/* int, not bool: reused for the driver rc below */

	/*
	 * rcu read lock for napi hash
	 * bh so we don't race with net_rx_action
	 */
	rcu_read_lock_bh();

	/* sk_napi_id was stamped by sk_mark_napi_id() on a prior rx */
	napi = napi_by_id(sk->sk_napi_id);
	if (!napi)
		goto out;

	ops = napi->dev->netdev_ops;
	if (!ops->ndo_busy_poll)
		goto out;	/* driver has no busy-poll support */

	do {
		/* >0: packets processed; LL_FLUSH_BUSY/-FAILED: see defines */
		rc = ops->ndo_busy_poll(napi);

		if (rc == LL_FLUSH_FAILED)
			break; /* permanent failure */

		if (rc > 0)
			/* local bh are disabled so it is ok to use _BH */
			NET_ADD_STATS_BH(sock_net(sk),
					 LINUX_MIB_LOWLATENCYRXPACKETS, rc);

	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
		 !need_resched() && !busy_loop_timeout(end_time));

	/* final answer: did anything arrive on this socket? */
	rc = !skb_queue_empty(&sk->sk_receive_queue);
out:
	rcu_read_unlock_bh();
	return rc;
}
135076bb0c8SEliezer Tamir 
/* used in the NIC receive handler to mark the skb with the NAPI context
 * it arrived on, so the owning socket can later busy-poll that context
 */
static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
	skb->napi_id = napi->napi_id;
}
142076bb0c8SEliezer Tamir 
/* used in the protocol handler to propagate the napi_id to the socket;
 * sk_busy_loop() reads it back via napi_by_id()
 */
static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_napi_id = skb->napi_id;
}
148076bb0c8SEliezer Tamir 
149076bb0c8SEliezer Tamir #else /* CONFIG_NET_LL_RX_POLL */
150076bb0c8SEliezer Tamir static inline unsigned long net_busy_loop_on(void)
151076bb0c8SEliezer Tamir {
152076bb0c8SEliezer Tamir 	return 0;
153076bb0c8SEliezer Tamir }
154076bb0c8SEliezer Tamir 
/* stub: no deadline when busy polling is compiled out */
static inline unsigned long busy_loop_end_time(void)
{
	return 0;
}
159076bb0c8SEliezer Tamir 
/* stub: busy polling is never allowed when compiled out */
static inline bool sk_can_busy_loop(struct sock *sk)
{
	return false;
}
164076bb0c8SEliezer Tamir 
165076bb0c8SEliezer Tamir static inline bool sk_busy_poll(struct sock *sk, int nonblock)
166076bb0c8SEliezer Tamir {
167076bb0c8SEliezer Tamir 	return false;
168076bb0c8SEliezer Tamir }
169076bb0c8SEliezer Tamir 
/* stub: no napi_id bookkeeping when busy polling is compiled out */
static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
}
174076bb0c8SEliezer Tamir 
/* stub: no napi_id propagation when busy polling is compiled out */
static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
{
}
178076bb0c8SEliezer Tamir 
/* stub: always "timed out" so any caller's poll loop exits immediately */
static inline bool busy_loop_timeout(unsigned long end_time)
{
	return true;
}
183076bb0c8SEliezer Tamir 
184076bb0c8SEliezer Tamir #endif /* CONFIG_NET_LL_RX_POLL */
185*8b80cda5SEliezer Tamir #endif /* _LINUX_NET_BUSY_POLL_H */
186