xref: /openbmc/linux/include/net/busy_poll.h (revision 076bb0c82a44fbe46fe2c8527a5b5b64b69f679d)
/*
 * Low Latency Sockets
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_LL_POLL_H
#define _LINUX_NET_LL_POLL_H

#include <linux/netdevice.h>
#include <net/ip.h>

#ifdef CONFIG_NET_LL_RX_POLL

struct napi_struct;
extern unsigned int sysctl_net_ll_read __read_mostly;
extern unsigned int sysctl_net_ll_poll __read_mostly;

/* return values from ndo_ll_poll */
#define LL_FLUSH_FAILED		-1
#define LL_FLUSH_BUSY		-2

static inline bool net_busy_loop_on(void)
{
	return sysctl_net_ll_poll;
}

/* a wrapper to make debug_smp_processor_id() happy;
 * we can use sched_clock() because we don't care much about precision,
 * only that the average is bounded.  The >> 10 approximates a
 * nanoseconds-to-microseconds conversion (divide by 1024 instead of 1000).
 */
#ifdef CONFIG_DEBUG_PREEMPT
static inline u64 busy_loop_us_clock(void)
{
	u64 rc;

	preempt_disable_notrace();
	rc = sched_clock();
	preempt_enable_no_resched_notrace();

	return rc >> 10;
}
#else /* CONFIG_DEBUG_PREEMPT */
static inline u64 busy_loop_us_clock(void)
{
	return sched_clock() >> 10;
}
#endif /* CONFIG_DEBUG_PREEMPT */

static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
{
	return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
}

/* in poll/select we use the global sysctl_net_ll_poll value */
static inline unsigned long busy_loop_end_time(void)
{
	return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll);
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
	return sk->sk_ll_usec && sk->sk_napi_id &&
	       !need_resched() && !signal_pending(current);
}
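
/*
 * Example (illustrative sketch, not part of this header): a protocol's
 * receive path can gate busy polling on sk_can_busy_loop() before it
 * commits to sleeping, roughly the way the TCP receive path does:
 *
 *	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
 *	    (sk->sk_state == TCP_ESTABLISHED))
 *		sk_busy_loop(sk, nonblock);
 */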

static inline bool busy_loop_timeout(unsigned long end_time)
{
	unsigned long now = busy_loop_us_clock();

	return time_after(now, end_time);
}
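
/*
 * Example (illustrative sketch): a caller can bound its spinning with the
 * two helpers above; scan_for_events() is a hypothetical stand-in for
 * whatever readiness check the caller actually performs:
 *
 *	unsigned long end_time = busy_loop_end_time();
 *
 *	while (net_busy_loop_on() && !busy_loop_timeout(end_time)) {
 *		if (scan_for_events())
 *			break;
 *		cpu_relax();
 *	}
 */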

/* When used from sock_poll(), nonblock is known at compile time to be true,
 * so the loop and end_time will be optimized out.
 */
static inline bool sk_busy_loop(struct sock *sk, int nonblock)
{
	unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
	const struct net_device_ops *ops;
	struct napi_struct *napi;
	int rc = false;

	/*
	 * rcu read lock for the napi hash;
	 * bh disabled so we don't race with net_rx_action
	 */
	rcu_read_lock_bh();

	napi = napi_by_id(sk->sk_napi_id);
	if (!napi)
		goto out;

	ops = napi->dev->netdev_ops;
	if (!ops->ndo_ll_poll)
		goto out;

	do {
		rc = ops->ndo_ll_poll(napi);

		if (rc == LL_FLUSH_FAILED)
			break; /* permanent failure */

		if (rc > 0)
			/* local BHs are disabled, so it is ok to use _BH */
			NET_ADD_STATS_BH(sock_net(sk),
					 LINUX_MIB_LOWLATENCYRXPACKETS, rc);

	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
		 !need_resched() && !busy_loop_timeout(end_time));

	/* report whether busy polling left data on the receive queue */
	rc = !skb_queue_empty(&sk->sk_receive_queue);
out:
	rcu_read_unlock_bh();
	return rc;
}
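
/*
 * Example (illustrative sketch): a driver hooks busy polling up through
 * ndo_ll_poll.  mynic_ll_poll(), mynic_q_vector, mynic_lock() and
 * mynic_clean_rx_irq() are hypothetical names; the general shape is to
 * refuse with LL_FLUSH_BUSY when the NAPI context is already active and
 * otherwise return how many packets were cleaned:
 *
 *	static int mynic_ll_poll(struct napi_struct *napi)
 *	{
 *		struct mynic_q_vector *q_vector =
 *			container_of(napi, struct mynic_q_vector, napi);
 *		int cleaned;
 *
 *		if (!mynic_lock(q_vector))
 *			return LL_FLUSH_BUSY;
 *
 *		cleaned = mynic_clean_rx_irq(q_vector);
 *		mynic_unlock(q_vector);
 *
 *		return cleaned;
 *	}
 */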

/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
{
	skb->napi_id = napi->napi_id;
}
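
/*
 * Example (illustrative sketch): the driver's RX clean routine tags each
 * skb with the NAPI context it arrived on before handing it up the stack;
 * q_vector is a hypothetical per-queue structure:
 *
 *	skb_mark_ll(skb, &q_vector->napi);
 *	napi_gro_receive(&q_vector->napi, skb);
 */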

/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_napi_id = skb->napi_id;
}
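
/*
 * Example (illustrative sketch): the protocol's input path copies the id
 * onto the socket once the owning socket is known, so that a later
 * sk_busy_loop() can find the right NAPI context to poll:
 *
 *	sk_mark_ll(sk, skb);
 */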

#else /* CONFIG_NET_LL_RX_POLL */
static inline bool net_busy_loop_on(void)
{
	return false;
}

static inline unsigned long busy_loop_end_time(void)
{
	return 0;
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
	return false;
}

static inline bool sk_busy_loop(struct sock *sk, int nonblock)
{
	return false;
}

static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
{
}

static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
{
}

static inline bool busy_loop_timeout(unsigned long end_time)
{
	return true;
}

#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* _LINUX_NET_LL_POLL_H */