xref: /openbmc/linux/include/net/busy_poll.h (revision 7c951cafc0cb2e575f1d58677b95ac387ac0a5bd)
1a61127c2SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
2076bb0c8SEliezer Tamir /*
38b80cda5SEliezer Tamir  * net busy poll support
4076bb0c8SEliezer Tamir  * Copyright(c) 2013 Intel Corporation.
5076bb0c8SEliezer Tamir  *
6076bb0c8SEliezer Tamir  * Author: Eliezer Tamir
7076bb0c8SEliezer Tamir  *
8076bb0c8SEliezer Tamir  * Contact Information:
9076bb0c8SEliezer Tamir  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
10076bb0c8SEliezer Tamir  */
11076bb0c8SEliezer Tamir 
128b80cda5SEliezer Tamir #ifndef _LINUX_NET_BUSY_POLL_H
138b80cda5SEliezer Tamir #define _LINUX_NET_BUSY_POLL_H
14076bb0c8SEliezer Tamir 
15076bb0c8SEliezer Tamir #include <linux/netdevice.h>
16e6017571SIngo Molnar #include <linux/sched/clock.h>
17174cd4b1SIngo Molnar #include <linux/sched/signal.h>
18076bb0c8SEliezer Tamir #include <net/ip.h>
19076bb0c8SEliezer Tamir 
20545cd5e5SAlexander Duyck /*		0 - Reserved to indicate value not set
21545cd5e5SAlexander Duyck  *     1..NR_CPUS - Reserved for sender_cpu
22545cd5e5SAlexander Duyck  *  NR_CPUS+1..~0 - Region available for NAPI IDs
23545cd5e5SAlexander Duyck  */
24545cd5e5SAlexander Duyck #define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
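/* with NR_CPUS == 64, for example, IDs 1..64 are reserved for
 * sender_cpu and the first valid NAPI ID is 65
 */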
25545cd5e5SAlexander Duyck 
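/* default packet budget for one busy-poll pass; a non-zero
 * sk->sk_busy_poll_budget (see sk_busy_loop() below) takes precedence
 */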
26*7c951cafSBjörn Töpel #define BUSY_POLL_BUDGET 8
27*7c951cafSBjörn Töpel 
28e4dde412SDaniel Borkmann #ifdef CONFIG_NET_RX_BUSY_POLL
29e4dde412SDaniel Borkmann 
30e4dde412SDaniel Borkmann struct napi_struct;
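/* busy-poll timeouts in microseconds, exposed as the net.core.busy_read
 * and net.core.busy_poll sysctls respectively
 */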
31e4dde412SDaniel Borkmann extern unsigned int sysctl_net_busy_read __read_mostly;
32e4dde412SDaniel Borkmann extern unsigned int sysctl_net_busy_poll __read_mostly;
33e4dde412SDaniel Borkmann 
34076bb0c8SEliezer Tamir static inline bool net_busy_loop_on(void)
35076bb0c8SEliezer Tamir {
3664b0dc51SEliezer Tamir 	return sysctl_net_busy_poll;
37076bb0c8SEliezer Tamir }
38076bb0c8SEliezer Tamir 
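/* a socket may busy poll only if it has a per-socket busy-poll budget
 * (sk_ll_usec, typically set via the SO_BUSY_POLL sockopt or inherited
 * from net.core.busy_read) and the task has no signal pending
 */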
3921cb84c4SEric Dumazet static inline bool sk_can_busy_loop(const struct sock *sk)
40076bb0c8SEliezer Tamir {
41545cd5e5SAlexander Duyck 	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
42076bb0c8SEliezer Tamir }
43076bb0c8SEliezer Tamir 
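/* sk_busy_loop_end() (defined in net/core/dev.c) is the loop_end
 * callback used by sk_busy_loop(): broadly, it returns true once the
 * socket's receive queue is non-empty or sk_busy_loop_timeout() fires.
 *
 * napi_busy_loop() spins on the NAPI context identified by @napi_id,
 * processing up to @budget packets per pass and calling
 * @loop_end(@loop_end_arg, start_time) between passes to decide when
 * to stop; a NULL @loop_end means a single pass.  @prefer_busy_poll
 * hints that busy polling should be favoured over softirq processing
 * for this NAPI context.
 */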
447db6b048SSridhar Samudrala bool sk_busy_loop_end(void *p, unsigned long start_time);
457db6b048SSridhar Samudrala 
467db6b048SSridhar Samudrala void napi_busy_loop(unsigned int napi_id,
477db6b048SSridhar Samudrala 		    bool (*loop_end)(void *, unsigned long),
48*7c951cafSBjörn Töpel 		    void *loop_end_arg, bool prefer_busy_poll, u16 budget);
49076bb0c8SEliezer Tamir 
50e0d1095aSCong Wang #else /* CONFIG_NET_RX_BUSY_POLL */
51076bb0c8SEliezer Tamir static inline bool net_busy_loop_on(void)
52076bb0c8SEliezer Tamir {
53076bb0c8SEliezer Tamir 	return 0;
54076bb0c8SEliezer Tamir }
55076bb0c8SEliezer Tamir 
56076bb0c8SEliezer Tamir static inline bool sk_can_busy_loop(const struct sock *sk)
57076bb0c8SEliezer Tamir {
58076bb0c8SEliezer Tamir 	return false;
59076bb0c8SEliezer Tamir }
60076bb0c8SEliezer Tamir 
61e0d1095aSCong Wang #endif /* CONFIG_NET_RX_BUSY_POLL */
62e68b6e50SEric Dumazet 
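/* busy-poll timestamps are in roughly-microsecond units: local_clock()
 * returns nanoseconds and >> 10 divides by 1024, which is close enough
 * to 1000 for timeout comparisons
 */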
6337056719SAlexander Duyck static inline unsigned long busy_loop_current_time(void)
6437056719SAlexander Duyck {
6537056719SAlexander Duyck #ifdef CONFIG_NET_RX_BUSY_POLL
6637056719SAlexander Duyck 	return (unsigned long)(local_clock() >> 10);
6737056719SAlexander Duyck #else
6837056719SAlexander Duyck 	return 0;
6937056719SAlexander Duyck #endif
7037056719SAlexander Duyck }
7137056719SAlexander Duyck 
7237056719SAlexander Duyck /* in poll/select we use the global sysctl_net_busy_poll value */
7337056719SAlexander Duyck static inline bool busy_loop_timeout(unsigned long start_time)
7437056719SAlexander Duyck {
7537056719SAlexander Duyck #ifdef CONFIG_NET_RX_BUSY_POLL
7637056719SAlexander Duyck 	unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);
7737056719SAlexander Duyck 
7837056719SAlexander Duyck 	if (bp_usec) {
7937056719SAlexander Duyck 		unsigned long end_time = start_time + bp_usec;
8037056719SAlexander Duyck 		unsigned long now = busy_loop_current_time();
8137056719SAlexander Duyck 
8237056719SAlexander Duyck 		return time_after(now, end_time);
8337056719SAlexander Duyck 	}
8437056719SAlexander Duyck #endif
8537056719SAlexander Duyck 	return true;
8637056719SAlexander Duyck }
8737056719SAlexander Duyck 
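/* as busy_loop_timeout(), but bounded by the per-socket sk_ll_usec
 * limit rather than the global sysctl
 */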
8837056719SAlexander Duyck static inline bool sk_busy_loop_timeout(struct sock *sk,
8937056719SAlexander Duyck 					unsigned long start_time)
9037056719SAlexander Duyck {
9137056719SAlexander Duyck #ifdef CONFIG_NET_RX_BUSY_POLL
9237056719SAlexander Duyck 	unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);
9337056719SAlexander Duyck 
9437056719SAlexander Duyck 	if (bp_usec) {
9537056719SAlexander Duyck 		unsigned long end_time = start_time + bp_usec;
9637056719SAlexander Duyck 		unsigned long now = busy_loop_current_time();
9737056719SAlexander Duyck 
9837056719SAlexander Duyck 		return time_after(now, end_time);
9937056719SAlexander Duyck 	}
10037056719SAlexander Duyck #endif
10137056719SAlexander Duyck 	return true;
10237056719SAlexander Duyck }
10337056719SAlexander Duyck 
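/* busy poll the NAPI context recorded on the socket, if any.  With
 * @nonblock set no loop_end callback is passed, so effectively a single
 * poll pass is made; otherwise we spin until sk_busy_loop_end() reports
 * data or timeout.  A non-zero sk->sk_busy_poll_budget overrides the
 * default BUSY_POLL_BUDGET.
 */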
1047db6b048SSridhar Samudrala static inline void sk_busy_loop(struct sock *sk, int nonblock)
1057db6b048SSridhar Samudrala {
1067db6b048SSridhar Samudrala #ifdef CONFIG_NET_RX_BUSY_POLL
1077db6b048SSridhar Samudrala 	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);
1087db6b048SSridhar Samudrala 
1097db6b048SSridhar Samudrala 	if (napi_id >= MIN_NAPI_ID)
1107fd3253aSBjörn Töpel 		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
111*7c951cafSBjörn Töpel 			       READ_ONCE(sk->sk_prefer_busy_poll),
112*7c951cafSBjörn Töpel 			       READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
1137db6b048SSridhar Samudrala #endif
1147db6b048SSridhar Samudrala }
1157db6b048SSridhar Samudrala 
116d2e64dbbSAlexander Duyck /* used in the NIC receive handler to mark the skb */
117d2e64dbbSAlexander Duyck static inline void skb_mark_napi_id(struct sk_buff *skb,
118d2e64dbbSAlexander Duyck 				    struct napi_struct *napi)
119d2e64dbbSAlexander Duyck {
120d2e64dbbSAlexander Duyck #ifdef CONFIG_NET_RX_BUSY_POLL
12178e57f15SAmritha Nambiar 	/* If the skb was already marked with a valid NAPI ID, avoid overwriting
12278e57f15SAmritha Nambiar 	 * it.
12378e57f15SAmritha Nambiar 	 */
12478e57f15SAmritha Nambiar 	if (skb->napi_id < MIN_NAPI_ID)
125d2e64dbbSAlexander Duyck 		skb->napi_id = napi->napi_id;
126d2e64dbbSAlexander Duyck #endif
127d2e64dbbSAlexander Duyck }
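/* illustrative (hypothetical) driver RX path; the names below are
 * examples, not definitions from this header:
 *
 *	skb = napi_alloc_skb(napi, len);
 *	... fill the skb from the RX descriptor ...
 *	skb_mark_napi_id(skb, napi);
 *	napi_gro_receive(napi, skb);
 */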
128d2e64dbbSAlexander Duyck 
129e68b6e50SEric Dumazet /* used in the protocol handler to propagate the napi_id to the socket */
130e68b6e50SEric Dumazet static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
131e68b6e50SEric Dumazet {
132e68b6e50SEric Dumazet #ifdef CONFIG_NET_RX_BUSY_POLL
133ee8d153dSEric Dumazet 	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
134e68b6e50SEric Dumazet #endif
135c6345ce7SAmritha Nambiar 	sk_rx_queue_set(sk, skb);
136e68b6e50SEric Dumazet }
137e68b6e50SEric Dumazet 
138e68b6e50SEric Dumazet /* variant used for unconnected sockets */
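/* (many flows may share an unconnected socket, so only the first NAPI
 * ID seen is recorded rather than letting each packet rewrite it)
 */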
139e68b6e50SEric Dumazet static inline void sk_mark_napi_id_once(struct sock *sk,
140e68b6e50SEric Dumazet 					const struct sk_buff *skb)
141e68b6e50SEric Dumazet {
142e68b6e50SEric Dumazet #ifdef CONFIG_NET_RX_BUSY_POLL
143ee8d153dSEric Dumazet 	if (!READ_ONCE(sk->sk_napi_id))
144ee8d153dSEric Dumazet 		WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
145e68b6e50SEric Dumazet #endif
146e68b6e50SEric Dumazet }
147e68b6e50SEric Dumazet 
1488b80cda5SEliezer Tamir #endif /* _LINUX_NET_BUSY_POLL_H */
149