/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NET_QUEUES_H
#define _LINUX_NET_QUEUES_H

#include <linux/netdevice.h>

/**
 * DOC: Lockless queue stopping / waking helpers.
 *
 * The netif_txq_maybe_stop() and __netif_txq_completed_wake()
 * macros are designed to safely implement stopping
 * and waking netdev queues without full lock protection.
 *
 * We assume that there can be no concurrent stop attempts and no concurrent
 * wake attempts. The try-stop should happen from the xmit handler,
 * while wake up should be triggered from NAPI poll context.
 * The two may run concurrently (single producer, single consumer).
 *
 * The try-stop side is expected to run from the xmit handler and therefore
 * it does not reschedule Tx (netif_tx_start_queue() instead of
 * netif_tx_wake_queue()). Uses of the ``stop`` macros outside of the xmit
 * handler may lead to the xmit queue being enabled but not run.
 * The waking side does not have similar context restrictions.
 *
 * The macros guarantee that rings will not remain stopped if there's
 * space available, but they do *not* prevent false wake ups when
 * the ring is full! Drivers should check for ring full at the start
 * of the xmit handler.
 *
 * All descriptor ring indexes (and other relevant shared state) must
 * be updated before invoking the macros.
 */

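/* A minimal sketch of the intended single-producer/single-consumer pairing,
 * using hypothetical driver helpers (ring, my_ring_free() and MY_NEEDED are
 * illustrations, not part of this API). In ndo_start_xmit, after posting
 * the descriptors for the current skb:
 *
 *	netif_txq_maybe_stop(txq, my_ring_free(ring),
 *			     MY_NEEDED, 2 * MY_NEEDED);
 *
 * and in NAPI poll, once the ring's consumer index has been advanced:
 *
 *	netif_txq_completed_wake(txq, done_pkts, done_bytes,
 *				 my_ring_free(ring), 2 * MY_NEEDED);
 */
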
#define netif_txq_try_stop(txq, get_desc, start_thrs)			\
	({								\
		int _res;						\
									\
		netif_tx_stop_queue(txq);				\
		/* Producer index and stop bit must be visible		\
		 * to consumer before we recheck.			\
		 * Pairs with a barrier in __netif_txq_completed_wake(). \
		 */							\
		smp_mb__after_atomic();					\
									\
		/* We need to check again in case another		\
		 * CPU has just made room available.			\
		 */							\
		_res = 0;						\
		if (unlikely(get_desc >= start_thrs)) {			\
			netif_tx_start_queue(txq);			\
			_res = -1;					\
		}							\
		_res;							\
	})

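/* Sketch of direct use (my_ring_free() and the thresholds are assumed
 * driver-side names; most drivers want netif_txq_maybe_stop() below, which
 * wraps this building block): a driver which has already determined it is
 * short on descriptors can invoke the try-stop step on its own:
 *
 *	if (unlikely(my_ring_free(ring) < MY_STOP_THRS))
 *		netif_txq_try_stop(txq, my_ring_free(ring), MY_START_THRS);
 */
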
/**
 * netif_txq_maybe_stop() - locklessly stop a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @stop_thrs:	minimal number of available descriptors for queue to be left
 *		enabled
 * @start_thrs:	minimal number of descriptors to re-enable the queue, can be
 *		equal to @stop_thrs or higher to avoid frequent waking
 *
 * All arguments may be evaluated multiple times; beware of side effects.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Expected to be used from ndo_start_xmit; see the comment at the top of the file.
 *
 * Returns:
 *	 0 if the queue was stopped
 *	 1 if the queue was left enabled
 *	-1 if the queue was re-enabled (raced with waking)
 */
#define netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs)	\
	({								\
		int _res;						\
									\
		_res = 1;						\
		if (unlikely(get_desc < stop_thrs))			\
			_res = netif_txq_try_stop(txq, get_desc, start_thrs); \
		_res;							\
	})

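/* Example (a sketch with assumed helper names, not taken from a real
 * driver): at the end of ndo_start_xmit, once the current skb's
 * descriptors have been posted, stop the queue unless another
 * maximally-fragmented skb would still fit:
 *
 *	netif_txq_maybe_stop(txq, my_ring_free(ring),
 *			     MAX_SKB_FRAGS + 1, MAX_SKB_FRAGS + 1);
 *	return NETDEV_TX_OK;
 *
 * The return value matters only to callers which want to count stop
 * events or react to the -1 "raced with waking" case.
 */
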
/* Variant of netdev_tx_completed_queue() which guarantees smp_mb() if
 * @bytes != 0, regardless of kernel config.
 */
static inline void
netdev_txq_completed_mb(struct netdev_queue *dev_queue,
			unsigned int pkts, unsigned int bytes)
{
	if (IS_ENABLED(CONFIG_BQL))
		netdev_tx_completed_queue(dev_queue, pkts, bytes);
	else if (bytes)
		smp_mb();
}

/**
 * __netif_txq_completed_wake() - locklessly wake a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @pkts:	number of packets completed
 * @bytes:	number of bytes completed
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @start_thrs:	minimal number of descriptors to re-enable the queue
 * @down_cond:	down condition, predicate indicating that the queue should
 *		not be woken up even if descriptors are available
 *
 * All arguments may be evaluated multiple times.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Reports completed pkts/bytes to BQL.
 *
 * Returns:
 *	 0 if the queue was woken up
 *	 1 if the queue was already enabled (or disabled but @down_cond is true)
 *	-1 if the queue was left unchanged (@start_thrs not reached)
 */
#define __netif_txq_completed_wake(txq, pkts, bytes,			\
				   get_desc, start_thrs, down_cond)	\
	({								\
		int _res;						\
									\
		/* Report to BQL and piggy back on its barrier.		\
		 * Barrier makes sure that anybody stopping the queue	\
		 * after this point sees the new consumer index.	\
		 * Pairs with barrier in netif_txq_try_stop().		\
		 */							\
		netdev_txq_completed_mb(txq, pkts, bytes);		\
									\
		_res = -1;						\
		if (pkts && likely(get_desc >= start_thrs)) {		\
			_res = 1;					\
			if (unlikely(netif_tx_queue_stopped(txq)) &&	\
			    !(down_cond)) {				\
				netif_tx_wake_queue(txq);		\
				_res = 0;				\
			}						\
		}							\
		_res;							\
	})

#define netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs) \
	__netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs, false)

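/* Example (sketch; my_ring_free() and ring->closing stand in for assumed
 * driver-side state): from the Tx completion path in NAPI poll, after the
 * ring's consumer index has been advanced, wake the queue unless the ring
 * is being torn down:
 *
 *	__netif_txq_completed_wake(txq, done_pkts, done_bytes,
 *				   my_ring_free(ring), MY_START_THRS,
 *				   READ_ONCE(ring->closing));
 *
 * Drivers with no down condition can use netif_txq_completed_wake() above,
 * which passes false.
 */
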
/* subqueue variants follow */

#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs)		\
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_try_stop(txq, get_desc, start_thrs);		\
	})

#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs); \
	})

#define netif_subqueue_completed_wake(dev, idx, pkts, bytes,		\
				      get_desc, start_thrs)		\
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_completed_wake(txq, pkts, bytes,		\
					 get_desc, start_thrs);		\
	})

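/* Example (sketch with assumed helpers): drivers which track Tx rings by
 * queue index rather than by struct netdev_queue pointer can use the
 * subqueue variants directly. In ndo_start_xmit:
 *
 *	netif_subqueue_maybe_stop(dev, ring->idx, my_ring_free(ring),
 *				  MY_STOP_THRS, MY_START_THRS);
 *
 * and in the NAPI completion path:
 *
 *	netif_subqueue_completed_wake(dev, ring->idx, done_pkts, done_bytes,
 *				      my_ring_free(ring), MY_START_THRS);
 */
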
#endif