1 /*
2 	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 	<http://rt2x00.serialmonkey.com>
4 
5 	This program is free software; you can redistribute it and/or modify
6 	it under the terms of the GNU General Public License as published by
7 	the Free Software Foundation; either version 2 of the License, or
8 	(at your option) any later version.
9 
10 	This program is distributed in the hope that it will be useful,
11 	but WITHOUT ANY WARRANTY; without even the implied warranty of
12 	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 	GNU General Public License for more details.
14 
15 	You should have received a copy of the GNU General Public License
16 	along with this program; if not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 /*
20 	Module: rt2x00
21 	Abstract: rt2x00 queue datastructures and routines
22  */
23 
24 #ifndef RT2X00QUEUE_H
25 #define RT2X00QUEUE_H
26 
27 #include <linux/prefetch.h>
28 
29 /**
30  * DOC: Entry frame size
31  *
 * Ralink PCI devices demand the frame size to be a multiple of 128 bytes.
 * For USB devices this restriction does not apply, but the value of
 * 2432 still makes sense since it is big enough to contain the maximum
 * fragment size according to the IEEE 802.11 specification.
 * The aggregation size depends on support from the driver, but should
 * be around 3840 bytes.
38  */
39 #define DATA_FRAME_SIZE		2432
40 #define MGMT_FRAME_SIZE		256
41 #define AGGREGATION_SIZE	3840
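
/*
 * Illustrative sketch (not taken from a particular chipset driver): queue
 * initialization code would typically pick these constants as the per-entry
 * buffer size, e.g. when filling a &struct data_queue (defined below):
 *
 *	rx_queue->data_size = DATA_FRAME_SIZE;
 *	bcn_queue->data_size = MGMT_FRAME_SIZE;
 */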
42 
43 /**
44  * enum data_queue_qid: Queue identification
45  *
46  * @QID_AC_VO: AC VO queue
47  * @QID_AC_VI: AC VI queue
48  * @QID_AC_BE: AC BE queue
49  * @QID_AC_BK: AC BK queue
50  * @QID_HCCA: HCCA queue
51  * @QID_MGMT: MGMT queue (prio queue)
52  * @QID_RX: RX queue
53  * @QID_OTHER: None of the above (don't use, only present for completeness)
54  * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
55  * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
56  */
57 enum data_queue_qid {
58 	QID_AC_VO = 0,
59 	QID_AC_VI = 1,
60 	QID_AC_BE = 2,
61 	QID_AC_BK = 3,
62 	QID_HCCA = 4,
63 	QID_MGMT = 13,
64 	QID_RX = 14,
65 	QID_OTHER = 15,
66 	QID_BEACON,
67 	QID_ATIM,
68 };
69 
70 /**
71  * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
72  *
73  * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
74  * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
 * @SKBDESC_IV_STRIPPED: Frame contained an IV/EIV provided by
76  *	mac80211 but was stripped for processing by the driver.
77  * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
78  *	don't try to pass it back.
79  * @SKBDESC_DESC_IN_SKB: The descriptor is at the start of the
80  *	skb, instead of in the desc field.
81  */
82 enum skb_frame_desc_flags {
83 	SKBDESC_DMA_MAPPED_RX = 1 << 0,
84 	SKBDESC_DMA_MAPPED_TX = 1 << 1,
85 	SKBDESC_IV_STRIPPED = 1 << 2,
86 	SKBDESC_NOT_MAC80211 = 1 << 3,
87 	SKBDESC_DESC_IN_SKB = 1 << 4,
88 };
89 
90 /**
91  * struct skb_frame_desc: Descriptor information for the skb buffer
92  *
 * This structure is placed over the driver_data array; this means that
 * it must not exceed the size of that array (40 bytes).
95  *
96  * @flags: Frame flags, see &enum skb_frame_desc_flags.
97  * @desc_len: Length of the frame descriptor.
98  * @tx_rate_idx: the index of the TX rate, used for TX status reporting
99  * @tx_rate_flags: the TX rate flags, used for TX status reporting
100  * @desc: Pointer to descriptor part of the frame.
101  *	Note that this pointer could point to something outside
102  *	of the scope of the skb->data pointer.
103  * @iv: IV/EIV data used during encryption/decryption.
104  * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
 * @sta: The station to which the sk buffer was sent.
106  */
107 struct skb_frame_desc {
108 	u8 flags;
109 
110 	u8 desc_len;
111 	u8 tx_rate_idx;
112 	u8 tx_rate_flags;
113 
114 	void *desc;
115 
116 	__le32 iv[2];
117 
118 	dma_addr_t skb_dma;
119 	struct ieee80211_sta *sta;
120 };
121 
122 /**
123  * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff.
124  * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc
125  */
static inline struct skb_frame_desc *get_skb_frame_desc(struct sk_buff *skb)
127 {
128 	BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
129 		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
130 	return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
131 }
132 
133 /**
134  * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
135  *
136  * @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value.
137  * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
138  * @RXDONE_SIGNAL_MCS: Signal field contains the mcs value.
 * @RXDONE_MY_BSS: This frame originates from the device's BSS.
140  * @RXDONE_CRYPTO_IV: Driver provided IV/EIV data.
141  * @RXDONE_CRYPTO_ICV: Driver provided ICV data.
142  * @RXDONE_L2PAD: 802.11 payload has been padded to 4-byte boundary.
143  */
144 enum rxdone_entry_desc_flags {
145 	RXDONE_SIGNAL_PLCP = BIT(0),
146 	RXDONE_SIGNAL_BITRATE = BIT(1),
147 	RXDONE_SIGNAL_MCS = BIT(2),
148 	RXDONE_MY_BSS = BIT(3),
149 	RXDONE_CRYPTO_IV = BIT(4),
150 	RXDONE_CRYPTO_ICV = BIT(5),
151 	RXDONE_L2PAD = BIT(6),
152 };
153 
154 /**
 * RXDONE_SIGNAL_MASK - Mask that isolates the RXDONE_SIGNAL_* flags from all
 * other &rxdone_entry_desc_flags flags. This is useful to convert the
 * dev_flags from &struct rxdone_entry_desc into a signal value type.
158  */
159 #define RXDONE_SIGNAL_MASK \
	(RXDONE_SIGNAL_PLCP | RXDONE_SIGNAL_BITRATE | RXDONE_SIGNAL_MCS)
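
/*
 * Illustrative sketch: rxdone handling can reduce dev_flags to just the
 * signal type before interpreting the signal field:
 *
 *	switch (rxdesc->dev_flags & RXDONE_SIGNAL_MASK) {
 *	case RXDONE_SIGNAL_PLCP:
 *		(signal contains the raw PLCP value)
 *		break;
 *	case RXDONE_SIGNAL_BITRATE:
 *		(signal contains a bitrate value)
 *		break;
 *	case RXDONE_SIGNAL_MCS:
 *		(signal contains the MCS index)
 *		break;
 *	}
 */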
161 
162 /**
163  * struct rxdone_entry_desc: RX Entry descriptor
164  *
165  * Summary of information that has been read from the RX frame descriptor.
166  *
167  * @timestamp: RX Timestamp
168  * @signal: Signal of the received frame.
169  * @rssi: RSSI of the received frame.
170  * @size: Data size of the received frame.
171  * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
172  * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
 * @rate_mode: Rate mode (See &enum rate_modulation).
 * @enc_flags: Encoding flags of the received frame
 *	(See &enum mac80211_rx_encoding_flags).
 * @encoding: Encoding of the received frame (See &enum mac80211_rx_encoding).
 * @bw: Bandwidth of the received frame (See &enum rate_info_bw).
 * @cipher: Cipher type used during decryption.
175  * @cipher_status: Decryption status.
176  * @iv: IV/EIV data used during decryption.
177  * @icv: ICV data used during decryption.
178  */
179 struct rxdone_entry_desc {
180 	u64 timestamp;
181 	int signal;
182 	int rssi;
183 	int size;
184 	int flags;
185 	int dev_flags;
186 	u16 rate_mode;
187 	u16 enc_flags;
188 	enum mac80211_rx_encoding encoding;
189 	enum rate_info_bw bw;
190 	u8 cipher;
191 	u8 cipher_status;
192 
193 	__le32 iv[2];
194 	__le32 icv;
195 };
196 
197 /**
198  * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
199  *
200  * Every txdone report has to contain the basic result of the
201  * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or
202  * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in
 * conjunction with all of these flags but should only be set
 * if retries > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used
205  * in conjunction with &TXDONE_FAILURE.
206  *
207  * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
 * @TXDONE_SUCCESS: Frame was successfully sent.
 * @TXDONE_FALLBACK: Hardware used fallback rates for retries.
 * @TXDONE_FAILURE: Frame was not successfully sent.
 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
 *	frame transmission failed due to excessive retries.
 * @TXDONE_AMPDU: The frame was sent as part of an A-MPDU aggregate.
 * @TXDONE_NO_ACK_REQ: The frame did not request an acknowledgement.
213  */
214 enum txdone_entry_desc_flags {
215 	TXDONE_UNKNOWN,
216 	TXDONE_SUCCESS,
217 	TXDONE_FALLBACK,
218 	TXDONE_FAILURE,
219 	TXDONE_EXCESSIVE_RETRY,
220 	TXDONE_AMPDU,
221 	TXDONE_NO_ACK_REQ,
222 };
223 
224 /**
225  * struct txdone_entry_desc: TX done entry descriptor
226  *
227  * Summary of information that has been read from the TX frame descriptor
228  * after the device is done with transmission.
229  *
230  * @flags: TX done flags (See &enum txdone_entry_desc_flags).
231  * @retry: Retry count.
232  */
233 struct txdone_entry_desc {
234 	unsigned long flags;
235 	int retry;
236 };
237 
238 /**
239  * enum txentry_desc_flags: Status flags for TX entry descriptor
240  *
241  * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame.
242  * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
243  * @ENTRY_TXD_GENERATE_SEQ: This frame requires sequence counter.
 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first fragment of the frame.
245  * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
246  * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
247  * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
248  * @ENTRY_TXD_ACK: An ACK is required for this frame.
249  * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
250  * @ENTRY_TXD_ENCRYPT: This frame should be encrypted.
251  * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
252  * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
253  * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
254  * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU.
255  * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth.
256  * @ENTRY_TXD_HT_SHORT_GI: Use short GI.
257  * @ENTRY_TXD_HT_MIMO_PS: The receiving STA is in dynamic SM PS mode.
258  */
259 enum txentry_desc_flags {
260 	ENTRY_TXD_RTS_FRAME,
261 	ENTRY_TXD_CTS_FRAME,
262 	ENTRY_TXD_GENERATE_SEQ,
263 	ENTRY_TXD_FIRST_FRAGMENT,
264 	ENTRY_TXD_MORE_FRAG,
265 	ENTRY_TXD_REQ_TIMESTAMP,
266 	ENTRY_TXD_BURST,
267 	ENTRY_TXD_ACK,
268 	ENTRY_TXD_RETRY_MODE,
269 	ENTRY_TXD_ENCRYPT,
270 	ENTRY_TXD_ENCRYPT_PAIRWISE,
271 	ENTRY_TXD_ENCRYPT_IV,
272 	ENTRY_TXD_ENCRYPT_MMIC,
273 	ENTRY_TXD_HT_AMPDU,
274 	ENTRY_TXD_HT_BW_40,
275 	ENTRY_TXD_HT_SHORT_GI,
276 	ENTRY_TXD_HT_MIMO_PS,
277 };
278 
279 /**
280  * struct txentry_desc: TX Entry descriptor
281  *
282  * Summary of information for the frame descriptor before sending a TX frame.
283  *
 * @flags: Descriptor flags (See &enum txentry_desc_flags).
285  * @length: Length of the entire frame.
286  * @header_length: Length of 802.11 header.
287  * @length_high: PLCP length high word.
288  * @length_low: PLCP length low word.
289  * @signal: PLCP signal.
290  * @service: PLCP service.
 * @mcs: MCS.
 * @stbc: Use Space Time Block Coding (only available for MCS rates < 8).
 * @ba_size: Size of the recipient's RX reorder buffer minus 1.
 * @rate_mode: Rate mode (See &enum rate_modulation).
 * @mpdu_density: MPDU density.
 * @wcid: Hardware WCID (wireless client ID) of the destination station.
296  * @retry_limit: Max number of retries.
297  * @ifs: IFS value.
298  * @txop: IFS value for 11n capable chips.
299  * @cipher: Cipher type used for encryption.
300  * @key_idx: Key index used for encryption.
301  * @iv_offset: Position where IV should be inserted by hardware.
302  * @iv_len: Length of IV data.
303  */
304 struct txentry_desc {
305 	unsigned long flags;
306 
307 	u16 length;
308 	u16 header_length;
309 
310 	union {
311 		struct {
312 			u16 length_high;
313 			u16 length_low;
314 			u16 signal;
315 			u16 service;
316 			enum ifs ifs;
317 		} plcp;
318 
319 		struct {
320 			u16 mcs;
321 			u8 stbc;
322 			u8 ba_size;
323 			u8 mpdu_density;
324 			enum txop txop;
325 			int wcid;
326 		} ht;
327 	} u;
328 
329 	enum rate_modulation rate_mode;
330 
331 	short retry_limit;
332 
333 	enum cipher cipher;
334 	u16 key_idx;
335 	u16 iv_offset;
336 	u16 iv_len;
337 };
338 
339 /**
340  * enum queue_entry_flags: Status flags for queue entry
341  *
 * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface.
 *	As long as this bit is set, this entry may only be touched
 *	through the interface structure.
 * @ENTRY_BCN_ENABLED: Beaconing has been enabled for the interface that
 *	owns this beacon entry.
345  * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
346  *	transfer (either TX or RX depending on the queue). The entry should
347  *	only be touched after the device has signaled it is done with it.
348  * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
349  *	for the signal to start sending.
350  * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
351  *	while transferring the data to the hardware. No TX status report will
352  *	be expected from the hardware.
 * @ENTRY_DATA_STATUS_PENDING: The entry has been sent to the device and
354  *	returned. It is now waiting for the status reporting before the
355  *	entry can be reused again.
356  */
357 enum queue_entry_flags {
358 	ENTRY_BCN_ASSIGNED,
359 	ENTRY_BCN_ENABLED,
360 	ENTRY_OWNER_DEVICE_DATA,
361 	ENTRY_DATA_PENDING,
362 	ENTRY_DATA_IO_FAILED,
363 	ENTRY_DATA_STATUS_PENDING,
364 };
365 
366 /**
367  * struct queue_entry: Entry inside the &struct data_queue
368  *
369  * @flags: Entry flags, see &enum queue_entry_flags.
370  * @last_action: Timestamp of last change.
371  * @queue: The data queue (&struct data_queue) to which this entry belongs.
372  * @skb: The buffer which is currently being transmitted (for TX queue),
373  *	or used to directly receive data in (for RX queue).
374  * @entry_idx: The entry index number.
 * @priv_data: Private data belonging to this queue entry. The pointer
 *	points to data specific to a particular driver and queue type.
378  */
379 struct queue_entry {
380 	unsigned long flags;
381 	unsigned long last_action;
382 
383 	struct data_queue *queue;
384 
385 	struct sk_buff *skb;
386 
387 	unsigned int entry_idx;
388 
389 	void *priv_data;
390 };
391 
392 /**
393  * enum queue_index: Queue index type
394  *
395  * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
396  *	owned by the hardware then the queue is considered to be full.
397  * @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
398  *	transferred to the hardware.
399  * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
400  *	the hardware and for which we need to run the txdone handler. If this
401  *	entry is not owned by the hardware the queue is considered to be empty.
402  * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
403  *	of the index array.
404  */
405 enum queue_index {
406 	Q_INDEX,
407 	Q_INDEX_DMA_DONE,
408 	Q_INDEX_DONE,
409 	Q_INDEX_MAX,
410 };
411 
412 /**
413  * enum data_queue_flags: Status flags for data queues
414  *
 * @QUEUE_STARTED: The queue has been started. For RX queues this means the
416  *	device might be DMA'ing skbuffers. TX queues will accept skbuffers to
417  *	be transmitted and beacon queues will start beaconing the configured
418  *	beacons.
 * @QUEUE_PAUSED: The queue has been started but is currently paused.
 *	When this bit is set, the queue has been stopped in mac80211,
 *	preventing new frames from being enqueued. However, a few frames
 *	might still appear shortly after the pausing.
423  */
424 enum data_queue_flags {
425 	QUEUE_STARTED,
426 	QUEUE_PAUSED,
427 };
428 
429 /**
430  * struct data_queue: Data queue
431  *
 * @rt2x00dev: Pointer to the main &struct rt2x00_dev this queue belongs to.
433  * @entries: Base address of the &struct queue_entry which are
434  *	part of this queue.
435  * @qid: The queue identification, see &enum data_queue_qid.
 * @flags: Queue flags, see &enum data_queue_flags.
437  * @status_lock: The mutex for protecting the start/stop/flush
438  *	handling on this queue.
439  * @tx_lock: Spinlock to serialize tx operations on this queue.
 * @index_lock: Spinlock to protect index handling. Whenever the @index
 *	array needs to be changed this lock should be grabbed to prevent
 *	index corruption due to concurrency.
443  * @count: Number of frames handled in the queue.
444  * @limit: Maximum number of entries in the queue.
445  * @threshold: Minimum number of free entries before queue is kicked by force.
446  * @length: Number of frames in queue.
447  * @index: Index pointers to entry positions in the queue,
448  *	use &enum queue_index to get a specific index field.
 * @txop: Maximum burst time.
450  * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
451  * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
452  * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
453  * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @winfo_size: Size of the hardware TXWI (TX wireless information)
 *	structure for the frames in this queue.
455  * @priv_size: Size of per-queue_entry private data.
456  * @usb_endpoint: Device endpoint used for communication (USB only)
457  * @usb_maxpacket: Max packet size for given endpoint (USB only)
458  */
459 struct data_queue {
460 	struct rt2x00_dev *rt2x00dev;
461 	struct queue_entry *entries;
462 
463 	enum data_queue_qid qid;
464 	unsigned long flags;
465 
466 	struct mutex status_lock;
467 	spinlock_t tx_lock;
468 	spinlock_t index_lock;
469 
470 	unsigned int count;
471 	unsigned short limit;
472 	unsigned short threshold;
473 	unsigned short length;
474 	unsigned short index[Q_INDEX_MAX];
475 
476 	unsigned short txop;
477 	unsigned short aifs;
478 	unsigned short cw_min;
479 	unsigned short cw_max;
480 
481 	unsigned short data_size;
482 	unsigned char  desc_size;
483 	unsigned char  winfo_size;
484 	unsigned short priv_size;
485 
486 	unsigned short usb_endpoint;
487 	unsigned short usb_maxpacket;
488 };
489 
490 /**
 * queue_end - Return pointer beyond the last queue (HELPER MACRO).
492  * @__dev: Pointer to &struct rt2x00_dev
493  *
494  * Using the base rx pointer and the maximum number of available queues,
 * this macro will return the address of 1 position beyond the end of the
496  * queues array.
497  */
498 #define queue_end(__dev) \
499 	&(__dev)->rx[(__dev)->data_queues]
500 
501 /**
 * tx_queue_end - Return pointer beyond the last TX queue (HELPER MACRO).
503  * @__dev: Pointer to &struct rt2x00_dev
504  *
505  * Using the base tx pointer and the maximum number of available TX
506  * queues, this macro will return the address of 1 position beyond
507  * the end of the TX queue array.
508  */
509 #define tx_queue_end(__dev) \
510 	&(__dev)->tx[(__dev)->ops->tx_queues]
511 
512 /**
513  * queue_next - Return pointer to next queue in list (HELPER MACRO).
514  * @__queue: Current queue for which we need the next queue
515  *
516  * Using the current queue address we take the address directly
517  * after the queue to take the next queue. Note that this macro
518  * should be used carefully since it does not protect against
519  * moving past the end of the list. (See macros &queue_end and
520  * &tx_queue_end for determining the end of the queue).
521  */
522 #define queue_next(__queue) \
523 	&(__queue)[1]
524 
525 /**
526  * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
 * @__entry: Pointer in which the current queue will be stored.
528  * @__start: Start queue pointer.
529  * @__end: End queue pointer.
530  *
531  * This macro will loop through all queues between &__start and &__end.
532  */
533 #define queue_loop(__entry, __start, __end)			\
534 	for ((__entry) = (__start);				\
535 	     prefetch(queue_next(__entry)), (__entry) != (__end);\
536 	     (__entry) = queue_next(__entry))
537 
538 /**
539  * queue_for_each - Loop through all queues
540  * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer in which the current queue will be stored.
542  *
543  * This macro will loop through all available queues.
544  */
545 #define queue_for_each(__dev, __entry) \
546 	queue_loop(__entry, (__dev)->rx, queue_end(__dev))
547 
548 /**
549  * tx_queue_for_each - Loop through the TX queues
550  * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer in which the current queue will be stored.
552  *
553  * This macro will loop through all TX related queues excluding
554  * the Beacon and Atim queues.
555  */
556 #define tx_queue_for_each(__dev, __entry) \
557 	queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev))
558 
559 /**
560  * txall_queue_for_each - Loop through all TX related queues
561  * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer in which the current queue will be stored.
563  *
564  * This macro will loop through all TX related queues including
565  * the Beacon and Atim queues.
566  */
567 #define txall_queue_for_each(__dev, __entry) \
568 	queue_loop(__entry, (__dev)->tx, queue_end(__dev))
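
/*
 * Illustrative sketch: applying a per-queue operation to every TX queue;
 * rt2x00queue_flush_queue() stands in for whatever operation is needed:
 *
 *	struct data_queue *queue;
 *
 *	tx_queue_for_each(rt2x00dev, queue)
 *		rt2x00queue_flush_queue(queue, true);
 */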
569 
570 /**
571  * rt2x00queue_for_each_entry - Loop through all entries in the queue
 * @queue: Pointer to &struct data_queue
 * @start: Start index (see &enum queue_index)
 * @end: End index (see &enum queue_index)
575  * @data: Data to pass to the callback function
576  * @fn: The function to call for each &struct queue_entry
577  *
578  * This will walk through all entries in the queue, in chronological
579  * order. This means it will start at the current @start pointer
580  * and will walk through the queue until it reaches the @end pointer.
581  *
 * If @fn returns true for an entry, rt2x00queue_for_each_entry() will stop
 * processing and return true as well.
584  */
585 bool rt2x00queue_for_each_entry(struct data_queue *queue,
586 				enum queue_index start,
587 				enum queue_index end,
588 				void *data,
589 				bool (*fn)(struct queue_entry *entry,
590 					   void *data));
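
/*
 * Illustrative sketch of a callback walking the pending range of a queue
 * (the callback name and its body are made up for the example):
 *
 *	static bool example_kick_entry(struct queue_entry *entry, void *data)
 *	{
 *		if (!test_bit(ENTRY_DATA_PENDING, &entry->flags))
 *			return false;
 *
 *		(hand the entry to the hardware here)
 *		return false;	(false means: keep walking)
 *	}
 *
 *	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
 *				   NULL, example_kick_entry);
 */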
591 
592 /**
593  * rt2x00queue_empty - Check if the queue is empty.
594  * @queue: Queue to check if empty.
595  */
596 static inline int rt2x00queue_empty(struct data_queue *queue)
597 {
598 	return queue->length == 0;
599 }
600 
601 /**
602  * rt2x00queue_full - Check if the queue is full.
603  * @queue: Queue to check if full.
604  */
605 static inline int rt2x00queue_full(struct data_queue *queue)
606 {
607 	return queue->length == queue->limit;
608 }
609 
610 /**
 * rt2x00queue_available - Return the number of available entries in the queue.
612  * @queue: Queue to check.
613  */
614 static inline int rt2x00queue_available(struct data_queue *queue)
615 {
616 	return queue->limit - queue->length;
617 }
618 
619 /**
620  * rt2x00queue_threshold - Check if the queue is below threshold
621  * @queue: Queue to check.
622  */
623 static inline int rt2x00queue_threshold(struct data_queue *queue)
624 {
625 	return rt2x00queue_available(queue) < queue->threshold;
626 }
627 /**
628  * rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
629  * @entry: Queue entry to check.
630  */
631 static inline int rt2x00queue_dma_timeout(struct queue_entry *entry)
632 {
633 	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
634 		return false;
635 	return time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
636 }
637 
638 /**
 * _rt2x00_desc_read - Read a word from the hardware descriptor without
 * taking care of the byte ordering.
640  * @desc: Base descriptor address
641  * @word: Word index from where the descriptor should be read.
642  */
643 static inline __le32 _rt2x00_desc_read(__le32 *desc, const u8 word)
644 {
645 	return desc[word];
646 }
647 
648 /**
 * rt2x00_desc_read - Read a word from the hardware descriptor. This
 * function takes care of the byte ordering.
651  * @desc: Base descriptor address
652  * @word: Word index from where the descriptor should be read.
653  */
654 static inline u32 rt2x00_desc_read(__le32 *desc, const u8 word)
655 {
656 	return le32_to_cpu(_rt2x00_desc_read(desc, word));
657 }
658 
659 /**
 * _rt2x00_desc_write - Write a word to the hardware descriptor without
 * taking care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index to which the value should be written.
664  * @value: Value that should be written into the descriptor.
665  */
666 static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value)
667 {
668 	desc[word] = value;
669 }
670 
671 /**
 * rt2x00_desc_write - Write a word to the hardware descriptor. This
 * function takes care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index to which the value should be written.
675  * @value: Value that should be written into the descriptor.
676  */
677 static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
678 {
679 	_rt2x00_desc_write(desc, word, cpu_to_le32(value));
680 }
681 
682 #endif /* RT2X00QUEUE_H */
683