1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /*
3  * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
4  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
5  * Copyright (C) 2015-2016 Intel Deutschland GmbH
6  */
7 #ifndef __sta_h__
8 #define __sta_h__
9 
10 #include <linux/spinlock.h>
11 #include <net/mac80211.h>
12 #include <linux/wait.h>
13 
14 #include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
15 #include "fw-api.h" /* IWL_MVM_STATION_COUNT_MAX */
16 #include "rs.h"
17 
18 struct iwl_mvm;
19 struct iwl_mvm_vif;
20 
21 /**
 * DOC: DQA - Dynamic Queue Allocation - introduction
23  *
 * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in the iwlwifi
 * driver to allow dynamic allocation of queues on demand, rather than
 * allocating them statically ahead of time. Ideally, we would like to allocate
 * one queue per RA/TID, thus allowing an AP - for example - to send BE traffic
 * to STA2 even if it also needs to send traffic to a sleeping STA1, without
 * being blocked by the sleeping station.
30  *
31  * Although the queues in DQA mode are dynamically allocated, there are still
32  * some queues that are statically allocated:
33  *	TXQ #0 - command queue
34  *	TXQ #1 - aux frames
35  *	TXQ #2 - P2P device frames
36  *	TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
37  *	TXQ #4 - BSS DATA frames queue
38  *	TXQ #5-8 - Non-QoS and MGMT frames queue pool
39  *	TXQ #9 - P2P GO/SoftAP probe responses
40  *	TXQ #10-31 - DATA frames queue pool
41  * The queues are dynamically taken from either the MGMT frames queue pool or
 * the DATA frames one. See %iwl_mvm_dqa_txq for more information on each
 * queue.
44  *
 * When a frame for a previously unseen RA/TID comes in, it needs to be deferred
 * until a queue is allocated for it, and only then can it be TXed. Therefore,
 * it is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
 * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
49  *
 * For convenience, MGMT is considered as if it has TID=8, and goes to the
 * MGMT queues in the pool. If there is no free MGMT queue left to allocate, a
 * queue will be allocated from the DATA pool instead. Since QoS NDPs can
 * create a problem for aggregations, they too will use a MGMT queue.
54  *
 * When adding a STA, a DATA queue is reserved for it so that it can TX from
 * it. If no free queue exists to reserve, the STA addition will fail.
57  *
58  * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
59  * new RA/TID comes in for an existing STA, one of the STA's queues will become
 * shared and will serve more than a single TID (but always for the same RA!).
61  *
 * When a RA/TID needs to become aggregated, no new queue needs to be
 * allocated; the existing queue is only marked as aggregated via the ADD_STA
 * command. Note, however, that a shared queue cannot be aggregated; only after
 * the other TIDs become inactive and are removed can the queue be reconfigured
 * and become aggregated.
67  *
68  * When removing a station, its queues are returned to the pool for reuse. Here
69  * we also need to make sure that we are synced with the worker thread that TXes
70  * the deferred frames so we don't get into a situation where the queues are
71  * removed and then the worker puts deferred frames onto the released queues or
72  * tries to allocate new queues for a STA we don't need anymore.
73  */
74 
75 /**
76  * DOC: station table - introduction
77  *
 * The station table is a list of data structures that represent the stations.
 * In STA/P2P client mode, the driver will hold one station for the AP/GO.
80  * In GO/AP mode, the driver will have as many stations as associated clients.
81  * All these stations are reflected in the fw's station table. The driver
82  * keeps the fw's station table up to date with the ADD_STA command. Stations
83  * can be removed by the REMOVE_STA command.
84  *
85  * All the data related to a station is held in the structure %iwl_mvm_sta
 * which is embedded in mac80211's %ieee80211_sta (in the drv_priv area).
 * This data includes the index of the station in the fw, per-tid information
 * (sequence numbers, Block-Ack state machine, etc.). The stations are
89  * created and deleted by the %sta_state callback from %ieee80211_ops.
90  *
 * The driver holds a map, %fw_id_to_mac_id, that allows fetching an
 * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw
 * station index. That way, the driver is able to get the tid-related data in
 * O(1) in time-sensitive paths (Tx / Tx response / BA notification). These
 * paths are triggered by the fw, and the driver needs to get a pointer to the
 * %ieee80211_sta structure. This map helps to get that pointer quickly.
97  */
98 
99 /**
100  * DOC: station table - locking
101  *
 * As stated before, the station is created / deleted by mac80211's %sta_state
 * callback from %ieee80211_ops, which can sleep. The next paragraph explains
 * the locking of a single station; the ones after relate to the station
 * table.
106  *
 * The station holds the sequence number per tid, so this data needs to be
 * accessed in the Tx path (which runs in softIRQ context). It also holds the
 * Block-Ack information (the state machine and the logic that checks if the
 * queues were drained), so it also needs to be accessible from the Tx response
 * flow. In short, the station needs to be accessed from sleepable contexts as
 * well as from tasklets, so the station itself needs a spinlock.
113  *
114  * The writers of %fw_id_to_mac_id map are serialized by the global mutex of
115  * the mvm op_mode. This is possible since %sta_state can sleep.
116  * The pointers in this map are RCU protected, hence we won't replace the
117  * station while we have Tx / Tx response / BA notification running.
118  *
119  * If a station is deleted while it still has packets in its A-MPDU queues,
 * then the reclaim flow will notice that there is no station in the map for
 * that sta_id and it will drop the responses.
122  */
123 
124 /**
125  * DOC: station table - internal stations
126  *
 * The FW needs a few internal stations that are not reflected in
 * mac80211, such as the broadcast station in AP / GO mode, or the AUX sta
 * used for scanning and for the P2P device (during GO negotiation).
 * For these kinds of stations we have the %iwl_mvm_int_sta struct, which
 * holds the data relevant for them from both %iwl_mvm_sta and %ieee80211_sta.
 * Usually the data for these stations is static, so no locking is required,
 * and no TID data is kept, since it is not needed either.
 * One thing to note is that these stations have an ID in the fw, but not
 * in mac80211. In order to "reserve" a sta_id for them in %fw_id_to_mac_id,
 * we fill ERR_PTR(-EINVAL) in this mapping, and any other code dereferencing
 * pointers from this mapping needs to check that the value is neither an
 * error nor NULL.
139  *
140  * Currently there is only one auxiliary station for scanning, initialized
141  * on init.
142  */
143 
144 /**
145  * DOC: station table - AP Station in STA mode
146  *
147  * %iwl_mvm_vif includes the index of the AP station in the fw's STA table:
 * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta,
 * %fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove
150  * the AP station from the fw before setting the MAC context as unassociated.
151  * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is
152  * removed by mac80211, but the station won't be removed in the fw until the
153  * VIF is set as unassociated. Then, %ap_sta_id will be invalidated.
154  */
155 
156 /**
157  * DOC: station table - Drain vs. Flush
158  *
 * Flush means that all the frames in the SCD queue are dumped regardless of the
 * station to which they were sent. We do that when we disassociate and before
 * we remove the AP's STA. The flush can be done synchronously against the
 * fw.
 * Drain means that the fw will drop all the frames sent to a specific station.
 * This is useful when a client (if we are an IBSS / GO / AP) disassociates.
165  */
166 
167 /**
168  * DOC: station table - fw restart
169  *
 * When the fw asserts, or we have any other issue that requires resetting the
 * driver, we require mac80211 to reconfigure the driver. Since the private
 * data of the stations is embedded in mac80211's %ieee80211_sta, that data
 * will not be zeroed and needs to be reinitialized manually.
 * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and hints that we must
 * not allocate a new sta_id but reuse the previous one. This means that the
 * stations being re-added after the reset will have the same place in the fw
 * as before the reset. We do need to zero the %fw_id_to_mac_id map, since the
 * stations aren't in the fw any more. Internal stations that are not added by
 * mac80211 will be re-added in the init flow that is called after the restart:
 * mac80211 calls %iwl_mvm_mac_start, which calls %iwl_mvm_up.
182  */
183 
184 /**
185  * DOC: AP mode - PS
186  *
187  * When a station is asleep, the fw will set it as "asleep". All frames on
188  * shared queues (i.e. non-aggregation queues) to that station will be dropped
189  * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
190  *
191  * AMPDUs are in a separate queue that is stopped by the fw. We just need to
192  * let mac80211 know when there are frames in these queues so that it can
193  * properly handle trigger frames.
194  *
195  * When a trigger frame is received, mac80211 tells the driver to send frames
196  * from the AMPDU queues or sends frames to non-aggregation queues itself,
197  * depending on which ACs are delivery-enabled and what TID has frames to
 * transmit. Note that mac80211 has all the knowledge, since all the non-agg
 * frames are buffered / filtered and the driver tells mac80211 about agg
 * frames. The driver needs to tell the fw to let frames out even if the
201  * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
202  *
 * When we receive a frame from that station with the PM bit unset, the driver
204  * needs to let the fw know that this station isn't asleep any more. This is
205  * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the
206  * station's wakeup.
207  *
208  * For a GO, the Service Period might be cut short due to an absence period
 * of the GO. In this (and all other) cases, the firmware notifies us with the
210  * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
211  * already sent to the device will be rejected again.
212  *
213  * See also "AP support for powersaving clients" in mac80211.h.
214  */
215 
216 /**
217  * enum iwl_mvm_agg_state
218  *
219  * The state machine of the BA agreement establishment / tear down.
220  * These states relate to a specific RA / TID.
221  *
222  * @IWL_AGG_OFF: aggregation is not used
223  * @IWL_AGG_QUEUED: aggregation start work has been queued
 * @IWL_AGG_STARTING: aggregation is starting (between start and oper)
225  * @IWL_AGG_ON: aggregation session is up
 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
 *	HW queue to be empty of packets for this RA/TID.
 * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
 *	HW queue to be empty of packets for this RA/TID.
230  */
231 enum iwl_mvm_agg_state {
232 	IWL_AGG_OFF = 0,
233 	IWL_AGG_QUEUED,
234 	IWL_AGG_STARTING,
235 	IWL_AGG_ON,
236 	IWL_EMPTYING_HW_QUEUE_ADDBA,
237 	IWL_EMPTYING_HW_QUEUE_DELBA,
238 };
239 
240 /**
241  * struct iwl_mvm_tid_data - holds the states for each RA / TID
242  * @seq_number: the next WiFi sequence number to use
243  * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
244  *	This is basically (last acked packet++).
245  * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
246  *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
247  * @lq_color: the color of the LQ command as it appears in tx response.
248  * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
249  * @state: state of the BA agreement establishment / tear down.
250  * @txq_id: Tx queue used by the BA session / DQA
251  * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
252  *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
253  *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
254  *	we are ready to finish the Tx AGG stop / start flow.
255  * @tx_time: medium time consumed by this A-MPDU
 * @tpt_meas_start: time when the throughput measurement started; reset every
 *	second (every HZ jiffies)
 * @tx_count_last: number of frames transmitted during the last second
 * @tx_count: counts the number of frames transmitted since the last reset of
 *	tpt_meas_start
260  */
261 struct iwl_mvm_tid_data {
262 	u16 seq_number;
263 	u16 next_reclaimed;
264 	/* The rest is Tx AGG related */
265 	u32 rate_n_flags;
266 	u8 lq_color;
267 	bool amsdu_in_ampdu_allowed;
268 	enum iwl_mvm_agg_state state;
269 	u16 txq_id;
270 	u16 ssn;
271 	u16 tx_time;
272 	unsigned long tpt_meas_start;
273 	u32 tx_count_last;
274 	u32 tx_count;
275 };
276 
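/*
 * struct iwl_mvm_key_pn - per-RX-queue packet number tracking for a key.
 * @rcu_head allows freeing the structure via RCU once the key is gone;
 * @q holds, for every RX queue, the last received PN per TID, which the RX
 * path uses for replay / duplicate detection when RSS spreads frames of the
 * same station across queues.
 */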
277 struct iwl_mvm_key_pn {
278 	struct rcu_head rcu_head;
279 	struct {
280 		u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN];
281 	} ____cacheline_aligned_in_smp q[];
282 };
283 
284 /**
285  * enum iwl_mvm_rxq_notif_type - Internal message identifier
286  *
287  * @IWL_MVM_RXQ_EMPTY: empty sync notification
288  * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
289  * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN
290  */
291 enum iwl_mvm_rxq_notif_type {
292 	IWL_MVM_RXQ_EMPTY,
293 	IWL_MVM_RXQ_NOTIF_DEL_BA,
294 	IWL_MVM_RXQ_NSSN_SYNC,
295 };
296 
297 /**
298  * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
299  * in &iwl_rxq_sync_cmd. Should be DWORD aligned.
 * FW is agnostic to the payload, so there are no endianness requirements.
301  *
302  * @type: value from &iwl_mvm_rxq_notif_type
303  * @sync: ctrl path is waiting for all notifications to be received
304  * @cookie: internal cookie to identify old notifications
305  * @data: payload
306  */
307 struct iwl_mvm_internal_rxq_notif {
308 	u16 type;
309 	u16 sync;
310 	u32 cookie;
311 	u8 data[];
312 } __packed;
313 
314 struct iwl_mvm_delba_data {
315 	u32 baid;
316 } __packed;
317 
318 struct iwl_mvm_nssn_sync_data {
319 	u32 baid;
320 	u32 nssn;
321 } __packed;
322 
323 /**
324  * struct iwl_mvm_rxq_dup_data - per station per rx queue data
325  * @last_seq: last sequence per tid for duplicate packet detection
326  * @last_sub_frame: last subframe packet
327  */
328 struct iwl_mvm_rxq_dup_data {
329 	__le16 last_seq[IWL_MAX_TID_COUNT + 1];
330 	u8 last_sub_frame[IWL_MAX_TID_COUNT + 1];
331 } ____cacheline_aligned_in_smp;
332 
333 /**
334  * struct iwl_mvm_link_sta - link specific parameters of a station
335  * @rcu_head: used for freeing the data
336  * @sta_id: the index of the station in the fw
 * @lq_sta: holds rate scaling data: %rs_drv when rate scaling is done in
 *	the driver, or %rs_fw when it is done in the FW.
 * @orig_amsdu_len: used to save the original amsdu_len when it is changed via
 *	debugfs. If it is set to 0, it means that it was not set via debugfs.
342  * @avg_energy: energy as reported by FW statistics notification
343  */
344 struct iwl_mvm_link_sta {
345 	struct rcu_head rcu_head;
346 	u32 sta_id;
347 	union {
348 		struct iwl_lq_sta_rs_fw rs_fw;
349 		struct iwl_lq_sta rs_drv;
350 	} lq_sta;
351 
352 	u16 orig_amsdu_len;
353 
354 	u8 avg_energy;
355 };
356 
357 /**
358  * struct iwl_mvm_sta - representation of a station in the driver
359  * @tfd_queue_msk: the tfd queues used by the station
360  * @mac_id_n_color: the MAC context this station is linked to
 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send A-MPDUs for
 *	that tid.
363  * @sta_type: station type
364  * @authorized: indicates station is authorized
365  * @sta_state: station state according to enum %ieee80211_sta_state
366  * @bt_reduced_txpower: is reduced tx power enabled for this station
367  * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
368  *	we need to signal the EOSP
 * @lock: lock to protect the whole struct. Since %tid_data is accessed from
 *	the Tx and the Tx response flows, it needs a spinlock.
371  * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
372  * @tid_to_baid: a simple map of TID to baid
373  * @vif: a vif pointer
 * @reserved_queue: the queue reserved for this STA for DQA purposes.
 *	Every STA is given one reserved queue to allow it to operate. If no
 *	such queue can be guaranteed, the STA addition will fail.
377  * @tx_protection: reference counter for controlling the Tx protection.
 * @tt_tx_protection: is thermal throttling enabling Tx protection?
379  * @disable_tx: is tx to this STA disabled?
380  * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs.
381  *	In case TLC offload is not active it is either 0xFFFF or 0.
382  * @max_amsdu_len: max AMSDU length
 * @agg_tids: bitmap of tids whose aggregation state is operational (IWL_AGG_ON)
384  * @sleeping: sta sleep transitions in power management
385  * @sleep_tx_count: the number of frames that we told the firmware to let out
386  *	even when that station is asleep. This is useful in case the queue
387  *	gets empty before all the frames were sent, which can happen when
388  *	we are sending frames from an AMPDU queue and there was a hole in
389  *	the BA window. To be used for UAPSD only.
390  * @ptk_pn: per-queue PTK PN data structures
391  * @dup_data: per queue duplicate packet detection data
393  * @tx_ant: the index of the antenna to use for data tx to this station. Only
394  *	used during connection establishment (e.g. for the 4 way handshake
395  *	exchange).
396  * @pairwise_cipher: used to feed iwlmei upon authorization
397  * @deflink: the default link station, for non-MLO STA, all link specific data
398  *	is accessed via deflink (or link[0]). For MLO, it will hold data of the
399  *	first added link STA.
400  * @link: per link sta entries. For non-MLO only link[0] holds data. For MLO,
401  *	link[0] points to deflink and link[link_id] is allocated when new link
402  *	sta is added.
403  *
404  * When mac80211 creates a station it reserves some space (hw->sta_data_size)
 * in the structure for use by the driver. This structure is placed in that
 * space.
 */
409 struct iwl_mvm_sta {
410 	u32 tfd_queue_msk;
411 	u32 mac_id_n_color;
412 	u16 tid_disable_agg;
413 	u8 sta_type;
414 	enum ieee80211_sta_state sta_state;
415 	bool bt_reduced_txpower;
416 	bool next_status_eosp;
417 	bool authorized;
418 	spinlock_t lock;
419 	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
420 	u8 tid_to_baid[IWL_MAX_TID_COUNT];
421 	struct ieee80211_vif *vif;
422 	struct iwl_mvm_key_pn __rcu *ptk_pn[4];
423 	struct iwl_mvm_rxq_dup_data *dup_data;
424 
425 	u8 reserved_queue;
426 
427 	/* Temporary, until the new TLC will control the Tx protection */
428 	s8 tx_protection;
429 	bool tt_tx_protection;
430 
431 	bool disable_tx;
432 	u16 amsdu_enabled;
433 	u16 max_amsdu_len;
434 	bool sleeping;
435 	u8 agg_tids;
436 	u8 sleep_tx_count;
437 	u8 tx_ant;
438 	u32 pairwise_cipher;
439 
440 	struct iwl_mvm_link_sta deflink;
441 	struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
442 };
443 
444 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
445 
446 static inline struct iwl_mvm_sta *
447 iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
448 {
449 	return (void *)sta->drv_priv;
450 }
451 
452 /**
453  * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
454  * broadcast)
455  * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
456  * @type: station type
457  * @tfd_queue_msk: the tfd queues used by the station
458  */
459 struct iwl_mvm_int_sta {
460 	u32 sta_id;
461 	u8 type;
462 	u32 tfd_queue_msk;
463 };
464 
465 /**
 * iwl_mvm_sta_send_to_fw - send the STA info to the FW
467  *
468  * @mvm: the iwl_mvm* to use
469  * @sta: the STA
470  * @update: this is true if the FW is being updated about a STA it already knows
471  *	about. Otherwise (if this is a new STA), this should be false.
472  * @flags: if update==true, this marks what is being changed via ORs of values
473  *	from enum iwl_sta_modify_flag. Otherwise, this is ignored.
474  */
475 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
476 			   bool update, unsigned int flags);
477 int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype);
478 int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
479 		     struct ieee80211_sta *sta, int sta_id, u8 sta_type);
480 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
481 		    struct ieee80211_vif *vif,
482 		    struct ieee80211_sta *sta);
483 
484 static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm,
485 				     struct ieee80211_vif *vif,
486 				     struct ieee80211_sta *sta)
487 {
488 	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
489 }
490 
491 void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
492 					  struct ieee80211_sta *sta);
493 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
494 				  struct iwl_mvm_sta *mvm_sta);
495 bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
496 		     struct ieee80211_sta *sta,
497 		     struct ieee80211_link_sta *link_sta, int *ret);
498 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
499 		   struct ieee80211_vif *vif,
500 		   struct ieee80211_sta *sta);
501 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
502 		      struct ieee80211_vif *vif,
503 		      u8 sta_id);
504 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
505 			struct ieee80211_vif *vif,
506 			struct ieee80211_sta *sta,
507 			struct ieee80211_key_conf *keyconf,
508 			u8 key_offset);
509 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
510 			   struct ieee80211_vif *vif,
511 			   struct ieee80211_sta *sta,
512 			   struct ieee80211_key_conf *keyconf);
513 
514 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
515 			     struct ieee80211_vif *vif,
516 			     struct ieee80211_key_conf *keyconf,
517 			     struct ieee80211_sta *sta, u32 iv32,
518 			     u16 *phase1key);
519 
520 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
521 			   struct iwl_rx_cmd_buffer *rxb);
522 
523 /* AMPDU */
524 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
525 		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn);
528 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
529 			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
530 			    bool amsdu);
531 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
532 			    struct ieee80211_sta *sta, u16 tid);
533 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid);
535 
536 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
537 		       int tid, u8 queue, bool start);
538 
539 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id);
540 int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm);
541 
542 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
543 void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
544 				   struct ieee80211_vif *vif);
545 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
546 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
547 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
548 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
549 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
550 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
551 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
552 			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     u8 type);
555 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
556 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta);
557 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
558 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
559 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm);
560 
561 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
562 				struct ieee80211_sta *sta);
563 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
564 				       struct ieee80211_sta *sta,
565 				       enum ieee80211_frame_release_type reason,
566 				       u16 cnt, u16 tids, bool more_data,
567 				       bool single_sta_queue);
568 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
569 		      bool drain);
570 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
571 				   struct iwl_mvm_sta *mvmsta, bool disable);
572 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
573 				      struct ieee80211_sta *sta,
574 				      bool disable);
575 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
576 				       struct iwl_mvm_vif *mvmvif,
577 				       bool disable);
578 
579 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
580 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
581 int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
582 			 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
583 			 u8 *key, u32 key_len);
584 void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
585 				   struct ieee80211_vif *vif,
586 				   u32 id);
587 /* Queues */
588 int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
589 			    struct ieee80211_sta *sta,
590 			    u8 sta_id, u8 tid, unsigned int timeout);
591 
592 /* Sta state */
593 /**
594  * struct iwl_mvm_sta_state_ops - callbacks for the sta_state() ops
595  *
 * Since the only difference between the MLD and non-MLD versions of
 * sta_state() is these function calls, each version passes its specific
 * callbacks to %iwl_mvm_mac_sta_state_common().
600  *
601  * @add_sta: pointer to the function that adds a new sta
602  * @update_sta: pointer to the function that updates a sta
 * @rm_sta: pointer to the function that removes a sta
604  * @mac_ctxt_changed: pointer to the function that handles a change in mac ctxt
605  */
606 struct iwl_mvm_sta_state_ops {
607 	int (*add_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
608 		       struct ieee80211_sta *sta);
609 	int (*update_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
610 			  struct ieee80211_sta *sta);
611 	int (*rm_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
612 		      struct ieee80211_sta *sta);
613 	int (*mac_ctxt_changed)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
614 				bool force_assoc_off);
615 };
616 
617 int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
618 				 struct ieee80211_vif *vif,
619 				 struct ieee80211_sta *sta,
620 				 enum ieee80211_sta_state old_state,
621 				 enum ieee80211_sta_state new_state,
622 				 const struct iwl_mvm_sta_state_ops *callbacks);
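
/*
 * A sketch of how the non-MLD sta_state() op might wire these callbacks and
 * hand them to iwl_mvm_mac_sta_state_common() (the .mac_ctxt_changed wrapper
 * name below is hypothetical; the other three are the functions declared
 * earlier in this file):
 *
 *	static const struct iwl_mvm_sta_state_ops callbacks = {
 *		.add_sta = iwl_mvm_add_sta,
 *		.update_sta = iwl_mvm_update_sta,
 *		.rm_sta = iwl_mvm_rm_sta,
 *		.mac_ctxt_changed = iwl_mvm_mac_ctxt_changed_wrapper,
 *	};
 *
 *	return iwl_mvm_mac_sta_state_common(hw, vif, sta, old_state, new_state,
 *					    &callbacks);
 *
 * The MLD version would plug in the iwl_mvm_mld_*_sta() counterparts instead.
 */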
623 
624 /* New MLD STA related APIs */
625 /* STA */
626 int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
627 			      struct ieee80211_bss_conf *link_conf);
628 int iwl_mvm_mld_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
629 			     struct ieee80211_bss_conf *link_conf);
630 int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
631 			      struct ieee80211_bss_conf *link_conf);
632 int iwl_mvm_mld_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id);
633 int iwl_mvm_mld_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
634 			     struct ieee80211_bss_conf *link_conf);
635 int iwl_mvm_mld_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
636 int iwl_mvm_mld_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
637 			     struct ieee80211_bss_conf *link_conf);
638 int iwl_mvm_mld_rm_aux_sta(struct iwl_mvm *mvm);
639 int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
640 			struct ieee80211_sta *sta);
641 int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
642 			   struct ieee80211_sta *sta);
643 int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
644 		       struct ieee80211_sta *sta);
645 void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
646 			       struct iwl_mvm_sta *mvm_sta,
647 			       struct iwl_mvm_link_sta *mvm_sta_link,
648 			       unsigned int link_id,
649 			       bool is_in_fw);
650 int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
651 int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
652 				 struct ieee80211_vif *vif,
653 				 struct ieee80211_sta *sta,
654 				 u16 old_links, u16 new_links);
655 u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
656 			   int filter_link_id);
657 int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm,
658 				       struct iwl_mvm_int_sta *sta,
659 				       const u8 *addr, int link_id,
660 				       u16 *queue, u8 tid,
661 				       unsigned int *_wdg_timeout);
662 
663 /* Queues */
664 void iwl_mvm_mld_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
665 					   struct iwl_mvm_vif *mvmvif,
666 					   bool disable);
667 void iwl_mvm_mld_sta_modify_disable_tx(struct iwl_mvm *mvm,
668 				       struct iwl_mvm_sta *mvm_sta,
669 				       bool disable);
670 void iwl_mvm_mld_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
671 					  struct ieee80211_sta *sta,
672 					  bool disable);
673 #endif /* __sta_h__ */
674