1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
4  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5  * Copyright (C) 2017 Intel Deutschland GmbH
6  */
7 #include <linux/jiffies.h>
8 #include <net/mac80211.h>
9 
10 #include "fw/notif-wait.h"
11 #include "iwl-trans.h"
12 #include "fw-api.h"
13 #include "time-event.h"
14 #include "mvm.h"
15 #include "iwl-io.h"
16 #include "iwl-prph.h"
17 
/*
 * For the high priority TE, use a time event type that has a priority
 * similar to that of the FW's action scan.
 */
22 #define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
23 #define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
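/*
 * Note: the firmware expresses time event durations and delays in TUs
 * (1 TU = 1024 usec), while mac80211 hands us ROC durations in
 * milliseconds, hence the MSEC_TO_TU() conversions below.
 */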
24 
25 void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
26 			   struct iwl_mvm_time_event_data *te_data)
27 {
28 	lockdep_assert_held(&mvm->time_event_lock);
29 
30 	if (!te_data || !te_data->vif)
31 		return;
32 
33 	list_del(&te_data->list);
34 	te_data->running = false;
35 	te_data->uid = 0;
36 	te_data->id = TE_MAX;
37 	te_data->vif = NULL;
38 }
39 
40 void iwl_mvm_roc_done_wk(struct work_struct *wk)
41 {
42 	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
43 
	/*
	 * Clear the ROC_RUNNING and ROC_AUX_RUNNING status bits.
	 * This will cause the TX path to drop offchannel transmissions.
	 * That would also be done by mac80211, but it is racy, in particular
	 * in the case that the time event actually completed in the firmware
	 * (which is handled in iwl_mvm_te_handle_notif).
	 */
51 	clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
52 	clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
53 
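	/*
	 * synchronize_net() waits for any TX path processing that is already
	 * in flight (RCU readers) to finish, so the cleared bits above are
	 * guaranteed to be observed before the queues are flushed below.
	 */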
54 	synchronize_net();
55 
56 	/*
57 	 * Flush the offchannel queue -- this is called when the time
58 	 * event finishes or is canceled, so that frames queued for it
59 	 * won't get stuck on the queue and be transmitted in the next
60 	 * time event.
61 	 */
62 
63 	mutex_lock(&mvm->mutex);
64 	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
65 		struct iwl_mvm_vif *mvmvif;
66 
67 		/*
68 		 * NB: access to this pointer would be racy, but the flush bit
69 		 * can only be set when we had a P2P-Device VIF, and we have a
70 		 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
71 		 * not really racy.
72 		 */
73 
74 		if (!WARN_ON(!mvm->p2p_device_vif)) {
75 			mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
76 			iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
77 		}
78 	} else {
		/* do the same in case of Hotspot 2.0 */
		iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
		/* In newer versions of this command, an aux station is added
		 * only when a dedicated TX queue is used, and it needs to be
		 * removed at the end of its use. */
84 		if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
85 					  ADD_STA, 0) >= 12)
86 			iwl_mvm_rm_aux_sta(mvm);
87 	}
88 
89 	mutex_unlock(&mvm->mutex);
90 }
91 
92 static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
93 {
94 	/*
95 	 * Of course, our status bit is just as racy as mac80211, so in
96 	 * addition, fire off the work struct which will drop all frames
97 	 * from the hardware queues that made it through the race. First
98 	 * it will of course synchronize the TX path to make sure that
99 	 * any *new* TX will be rejected.
100 	 */
101 	schedule_work(&mvm->roc_done_wk);
102 }
103 
104 static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
105 {
106 	struct ieee80211_vif *csa_vif;
107 
108 	rcu_read_lock();
109 
110 	csa_vif = rcu_dereference(mvm->csa_vif);
111 	if (!csa_vif || !csa_vif->csa_active)
112 		goto out_unlock;
113 
114 	IWL_DEBUG_TE(mvm, "CSA NOA started\n");
115 
116 	/*
117 	 * CSA NoA is started but we still have beacons to
118 	 * transmit on the current channel.
119 	 * So we just do nothing here and the switch
120 	 * will be performed on the last TBTT.
121 	 */
122 	if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
123 		IWL_WARN(mvm, "CSA NOA started too early\n");
124 		goto out_unlock;
125 	}
126 
127 	ieee80211_csa_finish(csa_vif);
128 
129 	rcu_read_unlock();
130 
131 	RCU_INIT_POINTER(mvm->csa_vif, NULL);
132 
133 	return;
134 
135 out_unlock:
136 	rcu_read_unlock();
137 }
138 
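/*
 * Returns true if the loss of the time event means the connection should be
 * dropped: this only applies to station interfaces that are either not yet
 * fully associated (no DTIM period known) or still waiting for the first
 * beacon after a channel switch (csa_bcn_pending).
 */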
139 static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
140 					struct ieee80211_vif *vif,
141 					const char *errmsg)
142 {
143 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
144 
145 	if (vif->type != NL80211_IFTYPE_STATION)
146 		return false;
147 
148 	if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
149 	    vif->bss_conf.dtim_period)
150 		return false;
151 	if (errmsg)
152 		IWL_ERR(mvm, "%s\n", errmsg);
153 
154 	if (mvmvif->csa_bcn_pending) {
155 		struct iwl_mvm_sta *mvmsta;
156 
157 		rcu_read_lock();
158 		mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
159 		if (!WARN_ON(!mvmsta))
160 			iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
161 		rcu_read_unlock();
162 	}
163 
164 	iwl_mvm_connection_loss(mvm, vif, errmsg);
165 	return true;
166 }
167 
168 static void
169 iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
170 			     struct iwl_mvm_time_event_data *te_data,
171 			     struct iwl_time_event_notif *notif)
172 {
173 	struct ieee80211_vif *vif = te_data->vif;
174 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
175 
176 	if (!notif->status)
177 		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
178 
179 	switch (te_data->vif->type) {
180 	case NL80211_IFTYPE_AP:
181 		if (!notif->status)
182 			mvmvif->csa_failed = true;
183 		iwl_mvm_csa_noa_start(mvm);
184 		break;
185 	case NL80211_IFTYPE_STATION:
186 		if (!notif->status) {
187 			iwl_mvm_connection_loss(mvm, vif,
188 						"CSA TE failed to start");
189 			break;
190 		}
191 		iwl_mvm_csa_client_absent(mvm, te_data->vif);
192 		cancel_delayed_work(&mvmvif->csa_work);
193 		ieee80211_chswitch_done(te_data->vif, true);
194 		break;
195 	default:
196 		/* should never happen */
197 		WARN_ON_ONCE(1);
198 		break;
199 	}
200 
201 	/* we don't need it anymore */
202 	iwl_mvm_te_clear_data(mvm, te_data);
203 }
204 
205 static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
206 				     struct iwl_time_event_notif *notif,
207 				     struct iwl_mvm_time_event_data *te_data)
208 {
209 	struct iwl_fw_dbg_trigger_tlv *trig;
210 	struct iwl_fw_dbg_trigger_time_event *te_trig;
211 	int i;
212 
213 	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
214 				     ieee80211_vif_to_wdev(te_data->vif),
215 				     FW_DBG_TRIGGER_TIME_EVENT);
216 	if (!trig)
217 		return;
218 
219 	te_trig = (void *)trig->data;
220 
221 	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
222 		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
223 		u32 trig_action_bitmap =
224 			le32_to_cpu(te_trig->time_events[i].action_bitmap);
225 		u32 trig_status_bitmap =
226 			le32_to_cpu(te_trig->time_events[i].status_bitmap);
227 
228 		if (trig_te_id != te_data->id ||
229 		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
230 		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
231 			continue;
232 
233 		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
234 					"Time event %d Action 0x%x received status: %d",
235 					te_data->id,
236 					le32_to_cpu(notif->action),
237 					le32_to_cpu(notif->status));
238 		break;
239 	}
240 }
241 
/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding to the time event data.
 */
249 static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
250 				    struct iwl_mvm_time_event_data *te_data,
251 				    struct iwl_time_event_notif *notif)
252 {
253 	lockdep_assert_held(&mvm->time_event_lock);
254 
255 	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
256 		     le32_to_cpu(notif->unique_id),
257 		     le32_to_cpu(notif->action));
258 
259 	iwl_mvm_te_check_trigger(mvm, notif, te_data);
260 
	/*
	 * The FW sends the start/end time event notifications even for events
	 * that it fails to schedule. This is indicated in the status field of
	 * the notification. This happens when the scheduler cannot find a
	 * schedule that can handle the event (for example when requesting
	 * P2P Device discoverability while there are other higher priority
	 * events in the system).
	 */
269 	if (!le32_to_cpu(notif->status)) {
270 		const char *msg;
271 
272 		if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
273 			msg = "Time Event start notification failure";
274 		else
275 			msg = "Time Event end notification failure";
276 
277 		IWL_DEBUG_TE(mvm, "%s\n", msg);
278 
279 		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
280 			iwl_mvm_te_clear_data(mvm, te_data);
281 			return;
282 		}
283 	}
284 
285 	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
286 		IWL_DEBUG_TE(mvm,
287 			     "TE ended - current time %lu, estimated end %lu\n",
288 			     jiffies, te_data->end_jiffies);
289 
290 		switch (te_data->vif->type) {
291 		case NL80211_IFTYPE_P2P_DEVICE:
292 			ieee80211_remain_on_channel_expired(mvm->hw);
293 			set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
294 			iwl_mvm_roc_finished(mvm);
295 			break;
296 		case NL80211_IFTYPE_STATION:
297 			/*
298 			 * If we are switching channel, don't disconnect
299 			 * if the time event is already done. Beacons can
300 			 * be delayed a bit after the switch.
301 			 */
302 			if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
303 				IWL_DEBUG_TE(mvm,
304 					     "No beacon heard and the CS time event is over, don't disconnect\n");
305 				break;
306 			}
307 
308 			/*
309 			 * By now, we should have finished association
310 			 * and know the dtim period.
311 			 */
312 			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
313 				"No beacon heard and the time event is over already...");
314 			break;
315 		default:
316 			break;
317 		}
318 
319 		iwl_mvm_te_clear_data(mvm, te_data);
320 	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
321 		te_data->running = true;
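		/* TU_TO_EXP_TIME() converts the duration in TUs into an
		 * absolute expiry time in jiffies.
		 */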
322 		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
323 
324 		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
325 			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
326 			ieee80211_ready_on_channel(mvm->hw);
327 		} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
328 			iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
329 		}
330 	} else {
331 		IWL_WARN(mvm, "Got TE with unknown action\n");
332 	}
333 }
334 
/*
 * Handle an Aux ROC time event
 */
338 static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
339 					   struct iwl_time_event_notif *notif)
340 {
341 	struct iwl_mvm_time_event_data *te_data, *tmp;
342 	bool aux_roc_te = false;
343 
344 	list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
345 		if (le32_to_cpu(notif->unique_id) == te_data->uid) {
346 			aux_roc_te = true;
347 			break;
348 		}
349 	}
	if (!aux_roc_te) /* Not an Aux ROC time event */
351 		return -EINVAL;
352 
353 	iwl_mvm_te_check_trigger(mvm, notif, te_data);
354 
355 	IWL_DEBUG_TE(mvm,
356 		     "Aux ROC time event notification  - UID = 0x%x action %d (error = %d)\n",
357 		     le32_to_cpu(notif->unique_id),
358 		     le32_to_cpu(notif->action), le32_to_cpu(notif->status));
359 
360 	if (!le32_to_cpu(notif->status) ||
361 	    le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
362 		/* End TE, notify mac80211 */
363 		ieee80211_remain_on_channel_expired(mvm->hw);
364 		iwl_mvm_roc_finished(mvm); /* flush aux queue */
365 		list_del(&te_data->list); /* remove from list */
366 		te_data->running = false;
367 		te_data->vif = NULL;
368 		te_data->uid = 0;
369 		te_data->id = TE_MAX;
370 	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
371 		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
372 		te_data->running = true;
373 		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
374 	} else {
375 		IWL_DEBUG_TE(mvm,
376 			     "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
377 			     le32_to_cpu(notif->action));
378 		return -EINVAL;
379 	}
380 
381 	return 0;
382 }
383 
384 /*
385  * The Rx handler for time event notifications
386  */
387 void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
388 				 struct iwl_rx_cmd_buffer *rxb)
389 {
390 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
391 	struct iwl_time_event_notif *notif = (void *)pkt->data;
392 	struct iwl_mvm_time_event_data *te_data, *tmp;
393 
394 	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
395 		     le32_to_cpu(notif->unique_id),
396 		     le32_to_cpu(notif->action));
397 
398 	spin_lock_bh(&mvm->time_event_lock);
	/* Check if this notification belongs to an Aux ROC time event */
400 	if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
401 		goto unlock;
402 
403 	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
404 		if (le32_to_cpu(notif->unique_id) == te_data->uid)
405 			iwl_mvm_te_handle_notif(mvm, te_data, notif);
406 	}
407 unlock:
408 	spin_unlock_bh(&mvm->time_event_lock);
409 }
410 
411 static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
412 			     struct iwl_rx_packet *pkt, void *data)
413 {
414 	struct iwl_mvm *mvm =
415 		container_of(notif_wait, struct iwl_mvm, notif_wait);
416 	struct iwl_mvm_time_event_data *te_data = data;
417 	struct iwl_time_event_notif *resp;
418 	int resp_len = iwl_rx_packet_payload_len(pkt);
419 
420 	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
421 		return true;
422 
423 	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
424 		IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
425 		return true;
426 	}
427 
428 	resp = (void *)pkt->data;
429 
430 	/* te_data->uid is already set in the TIME_EVENT_CMD response */
431 	if (le32_to_cpu(resp->unique_id) != te_data->uid)
432 		return false;
433 
434 	IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
435 		     te_data->uid);
436 	if (!resp->status)
437 		IWL_ERR(mvm,
438 			"TIME_EVENT_NOTIFICATION received but not executed\n");
439 
440 	return true;
441 }
442 
443 static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
444 					struct iwl_rx_packet *pkt, void *data)
445 {
446 	struct iwl_mvm *mvm =
447 		container_of(notif_wait, struct iwl_mvm, notif_wait);
448 	struct iwl_mvm_time_event_data *te_data = data;
449 	struct iwl_time_event_resp *resp;
450 	int resp_len = iwl_rx_packet_payload_len(pkt);
451 
452 	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
453 		return true;
454 
455 	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
456 		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
457 		return true;
458 	}
459 
460 	resp = (void *)pkt->data;
461 
462 	/* we should never get a response to another TIME_EVENT_CMD here */
463 	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
464 		return false;
465 
466 	te_data->uid = le32_to_cpu(resp->unique_id);
467 	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
468 		     te_data->uid);
469 	return true;
470 }
471 
472 static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
473 				       struct ieee80211_vif *vif,
474 				       struct iwl_mvm_time_event_data *te_data,
475 				       struct iwl_time_event_cmd *te_cmd)
476 {
477 	static const u16 time_event_response[] = { TIME_EVENT_CMD };
478 	struct iwl_notification_wait wait_time_event;
479 	int ret;
480 
481 	lockdep_assert_held(&mvm->mutex);
482 
483 	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
484 		     le32_to_cpu(te_cmd->duration));
485 
486 	spin_lock_bh(&mvm->time_event_lock);
487 	if (WARN_ON(te_data->id != TE_MAX)) {
488 		spin_unlock_bh(&mvm->time_event_lock);
489 		return -EIO;
490 	}
491 	te_data->vif = vif;
492 	te_data->duration = le32_to_cpu(te_cmd->duration);
493 	te_data->id = le32_to_cpu(te_cmd->id);
494 	list_add_tail(&te_data->list, &mvm->time_event_list);
495 	spin_unlock_bh(&mvm->time_event_lock);
496 
	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already have been processed - unsuccessfully, since
	 * the UID wouldn't be known yet.
	 */
506 	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
507 				   time_event_response,
508 				   ARRAY_SIZE(time_event_response),
509 				   iwl_mvm_time_event_response, te_data);
510 
511 	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
512 					    sizeof(*te_cmd), te_cmd);
513 	if (ret) {
514 		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
515 		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
516 		goto out_clear_te;
517 	}
518 
519 	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
520 	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
521 	/* should never fail */
522 	WARN_ON_ONCE(ret);
523 
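	/*
	 * Note that the out_clear_te label lives inside the if (ret) block:
	 * both the send-failure path above (via the goto) and an unexpected
	 * wait failure end up clearing the time event data here.
	 */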
524 	if (ret) {
525  out_clear_te:
526 		spin_lock_bh(&mvm->time_event_lock);
527 		iwl_mvm_te_clear_data(mvm, te_data);
528 		spin_unlock_bh(&mvm->time_event_lock);
529 	}
530 	return ret;
531 }
532 
533 void iwl_mvm_protect_session(struct iwl_mvm *mvm,
534 			     struct ieee80211_vif *vif,
535 			     u32 duration, u32 min_duration,
536 			     u32 max_delay, bool wait_for_notif)
537 {
538 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
539 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
540 	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
541 	struct iwl_notification_wait wait_te_notif;
542 	struct iwl_time_event_cmd time_cmd = {};
543 
544 	lockdep_assert_held(&mvm->mutex);
545 
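	/*
	 * If a time event is already scheduled and will last at least
	 * min_duration more TUs, there is nothing to do.
	 */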
546 	if (te_data->running &&
547 	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
548 		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
549 			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
550 		return;
551 	}
552 
553 	if (te_data->running) {
554 		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
555 			     te_data->uid,
556 			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * We don't have enough time left, so cancel the current TE
		 * and issue a new one. Of course it would be better to remove
		 * the old one only when the new one is added, but we don't
		 * care if we are off channel for a bit. All we need to do is
		 * not return before we actually begin to be on the channel.
		 */
565 		iwl_mvm_stop_session_protection(mvm, vif);
566 	}
567 
568 	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
569 	time_cmd.id_and_color =
570 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
571 	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
572 
573 	time_cmd.apply_time = cpu_to_le32(0);
574 
575 	time_cmd.max_frags = TE_V2_FRAG_NONE;
576 	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why set interval = bi (beacon interval) if it is not periodic? */
578 	time_cmd.interval = cpu_to_le32(1);
579 	time_cmd.duration = cpu_to_le32(duration);
580 	time_cmd.repeat = 1;
581 	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
582 				      TE_V2_NOTIF_HOST_EVENT_END |
583 				      TE_V2_START_IMMEDIATELY);
584 
585 	if (!wait_for_notif) {
586 		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
587 		return;
588 	}
589 
590 	/*
591 	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
592 	 * right after we send the time event
593 	 */
594 	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
595 				   te_notif_response,
596 				   ARRAY_SIZE(te_notif_response),
597 				   iwl_mvm_te_notif, te_data);
598 
	/* If the TE was sent OK, wait for the notification that it started */
600 	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
601 		IWL_ERR(mvm, "Failed to add TE to protect session\n");
602 		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
603 	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
604 					 TU_TO_JIFFIES(max_delay))) {
605 		IWL_ERR(mvm, "Failed to protect session until TE\n");
606 	}
607 }
608 
609 static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
610 					      struct iwl_mvm_vif *mvmvif)
611 {
612 	struct iwl_mvm_session_prot_cmd cmd = {
613 		.id_and_color =
614 			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
615 							mvmvif->color)),
616 		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
617 		.conf_id = cpu_to_le32(mvmvif->time_event_data.id),
618 	};
619 	int ret;
620 
621 	ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
622 						   MAC_CONF_GROUP, 0),
623 				   0, sizeof(cmd), &cmd);
624 	if (ret)
625 		IWL_ERR(mvm,
626 			"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
627 }
628 
629 static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
630 					struct iwl_mvm_time_event_data *te_data,
631 					u32 *uid)
632 {
633 	u32 id;
634 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
635 
636 	/*
637 	 * It is possible that by the time we got to this point the time
638 	 * event was already removed.
639 	 */
640 	spin_lock_bh(&mvm->time_event_lock);
641 
642 	/* Save time event uid before clearing its data */
643 	*uid = te_data->uid;
644 	id = te_data->id;
645 
646 	/*
647 	 * The clear_data function handles time events that were already removed
648 	 */
649 	iwl_mvm_te_clear_data(mvm, te_data);
650 	spin_unlock_bh(&mvm->time_event_lock);
651 
652 	/* When session protection is supported, the te_data->id field
653 	 * is reused to save session protection's configuration.
654 	 */
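	/* All conf_id values used for session protection (see the
	 * SESSION_PROTECTION_CMD users below) are smaller than
	 * SESSION_PROTECT_CONF_MAX_ID, so an id in that range means a session
	 * protection is still configured and must be cancelled explicitly.
	 */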
655 	if (fw_has_capa(&mvm->fw->ucode_capa,
656 			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
657 		if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
658 			/* Session protection is still ongoing. Cancel it */
659 			iwl_mvm_cancel_session_protection(mvm, mvmvif);
660 			if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
661 				set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
662 				iwl_mvm_roc_finished(mvm);
663 			}
664 		}
665 		return false;
666 	} else {
667 		/* It is possible that by the time we try to remove it, the
668 		 * time event has already ended and removed. In such a case
669 		 * there is no need to send a removal command.
670 		 */
671 		if (id == TE_MAX) {
672 			IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
673 			return false;
674 		}
675 	}
676 
677 	return true;
678 }
679 
/*
 * Explicit request to remove an aux ROC time event. The removal of a time
 * event needs to be synchronized with the flow of a time event's end
 * notification, which also removes the time event from the op mode
 * data structures.
 */
686 static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
687 				      struct iwl_mvm_vif *mvmvif,
688 				      struct iwl_mvm_time_event_data *te_data)
689 {
690 	struct iwl_hs20_roc_req aux_cmd = {};
691 	u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);
692 
693 	u32 uid;
694 	int ret;
695 
696 	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
697 		return;
698 
699 	aux_cmd.event_unique_id = cpu_to_le32(uid);
700 	aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
701 	aux_cmd.id_and_color =
702 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
703 	IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
704 		     le32_to_cpu(aux_cmd.event_unique_id));
705 	ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
706 				   len, &aux_cmd);
707 
708 	if (WARN_ON(ret))
709 		return;
710 }
711 
712 /*
713  * Explicit request to remove a time event. The removal of a time event needs to
714  * be synchronized with the flow of a time event's end notification, which also
715  * removes the time event from the op mode data structures.
716  */
717 void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
718 			       struct iwl_mvm_vif *mvmvif,
719 			       struct iwl_mvm_time_event_data *te_data)
720 {
721 	struct iwl_time_event_cmd time_cmd = {};
722 	u32 uid;
723 	int ret;
724 
725 	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
726 		return;
727 
728 	/* When we remove a TE, the UID is to be set in the id field */
729 	time_cmd.id = cpu_to_le32(uid);
730 	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
731 	time_cmd.id_and_color =
732 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
733 
734 	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
735 	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
736 				   sizeof(time_cmd), &time_cmd);
737 	if (ret)
738 		IWL_ERR(mvm, "Couldn't remove the time event\n");
739 }
740 
741 /*
742  * When the firmware supports the session protection API,
743  * this is not needed since it'll automatically remove the
744  * session protection after association + beacon reception.
745  */
746 void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
747 				     struct ieee80211_vif *vif)
748 {
749 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
750 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
751 	u32 id;
752 
753 	lockdep_assert_held(&mvm->mutex);
754 
755 	spin_lock_bh(&mvm->time_event_lock);
756 	id = te_data->id;
757 	spin_unlock_bh(&mvm->time_event_lock);
758 
759 	if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
760 		IWL_DEBUG_TE(mvm,
761 			     "don't remove TE with id=%u (not session protection)\n",
762 			     id);
763 		return;
764 	}
765 
766 	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
767 }
768 
769 void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
770 				      struct iwl_rx_cmd_buffer *rxb)
771 {
772 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
773 	struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
774 	struct ieee80211_vif *vif;
775 	struct iwl_mvm_vif *mvmvif;
776 
777 	rcu_read_lock();
778 	vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id),
779 					     true);
780 
781 	if (!vif)
782 		goto out_unlock;
783 
784 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
785 
786 	/* The vif is not a P2P_DEVICE, maintain its time_event_data */
787 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
788 		struct iwl_mvm_time_event_data *te_data =
789 			&mvmvif->time_event_data;
790 
791 		if (!le32_to_cpu(notif->status)) {
792 			iwl_mvm_te_check_disconnect(mvm, vif,
793 						    "Session protection failure");
794 			spin_lock_bh(&mvm->time_event_lock);
795 			iwl_mvm_te_clear_data(mvm, te_data);
796 			spin_unlock_bh(&mvm->time_event_lock);
797 		}
798 
799 		if (le32_to_cpu(notif->start)) {
800 			spin_lock_bh(&mvm->time_event_lock);
801 			te_data->running = le32_to_cpu(notif->start);
802 			te_data->end_jiffies =
803 				TU_TO_EXP_TIME(te_data->duration);
804 			spin_unlock_bh(&mvm->time_event_lock);
805 		} else {
806 			/*
807 			 * By now, we should have finished association
808 			 * and know the dtim period.
809 			 */
810 			iwl_mvm_te_check_disconnect(mvm, vif,
811 						    "No beacon heard and the session protection is over already...");
812 			spin_lock_bh(&mvm->time_event_lock);
813 			iwl_mvm_te_clear_data(mvm, te_data);
814 			spin_unlock_bh(&mvm->time_event_lock);
815 		}
816 
817 		goto out_unlock;
818 	}
819 
820 	if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
821 		/* End TE, notify mac80211 */
822 		mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
823 		ieee80211_remain_on_channel_expired(mvm->hw);
824 		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
825 		iwl_mvm_roc_finished(mvm);
826 	} else if (le32_to_cpu(notif->start)) {
827 		if (WARN_ON(mvmvif->time_event_data.id !=
828 				le32_to_cpu(notif->conf_id)))
829 			goto out_unlock;
830 		set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
831 		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
832 	}
833 
834  out_unlock:
835 	rcu_read_unlock();
836 }
837 
838 static int
839 iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
840 					 struct ieee80211_vif *vif,
841 					 int duration,
842 					 enum ieee80211_roc_type type)
843 {
844 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
845 	struct iwl_mvm_session_prot_cmd cmd = {
846 		.id_and_color =
847 			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
848 							mvmvif->color)),
849 		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
850 		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
851 	};
852 
853 	lockdep_assert_held(&mvm->mutex);
854 
855 	/* The time_event_data.id field is reused to save session
856 	 * protection's configuration.
857 	 */
858 	switch (type) {
859 	case IEEE80211_ROC_TYPE_NORMAL:
860 		mvmvif->time_event_data.id =
861 			SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV;
862 		break;
863 	case IEEE80211_ROC_TYPE_MGMT_TX:
864 		mvmvif->time_event_data.id =
865 			SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION;
866 		break;
867 	default:
868 		WARN_ONCE(1, "Got an invalid ROC type\n");
869 		return -EINVAL;
870 	}
871 
872 	cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
873 	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
874 						    MAC_CONF_GROUP, 0),
875 				    0, sizeof(cmd), &cmd);
876 }
877 
878 int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
879 			  int duration, enum ieee80211_roc_type type)
880 {
881 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
882 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
883 	struct iwl_time_event_cmd time_cmd = {};
884 
885 	lockdep_assert_held(&mvm->mutex);
886 	if (te_data->running) {
887 		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
888 		return -EBUSY;
889 	}
890 
891 	if (fw_has_capa(&mvm->fw->ucode_capa,
892 			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
893 		return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
894 								duration,
895 								type);
896 
897 	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
898 	time_cmd.id_and_color =
899 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
900 
901 	switch (type) {
902 	case IEEE80211_ROC_TYPE_NORMAL:
903 		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
904 		break;
905 	case IEEE80211_ROC_TYPE_MGMT_TX:
906 		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
907 		break;
908 	default:
909 		WARN_ONCE(1, "Got an invalid ROC type\n");
910 		return -EINVAL;
911 	}
912 
913 	time_cmd.apply_time = cpu_to_le32(0);
914 	time_cmd.interval = cpu_to_le32(1);
915 
	/*
	 * P2P Device TEs can have a lower priority than other events
	 * that are being scheduled by the driver/FW, and thus they might not
	 * be scheduled. To improve the chances of them being scheduled, allow
	 * them to be fragmented, and in addition allow them to be delayed.
	 */
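	/* Roughly one fragment per 50 TU of the requested duration, and allow
	 * the firmware to delay the start by up to half of the duration.
	 */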
922 	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
923 	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
924 	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
925 	time_cmd.repeat = 1;
926 	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
927 				      TE_V2_NOTIF_HOST_EVENT_END |
928 				      TE_V2_START_IMMEDIATELY);
929 
930 	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
931 }
932 
933 static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
934 {
935 	struct iwl_mvm_time_event_data *te_data;
936 
937 	lockdep_assert_held(&mvm->mutex);
938 
939 	spin_lock_bh(&mvm->time_event_lock);
940 
	/*
	 * Iterate over the list of time events and find the time event that is
	 * associated with a P2P_DEVICE interface.
	 * This assumes that a P2P_DEVICE interface can have only a single time
	 * event at any given time and that this time event corresponds to a
	 * ROC request.
	 */
948 	list_for_each_entry(te_data, &mvm->time_event_list, list) {
949 		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
950 			goto out;
951 	}
952 
	/* There can be at most one AUX ROC time event; we just use the
	 * list to simplify/unify the code. Return it if it exists.
	 */
956 	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
957 					   struct iwl_mvm_time_event_data,
958 					   list);
959 out:
960 	spin_unlock_bh(&mvm->time_event_lock);
961 	return te_data;
962 }
963 
964 void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
965 {
966 	struct iwl_mvm_time_event_data *te_data;
967 	u32 uid;
968 
969 	te_data = iwl_mvm_get_roc_te(mvm);
970 	if (te_data)
971 		__iwl_mvm_remove_time_event(mvm, te_data, &uid);
972 }
973 
974 void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
975 {
976 	struct iwl_mvm_vif *mvmvif;
977 	struct iwl_mvm_time_event_data *te_data;
978 
979 	if (fw_has_capa(&mvm->fw->ucode_capa,
980 			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
981 		mvmvif = iwl_mvm_vif_from_mac80211(vif);
982 
983 		if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
984 			iwl_mvm_cancel_session_protection(mvm, mvmvif);
985 			set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
986 		} else {
987 			iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
988 						  &mvmvif->time_event_data);
989 		}
990 
991 		iwl_mvm_roc_finished(mvm);
992 
993 		return;
994 	}
995 
996 	te_data = iwl_mvm_get_roc_te(mvm);
997 	if (!te_data) {
998 		IWL_WARN(mvm, "No remain on channel event\n");
999 		return;
1000 	}
1001 
1002 	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
1003 
1004 	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1005 		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
1006 		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
1007 	} else {
1008 		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
1009 	}
1010 
1011 	iwl_mvm_roc_finished(mvm);
1012 }
1013 
1014 void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
1015 			       struct ieee80211_vif *vif)
1016 {
1017 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1018 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1019 	u32 id;
1020 
1021 	lockdep_assert_held(&mvm->mutex);
1022 
1023 	spin_lock_bh(&mvm->time_event_lock);
1024 	id = te_data->id;
1025 	spin_unlock_bh(&mvm->time_event_lock);
1026 
1027 	if (id != TE_CHANNEL_SWITCH_PERIOD)
1028 		return;
1029 
1030 	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
1031 }
1032 
1033 int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
1034 				struct ieee80211_vif *vif,
1035 				u32 duration, u32 apply_time)
1036 {
1037 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1038 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1039 	struct iwl_time_event_cmd time_cmd = {};
1040 
1041 	lockdep_assert_held(&mvm->mutex);
1042 
1043 	if (te_data->running) {
1044 		u32 id;
1045 
1046 		spin_lock_bh(&mvm->time_event_lock);
1047 		id = te_data->id;
1048 		spin_unlock_bh(&mvm->time_event_lock);
1049 
1050 		if (id == TE_CHANNEL_SWITCH_PERIOD) {
1051 			IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
1052 			return -EBUSY;
1053 		}
1054 
1055 		/*
1056 		 * Remove the session protection time event to allow the
1057 		 * channel switch. If we got here, we just heard a beacon so
1058 		 * the session protection is not needed anymore anyway.
1059 		 */
1060 		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
1061 	}
1062 
1063 	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
1064 	time_cmd.id_and_color =
1065 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
1066 	time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
1067 	time_cmd.apply_time = cpu_to_le32(apply_time);
1068 	time_cmd.max_frags = TE_V2_FRAG_NONE;
1069 	time_cmd.duration = cpu_to_le32(duration);
1070 	time_cmd.repeat = 1;
1071 	time_cmd.interval = cpu_to_le32(1);
1072 	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
1073 				      TE_V2_ABSENCE);
1074 	if (!apply_time)
1075 		time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
1076 
1077 	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
1078 }
1079 
1080 static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
1081 				       struct iwl_rx_packet *pkt, void *data)
1082 {
1083 	struct iwl_mvm *mvm =
1084 		container_of(notif_wait, struct iwl_mvm, notif_wait);
1085 	struct iwl_mvm_session_prot_notif *resp;
1086 	int resp_len = iwl_rx_packet_payload_len(pkt);
1087 
1088 	if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
1089 		    pkt->hdr.group_id != MAC_CONF_GROUP))
1090 		return true;
1091 
1092 	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
1093 		IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
1094 		return true;
1095 	}
1096 
1097 	resp = (void *)pkt->data;
1098 
	if (!resp->status)
		IWL_ERR(mvm,
			"SESSION_PROTECTION_NOTIF received but not executed\n");
1102 
1103 	return true;
1104 }
1105 
1106 void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
1107 					 struct ieee80211_vif *vif,
1108 					 u32 duration, u32 min_duration,
1109 					 bool wait_for_notif)
1110 {
1111 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1112 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1113 	const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
1114 					 MAC_CONF_GROUP, 0) };
1115 	struct iwl_notification_wait wait_notif;
1116 	struct iwl_mvm_session_prot_cmd cmd = {
1117 		.id_and_color =
1118 			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
1119 							mvmvif->color)),
1120 		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
1121 		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
1122 	};
1123 
1124 	/* The time_event_data.id field is reused to save session
1125 	 * protection's configuration.
1126 	 */
1127 	mvmvif->time_event_data.id = SESSION_PROTECT_CONF_ASSOC;
1128 	cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
1129 
1130 	lockdep_assert_held(&mvm->mutex);
1131 
1132 	spin_lock_bh(&mvm->time_event_lock);
1133 	if (te_data->running &&
1134 	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
1135 		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
1136 			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
1137 		spin_unlock_bh(&mvm->time_event_lock);
1138 
1139 		return;
1140 	}
1141 
1142 	iwl_mvm_te_clear_data(mvm, te_data);
1143 	te_data->duration = le32_to_cpu(cmd.duration_tu);
1144 	spin_unlock_bh(&mvm->time_event_lock);
1145 
1146 	IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
1147 		     le32_to_cpu(cmd.duration_tu));
1148 
1149 	if (!wait_for_notif) {
1150 		if (iwl_mvm_send_cmd_pdu(mvm,
1151 					 iwl_cmd_id(SESSION_PROTECTION_CMD,
1152 						    MAC_CONF_GROUP, 0),
1153 					 0, sizeof(cmd), &cmd)) {
1154 			IWL_ERR(mvm,
1155 				"Couldn't send the SESSION_PROTECTION_CMD\n");
1156 			spin_lock_bh(&mvm->time_event_lock);
1157 			iwl_mvm_te_clear_data(mvm, te_data);
1158 			spin_unlock_bh(&mvm->time_event_lock);
1159 		}
1160 
1161 		return;
1162 	}
1163 
1164 	iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
1165 				   notif, ARRAY_SIZE(notif),
1166 				   iwl_mvm_session_prot_notif, NULL);
1167 
1168 	if (iwl_mvm_send_cmd_pdu(mvm,
1169 				 iwl_cmd_id(SESSION_PROTECTION_CMD,
1170 					    MAC_CONF_GROUP, 0),
1171 				 0, sizeof(cmd), &cmd)) {
1172 		IWL_ERR(mvm,
1173 			"Couldn't send the SESSION_PROTECTION_CMD\n");
1174 		iwl_remove_notification(&mvm->notif_wait, &wait_notif);
1175 	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
1176 					 TU_TO_JIFFIES(100))) {
1177 		IWL_ERR(mvm,
1178 			"Failed to protect session until session protection\n");
1179 	}
1180 }
1181