/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/jiffies.h>
#include <net/mac80211.h>

#include "fw/notif-wait.h"
#include "iwl-trans.h"
#include "fw-api.h"
#include "time-event.h"
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"

/*
 * For the high priority TE use a time event type that has similar priority to
 * the FW's action scan priority.
 */
#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC

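/*
 * Clear a time event's data and unlink it from its list. Must be called
 * with the time_event_lock held; entries that were already cleared
 * (vif == NULL) are left untouched.
 */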
void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
			   struct iwl_mvm_time_event_data *te_data)
{
	lockdep_assert_held(&mvm->time_event_lock);

	if (!te_data || !te_data->vif)
		return;

	list_del(&te_data->list);
	te_data->running = false;
	te_data->uid = 0;
	te_data->id = TE_MAX;
	te_data->vif = NULL;
}

void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);

	/*
	 * Clear the ROC_RUNNING / ROC_AUX_RUNNING status bits.
	 * This will cause the TX path to drop offchannel transmissions.
	 * That would also be done by mac80211, but it is racy, in particular
	 * in the case that the time event actually completed in the firmware
	 * (which is handled in iwl_mvm_te_handle_notif).
	 */
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);

	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is canceled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 * We have to send the command asynchronously since this cannot
	 * be under the mutex for locking reasons, but that's not an
	 * issue as it will have to complete before the next command is
	 * executed, and a new time event means a new command.
	 */
	iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);

	/* Do the same for the P2P device queue (STA) */
	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
		struct iwl_mvm_vif *mvmvif;

		/*
		 * NB: access to this pointer would be racy, but the flush bit
		 * can only be set when we had a P2P-Device VIF, and we have a
		 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
		 * not really racy.
		 */

		if (!WARN_ON(!mvm->p2p_device_vif)) {
			mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
			iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
					  CMD_ASYNC);
		}
	}
}

static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
	/*
	 * Of course, our status bit is just as racy as mac80211, so in
	 * addition, fire off the work struct which will drop all frames
	 * from the hardware queues that made it through the race. First
	 * it will of course synchronize the TX path to make sure that
	 * any *new* TX will be rejected.
	 */
	schedule_work(&mvm->roc_done_wk);
}

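/*
 * Called when the channel switch NoA time event starts: if mac80211
 * reports that the CSA countdown is complete, finish the switch and
 * clear the csa_vif pointer; otherwise keep beaconing on the current
 * channel until the last TBTT.
 */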
static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *csa_vif;

	rcu_read_lock();

	csa_vif = rcu_dereference(mvm->csa_vif);
	if (!csa_vif || !csa_vif->csa_active)
		goto out_unlock;

	IWL_DEBUG_TE(mvm, "CSA NOA started\n");

	/*
	 * The CSA NoA has started, but we still have beacons to
	 * transmit on the current channel, so do nothing here;
	 * the switch will be performed on the last TBTT.
	 */
	if (!ieee80211_csa_is_complete(csa_vif)) {
		IWL_WARN(mvm, "CSA NOA started too early\n");
		goto out_unlock;
	}

	ieee80211_csa_finish(csa_vif);

	rcu_read_unlock();

	RCU_INIT_POINTER(mvm->csa_vif, NULL);

	return;

out_unlock:
	rcu_read_unlock();
}

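/*
 * On a station interface, report a connection loss if the time event
 * ended before we heard a beacon (no DTIM period known yet) or while a
 * post-CSA beacon is still pending. Returns true if a disconnect was
 * triggered.
 */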
static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const char *errmsg)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->type != NL80211_IFTYPE_STATION)
		return false;

	if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
	    vif->bss_conf.dtim_period)
		return false;
	if (errmsg)
		IWL_ERR(mvm, "%s\n", errmsg);

	iwl_mvm_connection_loss(mvm, vif, errmsg);
	return true;
}

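/*
 * Handle the start notification of the channel switch time event:
 * on an AP, kick off the CSA NoA flow; on a station, complete the
 * channel switch (or report a connection loss if the firmware failed
 * to schedule the event).
 */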
static void
iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
			     struct iwl_mvm_time_event_data *te_data,
			     struct iwl_time_event_notif *notif)
{
	struct ieee80211_vif *vif = te_data->vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (!notif->status)
		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");

	switch (te_data->vif->type) {
	case NL80211_IFTYPE_AP:
		if (!notif->status)
			mvmvif->csa_failed = true;
		iwl_mvm_csa_noa_start(mvm);
		break;
	case NL80211_IFTYPE_STATION:
		if (!notif->status) {
			iwl_mvm_connection_loss(mvm, vif,
						"CSA TE failed to start");
			break;
		}
		iwl_mvm_csa_client_absent(mvm, te_data->vif);
		cancel_delayed_work(&mvmvif->csa_work);
		ieee80211_chswitch_done(te_data->vif, true);
		break;
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		break;
	}

	/* we don't need it anymore */
	iwl_mvm_te_clear_data(mvm, te_data);
}

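/*
 * Collect firmware debug data if a configured time event trigger
 * matches this notification's id, action and status.
 */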
static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
				     struct iwl_time_event_notif *notif,
				     struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_time_event *te_trig;
	int i;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
				     ieee80211_vif_to_wdev(te_data->vif),
				     FW_DBG_TRIGGER_TIME_EVENT);
	if (!trig)
		return;

	te_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
		u32 trig_action_bitmap =
			le32_to_cpu(te_trig->time_events[i].action_bitmap);
		u32 trig_status_bitmap =
			le32_to_cpu(te_trig->time_events[i].status_bitmap);

		if (trig_te_id != te_data->id ||
		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"Time event %d Action 0x%x received status: %d",
					te_data->id,
					le32_to_cpu(notif->action),
					le32_to_cpu(notif->status));
		break;
	}
}

/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding to the time event data.
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
				    struct iwl_mvm_time_event_data *te_data,
				    struct iwl_time_event_notif *notif)
{
	lockdep_assert_held(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	/*
	 * The FW sends the start/end time event notifications even for events
	 * that it fails to schedule. This is indicated in the status field of
	 * the notification. This happens in cases where the scheduler cannot
	 * find a schedule that can handle the event (for example requesting
	 * P2P Device discoverability while there are other higher priority
	 * events in the system).
	 */
	if (!le32_to_cpu(notif->status)) {
		const char *msg;

		if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
			msg = "Time Event start notification failure";
		else
			msg = "Time Event end notification failure";

		IWL_DEBUG_TE(mvm, "%s\n", msg);

		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
			iwl_mvm_te_clear_data(mvm, te_data);
			return;
		}
	}

	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
		IWL_DEBUG_TE(mvm,
			     "TE ended - current time %lu, estimated end %lu\n",
			     jiffies, te_data->end_jiffies);

		switch (te_data->vif->type) {
		case NL80211_IFTYPE_P2P_DEVICE:
			ieee80211_remain_on_channel_expired(mvm->hw);
			set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
			iwl_mvm_roc_finished(mvm);
			break;
		case NL80211_IFTYPE_STATION:
			/*
			 * By now, we should have finished association
			 * and know the dtim period.
			 */
			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
				"No beacon heard and the time event is over already...");
			break;
		default:
			break;
		}

		iwl_mvm_te_clear_data(mvm, te_data);
	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
		te_data->running = true;
		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
			iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
			ieee80211_ready_on_channel(mvm->hw);
		} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
			iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
		}
	} else {
		IWL_WARN(mvm, "Got TE with unknown action\n");
	}
}

/*
 * Handle an Aux ROC time event
 */
static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
					   struct iwl_time_event_notif *notif)
{
	struct iwl_mvm_time_event_data *te_data, *tmp;
	bool aux_roc_te = false;

	list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid) {
			aux_roc_te = true;
			break;
		}
	}
	if (!aux_roc_te) /* Not an Aux ROC time event */
		return -EINVAL;

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	IWL_DEBUG_TE(mvm,
		     "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action), le32_to_cpu(notif->status));

	if (!le32_to_cpu(notif->status) ||
	    le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
		/* End TE, notify mac80211 */
		ieee80211_remain_on_channel_expired(mvm->hw);
		iwl_mvm_roc_finished(mvm); /* flush aux queue */
		list_del(&te_data->list); /* remove from list */
		te_data->running = false;
		te_data->vif = NULL;
		te_data->uid = 0;
		te_data->id = TE_MAX;
	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
		te_data->running = true;
		iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
	} else {
		IWL_DEBUG_TE(mvm,
			     "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
			     le32_to_cpu(notif->action));
		return -EINVAL;
	}

	return 0;
}

/*
 * The Rx handler for time event notifications
 */
void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_time_event_notif *notif = (void *)pkt->data;
	struct iwl_mvm_time_event_data *te_data, *tmp;

	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	spin_lock_bh(&mvm->time_event_lock);
	/* This time event is triggered for an Aux ROC request */
	if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
		goto unlock;

	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid)
			iwl_mvm_te_handle_notif(mvm, te_data, notif);
	}
unlock:
	spin_unlock_bh(&mvm->time_event_lock);
}

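/*
 * notif_wait handler for TIME_EVENT_NOTIFICATION: keeps waiting until
 * the notification for our time event's UID arrives, then logs whether
 * the event was actually scheduled.
 */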
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
			     struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_notif *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* te_data->uid is already set in the TIME_EVENT_CMD response */
	if (le32_to_cpu(resp->unique_id) != te_data->uid)
		return false;

	IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
		     te_data->uid);
	if (!resp->status)
		IWL_ERR(mvm,
			"TIME_EVENT_NOTIFICATION received but not executed\n");

	return true;
}

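/*
 * notif_wait handler for the TIME_EVENT_CMD response: stores the unique
 * ID that the firmware assigned to the newly added time event.
 */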
static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
					struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_resp *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* we should never get a response to another TIME_EVENT_CMD here */
	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
		return false;

	te_data->uid = le32_to_cpu(resp->unique_id);
	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
		     te_data->uid);
	return true;
}

static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd *te_cmd)
{
	static const u16 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	spin_lock_bh(&mvm->time_event_lock);
	if (WARN_ON(te_data->id != TE_MAX)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
					    sizeof(*te_cmd), te_cmd);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(ret);

	if (ret) {
 out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}
	return ret;
}

void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration,
			     u32 max_delay, bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
	struct iwl_notification_wait wait_te_notif;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * We don't have enough time left, so cancel the current TE
		 * and issue a new one. Of course it would be better to remove
		 * the old one only when the new one is added, but we don't
		 * care if we are off channel for a bit. All we need to do is
		 * not return before we actually begin to be on the channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time = cpu_to_le32(0);

	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why do we need to set interval = bi if it is not periodic? */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      TE_V2_START_IMMEDIATELY);

	if (!wait_for_notif) {
		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
		return;
	}

	/*
	 * Create a notification_wait for the TIME_EVENT_NOTIFICATION to use
	 * right after we send the time event.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
				   te_notif_response,
				   ARRAY_SIZE(te_notif_response),
				   iwl_mvm_te_notif, te_data);

	/* If TE was sent OK - wait for the notification that started */
	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
		IWL_ERR(mvm, "Failed to add TE to protect session\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
					 TU_TO_JIFFIES(max_delay))) {
		IWL_ERR(mvm, "Failed to protect session until TE\n");
	}
}

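/*
 * Clear the time event data under the lock, returning its UID and
 * whether a removal command still needs to be sent to the firmware
 * (i.e. the event hadn't already ended and been removed).
 */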
static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
					struct iwl_mvm_time_event_data *te_data,
					u32 *uid)
{
	u32 id;

	/*
	 * It is possible that by the time we got to this point the time
	 * event was already removed.
	 */
	spin_lock_bh(&mvm->time_event_lock);

	/* Save time event uid before clearing its data */
	*uid = te_data->uid;
	id = te_data->id;

	/*
	 * The clear_data function handles time events that were already removed.
	 */
	iwl_mvm_te_clear_data(mvm, te_data);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * It is possible that by the time we try to remove it, the time event
	 * has already ended and been removed. In such a case there is no need
	 * to send a removal command.
	 */
	if (id == TE_MAX) {
		IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
		return false;
	}

	return true;
}

/*
 * Explicit request to remove an aux ROC time event. The removal of a time
 * event needs to be synchronized with the flow of a time event's end
 * notification, which also removes the time event from the op mode
 * data structures.
 */
static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
				      struct iwl_mvm_vif *mvmvif,
				      struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_hs20_roc_req aux_cmd = {};
	u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);

	u32 uid;
	int ret;

	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
		return;

	aux_cmd.event_unique_id = cpu_to_le32(uid);
	aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	aux_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
		     le32_to_cpu(aux_cmd.event_unique_id));
	ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
				   len, &aux_cmd);

	if (WARN_ON(ret))
		return;
}

/*
 * Explicit request to remove a time event. The removal of a time event needs to
 * be synchronized with the flow of a time event's end notification, which also
 * removes the time event from the op mode data structures.
 */
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
			       struct iwl_mvm_vif *mvmvif,
			       struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_time_event_cmd time_cmd = {};
	u32 uid;
	int ret;

	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
		return;

	/* When we remove a TE, the UID is to be set in the id field */
	time_cmd.id = cpu_to_le32(uid);
	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(time_cmd), &time_cmd);
	if (WARN_ON(ret))
		return;
}

void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	u32 id;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);
	id = te_data->id;
	spin_unlock_bh(&mvm->time_event_lock);

	if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
		IWL_DEBUG_TE(mvm,
			     "don't remove TE with id=%u (not session protection)\n",
			     id);
		return;
	}

	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}

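/*
 * Schedule a time event for a P2P Device remain-on-channel request.
 * The TE type is chosen based on the ROC type, and the event is allowed
 * to be fragmented and delayed to improve its chances of being
 * scheduled by the firmware.
 */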
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  int duration, enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);
	if (te_data->running) {
		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
		return -EBUSY;
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	time_cmd.apply_time = cpu_to_le32(0);
	time_cmd.interval = cpu_to_le32(1);

	/*
	 * The P2P Device TEs can have lower priority than other events
	 * that are being scheduled by the driver/fw, and thus they might not
	 * be scheduled. To improve the chances of them being scheduled, allow
	 * them to be fragmented, and in addition allow them to be delayed.
	 */
	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      TE_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}

static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
{
	struct iwl_mvm_time_event_data *te_data;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);

	/*
	 * Iterate over the list of time events and find the time event that is
	 * associated with a P2P_DEVICE interface.
	 * This assumes that a P2P_DEVICE interface can have only a single time
	 * event at any given time and that this time event corresponds to a
	 * ROC request.
	 */
	list_for_each_entry(te_data, &mvm->time_event_list, list) {
		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
			goto out;
	}

	/* There can be at most one AUX ROC time event; we just use the
	 * list to simplify/unify the code. Remove it if it exists.
	 */
	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
					   struct iwl_mvm_time_event_data,
					   list);
out:
	spin_unlock_bh(&mvm->time_event_lock);
	return te_data;
}

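/*
 * Clear any active ROC time event's data without sending a removal
 * command to the firmware.
 */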
void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
{
	struct iwl_mvm_time_event_data *te_data;
	u32 uid;

	te_data = iwl_mvm_get_roc_te(mvm);
	if (te_data)
		__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}

void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
{
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_time_event_data *te_data;

	te_data = iwl_mvm_get_roc_te(mvm);
	if (!te_data) {
		IWL_WARN(mvm, "No remain on channel event\n");
		return;
	}

	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);

	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
	} else {
		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
	}

	iwl_mvm_roc_finished(mvm);
}

int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
				struct ieee80211_vif *vif,
				u32 duration, u32 apply_time)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running) {
		u32 id;

		spin_lock_bh(&mvm->time_event_lock);
		id = te_data->id;
		spin_unlock_bh(&mvm->time_event_lock);

		if (id == TE_CHANNEL_SWITCH_PERIOD) {
			IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
			return -EBUSY;
		}

		/*
		 * Remove the session protection time event to allow the
		 * channel switch. If we got here, we just heard a beacon so
		 * the session protection is not needed anymore anyway.
		 */
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
	time_cmd.apply_time = cpu_to_le32(apply_time);
	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_ABSENCE);
	if (!apply_time)
		time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}