/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/jiffies.h>
#include <net/mac80211.h>

#include "fw/notif-wait.h"
#include "iwl-trans.h"
#include "fw-api.h"
#include "time-event.h"
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"

/*
 * For the high priority TE use a time event type that has similar priority to
 * the FW's action scan priority.
 */
#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC

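/*
 * Clear a time event's data and unlink it from the time event list.
 * Must be called with the time_event_lock held; an entry that was
 * already cleared (te_data->vif == NULL) is left untouched.
 */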
void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
			   struct iwl_mvm_time_event_data *te_data)
{
	lockdep_assert_held(&mvm->time_event_lock);

	if (!te_data->vif)
		return;

	list_del(&te_data->list);
	te_data->running = false;
	te_data->uid = 0;
	te_data->id = TE_MAX;
	te_data->vif = NULL;
}

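/*
 * Deferred work that runs once a remain on channel session is over:
 * it clears the ROC status bits, synchronizes the TX path and flushes
 * the queues that carried the offchannel frames.
 */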
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
	u32 queues = 0;

	/*
	 * Clear the ROC_RUNNING / ROC_AUX_RUNNING status bits.
	 * This will cause the TX path to drop offchannel transmissions.
	 * That would also be done by mac80211, but it is racy, in particular
	 * in the case that the time event actually completed in the firmware
	 * (which is handled in iwl_mvm_te_handle_notif).
	 */
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
		queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
	}
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
		queues |= BIT(mvm->aux_queue);
		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
	}

	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is canceled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 * We have to send the command asynchronously since this cannot
	 * be under the mutex for locking reasons, but that's not an
	 * issue as it will have to complete before the next command is
	 * executed, and a new time event means a new command.
	 */
	iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);

	/* Do the same for the P2P device queue (STA) */
	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
		struct iwl_mvm_vif *mvmvif;

		/*
		 * NB: access to this pointer would be racy, but the flush bit
		 * can only be set when we had a P2P-Device VIF, and we have a
		 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
		 * not really racy.
		 */

		if (!WARN_ON(!mvm->p2p_device_vif)) {
			mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
			iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
					  CMD_ASYNC);
		}
	}
}

static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
	/*
	 * Our status bit is just as racy as mac80211's, so in addition,
	 * fire off the work struct which will drop all frames from the
	 * hardware queues that made it through the race. First it will
	 * synchronize the TX path to make sure that any *new* TX will
	 * be rejected.
	 */
	schedule_work(&mvm->roc_done_wk);
}

static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *csa_vif;

	rcu_read_lock();

	csa_vif = rcu_dereference(mvm->csa_vif);
	if (!csa_vif || !csa_vif->csa_active)
		goto out_unlock;

	IWL_DEBUG_TE(mvm, "CSA NOA started\n");

	/*
	 * CSA NoA is started but we still have beacons to
	 * transmit on the current channel.
	 * So we just do nothing here and the switch
	 * will be performed on the last TBTT.
	 */
	if (!ieee80211_csa_is_complete(csa_vif)) {
		IWL_WARN(mvm, "CSA NOA started too early\n");
		goto out_unlock;
	}

	ieee80211_csa_finish(csa_vif);

	rcu_read_unlock();

	RCU_INIT_POINTER(mvm->csa_vif, NULL);

	return;

out_unlock:
	rcu_read_unlock();
}

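/*
 * Trigger a connection loss if a station's time event failed or ended
 * while the interface is not yet associated or doesn't know its DTIM
 * period. Returns true if a connection loss was reported.
 */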
static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const char *errmsg)
{
	if (vif->type != NL80211_IFTYPE_STATION)
		return false;
	if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
		return false;
	if (errmsg)
		IWL_ERR(mvm, "%s\n", errmsg);

	iwl_mvm_connection_loss(mvm, vif, errmsg);
	return true;
}

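/*
 * Handle the start notification of the channel switch time event: on an
 * AP interface this kicks off the CSA NoA flow, on a station it marks
 * the client as absent and completes the channel switch (or reports a
 * connection loss if the event failed to start).
 */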
static void
iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
			     struct iwl_mvm_time_event_data *te_data,
			     struct iwl_time_event_notif *notif)
{
	struct ieee80211_vif *vif = te_data->vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (!notif->status)
		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");

	switch (te_data->vif->type) {
	case NL80211_IFTYPE_AP:
		if (!notif->status)
			mvmvif->csa_failed = true;
		iwl_mvm_csa_noa_start(mvm);
		break;
	case NL80211_IFTYPE_STATION:
		if (!notif->status) {
			iwl_mvm_connection_loss(mvm, vif,
						"CSA TE failed to start");
			break;
		}
		iwl_mvm_csa_client_absent(mvm, te_data->vif);
		ieee80211_chswitch_done(te_data->vif, true);
		break;
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		break;
	}

	/* we don't need it anymore */
	iwl_mvm_te_clear_data(mvm, te_data);
}

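/*
 * Check whether the notification matches one of the configured time
 * event debug triggers and, if it does, collect firmware debug data.
 */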
static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
				     struct iwl_time_event_notif *notif,
				     struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_time_event *te_trig;
	int i;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
	te_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
					   ieee80211_vif_to_wdev(te_data->vif),
					   trig))
		return;

	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
		u32 trig_action_bitmap =
			le32_to_cpu(te_trig->time_events[i].action_bitmap);
		u32 trig_status_bitmap =
			le32_to_cpu(te_trig->time_events[i].status_bitmap);

		if (trig_te_id != te_data->id ||
		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"Time event %d Action 0x%x received status: %d",
					te_data->id,
					le32_to_cpu(notif->action),
					le32_to_cpu(notif->status));
		break;
	}
}

/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding to the time event data.
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
				    struct iwl_mvm_time_event_data *te_data,
				    struct iwl_time_event_notif *notif)
{
	lockdep_assert_held(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	/*
	 * The FW sends the start/end time event notifications even for events
	 * that it fails to schedule. This is indicated in the status field of
	 * the notification. This happens in cases where the scheduler cannot
	 * find a schedule that can handle the event (for example, requesting
	 * P2P Device discoverability while there are other, higher priority
	 * events in the system).
	 */
	if (!le32_to_cpu(notif->status)) {
		const char *msg;

		if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
			msg = "Time Event start notification failure";
		else
			msg = "Time Event end notification failure";

		IWL_DEBUG_TE(mvm, "%s\n", msg);

		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
			iwl_mvm_te_clear_data(mvm, te_data);
			return;
		}
	}

	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
		IWL_DEBUG_TE(mvm,
			     "TE ended - current time %lu, estimated end %lu\n",
			     jiffies, te_data->end_jiffies);

		switch (te_data->vif->type) {
		case NL80211_IFTYPE_P2P_DEVICE:
			ieee80211_remain_on_channel_expired(mvm->hw);
			iwl_mvm_roc_finished(mvm);
			break;
		case NL80211_IFTYPE_STATION:
			/*
			 * By now, we should have finished association
			 * and know the dtim period.
			 */
			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
				"No association and the time event is over already...");
			break;
		default:
			break;
		}

		iwl_mvm_te_clear_data(mvm, te_data);
	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
		te_data->running = true;
		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
			iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
			ieee80211_ready_on_channel(mvm->hw);
		} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
			iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
		}
	} else {
		IWL_WARN(mvm, "Got TE with unknown action\n");
	}
}

/*
 * Handle an Aux ROC time event
 */
static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
					   struct iwl_time_event_notif *notif)
{
	struct iwl_mvm_time_event_data *te_data, *tmp;
	bool aux_roc_te = false;

	list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid) {
			aux_roc_te = true;
			break;
		}
	}
	if (!aux_roc_te) /* Not an Aux ROC time event */
		return -EINVAL;

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	IWL_DEBUG_TE(mvm,
		     "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action), le32_to_cpu(notif->status));

	if (!le32_to_cpu(notif->status) ||
	    le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
		/* End TE, notify mac80211 */
		ieee80211_remain_on_channel_expired(mvm->hw);
		iwl_mvm_roc_finished(mvm); /* flush aux queue */
		list_del(&te_data->list); /* remove from list */
		te_data->running = false;
		te_data->vif = NULL;
		te_data->uid = 0;
		te_data->id = TE_MAX;
	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
		te_data->running = true;
		iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
	} else {
		IWL_DEBUG_TE(mvm,
			     "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
			     le32_to_cpu(notif->action));
		return -EINVAL;
	}

	return 0;
}

/*
 * The Rx handler for time event notifications
 */
void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_time_event_notif *notif = (void *)pkt->data;
	struct iwl_mvm_time_event_data *te_data, *tmp;

	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	spin_lock_bh(&mvm->time_event_lock);
	/* If this is an Aux ROC time event, it is handled there */
	if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
		goto unlock;

	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid)
			iwl_mvm_te_handle_notif(mvm, te_data, notif);
	}
unlock:
	spin_unlock_bh(&mvm->time_event_lock);
}

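/*
 * Notification-wait handler for TIME_EVENT_NOTIFICATION: matches the
 * notification against the time event we are waiting for (by UID) and
 * warns if the firmware did not actually execute it.
 */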
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
			     struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_notif *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* te_data->uid is already set in the TIME_EVENT_CMD response */
	if (le32_to_cpu(resp->unique_id) != te_data->uid)
		return false;

	IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
		     te_data->uid);
	if (!resp->status)
		IWL_ERR(mvm,
			"TIME_EVENT_NOTIFICATION received but not executed\n");

	return true;
}

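/*
 * Notification-wait handler for the TIME_EVENT_CMD response: stores the
 * unique ID that the firmware assigned to the newly added time event.
 */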
static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
					struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_resp *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* we should never get a response to another TIME_EVENT_CMD here */
	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
		return false;

	te_data->uid = le32_to_cpu(resp->unique_id);
	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
		     te_data->uid);
	return true;
}

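/*
 * Add a new time event: link the time event data to the vif, send
 * TIME_EVENT_CMD and pick up the firmware-assigned UID from the command
 * response via a notification wait.
 */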
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd *te_cmd)
{
	static const u16 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	spin_lock_bh(&mvm->time_event_lock);
	if (WARN_ON(te_data->id != TE_MAX)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(*te_cmd), te_cmd);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(ret);

	if (ret) {
 out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}
	return ret;
}

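/*
 * Session protection: make sure a TE_BSS_STA_AGGRESSIVE_ASSOC time event
 * covers at least min_duration from now. If the currently running event
 * doesn't leave enough time, it is removed and a new one is added.
 */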
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration,
			     u32 max_delay, bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
	struct iwl_notification_wait wait_te_notif;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * We don't have enough time left, so cancel the current TE
		 * and issue a new one. It would be better to remove the old
		 * one only when the new one is added, but we don't care if
		 * we are off channel for a bit. All we need to do is not to
		 * return before we actually begin to be on the channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time = cpu_to_le32(0);

	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why do we need interval = bi if it is not periodic? */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      T2_V2_START_IMMEDIATELY);

	if (!wait_for_notif) {
		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
		return;
	}

	/*
	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
	 * right after we send the time event
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
				   te_notif_response,
				   ARRAY_SIZE(te_notif_response),
				   iwl_mvm_te_notif, te_data);

	/* If TE was sent OK - wait for the notification that started */
	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
		IWL_ERR(mvm, "Failed to add TE to protect session\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
					 TU_TO_JIFFIES(max_delay))) {
		IWL_ERR(mvm, "Failed to protect session until TE\n");
	}
}

static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
					struct iwl_mvm_time_event_data *te_data,
					u32 *uid)
{
	u32 id;

	/*
	 * It is possible that by the time we got to this point the time
	 * event was already removed.
	 */
	spin_lock_bh(&mvm->time_event_lock);

	/* Save time event uid before clearing its data */
	*uid = te_data->uid;
	id = te_data->id;

	/*
	 * The clear_data function handles time events that were already removed
	 */
	iwl_mvm_te_clear_data(mvm, te_data);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * It is possible that by the time we try to remove it, the time event
	 * has already ended and been removed. In such a case there is no need
	 * to send a removal command.
	 */
	if (id == TE_MAX) {
		IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
		return false;
	}

	return true;
}

/*
 * Explicit request to remove an Aux ROC time event. The removal of a time
 * event needs to be synchronized with the flow of a time event's end
 * notification, which also removes the time event from the op mode
 * data structures.
 */
static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
				      struct iwl_mvm_vif *mvmvif,
				      struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_hs20_roc_req aux_cmd = {};
	u32 uid;
	int ret;

	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
		return;

	aux_cmd.event_unique_id = cpu_to_le32(uid);
	aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	aux_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
		     le32_to_cpu(aux_cmd.event_unique_id));
	ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
				   sizeof(aux_cmd), &aux_cmd);

	if (WARN_ON(ret))
		return;
}

/*
 * Explicit request to remove a time event. The removal of a time event needs to
 * be synchronized with the flow of a time event's end notification, which also
 * removes the time event from the op mode data structures.
 */
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
			       struct iwl_mvm_vif *mvmvif,
			       struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_time_event_cmd time_cmd = {};
	u32 uid;
	int ret;

	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
		return;

	/* When we remove a TE, the UID is to be set in the id field */
	time_cmd.id = cpu_to_le32(uid);
	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(time_cmd), &time_cmd);
	if (WARN_ON(ret))
		return;
}

void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	u32 id;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);
	id = te_data->id;
	spin_unlock_bh(&mvm->time_event_lock);

	if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
		IWL_DEBUG_TE(mvm,
			     "don't remove TE with id=%u (not session protection)\n",
			     id);
		return;
	}

	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}

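/*
 * Start a P2P Device remain on channel session by scheduling a ROC time
 * event for the requested duration. The time event type depends on the
 * ROC type requested by mac80211.
 */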
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  int duration, enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);
	if (te_data->running) {
		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
		return -EBUSY;
	}

	/*
	 * Flush the done work, just in case it's still pending, so that
	 * the work it does can complete and we can accept new frames.
	 */
	flush_work(&mvm->roc_done_wk);

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	time_cmd.apply_time = cpu_to_le32(0);
	time_cmd.interval = cpu_to_le32(1);

	/*
	 * The P2P Device TEs can have lower priority than other events
	 * that are being scheduled by the driver/fw, and thus they might not
	 * be scheduled. To improve the chances of them being scheduled, allow
	 * them to be fragmented, and in addition allow them to be delayed.
	 */
	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      T2_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}

static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
{
	struct iwl_mvm_time_event_data *te_data;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);

	/*
	 * Iterate over the list of time events and find the time event that is
	 * associated with a P2P_DEVICE interface.
	 * This assumes that a P2P_DEVICE interface can have only a single time
	 * event at any given time, and this time event corresponds to a ROC
	 * request.
	 */
	list_for_each_entry(te_data, &mvm->time_event_list, list) {
		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
			goto out;
	}

	/* There can only be at most one AUX ROC time event, we just use the
	 * list to simplify/unify code. Return it if it exists.
	 */
	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
					   struct iwl_mvm_time_event_data,
					   list);
out:
	spin_unlock_bh(&mvm->time_event_lock);
	return te_data;
}

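/*
 * Clear any pending ROC time event data without sending a removal
 * command to the firmware.
 */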
void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
{
	struct iwl_mvm_time_event_data *te_data;
	u32 uid;

	te_data = iwl_mvm_get_roc_te(mvm);
	if (te_data)
		__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}

void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
{
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_time_event_data *te_data;

	te_data = iwl_mvm_get_roc_te(mvm);
	if (!te_data) {
		IWL_WARN(mvm, "No remain on channel event\n");
		return;
	}

	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);

	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
	} else {
		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
	}

	iwl_mvm_roc_finished(mvm);
}

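/*
 * Schedule the channel switch (absence) time event, starting at
 * apply_time. If a session protection time event is still running, it
 * is removed first to let the channel switch happen.
 */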
int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
				struct ieee80211_vif *vif,
				u32 duration, u32 apply_time)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running) {
		u32 id;

		spin_lock_bh(&mvm->time_event_lock);
		id = te_data->id;
		spin_unlock_bh(&mvm->time_event_lock);

		if (id == TE_CHANNEL_SWITCH_PERIOD) {
			IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
			return -EBUSY;
		}

		/*
		 * Remove the session protection time event to allow the
		 * channel switch. If we got here, we just heard a beacon so
		 * the session protection is not needed anymore anyway.
		 */
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
	time_cmd.apply_time = cpu_to_le32(apply_time);
	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_ABSENCE);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}