/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(C) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(C) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"
#include "iwl-io.h"
#include "iwl-prph.h"

#define TU_TO_US(x) ((x) * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)

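/*
 * Request a teardown of every TDLS peer currently in the FW station table.
 * The teardown itself is requested through mac80211.
 */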
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
				NL80211_TDLS_TEARDOWN,
				WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
				GFP_KERNEL);
	}
}

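/*
 * Count the TDLS stations in the FW station table. If vif is non-NULL, only
 * peers belonging to that vif are counted.
 */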
int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int count = 0;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		if (vif) {
			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			if (mvmsta->vif != vif)
				continue;
		}

		count++;
	}

	return count;
}

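/*
 * Send the current set of TDLS peers to the FW via TDLS_CONFIG_CMD. The Tx
 * command part is left empty for now and the response is only checked for
 * its expected size.
 */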
static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_rx_packet *pkt;
	struct iwl_tdls_config_res *resp;
	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
	struct iwl_host_cmd cmd = {
		.id = TDLS_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &tdls_cfg_cmd, },
		.len = { sizeof(struct iwl_tdls_config_cmd), },
	};
	struct ieee80211_sta *sta;
	int ret, i, cnt;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	tdls_cfg_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

	/* for now the Tx cmd is empty and unused */

	/* populate TDLS peer data */
	cnt = 0;
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
			continue;

		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
							IWL_MVM_TDLS_FW_TID;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
		tdls_cfg_cmd.sta_info[cnt].is_initiator =
				cpu_to_le32(sta->tdls_initiator ? 1 : 0);

		cnt++;
	}

	tdls_cfg_cmd.tdls_peer_count = cnt;
	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (WARN_ON_ONCE(ret))
		return;

	pkt = cmd.resp_pkt;

	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));

	/* we don't really care about the response at this point */

	iwl_free_resp(&cmd);
}

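/*
 * Recalculate the TDLS state after a peer was added or removed: update the
 * MAC power settings around the first/last peer and, if the FW supports TDLS
 * channel switching, refresh its peer configuration.
 */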
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool sta_added)
{
	int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);

	/* when the first peer joins, send a power update first */
	if (tdls_sta_cnt == 1 && sta_added)
		iwl_mvm_power_update_mac(mvm);

	/*
	 * Configure the FW with TDLS peer info only if the TDLS channel
	 * switch capability is set.
	 * TDLS config data is currently used only in the TDLS channel switch
	 * code. It is also supposed to serve the TDLS buffer station, which
	 * is not yet implemented in the FW.
	 */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH))
		iwl_mvm_tdls_config(mvm, vif);

	/* when the last peer leaves, send a power update last */
	if (tdls_sta_cnt == 0 && !sta_added)
		iwl_mvm_power_update_mac(mvm);
}

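/*
 * Protect the session on the base channel for two DTIM intervals so the TDLS
 * setup response sent by the peer is not missed.
 */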
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;

	/*
	 * iwl_mvm_protect_session() reads directly from the device
	 * (the system time), so make sure it is available.
	 */
	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
		return;

	mutex_lock(&mvm->mutex);
	/* Protect the session to hear the TDLS setup response on the channel */
	iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
}

static const char *
iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
	switch (state) {
	case IWL_MVM_TDLS_SW_IDLE:
		return "IDLE";
	case IWL_MVM_TDLS_SW_REQ_SENT:
		return "REQ SENT";
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		return "RESP RECEIVED";
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		return "REQ RECEIVED";
	case IWL_MVM_TDLS_SW_ACTIVE:
		return "ACTIVE";
	}

	return NULL;
}

static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
					 enum iwl_mvm_tdls_cs_state state)
{
	if (mvm->tdls_cs.state == state)
		return;

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
		       iwl_mvm_tdls_cs_state_str(state));
	mvm->tdls_cs.state = state;

	/* we only send requests to our switching peer - update sent time */
	if (state == IWL_MVM_TDLS_SW_REQ_SENT)
		mvm->tdls_cs.peer.sent_timestamp =
			iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);

	if (state == IWL_MVM_TDLS_SW_IDLE)
		mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
}

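/*
 * Handle the TDLS channel switch notification from the FW. On failure go
 * back to idle; on success mark the switch active and re-arm the delayed
 * work so we switch again after the next DTIM.
 */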
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	unsigned int delay;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	lockdep_assert_held(&mvm->mutex);

	/* a zero status means the channel switch failed */
	if (!le32_to_cpu(notif->status)) {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
		return;
	}

	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;

	/*
	 * Update state and possibly switch again after this is over (DTIM).
	 * Also convert TU to msec.
	 */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
}

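/*
 * Check whether the requested channel switch action is valid given the
 * current state of the switch state machine and the peer it concerns.
 * Returns 0 if the action is allowed, a negative error otherwise.
 */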
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator, u32 timestamp)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/* only allow requests from the same peer */
		if (!same_peer)
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
			 !peer_initiator)
			/*
			 * We received a ch-switch request while an outgoing
			 * one is pending. Allow it if the peer is the link
			 * initiator.
			 */
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_REQ)
			/* wait for idle before sending another request */
			ret = -EBUSY;
		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
			/* we got a stale response - ignore it */
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		/*
		 * we are waiting for the FW to give an "active" notification,
		 * so ignore requests in the meantime
		 */
		ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/*
		 * the only valid request when active is a request to return
		 * to the base channel by the current off-channel peer
		 */
		if (type != TDLS_MOVE_CH || !same_peer)
			ret = -EBUSY;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}

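/*
 * Validate the requested action and send TDLS_CHANNEL_SWITCH_CMD to the FW,
 * including the switch timing, the target channel and the template frame to
 * transmit to the peer, then advance the channel switch state machine.
 */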
static int
iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   enum iwl_tdls_channel_switch_type type,
				   const u8 *peer, bool peer_initiator,
				   u8 oper_class,
				   struct cfg80211_chan_def *chandef,
				   u32 timestamp, u16 switch_time,
				   u16 switch_timeout, struct sk_buff *skb,
				   u32 ch_sw_tm_ie)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct iwl_tdls_channel_switch_cmd cmd = {0};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
					timestamp);
	if (ret)
		return ret;

	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	cmd.switch_type = type;
	cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
	cmd.timing.switch_time = cpu_to_le32(switch_time);
	cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, peer);
	if (!sta) {
		rcu_read_unlock();
		ret = -ENOENT;
		goto out;
	}
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);

	if (!chandef) {
		if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
		    mvm->tdls_cs.peer.chandef.chan) {
			/* actually moving to the channel */
			chandef = &mvm->tdls_cs.peer.chandef;
		} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
			   type == TDLS_MOVE_CH) {
			/* we need to return to base channel */
			struct ieee80211_chanctx_conf *chanctx =
					rcu_dereference(vif->chanctx_conf);

			if (WARN_ON_ONCE(!chanctx)) {
				rcu_read_unlock();
				ret = -EINVAL;
				goto out;
			}

			chandef = &chanctx->def;
		}
	}

	if (chandef) {
		cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
			       PHY_BAND_24 : PHY_BAND_5);
		cmd.ci.channel = chandef->chan->hw_value;
		cmd.ci.width = iwl_mvm_get_channel_width(chandef);
		cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
	}

	/* keep quota calculation simple for now - 50% of DTIM for TDLS */
	cmd.timing.max_offchan_duration =
			cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
					     vif->bss_conf.beacon_int) / 2);

	/* Switch time is the first element in the switch-timing IE. */
	cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);

	info = IEEE80211_SKB_CB(skb);
	hdr = (void *)skb->data;
	if (info->control.hw_key) {
		if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
			rcu_read_unlock();
			ret = -EINVAL;
			goto out;
		}
		iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
	}

	iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
			   mvmsta->sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
				hdr->frame_control);
	rcu_read_unlock();

	memcpy(cmd.frame.data, skb->data, skb->len);

	ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
				   sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
			ret);
		goto out;
	}

	/* channel switch has started, update state */
	if (type != TDLS_MOVE_CH) {
		mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
		iwl_mvm_tdls_update_cs_state(mvm,
					     type == TDLS_SEND_CHAN_SW_REQ ?
					     IWL_MVM_TDLS_SW_REQ_SENT :
					     IWL_MVM_TDLS_SW_REQ_RCVD);
	} else {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
	}

out:

	/* channel switch failed - we are idle */
	if (ret)
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	return ret;
}

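/*
 * Delayed work that runs after an active channel switch has finished or
 * timed out. Return to idle and, if the configured peer is still around,
 * start the next switch with it.
 */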
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
	struct iwl_mvm *mvm;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	unsigned int delay;
	int ret;

	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
	mutex_lock(&mvm->mutex);

	/* called after an active channel switch has finished or timed-out */
	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	/* station might be gone, in that case do nothing */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
		goto out;

	sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;
	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr,
						 mvm->tdls_cs.peer.initiator,
						 mvm->tdls_cs.peer.op_class,
						 &mvm->tdls_cs.peer.chandef,
						 0, 0, 0,
						 mvm->tdls_cs.peer.skb,
						 mvm->tdls_cs.peer.ch_sw_tm_ie);
	if (ret)
		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

	/* retry after a DTIM if we failed sending now */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
out:
	mutex_unlock(&mvm->mutex);
}

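/*
 * Start periodic TDLS channel switching with a peer. Only a single peer is
 * supported at a time; the template frame is copied so the switch can be
 * re-sent from the delayed work.
 */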
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u8 oper_class,
			    struct cfg80211_chan_def *chandef,
			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta;
	unsigned int delay;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
		       sta->addr, chandef->chan->center_freq, chandef->width);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm,
			       "Existing peer. Can't start switch with %pM\n",
			       sta->addr);
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr, sta->tdls_initiator,
						 oper_class, chandef, 0, 0, 0,
						 tmpl_skb, ch_sw_tm_ie);
	if (ret)
		goto out;

	/*
	 * Mark the peer as "in tdls switch" for this vif. We only allow a
	 * single such peer per vif.
	 */
	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!mvm->tdls_cs.peer.skb) {
		ret = -ENOMEM;
		goto out;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
	mvm->tdls_cs.peer.chandef = *chandef;
	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
	mvm->tdls_cs.peer.op_class = oper_class;
	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

	/*
	 * Wait for 2 DTIM periods before attempting the next switch. The next
	 * switch will be made sooner if the current one completes before that.
	 */
	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
			     vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

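/*
 * Stop TDLS channel switching with the given peer. If a switch with this
 * peer is still in progress, wait a DTIM interval so the PHY is back on the
 * base channel before flushing the channel switch work.
 */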
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}

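/*
 * Handle a TDLS channel switch request/response frame received from a peer.
 * A failed response from the peer we were switching to moves the state
 * machine back to idle; in any case a timeout is armed in case the switch
 * does not complete.
 */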
void
iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_tdls_ch_sw_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	enum iwl_tdls_channel_switch_type type;
	unsigned int delay;
	const char *action_str =
		params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
		"REQ" : "RESP";

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm,
		       "Received TDLS ch switch action %s from %pM status %d\n",
		       action_str, params->sta->addr, params->status);

	/*
	 * we got a non-zero status from a peer we were switching to - move to
	 * the idle state and retry again later
	 */
	if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
	    params->status != 0 &&
	    mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *cur_sta;

		/* make sure it's the same peer */
		cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (cur_sta == params->sta) {
			iwl_mvm_tdls_update_cs_state(mvm,
						     IWL_MVM_TDLS_SW_IDLE);
			goto retry;
		}
	}

	type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
	       TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;

	iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
					   params->sta->tdls_initiator, 0,
					   params->chandef, params->timestamp,
					   params->switch_time,
					   params->switch_timeout,
					   params->tmpl_skb,
					   params->ch_sw_tm_ie);

retry:
	/* register a timeout in case we don't succeed in switching */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));
	mutex_unlock(&mvm->mutex);
}