/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(C) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(C) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
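
/*
 * TDLS (Tunneled Direct Link Setup) support for the iwlwifi MVM driver:
 * peer accounting and firmware TDLS configuration, session protection for
 * TDLS discovery/setup, and the TDLS channel-switch state machine.
 */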

#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"
#include "iwl-io.h"
#include "iwl-prph.h"

#define TU_TO_US(x) ((x) * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)

void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
					    NL80211_TDLS_TEARDOWN,
					    WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
					    GFP_KERNEL);
	}
}

int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int count = 0;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		if (vif) {
			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			if (mvmsta->vif != vif)
				continue;
		}

		count++;
	}

	return count;
}

static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_rx_packet *pkt;
	struct iwl_tdls_config_res *resp;
	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
	struct iwl_host_cmd cmd = {
		.id = TDLS_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &tdls_cfg_cmd, },
		.len = { sizeof(struct iwl_tdls_config_cmd), },
	};
	struct ieee80211_sta *sta;
	int ret, i, cnt;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	tdls_cfg_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

	/* for now the Tx cmd is empty and unused */

	/* populate TDLS peer data */
	cnt = 0;
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
			continue;

		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
							IWL_MVM_TDLS_FW_TID;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
		tdls_cfg_cmd.sta_info[cnt].is_initiator =
				cpu_to_le32(sta->tdls_initiator ? 1 : 0);

		cnt++;
	}

	tdls_cfg_cmd.tdls_peer_count = cnt;
	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (WARN_ON_ONCE(ret))
		return;

	pkt = cmd.resp_pkt;

	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));

	/* we don't really care about the response at this point */

	iwl_free_resp(&cmd);
}
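
/*
 * Re-evaluate TDLS-related state after a peer was added or removed:
 * device power settings are updated around the first/last TDLS peer,
 * and the firmware peer configuration is refreshed when the ucode
 * advertises TDLS channel-switch support.
 */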
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool sta_added)
{
	int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);

	/* when the first peer joins, send a power update first */
	if (tdls_sta_cnt == 1 && sta_added)
		iwl_mvm_power_update_mac(mvm);

	/*
	 * Configure the FW with TDLS peer info only if the TDLS channel
	 * switch capability is set.
	 * TDLS config data is currently used only in the TDLS channel switch
	 * code. It is also supposed to serve the TDLS buffer station, which
	 * is not yet implemented in the FW.
	 */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH))
		iwl_mvm_tdls_config(mvm, vif);

	/* when the last peer leaves, send a power update last */
	if (tdls_sta_cnt == 0 && !sta_added)
		iwl_mvm_power_update_mac(mvm);
}

void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;

	/* Protect the session to hear the TDLS setup response on the channel */
	mutex_lock(&mvm->mutex);
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
		iwl_mvm_schedule_session_protection(mvm, vif, duration,
						    duration, true);
	else
		iwl_mvm_protect_session(mvm, vif, duration,
					duration, 100, true);
	mutex_unlock(&mvm->mutex);
}

static const char *
iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
	switch (state) {
	case IWL_MVM_TDLS_SW_IDLE:
		return "IDLE";
	case IWL_MVM_TDLS_SW_REQ_SENT:
		return "REQ SENT";
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		return "RESP RECEIVED";
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		return "REQ RECEIVED";
	case IWL_MVM_TDLS_SW_ACTIVE:
		return "ACTIVE";
	}

	return NULL;
}

static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
					 enum iwl_mvm_tdls_cs_state state)
{
	if (mvm->tdls_cs.state == state)
		return;

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
		       iwl_mvm_tdls_cs_state_str(state));
	mvm->tdls_cs.state = state;

	/* we only send requests to our switching peer - update sent time */
	if (state == IWL_MVM_TDLS_SW_REQ_SENT)
		mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm);

	if (state == IWL_MVM_TDLS_SW_IDLE)
		mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
}

void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	unsigned int delay;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	lockdep_assert_held(&mvm->mutex);

	/* can fail sometimes */
	if (!le32_to_cpu(notif->status)) {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
		return;
	}

	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;

	/*
	 * Update state and possibly switch again after this is over (DTIM).
	 * Also convert TU to msec.
	 */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
}
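
/*
 * Check a requested channel-switch action against the current state of
 * the channel-switch state machine and the peer it concerns. Returns 0
 * if the action may proceed, -EBUSY or -EINVAL otherwise.
 */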
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator, u32 timestamp)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/* only allow requests from the same peer */
		if (!same_peer)
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
			 !peer_initiator)
			/*
			 * We received a ch-switch request while an outgoing
			 * one is pending. Allow it if the peer is the link
			 * initiator.
			 */
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_REQ)
			/* wait for idle before sending another request */
			ret = -EBUSY;
		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
			/* we got a stale response - ignore it */
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		/*
		 * we are waiting for the FW to give an "active" notification,
		 * so ignore requests in the meantime
		 */
		ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/*
		 * the only valid request when active is a request to return
		 * to the base channel by the current off-channel peer
		 */
		if (type != TDLS_MOVE_CH || !same_peer)
			ret = -EBUSY;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}
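
/*
 * Validate the requested switch, then build and send TDLS_CHANNEL_SWITCH_CMD
 * to the firmware with the channel, timing and template-frame data, and
 * advance the channel-switch state accordingly. Caller must hold mvm->mutex.
 */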
static int
iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   enum iwl_tdls_channel_switch_type type,
				   const u8 *peer, bool peer_initiator,
				   u8 oper_class,
				   struct cfg80211_chan_def *chandef,
				   u32 timestamp, u16 switch_time,
				   u16 switch_timeout, struct sk_buff *skb,
				   u32 ch_sw_tm_ie)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct iwl_tdls_channel_switch_cmd cmd = {0};
	struct iwl_tdls_channel_switch_cmd_tail *tail =
		iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci);
	u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
					timestamp);
	if (ret)
		return ret;

	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	cmd.switch_type = type;
	tail->timing.frame_timestamp = cpu_to_le32(timestamp);
	tail->timing.switch_time = cpu_to_le32(switch_time);
	tail->timing.switch_timeout = cpu_to_le32(switch_timeout);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, peer);
	if (!sta) {
		rcu_read_unlock();
		ret = -ENOENT;
		goto out;
	}
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);

	if (!chandef) {
		if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
		    mvm->tdls_cs.peer.chandef.chan) {
			/* actually moving to the channel */
			chandef = &mvm->tdls_cs.peer.chandef;
		} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
			   type == TDLS_MOVE_CH) {
			/* we need to return to base channel */
			struct ieee80211_chanctx_conf *chanctx =
					rcu_dereference(vif->chanctx_conf);

			if (WARN_ON_ONCE(!chanctx)) {
				rcu_read_unlock();
				goto out;
			}

			chandef = &chanctx->def;
		}
	}

	if (chandef)
		iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef);

	/* keep quota calculation simple for now - 50% of DTIM for TDLS */
	tail->timing.max_offchan_duration =
			cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
					     vif->bss_conf.beacon_int) / 2);

	/* Switch time is the first element in the switch-timing IE. */
	tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);

	info = IEEE80211_SKB_CB(skb);
	hdr = (void *)skb->data;
	if (info->control.hw_key) {
		if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
			rcu_read_unlock();
			ret = -EINVAL;
			goto out;
		}
		iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd);
	}

	iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info,
			   mvmsta->sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta,
				hdr->frame_control);
	rcu_read_unlock();

	memcpy(tail->frame.data, skb->data, skb->len);

	ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
			ret);
		goto out;
	}

	/* channel switch has started, update state */
	if (type != TDLS_MOVE_CH) {
		mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
		iwl_mvm_tdls_update_cs_state(mvm,
					     type == TDLS_SEND_CHAN_SW_REQ ?
					     IWL_MVM_TDLS_SW_REQ_SENT :
					     IWL_MVM_TDLS_SW_REQ_RCVD);
	} else {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
	}

out:

	/* channel switch failed - we are idle */
	if (ret)
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	return ret;
}
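
/*
 * Delayed work scheduled around channel-switch events: it runs after an
 * active switch has finished or timed out, returns the state machine to
 * IDLE and, if a switching peer is still configured, requests another
 * switch.
 */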
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
	struct iwl_mvm *mvm;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	unsigned int delay;
	int ret;

	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
	mutex_lock(&mvm->mutex);

	/* called after an active channel switch has finished or timed-out */
	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	/* station might be gone, in that case do nothing */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
		goto out;

	sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;
	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr,
						 mvm->tdls_cs.peer.initiator,
						 mvm->tdls_cs.peer.op_class,
						 &mvm->tdls_cs.peer.chandef,
						 0, 0, 0,
						 mvm->tdls_cs.peer.skb,
						 mvm->tdls_cs.peer.ch_sw_tm_ie);
	if (ret)
		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

	/* retry after a DTIM if we failed sending now */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
out:
	mutex_unlock(&mvm->mutex);
}
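
/*
 * Start a TDLS channel switch with the given peer (mac80211
 * tdls_channel_switch() callback). Only a single peer may be in a
 * channel switch at any given time.
 */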
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u8 oper_class,
			    struct cfg80211_chan_def *chandef,
			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta;
	unsigned int delay;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
		       sta->addr, chandef->chan->center_freq, chandef->width);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm,
			       "Existing peer. Can't start switch with %pM\n",
			       sta->addr);
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr, sta->tdls_initiator,
						 oper_class, chandef, 0, 0, 0,
						 tmpl_skb, ch_sw_tm_ie);
	if (ret)
		goto out;

	/*
	 * Mark the peer as "in tdls switch" for this vif. We only allow a
	 * single such peer per vif.
	 */
	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!mvm->tdls_cs.peer.skb) {
		ret = -ENOMEM;
		goto out;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
	mvm->tdls_cs.peer.chandef = *chandef;
	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
	mvm->tdls_cs.peer.op_class = oper_class;
	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

	/*
	 * Wait for 2 DTIM periods before attempting the next switch. The next
	 * switch will be made sooner if the current one completes before that.
	 */
	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
			     vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

/*
 * Stop an ongoing TDLS channel switch with the given peer (mac80211
 * tdls_cancel_channel_switch() callback).
 */
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}

/*
 * Handle a channel-switch request/response frame received from a TDLS peer
 * (mac80211 tdls_recv_channel_switch() callback) and program the firmware
 * with the corresponding action.
 */
void
iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_tdls_ch_sw_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	enum iwl_tdls_channel_switch_type type;
	unsigned int delay;
	const char *action_str =
		params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
		"REQ" : "RESP";

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm,
		       "Received TDLS ch switch action %s from %pM status %d\n",
		       action_str, params->sta->addr, params->status);

	/*
	 * we got a non-zero status from a peer we were switching to - move to
	 * the idle state and retry again later
	 */
	if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
	    params->status != 0 &&
	    mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *cur_sta;

		/* make sure it's the same peer */
		cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (cur_sta == params->sta) {
			iwl_mvm_tdls_update_cs_state(mvm,
						     IWL_MVM_TDLS_SW_IDLE);
			goto retry;
		}
	}

	type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
	       TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;

	iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
					   params->sta->tdls_initiator, 0,
					   params->chandef, params->timestamp,
					   params->switch_time,
					   params->switch_timeout,
					   params->tmpl_skb,
					   params->ch_sw_tm_ie);

retry:
	/* register a timeout in case we don't succeed in switching */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));
	mutex_unlock(&mvm->mutex);
}