/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"
#include "iwl-io.h"
#include "iwl-prph.h"

#define TU_TO_US(x) (x * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)

void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
					    NL80211_TDLS_TEARDOWN,
					    WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
					    GFP_KERNEL);
	}
}

int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int count = 0;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		if (vif) {
			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			if (mvmsta->vif != vif)
				continue;
		}

		count++;
	}

	return count;
}

static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_rx_packet *pkt;
	struct iwl_tdls_config_res *resp;
	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
	struct iwl_host_cmd cmd = {
		.id = TDLS_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &tdls_cfg_cmd, },
		.len = { sizeof(struct iwl_tdls_config_cmd), },
	};
	struct ieee80211_sta *sta;
	int ret, i, cnt;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	tdls_cfg_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

	/* for now the Tx cmd is empty and unused */

	/* populate TDLS peer data */
	cnt = 0;
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
			continue;

		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
							IWL_MVM_TDLS_FW_TID;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
		tdls_cfg_cmd.sta_info[cnt].is_initiator =
				cpu_to_le32(sta->tdls_initiator ?
					    1 : 0);

		cnt++;
	}

	tdls_cfg_cmd.tdls_peer_count = cnt;
	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (WARN_ON_ONCE(ret))
		return;

	pkt = cmd.resp_pkt;

	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));

	/* we don't really care about the response at this point */

	iwl_free_resp(&cmd);
}

void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool sta_added)
{
	int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);

	/* when the first peer joins, send a power update first */
	if (tdls_sta_cnt == 1 && sta_added)
		iwl_mvm_power_update_mac(mvm);

	/* configure the FW with TDLS peer info */
	iwl_mvm_tdls_config(mvm, vif);

	/* when the last peer leaves, send a power update last */
	if (tdls_sta_cnt == 0 && !sta_added)
		iwl_mvm_power_update_mac(mvm);
}

void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;

	/*
	 * iwl_mvm_protect_session() reads directly from the device
	 * (the system time), so make sure it is available.
	 */
	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
		return;

	mutex_lock(&mvm->mutex);
	/* Protect the session to hear the TDLS setup response on the channel */
	iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
}

static const char *
iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
	switch (state) {
	case IWL_MVM_TDLS_SW_IDLE:
		return "IDLE";
	case IWL_MVM_TDLS_SW_REQ_SENT:
		return "REQ SENT";
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		return "RESP RECEIVED";
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		return "REQ RECEIVED";
	case IWL_MVM_TDLS_SW_ACTIVE:
		return "ACTIVE";
	}

	return NULL;
}

static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
					 enum iwl_mvm_tdls_cs_state state)
{
	if (mvm->tdls_cs.state == state)
		return;

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
		       iwl_mvm_tdls_cs_state_str(state));
	mvm->tdls_cs.state = state;

	/* we only send requests to our switching peer - update sent time */
	if (state == IWL_MVM_TDLS_SW_REQ_SENT)
		mvm->tdls_cs.peer.sent_timestamp =
			iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);

	if (state == IWL_MVM_TDLS_SW_IDLE)
		mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
}

void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	unsigned int delay;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	lockdep_assert_held(&mvm->mutex);

	/* can fail sometimes */
	if (!le32_to_cpu(notif->status)) {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
		return;
	}

	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;

	/*
	 * Update state and possibly switch again after this is over (DTIM).
	 * Also convert TU to msec.
	 */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
}

static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator, u32 timestamp)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/* only allow requests from the same peer */
		if (!same_peer)
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
			 !peer_initiator)
			/*
			 * We received a ch-switch request while an outgoing
			 * one is pending. Allow it if the peer is the link
			 * initiator.
			 */
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_REQ)
			/* wait for idle before sending another request */
			ret = -EBUSY;
		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
			/* we got a stale response - ignore it */
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		/*
		 * we are waiting for the FW to give an "active" notification,
		 * so ignore requests in the meantime
		 */
		ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/*
		 * the only valid request when active is a request to return
		 * to the base channel by the current off-channel peer
		 */
		if (type != TDLS_MOVE_CH || !same_peer)
			ret = -EBUSY;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}

static int
iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   enum iwl_tdls_channel_switch_type type,
				   const u8 *peer, bool peer_initiator,
				   u8 oper_class,
				   struct cfg80211_chan_def *chandef,
				   u32 timestamp, u16 switch_time,
				   u16 switch_timeout, struct sk_buff *skb,
				   u32 ch_sw_tm_ie)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct iwl_tdls_channel_switch_cmd cmd = {0};
	int ret;

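	/*
	 * mvm->mutex protects the channel-switch state machine; validate the
	 * requested transition before building the firmware command.
	 */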
	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
					timestamp);
	if (ret)
		return ret;

	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	cmd.switch_type = type;
	cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
	cmd.timing.switch_time = cpu_to_le32(switch_time);
	cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, peer);
	if (!sta) {
		rcu_read_unlock();
		ret = -ENOENT;
		goto out;
	}
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);

	if (!chandef) {
		if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
		    mvm->tdls_cs.peer.chandef.chan) {
			/* actually moving to the channel */
			chandef = &mvm->tdls_cs.peer.chandef;
		} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
			   type == TDLS_MOVE_CH) {
			/* we need to return to base channel */
			struct ieee80211_chanctx_conf *chanctx =
					rcu_dereference(vif->chanctx_conf);

			if (WARN_ON_ONCE(!chanctx)) {
				rcu_read_unlock();
				goto out;
			}

			chandef = &chanctx->def;
		}
	}

	if (chandef) {
		cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
			       PHY_BAND_24 : PHY_BAND_5);
		cmd.ci.channel = chandef->chan->hw_value;
		cmd.ci.width = iwl_mvm_get_channel_width(chandef);
		cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
	}

	/* keep quota calculation simple for now - 50% of DTIM for TDLS */
	cmd.timing.max_offchan_duration =
			cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
					     vif->bss_conf.beacon_int) / 2);

	/* Switch time is the first element in the switch-timing IE. */
	cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);

	info = IEEE80211_SKB_CB(skb);
	hdr = (void *)skb->data;
	if (info->control.hw_key) {
		if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
			rcu_read_unlock();
			ret = -EINVAL;
			goto out;
		}
		iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
	}

	iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
			   mvmsta->sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
				hdr->frame_control);
	rcu_read_unlock();

	memcpy(cmd.frame.data, skb->data, skb->len);

	ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
				   sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
			ret);
		goto out;
	}

	/* channel switch has started, update state */
	if (type != TDLS_MOVE_CH) {
		mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
		iwl_mvm_tdls_update_cs_state(mvm,
					     type == TDLS_SEND_CHAN_SW_REQ ?
					     IWL_MVM_TDLS_SW_REQ_SENT :
					     IWL_MVM_TDLS_SW_REQ_RCVD);
	} else {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
	}

out:

	/* channel switch failed - we are idle */
	if (ret)
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	return ret;
}

void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
	struct iwl_mvm *mvm;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	unsigned int delay;
	int ret;

	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
	mutex_lock(&mvm->mutex);

	/* called after an active channel switch has finished or timed-out */
	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	/* station might be gone, in that case do nothing */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
		goto out;

	sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;
	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr,
						 mvm->tdls_cs.peer.initiator,
						 mvm->tdls_cs.peer.op_class,
						 &mvm->tdls_cs.peer.chandef,
						 0, 0, 0,
						 mvm->tdls_cs.peer.skb,
						 mvm->tdls_cs.peer.ch_sw_tm_ie);
	if (ret)
		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

	/* retry after a DTIM if we failed sending now */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	queue_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			   msecs_to_jiffies(delay));
out:
	mutex_unlock(&mvm->mutex);
}

int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u8 oper_class,
			    struct cfg80211_chan_def *chandef,
			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta;
	unsigned int delay;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
		       sta->addr, chandef->chan->center_freq, chandef->width);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_TDLS(mvm,
			       "Existing peer. Can't start switch with %pM\n",
			       sta->addr);
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr, sta->tdls_initiator,
						 oper_class, chandef, 0, 0, 0,
						 tmpl_skb, ch_sw_tm_ie);
	if (ret)
		goto out;

	/*
	 * Mark the peer as "in tdls switch" for this vif. We only allow a
	 * single such peer per vif.
	 */
	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!mvm->tdls_cs.peer.skb) {
		ret = -ENOMEM;
		goto out;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
	mvm->tdls_cs.peer.chandef = *chandef;
	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
	mvm->tdls_cs.peer.op_class = oper_class;
	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

	/*
	 * Wait for 2 DTIM periods before attempting the next switch.
	 * The next switch will be made sooner if the current one completes
	 * before that.
	 */
	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
			     vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}

void
iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_tdls_ch_sw_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	enum iwl_tdls_channel_switch_type type;
	unsigned int delay;
	const char *action_str =
		params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
		"REQ" : "RESP";

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm,
		       "Received TDLS ch switch action %s from %pM status %d\n",
		       action_str, params->sta->addr, params->status);

	/*
	 * we got a non-zero status from a peer we were switching to - move to
	 * the idle state and retry again later
	 */
	if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
	    params->status != 0 &&
	    mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
		struct ieee80211_sta *cur_sta;

		/* make sure it's the same peer */
		cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (cur_sta == params->sta) {
			iwl_mvm_tdls_update_cs_state(mvm,
						     IWL_MVM_TDLS_SW_IDLE);
			goto retry;
		}
	}

	type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
	       TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;

	iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
					   params->sta->tdls_initiator, 0,
					   params->chandef, params->timestamp,
					   params->switch_time,
					   params->switch_timeout,
					   params->tmpl_skb,
					   params->ch_sw_tm_ie);

retry:
	/* register a timeout in case we don't succeed in switching */
	delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
		1024 / 1000;
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));
	mutex_unlock(&mvm->mutex);
}