/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mac.h"

#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/acpi.h>

#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "htt.h"
#include "txrx.h"
#include "testmode.h"
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"

/*********/
/* Rates */
/*********/

static struct ieee80211_rate ath10k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
	{ .bitrate = 20,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },

	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};

static struct ieee80211_rate ath10k_rates_rev2[] = {
	{ .bitrate = 10,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
	{ .bitrate = 20,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },

	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};

#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4

#define ath10k_a_rates (ath10k_rates + \
ATH10K_MAC_FIRST_OFDM_RATE_IDX) 95 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \ 96 ATH10K_MAC_FIRST_OFDM_RATE_IDX) 97 #define ath10k_g_rates (ath10k_rates + 0) 98 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) 99 100 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) 101 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) 102 103 static bool ath10k_mac_bitrate_is_cck(int bitrate) 104 { 105 switch (bitrate) { 106 case 10: 107 case 20: 108 case 55: 109 case 110: 110 return true; 111 } 112 113 return false; 114 } 115 116 static u8 ath10k_mac_bitrate_to_rate(int bitrate) 117 { 118 return DIV_ROUND_UP(bitrate, 5) | 119 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0); 120 } 121 122 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, 123 u8 hw_rate, bool cck) 124 { 125 const struct ieee80211_rate *rate; 126 int i; 127 128 for (i = 0; i < sband->n_bitrates; i++) { 129 rate = &sband->bitrates[i]; 130 131 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck) 132 continue; 133 134 if (rate->hw_value == hw_rate) 135 return i; 136 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && 137 rate->hw_value_short == hw_rate) 138 return i; 139 } 140 141 return 0; 142 } 143 144 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, 145 u32 bitrate) 146 { 147 int i; 148 149 for (i = 0; i < sband->n_bitrates; i++) 150 if (sband->bitrates[i].bitrate == bitrate) 151 return i; 152 153 return 0; 154 } 155 156 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) 157 { 158 switch ((mcs_map >> (2 * nss)) & 0x3) { 159 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; 160 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; 161 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; 162 } 163 return 0; 164 } 165 166 static u32 167 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 168 { 169 int nss; 170 171 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) 172 if (ht_mcs_mask[nss]) 173 return nss + 1; 174 175 return 1; 176 } 177 178 static u32 179 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 180 { 181 int nss; 182 183 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) 184 if (vht_mcs_mask[nss]) 185 return nss + 1; 186 187 return 1; 188 } 189 190 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val) 191 { 192 enum wmi_host_platform_type platform_type; 193 int ret; 194 195 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map)) 196 platform_type = WMI_HOST_PLATFORM_LOW_PERF; 197 else 198 platform_type = WMI_HOST_PLATFORM_HIGH_PERF; 199 200 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val); 201 202 if (ret && ret != -EOPNOTSUPP) { 203 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret); 204 return ret; 205 } 206 207 return 0; 208 } 209 210 /**********/ 211 /* Crypto */ 212 /**********/ 213 214 static int ath10k_send_key(struct ath10k_vif *arvif, 215 struct ieee80211_key_conf *key, 216 enum set_key_cmd cmd, 217 const u8 *macaddr, u32 flags) 218 { 219 struct ath10k *ar = arvif->ar; 220 struct wmi_vdev_install_key_arg arg = { 221 .vdev_id = arvif->vdev_id, 222 .key_idx = key->keyidx, 223 .key_len = key->keylen, 224 .key_data = key->key, 225 .key_flags = flags, 226 .macaddr = macaddr, 227 }; 228 229 lockdep_assert_held(&arvif->ar->conf_mutex); 230 231 switch (key->cipher) { 232 case WLAN_CIPHER_SUITE_CCMP: 233 arg.key_cipher = WMI_CIPHER_AES_CCM; 234 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; 235 break; 236 case 
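	/* For TKIP the 32-byte key handed down by mac80211 contains an
	 * 8-byte Tx and an 8-byte Rx Michael MIC key, hence the explicit
	 * MIC lengths passed to firmware below.
	 */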
WLAN_CIPHER_SUITE_TKIP: 237 arg.key_cipher = WMI_CIPHER_TKIP; 238 arg.key_txmic_len = 8; 239 arg.key_rxmic_len = 8; 240 break; 241 case WLAN_CIPHER_SUITE_WEP40: 242 case WLAN_CIPHER_SUITE_WEP104: 243 arg.key_cipher = WMI_CIPHER_WEP; 244 break; 245 case WLAN_CIPHER_SUITE_AES_CMAC: 246 WARN_ON(1); 247 return -EINVAL; 248 default: 249 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); 250 return -EOPNOTSUPP; 251 } 252 253 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 254 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 255 256 if (cmd == DISABLE_KEY) { 257 arg.key_cipher = WMI_CIPHER_NONE; 258 arg.key_data = NULL; 259 } 260 261 return ath10k_wmi_vdev_install_key(arvif->ar, &arg); 262 } 263 264 static int ath10k_install_key(struct ath10k_vif *arvif, 265 struct ieee80211_key_conf *key, 266 enum set_key_cmd cmd, 267 const u8 *macaddr, u32 flags) 268 { 269 struct ath10k *ar = arvif->ar; 270 int ret; 271 unsigned long time_left; 272 273 lockdep_assert_held(&ar->conf_mutex); 274 275 reinit_completion(&ar->install_key_done); 276 277 if (arvif->nohwcrypt) 278 return 1; 279 280 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags); 281 if (ret) 282 return ret; 283 284 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ); 285 if (time_left == 0) 286 return -ETIMEDOUT; 287 288 return 0; 289 } 290 291 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, 292 const u8 *addr) 293 { 294 struct ath10k *ar = arvif->ar; 295 struct ath10k_peer *peer; 296 int ret; 297 int i; 298 u32 flags; 299 300 lockdep_assert_held(&ar->conf_mutex); 301 302 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP && 303 arvif->vif->type != NL80211_IFTYPE_ADHOC && 304 arvif->vif->type != NL80211_IFTYPE_MESH_POINT)) 305 return -EINVAL; 306 307 spin_lock_bh(&ar->data_lock); 308 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); 309 spin_unlock_bh(&ar->data_lock); 310 311 if (!peer) 312 return -ENOENT; 313 314 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { 315 if (arvif->wep_keys[i] == NULL) 316 continue; 317 318 switch (arvif->vif->type) { 319 case NL80211_IFTYPE_AP: 320 flags = WMI_KEY_PAIRWISE; 321 322 if (arvif->def_wep_key_idx == i) 323 flags |= WMI_KEY_TX_USAGE; 324 325 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 326 SET_KEY, addr, flags); 327 if (ret < 0) 328 return ret; 329 break; 330 case NL80211_IFTYPE_ADHOC: 331 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 332 SET_KEY, addr, 333 WMI_KEY_PAIRWISE); 334 if (ret < 0) 335 return ret; 336 337 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 338 SET_KEY, addr, WMI_KEY_GROUP); 339 if (ret < 0) 340 return ret; 341 break; 342 default: 343 WARN_ON(1); 344 return -EINVAL; 345 } 346 347 spin_lock_bh(&ar->data_lock); 348 peer->keys[i] = arvif->wep_keys[i]; 349 spin_unlock_bh(&ar->data_lock); 350 } 351 352 /* In some cases (notably with static WEP IBSS with multiple keys) 353 * multicast Tx becomes broken. Both pairwise and groupwise keys are 354 * installed already. Using WMI_KEY_TX_USAGE in different combinations 355 * didn't seem help. Using def_keyid vdev parameter seems to be 356 * effective so use that. 357 * 358 * FIXME: Revisit. Perhaps this can be done in a less hacky way. 
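	 * The def_keyid vdev parameter applied below tells the firmware
	 * which of the installed WEP keys to use as the default Tx key,
	 * which is what restores multicast Tx here.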
	 */
	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
		return 0;

	if (arvif->def_wep_key_idx == -1)
		return 0;

	ret = ath10k_wmi_vdev_set_param(arvif->ar,
					arvif->vdev_id,
					arvif->ar->wmi.vdev_param->def_keyid,
					arvif->def_wep_key_idx);
	if (ret) {
		ath10k_warn(ar, "failed to re-set def wep key idx on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
				  const u8 *addr)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int first_errno = 0;
	int ret;
	int i;
	u32 flags = 0;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
	spin_unlock_bh(&ar->data_lock);

	if (!peer)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
		if (peer->keys[i] == NULL)
			continue;

		/* key flags are not required to delete the key */
		ret = ath10k_install_key(arvif, peer->keys[i],
					 DISABLE_KEY, addr, flags);
		if (ret < 0 && first_errno == 0)
			first_errno = ret;

		if (ret < 0)
			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
				    i, ret);

		spin_lock_bh(&ar->data_lock);
		peer->keys[i] = NULL;
		spin_unlock_bh(&ar->data_lock);
	}

	return first_errno;
}

bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
				    u8 keyidx)
{
	struct ath10k_peer *peer;
	int i;

	lockdep_assert_held(&ar->data_lock);

	/* We don't know which vdev this peer belongs to,
	 * since WMI doesn't give us that information.
	 *
	 * FIXME: multi-bss needs to be handled.
	 */
	peer = ath10k_peer_find(ar, 0, addr);
	if (!peer)
		return false;

	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
			return true;
	}

	return false;
}

static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
				 struct ieee80211_key_conf *key)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	u8 addr[ETH_ALEN];
	int first_errno = 0;
	int ret;
	int i;
	u32 flags = 0;

	lockdep_assert_held(&ar->conf_mutex);

	for (;;) {
		/* since ath10k_install_key() can sleep we can't hold
		 * data_lock all the time, so we try to remove the keys
		 * incrementally
		 */
		spin_lock_bh(&ar->data_lock);
		i = 0;
		list_for_each_entry(peer, &ar->peers, list) {
			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
				if (peer->keys[i] == key) {
					ether_addr_copy(addr, peer->addr);
					peer->keys[i] = NULL;
					break;
				}
			}

			if (i < ARRAY_SIZE(peer->keys))
				break;
		}
		spin_unlock_bh(&ar->data_lock);

		if (i == ARRAY_SIZE(peer->keys))
			break;
		/* key flags are not required to delete the key */
		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
		if (ret < 0 && first_errno == 0)
			first_errno = ret;

		if (ret)
			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
				    addr, ret);
	}

	return first_errno;
}

static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
					 struct ieee80211_key_conf *key)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(peer, &ar->peers, list) {
		if (ether_addr_equal(peer->addr, arvif->vif->addr))
			continue;

		if
(ether_addr_equal(peer->addr, arvif->bssid)) 507 continue; 508 509 if (peer->keys[key->keyidx] == key) 510 continue; 511 512 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n", 513 arvif->vdev_id, key->keyidx); 514 515 ret = ath10k_install_peer_wep_keys(arvif, peer->addr); 516 if (ret) { 517 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n", 518 arvif->vdev_id, peer->addr, ret); 519 return ret; 520 } 521 } 522 523 return 0; 524 } 525 526 /*********************/ 527 /* General utilities */ 528 /*********************/ 529 530 static inline enum wmi_phy_mode 531 chan_to_phymode(const struct cfg80211_chan_def *chandef) 532 { 533 enum wmi_phy_mode phymode = MODE_UNKNOWN; 534 535 switch (chandef->chan->band) { 536 case NL80211_BAND_2GHZ: 537 switch (chandef->width) { 538 case NL80211_CHAN_WIDTH_20_NOHT: 539 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) 540 phymode = MODE_11B; 541 else 542 phymode = MODE_11G; 543 break; 544 case NL80211_CHAN_WIDTH_20: 545 phymode = MODE_11NG_HT20; 546 break; 547 case NL80211_CHAN_WIDTH_40: 548 phymode = MODE_11NG_HT40; 549 break; 550 case NL80211_CHAN_WIDTH_5: 551 case NL80211_CHAN_WIDTH_10: 552 case NL80211_CHAN_WIDTH_80: 553 case NL80211_CHAN_WIDTH_80P80: 554 case NL80211_CHAN_WIDTH_160: 555 phymode = MODE_UNKNOWN; 556 break; 557 } 558 break; 559 case NL80211_BAND_5GHZ: 560 switch (chandef->width) { 561 case NL80211_CHAN_WIDTH_20_NOHT: 562 phymode = MODE_11A; 563 break; 564 case NL80211_CHAN_WIDTH_20: 565 phymode = MODE_11NA_HT20; 566 break; 567 case NL80211_CHAN_WIDTH_40: 568 phymode = MODE_11NA_HT40; 569 break; 570 case NL80211_CHAN_WIDTH_80: 571 phymode = MODE_11AC_VHT80; 572 break; 573 case NL80211_CHAN_WIDTH_160: 574 phymode = MODE_11AC_VHT160; 575 break; 576 case NL80211_CHAN_WIDTH_80P80: 577 phymode = MODE_11AC_VHT80_80; 578 break; 579 case NL80211_CHAN_WIDTH_5: 580 case NL80211_CHAN_WIDTH_10: 581 phymode = MODE_UNKNOWN; 582 break; 583 } 584 break; 585 default: 586 break; 587 } 588 589 WARN_ON(phymode == MODE_UNKNOWN); 590 return phymode; 591 } 592 593 static u8 ath10k_parse_mpdudensity(u8 mpdudensity) 594 { 595 /* 596 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 597 * 0 for no restriction 598 * 1 for 1/4 us 599 * 2 for 1/2 us 600 * 3 for 1 us 601 * 4 for 2 us 602 * 5 for 4 us 603 * 6 for 8 us 604 * 7 for 16 us 605 */ 606 switch (mpdudensity) { 607 case 0: 608 return 0; 609 case 1: 610 case 2: 611 case 3: 612 /* Our lower layer calculations limit our precision to 613 * 1 microsecond 614 */ 615 return 1; 616 case 4: 617 return 2; 618 case 5: 619 return 4; 620 case 6: 621 return 8; 622 case 7: 623 return 16; 624 default: 625 return 0; 626 } 627 } 628 629 int ath10k_mac_vif_chan(struct ieee80211_vif *vif, 630 struct cfg80211_chan_def *def) 631 { 632 struct ieee80211_chanctx_conf *conf; 633 634 rcu_read_lock(); 635 conf = rcu_dereference(vif->chanctx_conf); 636 if (!conf) { 637 rcu_read_unlock(); 638 return -ENOENT; 639 } 640 641 *def = conf->def; 642 rcu_read_unlock(); 643 644 return 0; 645 } 646 647 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw, 648 struct ieee80211_chanctx_conf *conf, 649 void *data) 650 { 651 int *num = data; 652 653 (*num)++; 654 } 655 656 static int ath10k_mac_num_chanctxs(struct ath10k *ar) 657 { 658 int num = 0; 659 660 ieee80211_iter_chan_contexts_atomic(ar->hw, 661 ath10k_mac_num_chanctxs_iter, 662 &num); 663 664 return num; 665 } 666 667 static void 668 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, 669 struct 
ieee80211_chanctx_conf *conf, 670 void *data) 671 { 672 struct cfg80211_chan_def **def = data; 673 674 *def = &conf->def; 675 } 676 677 static int ath10k_peer_create(struct ath10k *ar, 678 struct ieee80211_vif *vif, 679 struct ieee80211_sta *sta, 680 u32 vdev_id, 681 const u8 *addr, 682 enum wmi_peer_type peer_type) 683 { 684 struct ath10k_vif *arvif; 685 struct ath10k_peer *peer; 686 int num_peers = 0; 687 int ret; 688 689 lockdep_assert_held(&ar->conf_mutex); 690 691 num_peers = ar->num_peers; 692 693 /* Each vdev consumes a peer entry as well */ 694 list_for_each_entry(arvif, &ar->arvifs, list) 695 num_peers++; 696 697 if (num_peers >= ar->max_num_peers) 698 return -ENOBUFS; 699 700 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type); 701 if (ret) { 702 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", 703 addr, vdev_id, ret); 704 return ret; 705 } 706 707 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); 708 if (ret) { 709 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n", 710 addr, vdev_id, ret); 711 return ret; 712 } 713 714 spin_lock_bh(&ar->data_lock); 715 716 peer = ath10k_peer_find(ar, vdev_id, addr); 717 if (!peer) { 718 spin_unlock_bh(&ar->data_lock); 719 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", 720 addr, vdev_id); 721 ath10k_wmi_peer_delete(ar, vdev_id, addr); 722 return -ENOENT; 723 } 724 725 peer->vif = vif; 726 peer->sta = sta; 727 728 spin_unlock_bh(&ar->data_lock); 729 730 ar->num_peers++; 731 732 return 0; 733 } 734 735 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) 736 { 737 struct ath10k *ar = arvif->ar; 738 u32 param; 739 int ret; 740 741 param = ar->wmi.pdev_param->sta_kickout_th; 742 ret = ath10k_wmi_pdev_set_param(ar, param, 743 ATH10K_KICKOUT_THRESHOLD); 744 if (ret) { 745 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n", 746 arvif->vdev_id, ret); 747 return ret; 748 } 749 750 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs; 751 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 752 ATH10K_KEEPALIVE_MIN_IDLE); 753 if (ret) { 754 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n", 755 arvif->vdev_id, ret); 756 return ret; 757 } 758 759 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs; 760 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 761 ATH10K_KEEPALIVE_MAX_IDLE); 762 if (ret) { 763 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n", 764 arvif->vdev_id, ret); 765 return ret; 766 } 767 768 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs; 769 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 770 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); 771 if (ret) { 772 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", 773 arvif->vdev_id, ret); 774 return ret; 775 } 776 777 return 0; 778 } 779 780 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) 781 { 782 struct ath10k *ar = arvif->ar; 783 u32 vdev_param; 784 785 vdev_param = ar->wmi.vdev_param->rts_threshold; 786 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); 787 } 788 789 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) 790 { 791 int ret; 792 793 lockdep_assert_held(&ar->conf_mutex); 794 795 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); 796 if (ret) 797 return ret; 798 799 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); 800 if (ret) 801 return ret; 802 803 
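	/* The firmware has acknowledged the deletion at this point (see the
	 * wait above), so the host-side peer count can safely be updated.
	 */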
ar->num_peers--; 804 805 return 0; 806 } 807 808 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) 809 { 810 struct ath10k_peer *peer, *tmp; 811 int peer_id; 812 int i; 813 814 lockdep_assert_held(&ar->conf_mutex); 815 816 spin_lock_bh(&ar->data_lock); 817 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 818 if (peer->vdev_id != vdev_id) 819 continue; 820 821 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", 822 peer->addr, vdev_id); 823 824 for_each_set_bit(peer_id, peer->peer_ids, 825 ATH10K_MAX_NUM_PEER_IDS) { 826 ar->peer_map[peer_id] = NULL; 827 } 828 829 /* Double check that peer is properly un-referenced from 830 * the peer_map 831 */ 832 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 833 if (ar->peer_map[i] == peer) { 834 ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n", 835 peer->addr, peer, i); 836 ar->peer_map[i] = NULL; 837 } 838 } 839 840 list_del(&peer->list); 841 kfree(peer); 842 ar->num_peers--; 843 } 844 spin_unlock_bh(&ar->data_lock); 845 } 846 847 static void ath10k_peer_cleanup_all(struct ath10k *ar) 848 { 849 struct ath10k_peer *peer, *tmp; 850 int i; 851 852 lockdep_assert_held(&ar->conf_mutex); 853 854 spin_lock_bh(&ar->data_lock); 855 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 856 list_del(&peer->list); 857 kfree(peer); 858 } 859 860 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) 861 ar->peer_map[i] = NULL; 862 863 spin_unlock_bh(&ar->data_lock); 864 865 ar->num_peers = 0; 866 ar->num_stations = 0; 867 } 868 869 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id, 870 struct ieee80211_sta *sta, 871 enum wmi_tdls_peer_state state) 872 { 873 int ret; 874 struct wmi_tdls_peer_update_cmd_arg arg = {}; 875 struct wmi_tdls_peer_capab_arg cap = {}; 876 struct wmi_channel_arg chan_arg = {}; 877 878 lockdep_assert_held(&ar->conf_mutex); 879 880 arg.vdev_id = vdev_id; 881 arg.peer_state = state; 882 ether_addr_copy(arg.addr, sta->addr); 883 884 cap.peer_max_sp = sta->max_sp; 885 cap.peer_uapsd_queues = sta->uapsd_queues; 886 887 if (state == WMI_TDLS_PEER_STATE_CONNECTED && 888 !sta->tdls_initiator) 889 cap.is_peer_responder = 1; 890 891 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg); 892 if (ret) { 893 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n", 894 arg.addr, vdev_id, ret); 895 return ret; 896 } 897 898 return 0; 899 } 900 901 /************************/ 902 /* Interface management */ 903 /************************/ 904 905 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) 906 { 907 struct ath10k *ar = arvif->ar; 908 909 lockdep_assert_held(&ar->data_lock); 910 911 if (!arvif->beacon) 912 return; 913 914 if (!arvif->beacon_buf) 915 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, 916 arvif->beacon->len, DMA_TO_DEVICE); 917 918 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED && 919 arvif->beacon_state != ATH10K_BEACON_SENT)) 920 return; 921 922 dev_kfree_skb_any(arvif->beacon); 923 924 arvif->beacon = NULL; 925 arvif->beacon_state = ATH10K_BEACON_SCHEDULED; 926 } 927 928 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) 929 { 930 struct ath10k *ar = arvif->ar; 931 932 lockdep_assert_held(&ar->data_lock); 933 934 ath10k_mac_vif_beacon_free(arvif); 935 936 if (arvif->beacon_buf) { 937 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 938 arvif->beacon_buf, arvif->beacon_paddr); 939 arvif->beacon_buf = NULL; 940 } 941 } 942 943 static inline int ath10k_vdev_setup_sync(struct ath10k *ar) 944 { 945 
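	/* Wait for the firmware to acknowledge a preceding vdev
	 * start/restart/stop request; the corresponding WMI event handlers
	 * complete ar->vdev_setup_done.
	 */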
	unsigned long time_left;

	lockdep_assert_held(&ar->conf_mutex);

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		return -ESHUTDOWN;

	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}

static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
	struct cfg80211_chan_def *chandef = NULL;
	struct ieee80211_channel *channel = NULL;
	struct wmi_vdev_start_request_arg arg = {};
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_mac_get_any_chandef_iter,
					    &chandef);
	if (WARN_ON_ONCE(!chandef))
		return -ENOENT;

	channel = chandef->chan;

	arg.vdev_id = vdev_id;
	arg.channel.freq = channel->center_freq;
	arg.channel.band_center_freq1 = chandef->center_freq1;
	arg.channel.band_center_freq2 = chandef->center_freq2;

	/* TODO: set this up dynamically; what happens if we
	 * don't have any vifs?
	 */
	arg.channel.mode = chan_to_phymode(chandef);
	arg.channel.chan_radar =
			!!(channel->flags & IEEE80211_CHAN_RADAR);

	arg.channel.min_power = 0;
	arg.channel.max_power = channel->max_power * 2;
	arg.channel.max_reg_power = channel->max_reg_power * 2;
	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_start(ar, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
	if (ret) {
		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
			    vdev_id, ret);
		goto vdev_stop;
	}

	ar->monitor_vdev_id = vdev_id;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
		   ar->monitor_vdev_id);
	return 0;

vdev_stop:
	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
			    ar->monitor_vdev_id, ret);

	return ret;
}

static int ath10k_monitor_vdev_stop(struct ath10k *ar)
{
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
			    ar->monitor_vdev_id, ret);

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ret = ath10k_vdev_setup_sync(ar);
	if (ret)
		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
		   ar->monitor_vdev_id);
	return ret;
}

static int ath10k_monitor_vdev_create(struct ath10k *ar)
{
	int bit, ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->free_vdev_map == 0) {
		ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
		return -ENOMEM;
	}

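	/* Each set bit in free_vdev_map marks an unused vdev id; take the
	 * lowest free id for the monitor vdev.
	 */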
bit = __ffs64(ar->free_vdev_map); 1072 1073 ar->monitor_vdev_id = bit; 1074 1075 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, 1076 WMI_VDEV_TYPE_MONITOR, 1077 0, ar->mac_addr); 1078 if (ret) { 1079 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n", 1080 ar->monitor_vdev_id, ret); 1081 return ret; 1082 } 1083 1084 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); 1085 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", 1086 ar->monitor_vdev_id); 1087 1088 return 0; 1089 } 1090 1091 static int ath10k_monitor_vdev_delete(struct ath10k *ar) 1092 { 1093 int ret = 0; 1094 1095 lockdep_assert_held(&ar->conf_mutex); 1096 1097 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); 1098 if (ret) { 1099 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n", 1100 ar->monitor_vdev_id, ret); 1101 return ret; 1102 } 1103 1104 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id; 1105 1106 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", 1107 ar->monitor_vdev_id); 1108 return ret; 1109 } 1110 1111 static int ath10k_monitor_start(struct ath10k *ar) 1112 { 1113 int ret; 1114 1115 lockdep_assert_held(&ar->conf_mutex); 1116 1117 ret = ath10k_monitor_vdev_create(ar); 1118 if (ret) { 1119 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret); 1120 return ret; 1121 } 1122 1123 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); 1124 if (ret) { 1125 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret); 1126 ath10k_monitor_vdev_delete(ar); 1127 return ret; 1128 } 1129 1130 ar->monitor_started = true; 1131 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n"); 1132 1133 return 0; 1134 } 1135 1136 static int ath10k_monitor_stop(struct ath10k *ar) 1137 { 1138 int ret; 1139 1140 lockdep_assert_held(&ar->conf_mutex); 1141 1142 ret = ath10k_monitor_vdev_stop(ar); 1143 if (ret) { 1144 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret); 1145 return ret; 1146 } 1147 1148 ret = ath10k_monitor_vdev_delete(ar); 1149 if (ret) { 1150 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret); 1151 return ret; 1152 } 1153 1154 ar->monitor_started = false; 1155 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n"); 1156 1157 return 0; 1158 } 1159 1160 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar) 1161 { 1162 int num_ctx; 1163 1164 /* At least one chanctx is required to derive a channel to start 1165 * monitor vdev on. 1166 */ 1167 num_ctx = ath10k_mac_num_chanctxs(ar); 1168 if (num_ctx == 0) 1169 return false; 1170 1171 /* If there's already an existing special monitor interface then don't 1172 * bother creating another monitor vdev. 1173 */ 1174 if (ar->monitor_arvif) 1175 return false; 1176 1177 return ar->monitor || 1178 (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST, 1179 ar->running_fw->fw_file.fw_features) && 1180 (ar->filter_flags & FIF_OTHER_BSS)) || 1181 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1182 } 1183 1184 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar) 1185 { 1186 int num_ctx; 1187 1188 num_ctx = ath10k_mac_num_chanctxs(ar); 1189 1190 /* FIXME: Current interface combinations and cfg80211/mac80211 code 1191 * shouldn't allow this but make sure to prevent handling the following 1192 * case anyway since multi-channel DFS hasn't been tested at all. 
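	 * In other words, refuse the monitor vdev while CAC is running with
	 * more than one active channel context.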
1193 */ 1194 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1) 1195 return false; 1196 1197 return true; 1198 } 1199 1200 static int ath10k_monitor_recalc(struct ath10k *ar) 1201 { 1202 bool needed; 1203 bool allowed; 1204 int ret; 1205 1206 lockdep_assert_held(&ar->conf_mutex); 1207 1208 needed = ath10k_mac_monitor_vdev_is_needed(ar); 1209 allowed = ath10k_mac_monitor_vdev_is_allowed(ar); 1210 1211 ath10k_dbg(ar, ATH10K_DBG_MAC, 1212 "mac monitor recalc started? %d needed? %d allowed? %d\n", 1213 ar->monitor_started, needed, allowed); 1214 1215 if (WARN_ON(needed && !allowed)) { 1216 if (ar->monitor_started) { 1217 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n"); 1218 1219 ret = ath10k_monitor_stop(ar); 1220 if (ret) 1221 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", 1222 ret); 1223 /* not serious */ 1224 } 1225 1226 return -EPERM; 1227 } 1228 1229 if (needed == ar->monitor_started) 1230 return 0; 1231 1232 if (needed) 1233 return ath10k_monitor_start(ar); 1234 else 1235 return ath10k_monitor_stop(ar); 1236 } 1237 1238 static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif) 1239 { 1240 struct ath10k *ar = arvif->ar; 1241 1242 lockdep_assert_held(&ar->conf_mutex); 1243 1244 if (!arvif->is_started) { 1245 ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n"); 1246 return false; 1247 } 1248 1249 return true; 1250 } 1251 1252 static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif) 1253 { 1254 struct ath10k *ar = arvif->ar; 1255 u32 vdev_param; 1256 1257 lockdep_assert_held(&ar->conf_mutex); 1258 1259 vdev_param = ar->wmi.vdev_param->protection_mode; 1260 1261 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n", 1262 arvif->vdev_id, arvif->use_cts_prot); 1263 1264 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1265 arvif->use_cts_prot ? 
1 : 0); 1266 } 1267 1268 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) 1269 { 1270 struct ath10k *ar = arvif->ar; 1271 u32 vdev_param, rts_cts = 0; 1272 1273 lockdep_assert_held(&ar->conf_mutex); 1274 1275 vdev_param = ar->wmi.vdev_param->enable_rtscts; 1276 1277 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET); 1278 1279 if (arvif->num_legacy_stations > 0) 1280 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES, 1281 WMI_RTSCTS_PROFILE); 1282 else 1283 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES, 1284 WMI_RTSCTS_PROFILE); 1285 1286 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n", 1287 arvif->vdev_id, rts_cts); 1288 1289 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1290 rts_cts); 1291 } 1292 1293 static int ath10k_start_cac(struct ath10k *ar) 1294 { 1295 int ret; 1296 1297 lockdep_assert_held(&ar->conf_mutex); 1298 1299 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1300 1301 ret = ath10k_monitor_recalc(ar); 1302 if (ret) { 1303 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret); 1304 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1305 return ret; 1306 } 1307 1308 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", 1309 ar->monitor_vdev_id); 1310 1311 return 0; 1312 } 1313 1314 static int ath10k_stop_cac(struct ath10k *ar) 1315 { 1316 lockdep_assert_held(&ar->conf_mutex); 1317 1318 /* CAC is not running - do nothing */ 1319 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) 1320 return 0; 1321 1322 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1323 ath10k_monitor_stop(ar); 1324 1325 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n"); 1326 1327 return 0; 1328 } 1329 1330 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw, 1331 struct ieee80211_chanctx_conf *conf, 1332 void *data) 1333 { 1334 bool *ret = data; 1335 1336 if (!*ret && conf->radar_enabled) 1337 *ret = true; 1338 } 1339 1340 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar) 1341 { 1342 bool has_radar = false; 1343 1344 ieee80211_iter_chan_contexts_atomic(ar->hw, 1345 ath10k_mac_has_radar_iter, 1346 &has_radar); 1347 1348 return has_radar; 1349 } 1350 1351 static void ath10k_recalc_radar_detection(struct ath10k *ar) 1352 { 1353 int ret; 1354 1355 lockdep_assert_held(&ar->conf_mutex); 1356 1357 ath10k_stop_cac(ar); 1358 1359 if (!ath10k_mac_has_radar_enabled(ar)) 1360 return; 1361 1362 if (ar->num_started_vdevs > 0) 1363 return; 1364 1365 ret = ath10k_start_cac(ar); 1366 if (ret) { 1367 /* 1368 * Not possible to start CAC on current channel so starting 1369 * radiation is not allowed, make this channel DFS_UNAVAILABLE 1370 * by indicating that radar was detected. 
1371 */ 1372 ath10k_warn(ar, "failed to start CAC: %d\n", ret); 1373 ieee80211_radar_detected(ar->hw); 1374 } 1375 } 1376 1377 static int ath10k_vdev_stop(struct ath10k_vif *arvif) 1378 { 1379 struct ath10k *ar = arvif->ar; 1380 int ret; 1381 1382 lockdep_assert_held(&ar->conf_mutex); 1383 1384 reinit_completion(&ar->vdev_setup_done); 1385 1386 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); 1387 if (ret) { 1388 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n", 1389 arvif->vdev_id, ret); 1390 return ret; 1391 } 1392 1393 ret = ath10k_vdev_setup_sync(ar); 1394 if (ret) { 1395 ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n", 1396 arvif->vdev_id, ret); 1397 return ret; 1398 } 1399 1400 WARN_ON(ar->num_started_vdevs == 0); 1401 1402 if (ar->num_started_vdevs != 0) { 1403 ar->num_started_vdevs--; 1404 ath10k_recalc_radar_detection(ar); 1405 } 1406 1407 return ret; 1408 } 1409 1410 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, 1411 const struct cfg80211_chan_def *chandef, 1412 bool restart) 1413 { 1414 struct ath10k *ar = arvif->ar; 1415 struct wmi_vdev_start_request_arg arg = {}; 1416 int ret = 0; 1417 1418 lockdep_assert_held(&ar->conf_mutex); 1419 1420 reinit_completion(&ar->vdev_setup_done); 1421 1422 arg.vdev_id = arvif->vdev_id; 1423 arg.dtim_period = arvif->dtim_period; 1424 arg.bcn_intval = arvif->beacon_interval; 1425 1426 arg.channel.freq = chandef->chan->center_freq; 1427 arg.channel.band_center_freq1 = chandef->center_freq1; 1428 arg.channel.band_center_freq2 = chandef->center_freq2; 1429 arg.channel.mode = chan_to_phymode(chandef); 1430 1431 arg.channel.min_power = 0; 1432 arg.channel.max_power = chandef->chan->max_power * 2; 1433 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; 1434 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2; 1435 1436 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 1437 arg.ssid = arvif->u.ap.ssid; 1438 arg.ssid_len = arvif->u.ap.ssid_len; 1439 arg.hidden_ssid = arvif->u.ap.hidden_ssid; 1440 1441 /* For now allow DFS for AP mode */ 1442 arg.channel.chan_radar = 1443 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); 1444 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 1445 arg.ssid = arvif->vif->bss_conf.ssid; 1446 arg.ssid_len = arvif->vif->bss_conf.ssid_len; 1447 } 1448 1449 ath10k_dbg(ar, ATH10K_DBG_MAC, 1450 "mac vdev %d start center_freq %d phymode %s\n", 1451 arg.vdev_id, arg.channel.freq, 1452 ath10k_wmi_phymode_str(arg.channel.mode)); 1453 1454 if (restart) 1455 ret = ath10k_wmi_vdev_restart(ar, &arg); 1456 else 1457 ret = ath10k_wmi_vdev_start(ar, &arg); 1458 1459 if (ret) { 1460 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n", 1461 arg.vdev_id, ret); 1462 return ret; 1463 } 1464 1465 ret = ath10k_vdev_setup_sync(ar); 1466 if (ret) { 1467 ath10k_warn(ar, 1468 "failed to synchronize setup for vdev %i restart %d: %d\n", 1469 arg.vdev_id, restart, ret); 1470 return ret; 1471 } 1472 1473 ar->num_started_vdevs++; 1474 ath10k_recalc_radar_detection(ar); 1475 1476 return ret; 1477 } 1478 1479 static int ath10k_vdev_start(struct ath10k_vif *arvif, 1480 const struct cfg80211_chan_def *def) 1481 { 1482 return ath10k_vdev_start_restart(arvif, def, false); 1483 } 1484 1485 static int ath10k_vdev_restart(struct ath10k_vif *arvif, 1486 const struct cfg80211_chan_def *def) 1487 { 1488 return ath10k_vdev_start_restart(arvif, def, true); 1489 } 1490 1491 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif, 1492 struct sk_buff *bcn) 1493 { 1494 struct ath10k *ar = arvif->ar; 1495 struct 
ieee80211_mgmt *mgmt; 1496 const u8 *p2p_ie; 1497 int ret; 1498 1499 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p) 1500 return 0; 1501 1502 mgmt = (void *)bcn->data; 1503 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1504 mgmt->u.beacon.variable, 1505 bcn->len - (mgmt->u.beacon.variable - 1506 bcn->data)); 1507 if (!p2p_ie) 1508 return -ENOENT; 1509 1510 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); 1511 if (ret) { 1512 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n", 1513 arvif->vdev_id, ret); 1514 return ret; 1515 } 1516 1517 return 0; 1518 } 1519 1520 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, 1521 u8 oui_type, size_t ie_offset) 1522 { 1523 size_t len; 1524 const u8 *next; 1525 const u8 *end; 1526 u8 *ie; 1527 1528 if (WARN_ON(skb->len < ie_offset)) 1529 return -EINVAL; 1530 1531 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, 1532 skb->data + ie_offset, 1533 skb->len - ie_offset); 1534 if (!ie) 1535 return -ENOENT; 1536 1537 len = ie[1] + 2; 1538 end = skb->data + skb->len; 1539 next = ie + len; 1540 1541 if (WARN_ON(next > end)) 1542 return -EINVAL; 1543 1544 memmove(ie, next, end - next); 1545 skb_trim(skb, skb->len - len); 1546 1547 return 0; 1548 } 1549 1550 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif) 1551 { 1552 struct ath10k *ar = arvif->ar; 1553 struct ieee80211_hw *hw = ar->hw; 1554 struct ieee80211_vif *vif = arvif->vif; 1555 struct ieee80211_mutable_offsets offs = {}; 1556 struct sk_buff *bcn; 1557 int ret; 1558 1559 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1560 return 0; 1561 1562 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 1563 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 1564 return 0; 1565 1566 bcn = ieee80211_beacon_get_template(hw, vif, &offs); 1567 if (!bcn) { 1568 ath10k_warn(ar, "failed to get beacon template from mac80211\n"); 1569 return -EPERM; 1570 } 1571 1572 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn); 1573 if (ret) { 1574 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret); 1575 kfree_skb(bcn); 1576 return ret; 1577 } 1578 1579 /* P2P IE is inserted by firmware automatically (as configured above) 1580 * so remove it from the base beacon template to avoid duplicate P2P 1581 * IEs in beacon frames. 
1582 */ 1583 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1584 offsetof(struct ieee80211_mgmt, 1585 u.beacon.variable)); 1586 1587 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0, 1588 0, NULL, 0); 1589 kfree_skb(bcn); 1590 1591 if (ret) { 1592 ath10k_warn(ar, "failed to submit beacon template command: %d\n", 1593 ret); 1594 return ret; 1595 } 1596 1597 return 0; 1598 } 1599 1600 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif) 1601 { 1602 struct ath10k *ar = arvif->ar; 1603 struct ieee80211_hw *hw = ar->hw; 1604 struct ieee80211_vif *vif = arvif->vif; 1605 struct sk_buff *prb; 1606 int ret; 1607 1608 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1609 return 0; 1610 1611 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1612 return 0; 1613 1614 prb = ieee80211_proberesp_get(hw, vif); 1615 if (!prb) { 1616 ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); 1617 return -EPERM; 1618 } 1619 1620 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb); 1621 kfree_skb(prb); 1622 1623 if (ret) { 1624 ath10k_warn(ar, "failed to submit probe resp template command: %d\n", 1625 ret); 1626 return ret; 1627 } 1628 1629 return 0; 1630 } 1631 1632 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif) 1633 { 1634 struct ath10k *ar = arvif->ar; 1635 struct cfg80211_chan_def def; 1636 int ret; 1637 1638 /* When originally vdev is started during assign_vif_chanctx() some 1639 * information is missing, notably SSID. Firmware revisions with beacon 1640 * offloading require the SSID to be provided during vdev (re)start to 1641 * handle hidden SSID properly. 1642 * 1643 * Vdev restart must be done after vdev has been both started and 1644 * upped. Otherwise some firmware revisions (at least 10.2) fail to 1645 * deliver vdev restart response event causing timeouts during vdev 1646 * syncing in ath10k. 1647 * 1648 * Note: The vdev down/up and template reinstallation could be skipped 1649 * since only wmi-tlv firmware are known to have beacon offload and 1650 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart 1651 * response delivery. It's probably more robust to keep it as is. 1652 */ 1653 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1654 return 0; 1655 1656 if (WARN_ON(!arvif->is_started)) 1657 return -EINVAL; 1658 1659 if (WARN_ON(!arvif->is_up)) 1660 return -EINVAL; 1661 1662 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 1663 return -EINVAL; 1664 1665 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1666 if (ret) { 1667 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n", 1668 arvif->vdev_id, ret); 1669 return ret; 1670 } 1671 1672 /* Vdev down reset beacon & presp templates. Reinstall them. Otherwise 1673 * firmware will crash upon vdev up. 
1674 */ 1675 1676 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1677 if (ret) { 1678 ath10k_warn(ar, "failed to update beacon template: %d\n", ret); 1679 return ret; 1680 } 1681 1682 ret = ath10k_mac_setup_prb_tmpl(arvif); 1683 if (ret) { 1684 ath10k_warn(ar, "failed to update presp template: %d\n", ret); 1685 return ret; 1686 } 1687 1688 ret = ath10k_vdev_restart(arvif, &def); 1689 if (ret) { 1690 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n", 1691 arvif->vdev_id, ret); 1692 return ret; 1693 } 1694 1695 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1696 arvif->bssid); 1697 if (ret) { 1698 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n", 1699 arvif->vdev_id, ret); 1700 return ret; 1701 } 1702 1703 return 0; 1704 } 1705 1706 static void ath10k_control_beaconing(struct ath10k_vif *arvif, 1707 struct ieee80211_bss_conf *info) 1708 { 1709 struct ath10k *ar = arvif->ar; 1710 int ret = 0; 1711 1712 lockdep_assert_held(&arvif->ar->conf_mutex); 1713 1714 if (!info->enable_beacon) { 1715 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1716 if (ret) 1717 ath10k_warn(ar, "failed to down vdev_id %i: %d\n", 1718 arvif->vdev_id, ret); 1719 1720 arvif->is_up = false; 1721 1722 spin_lock_bh(&arvif->ar->data_lock); 1723 ath10k_mac_vif_beacon_free(arvif); 1724 spin_unlock_bh(&arvif->ar->data_lock); 1725 1726 return; 1727 } 1728 1729 arvif->tx_seq_no = 0x1000; 1730 1731 arvif->aid = 0; 1732 ether_addr_copy(arvif->bssid, info->bssid); 1733 1734 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1735 arvif->bssid); 1736 if (ret) { 1737 ath10k_warn(ar, "failed to bring up vdev %d: %i\n", 1738 arvif->vdev_id, ret); 1739 return; 1740 } 1741 1742 arvif->is_up = true; 1743 1744 ret = ath10k_mac_vif_fix_hidden_ssid(arvif); 1745 if (ret) { 1746 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n", 1747 arvif->vdev_id, ret); 1748 return; 1749 } 1750 1751 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); 1752 } 1753 1754 static void ath10k_control_ibss(struct ath10k_vif *arvif, 1755 struct ieee80211_bss_conf *info, 1756 const u8 self_peer[ETH_ALEN]) 1757 { 1758 struct ath10k *ar = arvif->ar; 1759 u32 vdev_param; 1760 int ret = 0; 1761 1762 lockdep_assert_held(&arvif->ar->conf_mutex); 1763 1764 if (!info->ibss_joined) { 1765 if (is_zero_ether_addr(arvif->bssid)) 1766 return; 1767 1768 eth_zero_addr(arvif->bssid); 1769 1770 return; 1771 } 1772 1773 vdev_param = arvif->ar->wmi.vdev_param->atim_window; 1774 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, 1775 ATH10K_DEFAULT_ATIM); 1776 if (ret) 1777 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n", 1778 arvif->vdev_id, ret); 1779 } 1780 1781 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif) 1782 { 1783 struct ath10k *ar = arvif->ar; 1784 u32 param; 1785 u32 value; 1786 int ret; 1787 1788 lockdep_assert_held(&arvif->ar->conf_mutex); 1789 1790 if (arvif->u.sta.uapsd) 1791 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER; 1792 else 1793 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; 1794 1795 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; 1796 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); 1797 if (ret) { 1798 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n", 1799 value, arvif->vdev_id, ret); 1800 return ret; 1801 } 1802 1803 return 0; 1804 } 1805 1806 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) 1807 { 1808 struct ath10k *ar = arvif->ar; 1809 u32 param; 1810 u32 value; 1811 
int ret; 1812 1813 lockdep_assert_held(&arvif->ar->conf_mutex); 1814 1815 if (arvif->u.sta.uapsd) 1816 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD; 1817 else 1818 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; 1819 1820 param = WMI_STA_PS_PARAM_PSPOLL_COUNT; 1821 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 1822 param, value); 1823 if (ret) { 1824 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n", 1825 value, arvif->vdev_id, ret); 1826 return ret; 1827 } 1828 1829 return 0; 1830 } 1831 1832 static int ath10k_mac_num_vifs_started(struct ath10k *ar) 1833 { 1834 struct ath10k_vif *arvif; 1835 int num = 0; 1836 1837 lockdep_assert_held(&ar->conf_mutex); 1838 1839 list_for_each_entry(arvif, &ar->arvifs, list) 1840 if (arvif->is_started) 1841 num++; 1842 1843 return num; 1844 } 1845 1846 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) 1847 { 1848 struct ath10k *ar = arvif->ar; 1849 struct ieee80211_vif *vif = arvif->vif; 1850 struct ieee80211_conf *conf = &ar->hw->conf; 1851 enum wmi_sta_powersave_param param; 1852 enum wmi_sta_ps_mode psmode; 1853 int ret; 1854 int ps_timeout; 1855 bool enable_ps; 1856 1857 lockdep_assert_held(&arvif->ar->conf_mutex); 1858 1859 if (arvif->vif->type != NL80211_IFTYPE_STATION) 1860 return 0; 1861 1862 enable_ps = arvif->ps; 1863 1864 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 && 1865 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, 1866 ar->running_fw->fw_file.fw_features)) { 1867 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", 1868 arvif->vdev_id); 1869 enable_ps = false; 1870 } 1871 1872 if (!arvif->is_started) { 1873 /* mac80211 can update vif powersave state while disconnected. 1874 * Firmware doesn't behave nicely and consumes more power than 1875 * necessary if PS is disabled on a non-started vdev. Hence 1876 * force-enable PS for non-running vdevs. 1877 */ 1878 psmode = WMI_STA_PS_MODE_ENABLED; 1879 } else if (enable_ps) { 1880 psmode = WMI_STA_PS_MODE_ENABLED; 1881 param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 1882 1883 ps_timeout = conf->dynamic_ps_timeout; 1884 if (ps_timeout == 0) { 1885 /* Firmware doesn't like 0 */ 1886 ps_timeout = ieee80211_tu_to_usec( 1887 vif->bss_conf.beacon_int) / 1000; 1888 } 1889 1890 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 1891 ps_timeout); 1892 if (ret) { 1893 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", 1894 arvif->vdev_id, ret); 1895 return ret; 1896 } 1897 } else { 1898 psmode = WMI_STA_PS_MODE_DISABLED; 1899 } 1900 1901 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", 1902 arvif->vdev_id, psmode ? "enable" : "disable"); 1903 1904 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); 1905 if (ret) { 1906 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n", 1907 psmode, arvif->vdev_id, ret); 1908 return ret; 1909 } 1910 1911 return 0; 1912 } 1913 1914 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif) 1915 { 1916 struct ath10k *ar = arvif->ar; 1917 struct wmi_sta_keepalive_arg arg = {}; 1918 int ret; 1919 1920 lockdep_assert_held(&arvif->ar->conf_mutex); 1921 1922 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 1923 return 0; 1924 1925 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map)) 1926 return 0; 1927 1928 /* Some firmware revisions have a bug and ignore the `enabled` field. 1929 * Instead use the interval to disable the keepalive. 
1930 */ 1931 arg.vdev_id = arvif->vdev_id; 1932 arg.enabled = 1; 1933 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; 1934 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE; 1935 1936 ret = ath10k_wmi_sta_keepalive(ar, &arg); 1937 if (ret) { 1938 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n", 1939 arvif->vdev_id, ret); 1940 return ret; 1941 } 1942 1943 return 0; 1944 } 1945 1946 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif) 1947 { 1948 struct ath10k *ar = arvif->ar; 1949 struct ieee80211_vif *vif = arvif->vif; 1950 int ret; 1951 1952 lockdep_assert_held(&arvif->ar->conf_mutex); 1953 1954 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))) 1955 return; 1956 1957 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1958 return; 1959 1960 if (!vif->csa_active) 1961 return; 1962 1963 if (!arvif->is_up) 1964 return; 1965 1966 if (!ieee80211_csa_is_complete(vif)) { 1967 ieee80211_csa_update_counter(vif); 1968 1969 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1970 if (ret) 1971 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 1972 ret); 1973 1974 ret = ath10k_mac_setup_prb_tmpl(arvif); 1975 if (ret) 1976 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 1977 ret); 1978 } else { 1979 ieee80211_csa_finish(vif); 1980 } 1981 } 1982 1983 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work) 1984 { 1985 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 1986 ap_csa_work); 1987 struct ath10k *ar = arvif->ar; 1988 1989 mutex_lock(&ar->conf_mutex); 1990 ath10k_mac_vif_ap_csa_count_down(arvif); 1991 mutex_unlock(&ar->conf_mutex); 1992 } 1993 1994 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac, 1995 struct ieee80211_vif *vif) 1996 { 1997 struct sk_buff *skb = data; 1998 struct ieee80211_mgmt *mgmt = (void *)skb->data; 1999 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2000 2001 if (vif->type != NL80211_IFTYPE_STATION) 2002 return; 2003 2004 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) 2005 return; 2006 2007 cancel_delayed_work(&arvif->connection_loss_work); 2008 } 2009 2010 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb) 2011 { 2012 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2013 IEEE80211_IFACE_ITER_NORMAL, 2014 ath10k_mac_handle_beacon_iter, 2015 skb); 2016 } 2017 2018 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac, 2019 struct ieee80211_vif *vif) 2020 { 2021 u32 *vdev_id = data; 2022 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2023 struct ath10k *ar = arvif->ar; 2024 struct ieee80211_hw *hw = ar->hw; 2025 2026 if (arvif->vdev_id != *vdev_id) 2027 return; 2028 2029 if (!arvif->is_up) 2030 return; 2031 2032 ieee80211_beacon_loss(vif); 2033 2034 /* Firmware doesn't report beacon loss events repeatedly. If AP probe 2035 * (done by mac80211) succeeds but beacons do not resume then it 2036 * doesn't make sense to continue operation. Queue connection loss work 2037 * which can be cancelled when beacon is received. 
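	 * ath10k_mac_handle_beacon() cancels connection_loss_work again
	 * once a beacon from the associated BSSID is seen.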
2038 */ 2039 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, 2040 ATH10K_CONNECTION_LOSS_HZ); 2041 } 2042 2043 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id) 2044 { 2045 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2046 IEEE80211_IFACE_ITER_NORMAL, 2047 ath10k_mac_handle_beacon_miss_iter, 2048 &vdev_id); 2049 } 2050 2051 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work) 2052 { 2053 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 2054 connection_loss_work.work); 2055 struct ieee80211_vif *vif = arvif->vif; 2056 2057 if (!arvif->is_up) 2058 return; 2059 2060 ieee80211_connection_loss(vif); 2061 } 2062 2063 /**********************/ 2064 /* Station management */ 2065 /**********************/ 2066 2067 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, 2068 struct ieee80211_vif *vif) 2069 { 2070 /* Some firmware revisions have unstable STA powersave when listen 2071 * interval is set too high (e.g. 5). The symptoms are firmware doesn't 2072 * generate NullFunc frames properly even if buffered frames have been 2073 * indicated in Beacon TIM. Firmware would seldom wake up to pull 2074 * buffered frames. Often pinging the device from AP would simply fail. 2075 * 2076 * As a workaround set it to 1. 2077 */ 2078 if (vif->type == NL80211_IFTYPE_STATION) 2079 return 1; 2080 2081 return ar->hw->conf.listen_interval; 2082 } 2083 2084 static void ath10k_peer_assoc_h_basic(struct ath10k *ar, 2085 struct ieee80211_vif *vif, 2086 struct ieee80211_sta *sta, 2087 struct wmi_peer_assoc_complete_arg *arg) 2088 { 2089 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2090 u32 aid; 2091 2092 lockdep_assert_held(&ar->conf_mutex); 2093 2094 if (vif->type == NL80211_IFTYPE_STATION) 2095 aid = vif->bss_conf.aid; 2096 else 2097 aid = sta->aid; 2098 2099 ether_addr_copy(arg->addr, sta->addr); 2100 arg->vdev_id = arvif->vdev_id; 2101 arg->peer_aid = aid; 2102 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth; 2103 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); 2104 arg->peer_num_spatial_streams = 1; 2105 arg->peer_caps = vif->bss_conf.assoc_capability; 2106 } 2107 2108 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, 2109 struct ieee80211_vif *vif, 2110 struct ieee80211_sta *sta, 2111 struct wmi_peer_assoc_complete_arg *arg) 2112 { 2113 struct ieee80211_bss_conf *info = &vif->bss_conf; 2114 struct cfg80211_chan_def def; 2115 struct cfg80211_bss *bss; 2116 const u8 *rsnie = NULL; 2117 const u8 *wpaie = NULL; 2118 2119 lockdep_assert_held(&ar->conf_mutex); 2120 2121 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2122 return; 2123 2124 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, 2125 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); 2126 if (bss) { 2127 const struct cfg80211_bss_ies *ies; 2128 2129 rcu_read_lock(); 2130 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); 2131 2132 ies = rcu_dereference(bss->ies); 2133 2134 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 2135 WLAN_OUI_TYPE_MICROSOFT_WPA, 2136 ies->data, 2137 ies->len); 2138 rcu_read_unlock(); 2139 cfg80211_put_bss(ar->hw->wiphy, bss); 2140 } 2141 2142 /* FIXME: base on RSN IE/WPA IE is a correct idea? 
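	 * The presence of an RSN or WPA IE in the BSS entry is treated as a
	 * hint that a 4-way handshake (and, for WPA, a group key handshake)
	 * will follow, hence the peer flags set below.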
*/ 2143 if (rsnie || wpaie) { 2144 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); 2145 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way; 2146 } 2147 2148 if (wpaie) { 2149 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); 2150 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way; 2151 } 2152 2153 if (sta->mfp && 2154 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, 2155 ar->running_fw->fw_file.fw_features)) { 2156 arg->peer_flags |= ar->wmi.peer_flags->pmf; 2157 } 2158 } 2159 2160 static void ath10k_peer_assoc_h_rates(struct ath10k *ar, 2161 struct ieee80211_vif *vif, 2162 struct ieee80211_sta *sta, 2163 struct wmi_peer_assoc_complete_arg *arg) 2164 { 2165 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2166 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; 2167 struct cfg80211_chan_def def; 2168 const struct ieee80211_supported_band *sband; 2169 const struct ieee80211_rate *rates; 2170 enum nl80211_band band; 2171 u32 ratemask; 2172 u8 rate; 2173 int i; 2174 2175 lockdep_assert_held(&ar->conf_mutex); 2176 2177 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2178 return; 2179 2180 band = def.chan->band; 2181 sband = ar->hw->wiphy->bands[band]; 2182 ratemask = sta->supp_rates[band]; 2183 ratemask &= arvif->bitrate_mask.control[band].legacy; 2184 rates = sband->bitrates; 2185 2186 rateset->num_rates = 0; 2187 2188 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { 2189 if (!(ratemask & 1)) 2190 continue; 2191 2192 rate = ath10k_mac_bitrate_to_rate(rates->bitrate); 2193 rateset->rates[rateset->num_rates] = rate; 2194 rateset->num_rates++; 2195 } 2196 } 2197 2198 static bool 2199 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 2200 { 2201 int nss; 2202 2203 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) 2204 if (ht_mcs_mask[nss]) 2205 return false; 2206 2207 return true; 2208 } 2209 2210 static bool 2211 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 2212 { 2213 int nss; 2214 2215 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) 2216 if (vht_mcs_mask[nss]) 2217 return false; 2218 2219 return true; 2220 } 2221 2222 static void ath10k_peer_assoc_h_ht(struct ath10k *ar, 2223 struct ieee80211_vif *vif, 2224 struct ieee80211_sta *sta, 2225 struct wmi_peer_assoc_complete_arg *arg) 2226 { 2227 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2228 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2229 struct cfg80211_chan_def def; 2230 enum nl80211_band band; 2231 const u8 *ht_mcs_mask; 2232 const u16 *vht_mcs_mask; 2233 int i, n; 2234 u8 max_nss; 2235 u32 stbc; 2236 2237 lockdep_assert_held(&ar->conf_mutex); 2238 2239 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2240 return; 2241 2242 if (!ht_cap->ht_supported) 2243 return; 2244 2245 band = def.chan->band; 2246 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2247 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2248 2249 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) && 2250 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2251 return; 2252 2253 arg->peer_flags |= ar->wmi.peer_flags->ht; 2254 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2255 ht_cap->ampdu_factor)) - 1; 2256 2257 arg->peer_mpdu_density = 2258 ath10k_parse_mpdudensity(ht_cap->ampdu_density); 2259 2260 arg->peer_ht_caps = ht_cap->cap; 2261 arg->peer_rate_caps |= WMI_RC_HT_FLAG; 2262 2263 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) 2264 arg->peer_flags |= ar->wmi.peer_flags->ldbc; 2265 2266 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { 2267 
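		/* 40 MHz capable HT peer: let the firmware rate control use CW40. */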
arg->peer_flags |= ar->wmi.peer_flags->bw40; 2268 arg->peer_rate_caps |= WMI_RC_CW40_FLAG; 2269 } 2270 2271 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { 2272 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) 2273 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2274 2275 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) 2276 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2277 } 2278 2279 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { 2280 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; 2281 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2282 } 2283 2284 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { 2285 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; 2286 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; 2287 stbc = stbc << WMI_RC_RX_STBC_FLAG_S; 2288 arg->peer_rate_caps |= stbc; 2289 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2290 } 2291 2292 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) 2293 arg->peer_rate_caps |= WMI_RC_TS_FLAG; 2294 else if (ht_cap->mcs.rx_mask[1]) 2295 arg->peer_rate_caps |= WMI_RC_DS_FLAG; 2296 2297 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) 2298 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && 2299 (ht_mcs_mask[i / 8] & BIT(i % 8))) { 2300 max_nss = (i / 8) + 1; 2301 arg->peer_ht_rates.rates[n++] = i; 2302 } 2303 2304 /* 2305 * This is a workaround for HT-enabled STAs which break the spec 2306 * and have no HT capabilities RX mask (no HT RX MCS map). 2307 * 2308 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), 2309 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. 2310 * 2311 * Firmware asserts if such situation occurs. 2312 */ 2313 if (n == 0) { 2314 arg->peer_ht_rates.num_rates = 8; 2315 for (i = 0; i < arg->peer_ht_rates.num_rates; i++) 2316 arg->peer_ht_rates.rates[i] = i; 2317 } else { 2318 arg->peer_ht_rates.num_rates = n; 2319 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2320 } 2321 2322 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", 2323 arg->addr, 2324 arg->peer_ht_rates.num_rates, 2325 arg->peer_num_spatial_streams); 2326 } 2327 2328 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, 2329 struct ath10k_vif *arvif, 2330 struct ieee80211_sta *sta) 2331 { 2332 u32 uapsd = 0; 2333 u32 max_sp = 0; 2334 int ret = 0; 2335 2336 lockdep_assert_held(&ar->conf_mutex); 2337 2338 if (sta->wme && sta->uapsd_queues) { 2339 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", 2340 sta->uapsd_queues, sta->max_sp); 2341 2342 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 2343 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 2344 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; 2345 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 2346 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | 2347 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; 2348 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 2349 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | 2350 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; 2351 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 2352 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | 2353 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; 2354 2355 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) 2356 max_sp = sta->max_sp; 2357 2358 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2359 sta->addr, 2360 WMI_AP_PS_PEER_PARAM_UAPSD, 2361 uapsd); 2362 if (ret) { 2363 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", 2364 arvif->vdev_id, ret); 2365 return ret; 2366 } 2367 2368 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2369 sta->addr, 2370 
WMI_AP_PS_PEER_PARAM_MAX_SP, 2371 max_sp); 2372 if (ret) { 2373 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", 2374 arvif->vdev_id, ret); 2375 return ret; 2376 } 2377 2378 /* TODO setup this based on STA listen interval and 2379 * beacon interval. Currently we don't know 2380 * sta->listen_interval - mac80211 patch required. 2381 * Currently use 10 seconds 2382 */ 2383 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, 2384 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 2385 10); 2386 if (ret) { 2387 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", 2388 arvif->vdev_id, ret); 2389 return ret; 2390 } 2391 } 2392 2393 return 0; 2394 } 2395 2396 static u16 2397 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set, 2398 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) 2399 { 2400 int idx_limit; 2401 int nss; 2402 u16 mcs_map; 2403 u16 mcs; 2404 2405 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { 2406 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & 2407 vht_mcs_limit[nss]; 2408 2409 if (mcs_map) 2410 idx_limit = fls(mcs_map) - 1; 2411 else 2412 idx_limit = -1; 2413 2414 switch (idx_limit) { 2415 case 0: /* fall through */ 2416 case 1: /* fall through */ 2417 case 2: /* fall through */ 2418 case 3: /* fall through */ 2419 case 4: /* fall through */ 2420 case 5: /* fall through */ 2421 case 6: /* fall through */ 2422 default: 2423 /* see ath10k_mac_can_set_bitrate_mask() */ 2424 WARN_ON(1); 2425 /* fall through */ 2426 case -1: 2427 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; 2428 break; 2429 case 7: 2430 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; 2431 break; 2432 case 8: 2433 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; 2434 break; 2435 case 9: 2436 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; 2437 break; 2438 } 2439 2440 tx_mcs_set &= ~(0x3 << (nss * 2)); 2441 tx_mcs_set |= mcs << (nss * 2); 2442 } 2443 2444 return tx_mcs_set; 2445 } 2446 2447 static void ath10k_peer_assoc_h_vht(struct ath10k *ar, 2448 struct ieee80211_vif *vif, 2449 struct ieee80211_sta *sta, 2450 struct wmi_peer_assoc_complete_arg *arg) 2451 { 2452 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 2453 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2454 struct cfg80211_chan_def def; 2455 enum nl80211_band band; 2456 const u16 *vht_mcs_mask; 2457 u8 ampdu_factor; 2458 u8 max_nss, vht_mcs; 2459 int i; 2460 2461 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2462 return; 2463 2464 if (!vht_cap->vht_supported) 2465 return; 2466 2467 band = def.chan->band; 2468 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2469 2470 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2471 return; 2472 2473 arg->peer_flags |= ar->wmi.peer_flags->vht; 2474 2475 if (def.chan->band == NL80211_BAND_2GHZ) 2476 arg->peer_flags |= ar->wmi.peer_flags->vht_2g; 2477 2478 arg->peer_vht_caps = vht_cap->cap; 2479 2480 ampdu_factor = (vht_cap->cap & 2481 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> 2482 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 2483 2484 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to 2485 * zero in VHT IE. Using it would result in degraded throughput. 2486 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep 2487 * it if VHT max_mpdu is smaller. 
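	 * Taking the max() below means a bogus (zero) VHT exponent can only
	 * raise the A-MPDU limit above what HT advertised, never shrink it.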
 */
	arg->peer_max_mpdu = max(arg->peer_max_mpdu,
				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
					ampdu_factor)) - 1);

	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
		arg->peer_flags |= ar->wmi.peer_flags->bw80;

	if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
		arg->peer_flags |= ar->wmi.peer_flags->bw160;

	/* Calculate peer NSS capability from VHT capabilities if STA
	 * supports VHT.
	 */
	for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
		vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
			  (2 * i) & 3;

		if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
		    vht_mcs_mask[i])
			max_nss = i + 1;
	}
	arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
	arg->peer_vht_rates.rx_max_rate =
		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
	arg->peer_vht_rates.rx_mcs_set =
		__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
	arg->peer_vht_rates.tx_max_rate =
		__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
	arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);

	if (arg->peer_vht_rates.rx_max_rate &&
	    (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) {
		switch (arg->peer_vht_rates.rx_max_rate) {
		case 1560:
			/* Must be a 2x2 peer; 160 MHz is all it can do. */
			arg->peer_bw_rxnss_override = 2;
			break;
		case 780:
			/* Can only do 1x1 at 160 MHz (long guard interval). */
			arg->peer_bw_rxnss_override = 1;
			break;
		}
	}
}

static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
				    struct ieee80211_vif *vif,
				    struct ieee80211_sta *sta,
				    struct wmi_peer_assoc_complete_arg *arg)
{
	struct ath10k_vif *arvif = (void *)vif->drv_priv;

	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_AP:
		if (sta->wme)
			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;

		if (sta->wme && sta->uapsd_queues) {
			arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
			arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
		}
		break;
	case WMI_VDEV_TYPE_STA:
		if (vif->bss_conf.qos)
			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
		break;
	case WMI_VDEV_TYPE_IBSS:
		if (sta->wme)
			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
		break;
	default:
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
		   sta->addr, !!(arg->peer_flags &
		   arvif->ar->wmi.peer_flags->qos));
}

static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
	return sta->supp_rates[NL80211_BAND_2GHZ] >>
	       ATH10K_MAC_FIRST_OFDM_RATE_IDX;
}

static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
						    struct ieee80211_sta *sta)
{
	if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
		switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
		case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
			return MODE_11AC_VHT160;
		case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
			return MODE_11AC_VHT80_80;
		default:
			/* not sure if this is a valid case?
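			 * A peer operating at 160 MHz should advertise one of
			 * the two SUPP_CHAN_WIDTH values handled above, so
			 * fall back to plain VHT160 for anything else.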
*/ 2589 return MODE_11AC_VHT160; 2590 } 2591 } 2592 2593 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2594 return MODE_11AC_VHT80; 2595 2596 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2597 return MODE_11AC_VHT40; 2598 2599 if (sta->bandwidth == IEEE80211_STA_RX_BW_20) 2600 return MODE_11AC_VHT20; 2601 2602 return MODE_UNKNOWN; 2603 } 2604 2605 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, 2606 struct ieee80211_vif *vif, 2607 struct ieee80211_sta *sta, 2608 struct wmi_peer_assoc_complete_arg *arg) 2609 { 2610 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2611 struct cfg80211_chan_def def; 2612 enum nl80211_band band; 2613 const u8 *ht_mcs_mask; 2614 const u16 *vht_mcs_mask; 2615 enum wmi_phy_mode phymode = MODE_UNKNOWN; 2616 2617 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2618 return; 2619 2620 band = def.chan->band; 2621 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2622 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2623 2624 switch (band) { 2625 case NL80211_BAND_2GHZ: 2626 if (sta->vht_cap.vht_supported && 2627 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2628 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2629 phymode = MODE_11AC_VHT40; 2630 else 2631 phymode = MODE_11AC_VHT20; 2632 } else if (sta->ht_cap.ht_supported && 2633 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2634 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2635 phymode = MODE_11NG_HT40; 2636 else 2637 phymode = MODE_11NG_HT20; 2638 } else if (ath10k_mac_sta_has_ofdm_only(sta)) { 2639 phymode = MODE_11G; 2640 } else { 2641 phymode = MODE_11B; 2642 } 2643 2644 break; 2645 case NL80211_BAND_5GHZ: 2646 /* 2647 * Check VHT first. 2648 */ 2649 if (sta->vht_cap.vht_supported && 2650 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2651 phymode = ath10k_mac_get_phymode_vht(ar, sta); 2652 } else if (sta->ht_cap.ht_supported && 2653 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2654 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) 2655 phymode = MODE_11NA_HT40; 2656 else 2657 phymode = MODE_11NA_HT20; 2658 } else { 2659 phymode = MODE_11A; 2660 } 2661 2662 break; 2663 default: 2664 break; 2665 } 2666 2667 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", 2668 sta->addr, ath10k_wmi_phymode_str(phymode)); 2669 2670 arg->peer_phymode = phymode; 2671 WARN_ON(phymode == MODE_UNKNOWN); 2672 } 2673 2674 static int ath10k_peer_assoc_prepare(struct ath10k *ar, 2675 struct ieee80211_vif *vif, 2676 struct ieee80211_sta *sta, 2677 struct wmi_peer_assoc_complete_arg *arg) 2678 { 2679 lockdep_assert_held(&ar->conf_mutex); 2680 2681 memset(arg, 0, sizeof(*arg)); 2682 2683 ath10k_peer_assoc_h_basic(ar, vif, sta, arg); 2684 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg); 2685 ath10k_peer_assoc_h_rates(ar, vif, sta, arg); 2686 ath10k_peer_assoc_h_ht(ar, vif, sta, arg); 2687 ath10k_peer_assoc_h_vht(ar, vif, sta, arg); 2688 ath10k_peer_assoc_h_qos(ar, vif, sta, arg); 2689 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); 2690 2691 return 0; 2692 } 2693 2694 static const u32 ath10k_smps_map[] = { 2695 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, 2696 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, 2697 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 2698 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 2699 }; 2700 2701 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, 2702 const u8 *addr, 2703 const struct ieee80211_sta_ht_cap *ht_cap) 2704 { 2705 int smps; 2706 2707 if (!ht_cap->ht_supported) 2708 return 0; 2709 2710 smps = ht_cap->cap & 
IEEE80211_HT_CAP_SM_PS; 2711 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; 2712 2713 if (smps >= ARRAY_SIZE(ath10k_smps_map)) 2714 return -EINVAL; 2715 2716 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, 2717 WMI_PEER_SMPS_STATE, 2718 ath10k_smps_map[smps]); 2719 } 2720 2721 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, 2722 struct ieee80211_vif *vif, 2723 struct ieee80211_sta_vht_cap vht_cap) 2724 { 2725 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2726 int ret; 2727 u32 param; 2728 u32 value; 2729 2730 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC) 2731 return 0; 2732 2733 if (!(ar->vht_cap_info & 2734 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2735 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 2736 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2737 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) 2738 return 0; 2739 2740 param = ar->wmi.vdev_param->txbf; 2741 value = 0; 2742 2743 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED)) 2744 return 0; 2745 2746 /* The following logic is correct. If a remote STA advertises support 2747 * for being a beamformer then we should enable us being a beamformee. 2748 */ 2749 2750 if (ar->vht_cap_info & 2751 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2752 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 2753 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 2754 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2755 2756 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 2757 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; 2758 } 2759 2760 if (ar->vht_cap_info & 2761 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2762 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 2763 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 2764 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2765 2766 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 2767 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; 2768 } 2769 2770 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE) 2771 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2772 2773 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER) 2774 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2775 2776 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); 2777 if (ret) { 2778 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", 2779 value, ret); 2780 return ret; 2781 } 2782 2783 return 0; 2784 } 2785 2786 /* can be called only in mac80211 callbacks due to `key_count` usage */ 2787 static void ath10k_bss_assoc(struct ieee80211_hw *hw, 2788 struct ieee80211_vif *vif, 2789 struct ieee80211_bss_conf *bss_conf) 2790 { 2791 struct ath10k *ar = hw->priv; 2792 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2793 struct ieee80211_sta_ht_cap ht_cap; 2794 struct ieee80211_sta_vht_cap vht_cap; 2795 struct wmi_peer_assoc_complete_arg peer_arg; 2796 struct ieee80211_sta *ap_sta; 2797 int ret; 2798 2799 lockdep_assert_held(&ar->conf_mutex); 2800 2801 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 2802 arvif->vdev_id, arvif->bssid, arvif->aid); 2803 2804 rcu_read_lock(); 2805 2806 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2807 if (!ap_sta) { 2808 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", 2809 bss_conf->bssid, arvif->vdev_id); 2810 rcu_read_unlock(); 2811 return; 2812 } 2813 2814 /* ap_sta must be accessed only within rcu section which must be left 2815 * before calling ath10k_setup_peer_smps() which might sleep. 
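	 * That's why the HT/VHT capabilities are copied by value below and the
	 * peer assoc arguments are prepared before the RCU lock is dropped.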
2816 */ 2817 ht_cap = ap_sta->ht_cap; 2818 vht_cap = ap_sta->vht_cap; 2819 2820 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); 2821 if (ret) { 2822 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", 2823 bss_conf->bssid, arvif->vdev_id, ret); 2824 rcu_read_unlock(); 2825 return; 2826 } 2827 2828 rcu_read_unlock(); 2829 2830 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2831 if (ret) { 2832 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", 2833 bss_conf->bssid, arvif->vdev_id, ret); 2834 return; 2835 } 2836 2837 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); 2838 if (ret) { 2839 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", 2840 arvif->vdev_id, ret); 2841 return; 2842 } 2843 2844 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2845 if (ret) { 2846 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", 2847 arvif->vdev_id, bss_conf->bssid, ret); 2848 return; 2849 } 2850 2851 ath10k_dbg(ar, ATH10K_DBG_MAC, 2852 "mac vdev %d up (associated) bssid %pM aid %d\n", 2853 arvif->vdev_id, bss_conf->bssid, bss_conf->aid); 2854 2855 WARN_ON(arvif->is_up); 2856 2857 arvif->aid = bss_conf->aid; 2858 ether_addr_copy(arvif->bssid, bss_conf->bssid); 2859 2860 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 2861 if (ret) { 2862 ath10k_warn(ar, "failed to set vdev %d up: %d\n", 2863 arvif->vdev_id, ret); 2864 return; 2865 } 2866 2867 arvif->is_up = true; 2868 2869 /* Workaround: Some firmware revisions (tested with qca6174 2870 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be 2871 * poked with peer param command. 2872 */ 2873 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, 2874 WMI_PEER_DUMMY_VAR, 1); 2875 if (ret) { 2876 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", 2877 arvif->bssid, arvif->vdev_id, ret); 2878 return; 2879 } 2880 } 2881 2882 static void ath10k_bss_disassoc(struct ieee80211_hw *hw, 2883 struct ieee80211_vif *vif) 2884 { 2885 struct ath10k *ar = hw->priv; 2886 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2887 struct ieee80211_sta_vht_cap vht_cap = {}; 2888 int ret; 2889 2890 lockdep_assert_held(&ar->conf_mutex); 2891 2892 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", 2893 arvif->vdev_id, arvif->bssid); 2894 2895 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 2896 if (ret) 2897 ath10k_warn(ar, "failed to down vdev %i: %d\n", 2898 arvif->vdev_id, ret); 2899 2900 arvif->def_wep_key_idx = -1; 2901 2902 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2903 if (ret) { 2904 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", 2905 arvif->vdev_id, ret); 2906 return; 2907 } 2908 2909 arvif->is_up = false; 2910 2911 cancel_delayed_work_sync(&arvif->connection_loss_work); 2912 } 2913 2914 static int ath10k_station_assoc(struct ath10k *ar, 2915 struct ieee80211_vif *vif, 2916 struct ieee80211_sta *sta, 2917 bool reassoc) 2918 { 2919 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2920 struct wmi_peer_assoc_complete_arg peer_arg; 2921 int ret = 0; 2922 2923 lockdep_assert_held(&ar->conf_mutex); 2924 2925 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); 2926 if (ret) { 2927 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", 2928 sta->addr, arvif->vdev_id, ret); 2929 return ret; 2930 } 2931 2932 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2933 if (ret) { 2934 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", 2935 
sta->addr, arvif->vdev_id, ret); 2936 return ret; 2937 } 2938 2939 /* Re-assoc is run only to update supported rates for given station. It 2940 * doesn't make much sense to reconfigure the peer completely. 2941 */ 2942 if (!reassoc) { 2943 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, 2944 &sta->ht_cap); 2945 if (ret) { 2946 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", 2947 arvif->vdev_id, ret); 2948 return ret; 2949 } 2950 2951 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); 2952 if (ret) { 2953 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", 2954 sta->addr, arvif->vdev_id, ret); 2955 return ret; 2956 } 2957 2958 if (!sta->wme) { 2959 arvif->num_legacy_stations++; 2960 ret = ath10k_recalc_rtscts_prot(arvif); 2961 if (ret) { 2962 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2963 arvif->vdev_id, ret); 2964 return ret; 2965 } 2966 } 2967 2968 /* Plumb cached keys only for static WEP */ 2969 if (arvif->def_wep_key_idx != -1) { 2970 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 2971 if (ret) { 2972 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", 2973 arvif->vdev_id, ret); 2974 return ret; 2975 } 2976 } 2977 } 2978 2979 return ret; 2980 } 2981 2982 static int ath10k_station_disassoc(struct ath10k *ar, 2983 struct ieee80211_vif *vif, 2984 struct ieee80211_sta *sta) 2985 { 2986 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2987 int ret = 0; 2988 2989 lockdep_assert_held(&ar->conf_mutex); 2990 2991 if (!sta->wme) { 2992 arvif->num_legacy_stations--; 2993 ret = ath10k_recalc_rtscts_prot(arvif); 2994 if (ret) { 2995 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2996 arvif->vdev_id, ret); 2997 return ret; 2998 } 2999 } 3000 3001 ret = ath10k_clear_peer_keys(arvif, sta->addr); 3002 if (ret) { 3003 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", 3004 arvif->vdev_id, ret); 3005 return ret; 3006 } 3007 3008 return ret; 3009 } 3010 3011 /**************/ 3012 /* Regulatory */ 3013 /**************/ 3014 3015 static int ath10k_update_channel_list(struct ath10k *ar) 3016 { 3017 struct ieee80211_hw *hw = ar->hw; 3018 struct ieee80211_supported_band **bands; 3019 enum nl80211_band band; 3020 struct ieee80211_channel *channel; 3021 struct wmi_scan_chan_list_arg arg = {0}; 3022 struct wmi_channel_arg *ch; 3023 bool passive; 3024 int len; 3025 int ret; 3026 int i; 3027 3028 lockdep_assert_held(&ar->conf_mutex); 3029 3030 bands = hw->wiphy->bands; 3031 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3032 if (!bands[band]) 3033 continue; 3034 3035 for (i = 0; i < bands[band]->n_channels; i++) { 3036 if (bands[band]->channels[i].flags & 3037 IEEE80211_CHAN_DISABLED) 3038 continue; 3039 3040 arg.n_channels++; 3041 } 3042 } 3043 3044 len = sizeof(struct wmi_channel_arg) * arg.n_channels; 3045 arg.channels = kzalloc(len, GFP_KERNEL); 3046 if (!arg.channels) 3047 return -ENOMEM; 3048 3049 ch = arg.channels; 3050 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3051 if (!bands[band]) 3052 continue; 3053 3054 for (i = 0; i < bands[band]->n_channels; i++) { 3055 channel = &bands[band]->channels[i]; 3056 3057 if (channel->flags & IEEE80211_CHAN_DISABLED) 3058 continue; 3059 3060 ch->allow_ht = true; 3061 3062 /* FIXME: when should we really allow VHT? 
*/ 3063 ch->allow_vht = true; 3064 3065 ch->allow_ibss = 3066 !(channel->flags & IEEE80211_CHAN_NO_IR); 3067 3068 ch->ht40plus = 3069 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); 3070 3071 ch->chan_radar = 3072 !!(channel->flags & IEEE80211_CHAN_RADAR); 3073 3074 passive = channel->flags & IEEE80211_CHAN_NO_IR; 3075 ch->passive = passive; 3076 3077 ch->freq = channel->center_freq; 3078 ch->band_center_freq1 = channel->center_freq; 3079 ch->min_power = 0; 3080 ch->max_power = channel->max_power * 2; 3081 ch->max_reg_power = channel->max_reg_power * 2; 3082 ch->max_antenna_gain = channel->max_antenna_gain * 2; 3083 ch->reg_class_id = 0; /* FIXME */ 3084 3085 /* FIXME: why use only legacy modes, why not any 3086 * HT/VHT modes? Would that even make any 3087 * difference? 3088 */ 3089 if (channel->band == NL80211_BAND_2GHZ) 3090 ch->mode = MODE_11G; 3091 else 3092 ch->mode = MODE_11A; 3093 3094 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) 3095 continue; 3096 3097 ath10k_dbg(ar, ATH10K_DBG_WMI, 3098 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", 3099 ch - arg.channels, arg.n_channels, 3100 ch->freq, ch->max_power, ch->max_reg_power, 3101 ch->max_antenna_gain, ch->mode); 3102 3103 ch++; 3104 } 3105 } 3106 3107 ret = ath10k_wmi_scan_chan_list(ar, &arg); 3108 kfree(arg.channels); 3109 3110 return ret; 3111 } 3112 3113 static enum wmi_dfs_region 3114 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region) 3115 { 3116 switch (dfs_region) { 3117 case NL80211_DFS_UNSET: 3118 return WMI_UNINIT_DFS_DOMAIN; 3119 case NL80211_DFS_FCC: 3120 return WMI_FCC_DFS_DOMAIN; 3121 case NL80211_DFS_ETSI: 3122 return WMI_ETSI_DFS_DOMAIN; 3123 case NL80211_DFS_JP: 3124 return WMI_MKK4_DFS_DOMAIN; 3125 } 3126 return WMI_UNINIT_DFS_DOMAIN; 3127 } 3128 3129 static void ath10k_regd_update(struct ath10k *ar) 3130 { 3131 struct reg_dmn_pair_mapping *regpair; 3132 int ret; 3133 enum wmi_dfs_region wmi_dfs_reg; 3134 enum nl80211_dfs_regions nl_dfs_reg; 3135 3136 lockdep_assert_held(&ar->conf_mutex); 3137 3138 ret = ath10k_update_channel_list(ar); 3139 if (ret) 3140 ath10k_warn(ar, "failed to update channel list: %d\n", ret); 3141 3142 regpair = ar->ath_common.regulatory.regpair; 3143 3144 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3145 nl_dfs_reg = ar->dfs_detector->region; 3146 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); 3147 } else { 3148 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN; 3149 } 3150 3151 /* Target allows setting up per-band regdomain but ath_common provides 3152 * a combined one only 3153 */ 3154 ret = ath10k_wmi_pdev_set_regdomain(ar, 3155 regpair->reg_domain, 3156 regpair->reg_domain, /* 2ghz */ 3157 regpair->reg_domain, /* 5ghz */ 3158 regpair->reg_2ghz_ctl, 3159 regpair->reg_5ghz_ctl, 3160 wmi_dfs_reg); 3161 if (ret) 3162 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); 3163 } 3164 3165 static void ath10k_mac_update_channel_list(struct ath10k *ar, 3166 struct ieee80211_supported_band *band) 3167 { 3168 int i; 3169 3170 if (ar->low_5ghz_chan && ar->high_5ghz_chan) { 3171 for (i = 0; i < band->n_channels; i++) { 3172 if (band->channels[i].center_freq < ar->low_5ghz_chan || 3173 band->channels[i].center_freq > ar->high_5ghz_chan) 3174 band->channels[i].flags |= 3175 IEEE80211_CHAN_DISABLED; 3176 } 3177 } 3178 } 3179 3180 static void ath10k_reg_notifier(struct wiphy *wiphy, 3181 struct regulatory_request *request) 3182 { 3183 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 3184 struct ath10k *ar = hw->priv; 3185 bool result; 3186 
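	/* Hand the regulatory hint to ath_common first, then program the DFS
	 * detector and, if the device is running, push the updated regdomain
	 * and channel list to the firmware.
	 */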
3187 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); 3188 3189 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3190 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", 3191 request->dfs_region); 3192 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, 3193 request->dfs_region); 3194 if (!result) 3195 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", 3196 request->dfs_region); 3197 } 3198 3199 mutex_lock(&ar->conf_mutex); 3200 if (ar->state == ATH10K_STATE_ON) 3201 ath10k_regd_update(ar); 3202 mutex_unlock(&ar->conf_mutex); 3203 3204 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 3205 ath10k_mac_update_channel_list(ar, 3206 ar->hw->wiphy->bands[NL80211_BAND_5GHZ]); 3207 } 3208 3209 /***************/ 3210 /* TX handlers */ 3211 /***************/ 3212 3213 enum ath10k_mac_tx_path { 3214 ATH10K_MAC_TX_HTT, 3215 ATH10K_MAC_TX_HTT_MGMT, 3216 ATH10K_MAC_TX_WMI_MGMT, 3217 ATH10K_MAC_TX_UNKNOWN, 3218 }; 3219 3220 void ath10k_mac_tx_lock(struct ath10k *ar, int reason) 3221 { 3222 lockdep_assert_held(&ar->htt.tx_lock); 3223 3224 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3225 ar->tx_paused |= BIT(reason); 3226 ieee80211_stop_queues(ar->hw); 3227 } 3228 3229 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, 3230 struct ieee80211_vif *vif) 3231 { 3232 struct ath10k *ar = data; 3233 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3234 3235 if (arvif->tx_paused) 3236 return; 3237 3238 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3239 } 3240 3241 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason) 3242 { 3243 lockdep_assert_held(&ar->htt.tx_lock); 3244 3245 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3246 ar->tx_paused &= ~BIT(reason); 3247 3248 if (ar->tx_paused) 3249 return; 3250 3251 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3252 IEEE80211_IFACE_ITER_RESUME_ALL, 3253 ath10k_mac_tx_unlock_iter, 3254 ar); 3255 3256 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue); 3257 } 3258 3259 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason) 3260 { 3261 struct ath10k *ar = arvif->ar; 3262 3263 lockdep_assert_held(&ar->htt.tx_lock); 3264 3265 WARN_ON(reason >= BITS_PER_LONG); 3266 arvif->tx_paused |= BIT(reason); 3267 ieee80211_stop_queue(ar->hw, arvif->vdev_id); 3268 } 3269 3270 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason) 3271 { 3272 struct ath10k *ar = arvif->ar; 3273 3274 lockdep_assert_held(&ar->htt.tx_lock); 3275 3276 WARN_ON(reason >= BITS_PER_LONG); 3277 arvif->tx_paused &= ~BIT(reason); 3278 3279 if (ar->tx_paused) 3280 return; 3281 3282 if (arvif->tx_paused) 3283 return; 3284 3285 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3286 } 3287 3288 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif, 3289 enum wmi_tlv_tx_pause_id pause_id, 3290 enum wmi_tlv_tx_pause_action action) 3291 { 3292 struct ath10k *ar = arvif->ar; 3293 3294 lockdep_assert_held(&ar->htt.tx_lock); 3295 3296 switch (action) { 3297 case WMI_TLV_TX_PAUSE_ACTION_STOP: 3298 ath10k_mac_vif_tx_lock(arvif, pause_id); 3299 break; 3300 case WMI_TLV_TX_PAUSE_ACTION_WAKE: 3301 ath10k_mac_vif_tx_unlock(arvif, pause_id); 3302 break; 3303 default: 3304 ath10k_dbg(ar, ATH10K_DBG_BOOT, 3305 "received unknown tx pause action %d on vdev %i, ignoring\n", 3306 action, arvif->vdev_id); 3307 break; 3308 } 3309 } 3310 3311 struct ath10k_mac_tx_pause { 3312 u32 vdev_id; 3313 enum wmi_tlv_tx_pause_id pause_id; 3314 enum wmi_tlv_tx_pause_action action; 3315 }; 3316 3317 static void 
ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
				struct ieee80211_vif *vif)
{
	struct ath10k_vif *arvif = (void *)vif->drv_priv;
	struct ath10k_mac_tx_pause *arg = data;

	if (arvif->vdev_id != arg->vdev_id)
		return;

	ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
}

void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
				     enum wmi_tlv_tx_pause_id pause_id,
				     enum wmi_tlv_tx_pause_action action)
{
	struct ath10k_mac_tx_pause arg = {
		.vdev_id = vdev_id,
		.pause_id = pause_id,
		.action = action,
	};

	spin_lock_bh(&ar->htt.tx_lock);
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   IEEE80211_IFACE_ITER_RESUME_ALL,
						   ath10k_mac_handle_tx_pause_iter,
						   &arg);
	spin_unlock_bh(&ar->htt.tx_lock);
}

static enum ath10k_hw_txrx_mode
ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct sk_buff *skb)
{
	const struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;

	if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
		return ATH10K_HW_TXRX_RAW;

	if (ieee80211_is_mgmt(fc))
		return ATH10K_HW_TXRX_MGMT;

	/* Workaround:
	 *
	 * NullFunc frames are mostly used to ping whether a client or AP is
	 * still reachable and responsive. This implies tx status reports must
	 * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
	 * can conclude that the other end disappeared and tear down the BSS
	 * connection, or can never disconnect from the BSS/client (which is
	 * the case).
	 *
	 * Firmware with HTT older than 3.0 delivers incorrect tx status for
	 * NullFunc frames to the driver. However there's an HTT Mgmt Tx
	 * command which seems to deliver correct tx reports for NullFunc
	 * frames. The downside of using it is that it ignores client
	 * powersave state, so it can end up disconnecting sleeping clients in
	 * AP mode. It should fix STA mode though, because APs don't sleep.
	 */
	if (ar->htt.target_version_major < 3 &&
	    (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
		      ar->running_fw->fw_file.fw_features))
		return ATH10K_HW_TXRX_MGMT;

	/* Workaround:
	 *
	 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
	 * NativeWifi txmode - it selects the AP key instead of the peer key.
	 * It seems to work with Ethernet txmode, so use that.
	 *
	 * FIXME: Check if raw mode works with TDLS.
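	 * For now, data frames to TDLS peers are simply steered to the
	 * Ethernet txmode below.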
3391 */ 3392 if (ieee80211_is_data_present(fc) && sta && sta->tdls) 3393 return ATH10K_HW_TXRX_ETHERNET; 3394 3395 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 3396 return ATH10K_HW_TXRX_RAW; 3397 3398 return ATH10K_HW_TXRX_NATIVE_WIFI; 3399 } 3400 3401 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, 3402 struct sk_buff *skb) 3403 { 3404 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3405 const struct ieee80211_hdr *hdr = (void *)skb->data; 3406 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | 3407 IEEE80211_TX_CTL_INJECTED; 3408 3409 if (!ieee80211_has_protected(hdr->frame_control)) 3410 return false; 3411 3412 if ((info->flags & mask) == mask) 3413 return false; 3414 3415 if (vif) 3416 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt; 3417 3418 return true; 3419 } 3420 3421 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS 3422 * Control in the header. 3423 */ 3424 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) 3425 { 3426 struct ieee80211_hdr *hdr = (void *)skb->data; 3427 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3428 u8 *qos_ctl; 3429 3430 if (!ieee80211_is_data_qos(hdr->frame_control)) 3431 return; 3432 3433 qos_ctl = ieee80211_get_qos_ctl(hdr); 3434 memmove(skb->data + IEEE80211_QOS_CTL_LEN, 3435 skb->data, (void *)qos_ctl - (void *)skb->data); 3436 skb_pull(skb, IEEE80211_QOS_CTL_LEN); 3437 3438 /* Some firmware revisions don't handle sending QoS NullFunc well. 3439 * These frames are mainly used for CQM purposes so it doesn't really 3440 * matter whether QoS NullFunc or NullFunc are sent. 3441 */ 3442 hdr = (void *)skb->data; 3443 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) 3444 cb->flags &= ~ATH10K_SKB_F_QOS; 3445 3446 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 3447 } 3448 3449 static void ath10k_tx_h_8023(struct sk_buff *skb) 3450 { 3451 struct ieee80211_hdr *hdr; 3452 struct rfc1042_hdr *rfc1042; 3453 struct ethhdr *eth; 3454 size_t hdrlen; 3455 u8 da[ETH_ALEN]; 3456 u8 sa[ETH_ALEN]; 3457 __be16 type; 3458 3459 hdr = (void *)skb->data; 3460 hdrlen = ieee80211_hdrlen(hdr->frame_control); 3461 rfc1042 = (void *)skb->data + hdrlen; 3462 3463 ether_addr_copy(da, ieee80211_get_DA(hdr)); 3464 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 3465 type = rfc1042->snap_type; 3466 3467 skb_pull(skb, hdrlen + sizeof(*rfc1042)); 3468 skb_push(skb, sizeof(*eth)); 3469 3470 eth = (void *)skb->data; 3471 ether_addr_copy(eth->h_dest, da); 3472 ether_addr_copy(eth->h_source, sa); 3473 eth->h_proto = type; 3474 } 3475 3476 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, 3477 struct ieee80211_vif *vif, 3478 struct sk_buff *skb) 3479 { 3480 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3481 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3482 3483 /* This is case only for P2P_GO */ 3484 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 3485 return; 3486 3487 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { 3488 spin_lock_bh(&ar->data_lock); 3489 if (arvif->u.ap.noa_data) 3490 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, 3491 GFP_ATOMIC)) 3492 skb_put_data(skb, arvif->u.ap.noa_data, 3493 arvif->u.ap.noa_len); 3494 spin_unlock_bh(&ar->data_lock); 3495 } 3496 } 3497 3498 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, 3499 struct ieee80211_vif *vif, 3500 struct ieee80211_txq *txq, 3501 struct sk_buff *skb) 3502 { 3503 struct ieee80211_hdr *hdr = (void *)skb->data; 3504 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3505 
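	/* Record in the per-skb control block how this frame has to be
	 * handled on the tx path (hw crypto opt-out, management, QoS) so the
	 * later tx stages don't need to re-parse the 802.11 header.
	 */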
3506 cb->flags = 0; 3507 if (!ath10k_tx_h_use_hwcrypto(vif, skb)) 3508 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3509 3510 if (ieee80211_is_mgmt(hdr->frame_control)) 3511 cb->flags |= ATH10K_SKB_F_MGMT; 3512 3513 if (ieee80211_is_data_qos(hdr->frame_control)) 3514 cb->flags |= ATH10K_SKB_F_QOS; 3515 3516 cb->vif = vif; 3517 cb->txq = txq; 3518 } 3519 3520 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) 3521 { 3522 /* FIXME: Not really sure since when the behaviour changed. At some 3523 * point new firmware stopped requiring creation of peer entries for 3524 * offchannel tx (and actually creating them causes issues with wmi-htc 3525 * tx credit replenishment and reliability). Assuming it's at least 3.4 3526 * because that's when the `freq` was introduced to TX_FRM HTT command. 3527 */ 3528 return (ar->htt.target_version_major >= 3 && 3529 ar->htt.target_version_minor >= 4 && 3530 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV); 3531 } 3532 3533 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) 3534 { 3535 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; 3536 int ret = 0; 3537 3538 spin_lock_bh(&ar->data_lock); 3539 3540 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { 3541 ath10k_warn(ar, "wmi mgmt tx queue is full\n"); 3542 ret = -ENOSPC; 3543 goto unlock; 3544 } 3545 3546 __skb_queue_tail(q, skb); 3547 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); 3548 3549 unlock: 3550 spin_unlock_bh(&ar->data_lock); 3551 3552 return ret; 3553 } 3554 3555 static enum ath10k_mac_tx_path 3556 ath10k_mac_tx_h_get_txpath(struct ath10k *ar, 3557 struct sk_buff *skb, 3558 enum ath10k_hw_txrx_mode txmode) 3559 { 3560 switch (txmode) { 3561 case ATH10K_HW_TXRX_RAW: 3562 case ATH10K_HW_TXRX_NATIVE_WIFI: 3563 case ATH10K_HW_TXRX_ETHERNET: 3564 return ATH10K_MAC_TX_HTT; 3565 case ATH10K_HW_TXRX_MGMT: 3566 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3567 ar->running_fw->fw_file.fw_features)) 3568 return ATH10K_MAC_TX_WMI_MGMT; 3569 else if (ar->htt.target_version_major >= 3) 3570 return ATH10K_MAC_TX_HTT; 3571 else 3572 return ATH10K_MAC_TX_HTT_MGMT; 3573 } 3574 3575 return ATH10K_MAC_TX_UNKNOWN; 3576 } 3577 3578 static int ath10k_mac_tx_submit(struct ath10k *ar, 3579 enum ath10k_hw_txrx_mode txmode, 3580 enum ath10k_mac_tx_path txpath, 3581 struct sk_buff *skb) 3582 { 3583 struct ath10k_htt *htt = &ar->htt; 3584 int ret = -EINVAL; 3585 3586 switch (txpath) { 3587 case ATH10K_MAC_TX_HTT: 3588 ret = ath10k_htt_tx(htt, txmode, skb); 3589 break; 3590 case ATH10K_MAC_TX_HTT_MGMT: 3591 ret = ath10k_htt_mgmt_tx(htt, skb); 3592 break; 3593 case ATH10K_MAC_TX_WMI_MGMT: 3594 ret = ath10k_mac_tx_wmi_mgmt(ar, skb); 3595 break; 3596 case ATH10K_MAC_TX_UNKNOWN: 3597 WARN_ON_ONCE(1); 3598 ret = -EINVAL; 3599 break; 3600 } 3601 3602 if (ret) { 3603 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", 3604 ret); 3605 ieee80211_free_txskb(ar->hw, skb); 3606 } 3607 3608 return ret; 3609 } 3610 3611 /* This function consumes the sk_buff regardless of return value as far as 3612 * caller is concerned so no freeing is necessary afterwards. 
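 * On error the skb is freed internally via ieee80211_free_txskb().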
3613 */ 3614 static int ath10k_mac_tx(struct ath10k *ar, 3615 struct ieee80211_vif *vif, 3616 enum ath10k_hw_txrx_mode txmode, 3617 enum ath10k_mac_tx_path txpath, 3618 struct sk_buff *skb) 3619 { 3620 struct ieee80211_hw *hw = ar->hw; 3621 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3622 int ret; 3623 3624 /* We should disable CCK RATE due to P2P */ 3625 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 3626 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); 3627 3628 switch (txmode) { 3629 case ATH10K_HW_TXRX_MGMT: 3630 case ATH10K_HW_TXRX_NATIVE_WIFI: 3631 ath10k_tx_h_nwifi(hw, skb); 3632 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); 3633 ath10k_tx_h_seq_no(vif, skb); 3634 break; 3635 case ATH10K_HW_TXRX_ETHERNET: 3636 ath10k_tx_h_8023(skb); 3637 break; 3638 case ATH10K_HW_TXRX_RAW: 3639 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 3640 WARN_ON_ONCE(1); 3641 ieee80211_free_txskb(hw, skb); 3642 return -ENOTSUPP; 3643 } 3644 } 3645 3646 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { 3647 if (!ath10k_mac_tx_frm_has_freq(ar)) { 3648 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n", 3649 skb); 3650 3651 skb_queue_tail(&ar->offchan_tx_queue, skb); 3652 ieee80211_queue_work(hw, &ar->offchan_tx_work); 3653 return 0; 3654 } 3655 } 3656 3657 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb); 3658 if (ret) { 3659 ath10k_warn(ar, "failed to submit frame: %d\n", ret); 3660 return ret; 3661 } 3662 3663 return 0; 3664 } 3665 3666 void ath10k_offchan_tx_purge(struct ath10k *ar) 3667 { 3668 struct sk_buff *skb; 3669 3670 for (;;) { 3671 skb = skb_dequeue(&ar->offchan_tx_queue); 3672 if (!skb) 3673 break; 3674 3675 ieee80211_free_txskb(ar->hw, skb); 3676 } 3677 } 3678 3679 void ath10k_offchan_tx_work(struct work_struct *work) 3680 { 3681 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); 3682 struct ath10k_peer *peer; 3683 struct ath10k_vif *arvif; 3684 enum ath10k_hw_txrx_mode txmode; 3685 enum ath10k_mac_tx_path txpath; 3686 struct ieee80211_hdr *hdr; 3687 struct ieee80211_vif *vif; 3688 struct ieee80211_sta *sta; 3689 struct sk_buff *skb; 3690 const u8 *peer_addr; 3691 int vdev_id; 3692 int ret; 3693 unsigned long time_left; 3694 bool tmp_peer_created = false; 3695 3696 /* FW requirement: We must create a peer before FW will send out 3697 * an offchannel frame. Otherwise the frame will be stuck and 3698 * never transmitted. We delete the peer upon tx completion. 3699 * It is unlikely that a peer for offchannel tx will already be 3700 * present. However it may be in some rare cases so account for that. 3701 * Otherwise we might remove a legitimate peer and break stuff. 3702 */ 3703 3704 for (;;) { 3705 skb = skb_dequeue(&ar->offchan_tx_queue); 3706 if (!skb) 3707 break; 3708 3709 mutex_lock(&ar->conf_mutex); 3710 3711 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n", 3712 skb); 3713 3714 hdr = (struct ieee80211_hdr *)skb->data; 3715 peer_addr = ieee80211_get_DA(hdr); 3716 3717 spin_lock_bh(&ar->data_lock); 3718 vdev_id = ar->scan.vdev_id; 3719 peer = ath10k_peer_find(ar, vdev_id, peer_addr); 3720 spin_unlock_bh(&ar->data_lock); 3721 3722 if (peer) 3723 /* FIXME: should this use ath10k_warn()? 
*/ 3724 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", 3725 peer_addr, vdev_id); 3726 3727 if (!peer) { 3728 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id, 3729 peer_addr, 3730 WMI_PEER_TYPE_DEFAULT); 3731 if (ret) 3732 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", 3733 peer_addr, vdev_id, ret); 3734 tmp_peer_created = (ret == 0); 3735 } 3736 3737 spin_lock_bh(&ar->data_lock); 3738 reinit_completion(&ar->offchan_tx_completed); 3739 ar->offchan_tx_skb = skb; 3740 spin_unlock_bh(&ar->data_lock); 3741 3742 /* It's safe to access vif and sta - conf_mutex guarantees that 3743 * sta_state() and remove_interface() are locked exclusively 3744 * out wrt to this offchannel worker. 3745 */ 3746 arvif = ath10k_get_arvif(ar, vdev_id); 3747 if (arvif) { 3748 vif = arvif->vif; 3749 sta = ieee80211_find_sta(vif, peer_addr); 3750 } else { 3751 vif = NULL; 3752 sta = NULL; 3753 } 3754 3755 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3756 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3757 3758 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3759 if (ret) { 3760 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", 3761 ret); 3762 /* not serious */ 3763 } 3764 3765 time_left = 3766 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); 3767 if (time_left == 0) 3768 ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n", 3769 skb); 3770 3771 if (!peer && tmp_peer_created) { 3772 ret = ath10k_peer_delete(ar, vdev_id, peer_addr); 3773 if (ret) 3774 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", 3775 peer_addr, vdev_id, ret); 3776 } 3777 3778 mutex_unlock(&ar->conf_mutex); 3779 } 3780 } 3781 3782 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) 3783 { 3784 struct sk_buff *skb; 3785 3786 for (;;) { 3787 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3788 if (!skb) 3789 break; 3790 3791 ieee80211_free_txskb(ar->hw, skb); 3792 } 3793 } 3794 3795 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) 3796 { 3797 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); 3798 struct sk_buff *skb; 3799 int ret; 3800 3801 for (;;) { 3802 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3803 if (!skb) 3804 break; 3805 3806 ret = ath10k_wmi_mgmt_tx(ar, skb); 3807 if (ret) { 3808 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", 3809 ret); 3810 ieee80211_free_txskb(ar->hw, skb); 3811 } 3812 } 3813 } 3814 3815 static void ath10k_mac_txq_init(struct ieee80211_txq *txq) 3816 { 3817 struct ath10k_txq *artxq; 3818 3819 if (!txq) 3820 return; 3821 3822 artxq = (void *)txq->drv_priv; 3823 INIT_LIST_HEAD(&artxq->list); 3824 } 3825 3826 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) 3827 { 3828 struct ath10k_txq *artxq; 3829 struct ath10k_skb_cb *cb; 3830 struct sk_buff *msdu; 3831 int msdu_id; 3832 3833 if (!txq) 3834 return; 3835 3836 artxq = (void *)txq->drv_priv; 3837 spin_lock_bh(&ar->txqs_lock); 3838 if (!list_empty(&artxq->list)) 3839 list_del_init(&artxq->list); 3840 spin_unlock_bh(&ar->txqs_lock); 3841 3842 spin_lock_bh(&ar->htt.tx_lock); 3843 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) { 3844 cb = ATH10K_SKB_CB(msdu); 3845 if (cb->txq == txq) 3846 cb->txq = NULL; 3847 } 3848 spin_unlock_bh(&ar->htt.tx_lock); 3849 } 3850 3851 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, 3852 u16 peer_id, 3853 u8 tid) 3854 { 3855 struct ath10k_peer *peer; 3856 3857 lockdep_assert_held(&ar->data_lock); 3858 3859 peer = ar->peer_map[peer_id]; 3860 
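	/* The peer may already be gone or marked as removed (e.g. while a
	 * station is being deleted); in that case there is no txq to return.
	 */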
if (!peer) 3861 return NULL; 3862 3863 if (peer->removed) 3864 return NULL; 3865 3866 if (peer->sta) 3867 return peer->sta->txq[tid]; 3868 else if (peer->vif) 3869 return peer->vif->txq; 3870 else 3871 return NULL; 3872 } 3873 3874 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, 3875 struct ieee80211_txq *txq) 3876 { 3877 struct ath10k *ar = hw->priv; 3878 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3879 3880 /* No need to get locks */ 3881 3882 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) 3883 return true; 3884 3885 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed) 3886 return true; 3887 3888 if (artxq->num_fw_queued < artxq->num_push_allowed) 3889 return true; 3890 3891 return false; 3892 } 3893 3894 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, 3895 struct ieee80211_txq *txq) 3896 { 3897 struct ath10k *ar = hw->priv; 3898 struct ath10k_htt *htt = &ar->htt; 3899 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3900 struct ieee80211_vif *vif = txq->vif; 3901 struct ieee80211_sta *sta = txq->sta; 3902 enum ath10k_hw_txrx_mode txmode; 3903 enum ath10k_mac_tx_path txpath; 3904 struct sk_buff *skb; 3905 struct ieee80211_hdr *hdr; 3906 size_t skb_len; 3907 bool is_mgmt, is_presp; 3908 int ret; 3909 3910 spin_lock_bh(&ar->htt.tx_lock); 3911 ret = ath10k_htt_tx_inc_pending(htt); 3912 spin_unlock_bh(&ar->htt.tx_lock); 3913 3914 if (ret) 3915 return ret; 3916 3917 skb = ieee80211_tx_dequeue(hw, txq); 3918 if (!skb) { 3919 spin_lock_bh(&ar->htt.tx_lock); 3920 ath10k_htt_tx_dec_pending(htt); 3921 spin_unlock_bh(&ar->htt.tx_lock); 3922 3923 return -ENOENT; 3924 } 3925 3926 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); 3927 3928 skb_len = skb->len; 3929 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3930 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3931 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 3932 3933 if (is_mgmt) { 3934 hdr = (struct ieee80211_hdr *)skb->data; 3935 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 3936 3937 spin_lock_bh(&ar->htt.tx_lock); 3938 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 3939 3940 if (ret) { 3941 ath10k_htt_tx_dec_pending(htt); 3942 spin_unlock_bh(&ar->htt.tx_lock); 3943 return ret; 3944 } 3945 spin_unlock_bh(&ar->htt.tx_lock); 3946 } 3947 3948 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3949 if (unlikely(ret)) { 3950 ath10k_warn(ar, "failed to push frame: %d\n", ret); 3951 3952 spin_lock_bh(&ar->htt.tx_lock); 3953 ath10k_htt_tx_dec_pending(htt); 3954 if (is_mgmt) 3955 ath10k_htt_tx_mgmt_dec_pending(htt); 3956 spin_unlock_bh(&ar->htt.tx_lock); 3957 3958 return ret; 3959 } 3960 3961 spin_lock_bh(&ar->htt.tx_lock); 3962 artxq->num_fw_queued++; 3963 spin_unlock_bh(&ar->htt.tx_lock); 3964 3965 return skb_len; 3966 } 3967 3968 void ath10k_mac_tx_push_pending(struct ath10k *ar) 3969 { 3970 struct ieee80211_hw *hw = ar->hw; 3971 struct ieee80211_txq *txq; 3972 struct ath10k_txq *artxq; 3973 struct ath10k_txq *last; 3974 int ret; 3975 int max; 3976 3977 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) 3978 return; 3979 3980 spin_lock_bh(&ar->txqs_lock); 3981 rcu_read_lock(); 3982 3983 last = list_last_entry(&ar->txqs, struct ath10k_txq, list); 3984 while (!list_empty(&ar->txqs)) { 3985 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); 3986 txq = container_of((void *)artxq, struct ieee80211_txq, 3987 drv_priv); 3988 3989 /* Prevent aggressive sta/tid taking over tx queue */ 3990 max = 16; 3991 ret = 0; 3992 while (ath10k_mac_tx_can_push(hw, txq) 
&& max--) { 3993 ret = ath10k_mac_tx_push_txq(hw, txq); 3994 if (ret < 0) 3995 break; 3996 } 3997 3998 list_del_init(&artxq->list); 3999 if (ret != -ENOENT) 4000 list_add_tail(&artxq->list, &ar->txqs); 4001 4002 ath10k_htt_tx_txq_update(hw, txq); 4003 4004 if (artxq == last || (ret < 0 && ret != -ENOENT)) 4005 break; 4006 } 4007 4008 rcu_read_unlock(); 4009 spin_unlock_bh(&ar->txqs_lock); 4010 } 4011 4012 /************/ 4013 /* Scanning */ 4014 /************/ 4015 4016 void __ath10k_scan_finish(struct ath10k *ar) 4017 { 4018 lockdep_assert_held(&ar->data_lock); 4019 4020 switch (ar->scan.state) { 4021 case ATH10K_SCAN_IDLE: 4022 break; 4023 case ATH10K_SCAN_RUNNING: 4024 case ATH10K_SCAN_ABORTING: 4025 if (!ar->scan.is_roc) { 4026 struct cfg80211_scan_info info = { 4027 .aborted = (ar->scan.state == 4028 ATH10K_SCAN_ABORTING), 4029 }; 4030 4031 ieee80211_scan_completed(ar->hw, &info); 4032 } else if (ar->scan.roc_notify) { 4033 ieee80211_remain_on_channel_expired(ar->hw); 4034 } 4035 /* fall through */ 4036 case ATH10K_SCAN_STARTING: 4037 ar->scan.state = ATH10K_SCAN_IDLE; 4038 ar->scan_channel = NULL; 4039 ar->scan.roc_freq = 0; 4040 ath10k_offchan_tx_purge(ar); 4041 cancel_delayed_work(&ar->scan.timeout); 4042 complete(&ar->scan.completed); 4043 break; 4044 } 4045 } 4046 4047 void ath10k_scan_finish(struct ath10k *ar) 4048 { 4049 spin_lock_bh(&ar->data_lock); 4050 __ath10k_scan_finish(ar); 4051 spin_unlock_bh(&ar->data_lock); 4052 } 4053 4054 static int ath10k_scan_stop(struct ath10k *ar) 4055 { 4056 struct wmi_stop_scan_arg arg = { 4057 .req_id = 1, /* FIXME */ 4058 .req_type = WMI_SCAN_STOP_ONE, 4059 .u.scan_id = ATH10K_SCAN_ID, 4060 }; 4061 int ret; 4062 4063 lockdep_assert_held(&ar->conf_mutex); 4064 4065 ret = ath10k_wmi_stop_scan(ar, &arg); 4066 if (ret) { 4067 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); 4068 goto out; 4069 } 4070 4071 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); 4072 if (ret == 0) { 4073 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); 4074 ret = -ETIMEDOUT; 4075 } else if (ret > 0) { 4076 ret = 0; 4077 } 4078 4079 out: 4080 /* Scan state should be updated upon scan completion but in case 4081 * firmware fails to deliver the event (for whatever reason) it is 4082 * desired to clean up scan state anyway. Firmware may have just 4083 * dropped the scan completion event delivery due to transport pipe 4084 * being overflown with data and/or it can recover on its own before 4085 * next scan request is submitted. 4086 */ 4087 spin_lock_bh(&ar->data_lock); 4088 if (ar->scan.state != ATH10K_SCAN_IDLE) 4089 __ath10k_scan_finish(ar); 4090 spin_unlock_bh(&ar->data_lock); 4091 4092 return ret; 4093 } 4094 4095 static void ath10k_scan_abort(struct ath10k *ar) 4096 { 4097 int ret; 4098 4099 lockdep_assert_held(&ar->conf_mutex); 4100 4101 spin_lock_bh(&ar->data_lock); 4102 4103 switch (ar->scan.state) { 4104 case ATH10K_SCAN_IDLE: 4105 /* This can happen if timeout worker kicked in and called 4106 * abortion while scan completion was being processed. 
4107 */ 4108 break; 4109 case ATH10K_SCAN_STARTING: 4110 case ATH10K_SCAN_ABORTING: 4111 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", 4112 ath10k_scan_state_str(ar->scan.state), 4113 ar->scan.state); 4114 break; 4115 case ATH10K_SCAN_RUNNING: 4116 ar->scan.state = ATH10K_SCAN_ABORTING; 4117 spin_unlock_bh(&ar->data_lock); 4118 4119 ret = ath10k_scan_stop(ar); 4120 if (ret) 4121 ath10k_warn(ar, "failed to abort scan: %d\n", ret); 4122 4123 spin_lock_bh(&ar->data_lock); 4124 break; 4125 } 4126 4127 spin_unlock_bh(&ar->data_lock); 4128 } 4129 4130 void ath10k_scan_timeout_work(struct work_struct *work) 4131 { 4132 struct ath10k *ar = container_of(work, struct ath10k, 4133 scan.timeout.work); 4134 4135 mutex_lock(&ar->conf_mutex); 4136 ath10k_scan_abort(ar); 4137 mutex_unlock(&ar->conf_mutex); 4138 } 4139 4140 static int ath10k_start_scan(struct ath10k *ar, 4141 const struct wmi_start_scan_arg *arg) 4142 { 4143 int ret; 4144 4145 lockdep_assert_held(&ar->conf_mutex); 4146 4147 ret = ath10k_wmi_start_scan(ar, arg); 4148 if (ret) 4149 return ret; 4150 4151 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); 4152 if (ret == 0) { 4153 ret = ath10k_scan_stop(ar); 4154 if (ret) 4155 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 4156 4157 return -ETIMEDOUT; 4158 } 4159 4160 /* If we failed to start the scan, return error code at 4161 * this point. This is probably due to some issue in the 4162 * firmware, but no need to wedge the driver due to that... 4163 */ 4164 spin_lock_bh(&ar->data_lock); 4165 if (ar->scan.state == ATH10K_SCAN_IDLE) { 4166 spin_unlock_bh(&ar->data_lock); 4167 return -EINVAL; 4168 } 4169 spin_unlock_bh(&ar->data_lock); 4170 4171 return 0; 4172 } 4173 4174 /**********************/ 4175 /* mac80211 callbacks */ 4176 /**********************/ 4177 4178 static void ath10k_mac_op_tx(struct ieee80211_hw *hw, 4179 struct ieee80211_tx_control *control, 4180 struct sk_buff *skb) 4181 { 4182 struct ath10k *ar = hw->priv; 4183 struct ath10k_htt *htt = &ar->htt; 4184 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 4185 struct ieee80211_vif *vif = info->control.vif; 4186 struct ieee80211_sta *sta = control->sta; 4187 struct ieee80211_txq *txq = NULL; 4188 struct ieee80211_hdr *hdr = (void *)skb->data; 4189 enum ath10k_hw_txrx_mode txmode; 4190 enum ath10k_mac_tx_path txpath; 4191 bool is_htt; 4192 bool is_mgmt; 4193 bool is_presp; 4194 int ret; 4195 4196 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); 4197 4198 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 4199 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 4200 is_htt = (txpath == ATH10K_MAC_TX_HTT || 4201 txpath == ATH10K_MAC_TX_HTT_MGMT); 4202 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 4203 4204 if (is_htt) { 4205 spin_lock_bh(&ar->htt.tx_lock); 4206 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 4207 4208 ret = ath10k_htt_tx_inc_pending(htt); 4209 if (ret) { 4210 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", 4211 ret); 4212 spin_unlock_bh(&ar->htt.tx_lock); 4213 ieee80211_free_txskb(ar->hw, skb); 4214 return; 4215 } 4216 4217 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 4218 if (ret) { 4219 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n", 4220 ret); 4221 ath10k_htt_tx_dec_pending(htt); 4222 spin_unlock_bh(&ar->htt.tx_lock); 4223 ieee80211_free_txskb(ar->hw, skb); 4224 return; 4225 } 4226 spin_unlock_bh(&ar->htt.tx_lock); 4227 } 4228 4229 ret = ath10k_mac_tx(ar, vif, txmode, 
txpath, skb); 4230 if (ret) { 4231 ath10k_warn(ar, "failed to transmit frame: %d\n", ret); 4232 if (is_htt) { 4233 spin_lock_bh(&ar->htt.tx_lock); 4234 ath10k_htt_tx_dec_pending(htt); 4235 if (is_mgmt) 4236 ath10k_htt_tx_mgmt_dec_pending(htt); 4237 spin_unlock_bh(&ar->htt.tx_lock); 4238 } 4239 return; 4240 } 4241 } 4242 4243 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, 4244 struct ieee80211_txq *txq) 4245 { 4246 struct ath10k *ar = hw->priv; 4247 struct ath10k_txq *artxq = (void *)txq->drv_priv; 4248 struct ieee80211_txq *f_txq; 4249 struct ath10k_txq *f_artxq; 4250 int ret = 0; 4251 int max = 16; 4252 4253 spin_lock_bh(&ar->txqs_lock); 4254 if (list_empty(&artxq->list)) 4255 list_add_tail(&artxq->list, &ar->txqs); 4256 4257 f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); 4258 f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv); 4259 list_del_init(&f_artxq->list); 4260 4261 while (ath10k_mac_tx_can_push(hw, f_txq) && max--) { 4262 ret = ath10k_mac_tx_push_txq(hw, f_txq); 4263 if (ret) 4264 break; 4265 } 4266 if (ret != -ENOENT) 4267 list_add_tail(&f_artxq->list, &ar->txqs); 4268 spin_unlock_bh(&ar->txqs_lock); 4269 4270 ath10k_htt_tx_txq_update(hw, f_txq); 4271 ath10k_htt_tx_txq_update(hw, txq); 4272 } 4273 4274 /* Must not be called with conf_mutex held as workers can use that also. */ 4275 void ath10k_drain_tx(struct ath10k *ar) 4276 { 4277 /* make sure rcu-protected mac80211 tx path itself is drained */ 4278 synchronize_net(); 4279 4280 ath10k_offchan_tx_purge(ar); 4281 ath10k_mgmt_over_wmi_tx_purge(ar); 4282 4283 cancel_work_sync(&ar->offchan_tx_work); 4284 cancel_work_sync(&ar->wmi_mgmt_tx_work); 4285 } 4286 4287 void ath10k_halt(struct ath10k *ar) 4288 { 4289 struct ath10k_vif *arvif; 4290 4291 lockdep_assert_held(&ar->conf_mutex); 4292 4293 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 4294 ar->filter_flags = 0; 4295 ar->monitor = false; 4296 ar->monitor_arvif = NULL; 4297 4298 if (ar->monitor_started) 4299 ath10k_monitor_stop(ar); 4300 4301 ar->monitor_started = false; 4302 ar->tx_paused = 0; 4303 4304 ath10k_scan_finish(ar); 4305 ath10k_peer_cleanup_all(ar); 4306 ath10k_core_stop(ar); 4307 ath10k_hif_power_down(ar); 4308 4309 spin_lock_bh(&ar->data_lock); 4310 list_for_each_entry(arvif, &ar->arvifs, list) 4311 ath10k_mac_vif_beacon_cleanup(arvif); 4312 spin_unlock_bh(&ar->data_lock); 4313 } 4314 4315 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 4316 { 4317 struct ath10k *ar = hw->priv; 4318 4319 mutex_lock(&ar->conf_mutex); 4320 4321 *tx_ant = ar->cfg_tx_chainmask; 4322 *rx_ant = ar->cfg_rx_chainmask; 4323 4324 mutex_unlock(&ar->conf_mutex); 4325 4326 return 0; 4327 } 4328 4329 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) 4330 { 4331 /* It is not clear that allowing gaps in chainmask 4332 * is helpful. Probably it will not do what user 4333 * is hoping for, so warn in that case. 4334 */ 4335 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) 4336 return; 4337 4338 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. 
Suggested values: 15, 7, 3, 1 or 0.\n", 4339 dbg, cm); 4340 } 4341 4342 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar) 4343 { 4344 int nsts = ar->vht_cap_info; 4345 4346 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4347 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4348 4349 /* If the firmware does not report the number of supported beamformee 4350 * space-time streams, assume it supports up to 4 BF STS and return 4351 * the value as encoded for the VHT CAP field, i.e. nsts - 1. 4352 */ 4353 if (nsts == 0) 4354 return 3; 4355 4356 return nsts; 4357 } 4358 4359 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar) 4360 { 4361 int sound_dim = ar->vht_cap_info; 4362 4363 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4364 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4365 4366 /* If the sounding dimension is not advertised by the firmware, 4367 * let's use a default value of 1. 4368 */ 4369 if (sound_dim == 0) 4370 return 1; 4371 4372 return sound_dim; 4373 } 4374 4375 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) 4376 { 4377 struct ieee80211_sta_vht_cap vht_cap = {0}; 4378 struct ath10k_hw_params *hw = &ar->hw_params; 4379 u16 mcs_map; 4380 u32 val; 4381 int i; 4382 4383 vht_cap.vht_supported = 1; 4384 vht_cap.cap = ar->vht_cap_info; 4385 4386 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4387 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 4388 val = ath10k_mac_get_vht_cap_bf_sts(ar); 4389 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4390 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4391 4392 vht_cap.cap |= val; 4393 } 4394 4395 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4396 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 4397 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4398 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4399 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4400 4401 vht_cap.cap |= val; 4402 } 4403 4404 /* Currently the firmware seems to be buggy, don't enable 80+80 4405 * mode until that's resolved. 4406 */ 4407 if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) && 4408 (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0) 4409 vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; 4410 4411 mcs_map = 0; 4412 for (i = 0; i < 8; i++) { 4413 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i))) 4414 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4415 else 4416 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4417 } 4418 4419 if (ar->cfg_tx_chainmask <= 1) 4420 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; 4421 4422 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); 4423 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); 4424 4425 /* If we are supporting 160 MHz or 80+80, then the NIC may be able to do 4426 * a restricted NSS for 160 or 80+80 versus what it can do for 80 MHz. Give 4427 * user-space a clue if that is the case.
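* For example (illustrative numbers only): a chip that can do 2x2 at
* 80 MHz but only 1x1 at 160 MHz could advertise
* vht160_mcs_rx_highest/vht160_mcs_tx_highest = 780, i.e. VHT MCS 9,
* 1 SS, 160 MHz with long GI. Leaving both fields at 0 skips this block
* and advertises no additional restriction.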
4428 */ 4429 if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) && 4430 (hw->vht160_mcs_rx_highest != 0 || 4431 hw->vht160_mcs_tx_highest != 0)) { 4432 vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest); 4433 vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest); 4434 } 4435 4436 return vht_cap; 4437 } 4438 4439 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) 4440 { 4441 int i; 4442 struct ieee80211_sta_ht_cap ht_cap = {0}; 4443 4444 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) 4445 return ht_cap; 4446 4447 ht_cap.ht_supported = 1; 4448 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 4449 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 4450 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 4451 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; 4452 ht_cap.cap |= 4453 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; 4454 4455 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) 4456 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; 4457 4458 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) 4459 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 4460 4461 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { 4462 u32 smps; 4463 4464 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 4465 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 4466 4467 ht_cap.cap |= smps; 4468 } 4469 4470 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1)) 4471 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; 4472 4473 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { 4474 u32 stbc; 4475 4476 stbc = ar->ht_cap_info; 4477 stbc &= WMI_HT_CAP_RX_STBC; 4478 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; 4479 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; 4480 stbc &= IEEE80211_HT_CAP_RX_STBC; 4481 4482 ht_cap.cap |= stbc; 4483 } 4484 4485 if (ar->ht_cap_info & WMI_HT_CAP_LDPC) 4486 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 4487 4488 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) 4489 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; 4490 4491 /* max AMSDU is implicitly taken from vht_cap_info */ 4492 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 4493 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 4494 4495 for (i = 0; i < ar->num_rf_chains; i++) { 4496 if (ar->cfg_rx_chainmask & BIT(i)) 4497 ht_cap.mcs.rx_mask[i] = 0xFF; 4498 } 4499 4500 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 4501 4502 return ht_cap; 4503 } 4504 4505 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar) 4506 { 4507 struct ieee80211_supported_band *band; 4508 struct ieee80211_sta_vht_cap vht_cap; 4509 struct ieee80211_sta_ht_cap ht_cap; 4510 4511 ht_cap = ath10k_get_ht_cap(ar); 4512 vht_cap = ath10k_create_vht_cap(ar); 4513 4514 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 4515 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 4516 band->ht_cap = ht_cap; 4517 } 4518 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 4519 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 4520 band->ht_cap = ht_cap; 4521 band->vht_cap = vht_cap; 4522 } 4523 } 4524 4525 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) 4526 { 4527 int ret; 4528 4529 lockdep_assert_held(&ar->conf_mutex); 4530 4531 ath10k_check_chain_mask(ar, tx_ant, "tx"); 4532 ath10k_check_chain_mask(ar, rx_ant, "rx"); 4533 4534 ar->cfg_tx_chainmask = tx_ant; 4535 ar->cfg_rx_chainmask = rx_ant; 4536 4537 if ((ar->state != ATH10K_STATE_ON) && 4538 (ar->state != ATH10K_STATE_RESTARTED)) 4539 return 0; 4540 4541 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, 4542 tx_ant); 4543 if (ret) { 4544 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 
0x%x\n", 4545 ret, tx_ant); 4546 return ret; 4547 } 4548 4549 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, 4550 rx_ant); 4551 if (ret) { 4552 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", 4553 ret, rx_ant); 4554 return ret; 4555 } 4556 4557 /* Reload HT/VHT capability */ 4558 ath10k_mac_setup_ht_vht_cap(ar); 4559 4560 return 0; 4561 } 4562 4563 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 4564 { 4565 struct ath10k *ar = hw->priv; 4566 int ret; 4567 4568 mutex_lock(&ar->conf_mutex); 4569 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant); 4570 mutex_unlock(&ar->conf_mutex); 4571 return ret; 4572 } 4573 4574 static int ath10k_start(struct ieee80211_hw *hw) 4575 { 4576 struct ath10k *ar = hw->priv; 4577 u32 param; 4578 int ret = 0; 4579 4580 /* 4581 * This makes sense only when restarting hw. It is harmless to call 4582 * unconditionally. This is necessary to make sure no HTT/WMI tx 4583 * commands will be submitted while restarting. 4584 */ 4585 ath10k_drain_tx(ar); 4586 4587 mutex_lock(&ar->conf_mutex); 4588 4589 switch (ar->state) { 4590 case ATH10K_STATE_OFF: 4591 ar->state = ATH10K_STATE_ON; 4592 break; 4593 case ATH10K_STATE_RESTARTING: 4594 ar->state = ATH10K_STATE_RESTARTED; 4595 break; 4596 case ATH10K_STATE_ON: 4597 case ATH10K_STATE_RESTARTED: 4598 case ATH10K_STATE_WEDGED: 4599 WARN_ON(1); 4600 ret = -EINVAL; 4601 goto err; 4602 case ATH10K_STATE_UTF: 4603 ret = -EBUSY; 4604 goto err; 4605 } 4606 4607 ret = ath10k_hif_power_up(ar); 4608 if (ret) { 4609 ath10k_err(ar, "Could not init hif: %d\n", ret); 4610 goto err_off; 4611 } 4612 4613 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, 4614 &ar->normal_mode_fw); 4615 if (ret) { 4616 ath10k_err(ar, "Could not init core: %d\n", ret); 4617 goto err_power_down; 4618 } 4619 4620 param = ar->wmi.pdev_param->pmf_qos; 4621 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4622 if (ret) { 4623 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); 4624 goto err_core_stop; 4625 } 4626 4627 param = ar->wmi.pdev_param->dynamic_bw; 4628 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4629 if (ret) { 4630 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); 4631 goto err_core_stop; 4632 } 4633 4634 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { 4635 ret = ath10k_wmi_adaptive_qcs(ar, true); 4636 if (ret) { 4637 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n", 4638 ret); 4639 goto err_core_stop; 4640 } 4641 } 4642 4643 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) { 4644 param = ar->wmi.pdev_param->burst_enable; 4645 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4646 if (ret) { 4647 ath10k_warn(ar, "failed to disable burst: %d\n", ret); 4648 goto err_core_stop; 4649 } 4650 } 4651 4652 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); 4653 4654 /* 4655 * By default FW set ARP frames ac to voice (6). In that case ARP 4656 * exchange is not working properly for UAPSD enabled AP. ARP requests 4657 * which arrives with access category 0 are processed by network stack 4658 * and send back with access category 0, but FW changes access category 4659 * to 6. Set ARP frames access category to best effort (0) solves 4660 * this problem. 
4661 */ 4662 4663 param = ar->wmi.pdev_param->arp_ac_override; 4664 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4665 if (ret) { 4666 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", 4667 ret); 4668 goto err_core_stop; 4669 } 4670 4671 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA, 4672 ar->running_fw->fw_file.fw_features)) { 4673 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1, 4674 WMI_CCA_DETECT_LEVEL_AUTO, 4675 WMI_CCA_DETECT_MARGIN_AUTO); 4676 if (ret) { 4677 ath10k_warn(ar, "failed to enable adaptive cca: %d\n", 4678 ret); 4679 goto err_core_stop; 4680 } 4681 } 4682 4683 param = ar->wmi.pdev_param->ani_enable; 4684 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4685 if (ret) { 4686 ath10k_warn(ar, "failed to enable ani by default: %d\n", 4687 ret); 4688 goto err_core_stop; 4689 } 4690 4691 ar->ani_enabled = true; 4692 4693 if (ath10k_peer_stats_enabled(ar)) { 4694 param = ar->wmi.pdev_param->peer_stats_update_period; 4695 ret = ath10k_wmi_pdev_set_param(ar, param, 4696 PEER_DEFAULT_STATS_UPDATE_PERIOD); 4697 if (ret) { 4698 ath10k_warn(ar, 4699 "failed to set peer stats period : %d\n", 4700 ret); 4701 goto err_core_stop; 4702 } 4703 } 4704 4705 param = ar->wmi.pdev_param->enable_btcoex; 4706 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && 4707 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, 4708 ar->running_fw->fw_file.fw_features)) { 4709 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4710 if (ret) { 4711 ath10k_warn(ar, 4712 "failed to set btcoex param: %d\n", ret); 4713 goto err_core_stop; 4714 } 4715 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); 4716 } 4717 4718 ar->num_started_vdevs = 0; 4719 ath10k_regd_update(ar); 4720 4721 ath10k_spectral_start(ar); 4722 ath10k_thermal_set_throttling(ar); 4723 4724 mutex_unlock(&ar->conf_mutex); 4725 return 0; 4726 4727 err_core_stop: 4728 ath10k_core_stop(ar); 4729 4730 err_power_down: 4731 ath10k_hif_power_down(ar); 4732 4733 err_off: 4734 ar->state = ATH10K_STATE_OFF; 4735 4736 err: 4737 mutex_unlock(&ar->conf_mutex); 4738 return ret; 4739 } 4740 4741 static void ath10k_stop(struct ieee80211_hw *hw) 4742 { 4743 struct ath10k *ar = hw->priv; 4744 4745 ath10k_drain_tx(ar); 4746 4747 mutex_lock(&ar->conf_mutex); 4748 if (ar->state != ATH10K_STATE_OFF) { 4749 ath10k_halt(ar); 4750 ar->state = ATH10K_STATE_OFF; 4751 } 4752 mutex_unlock(&ar->conf_mutex); 4753 4754 cancel_work_sync(&ar->set_coverage_class_work); 4755 cancel_delayed_work_sync(&ar->scan.timeout); 4756 cancel_work_sync(&ar->restart_work); 4757 } 4758 4759 static int ath10k_config_ps(struct ath10k *ar) 4760 { 4761 struct ath10k_vif *arvif; 4762 int ret = 0; 4763 4764 lockdep_assert_held(&ar->conf_mutex); 4765 4766 list_for_each_entry(arvif, &ar->arvifs, list) { 4767 ret = ath10k_mac_vif_setup_ps(arvif); 4768 if (ret) { 4769 ath10k_warn(ar, "failed to setup powersave: %d\n", ret); 4770 break; 4771 } 4772 } 4773 4774 return ret; 4775 } 4776 4777 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) 4778 { 4779 int ret; 4780 u32 param; 4781 4782 lockdep_assert_held(&ar->conf_mutex); 4783 4784 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower); 4785 4786 param = ar->wmi.pdev_param->txpower_limit2g; 4787 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4788 if (ret) { 4789 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", 4790 txpower, ret); 4791 return ret; 4792 } 4793 4794 param = ar->wmi.pdev_param->txpower_limit5g; 4795 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4796 if (ret) { 4797 ath10k_warn(ar, "failed to 
set 5g txpower %d: %d\n", 4798 txpower, ret); 4799 return ret; 4800 } 4801 4802 return 0; 4803 } 4804 4805 static int ath10k_mac_txpower_recalc(struct ath10k *ar) 4806 { 4807 struct ath10k_vif *arvif; 4808 int ret, txpower = -1; 4809 4810 lockdep_assert_held(&ar->conf_mutex); 4811 4812 list_for_each_entry(arvif, &ar->arvifs, list) { 4813 if (arvif->txpower <= 0) 4814 continue; 4815 4816 if (txpower == -1) 4817 txpower = arvif->txpower; 4818 else 4819 txpower = min(txpower, arvif->txpower); 4820 } 4821 4822 if (txpower == -1) 4823 return 0; 4824 4825 ret = ath10k_mac_txpower_setup(ar, txpower); 4826 if (ret) { 4827 ath10k_warn(ar, "failed to setup tx power %d: %d\n", 4828 txpower, ret); 4829 return ret; 4830 } 4831 4832 return 0; 4833 } 4834 4835 static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 4836 { 4837 struct ath10k *ar = hw->priv; 4838 struct ieee80211_conf *conf = &hw->conf; 4839 int ret = 0; 4840 4841 mutex_lock(&ar->conf_mutex); 4842 4843 if (changed & IEEE80211_CONF_CHANGE_PS) 4844 ath10k_config_ps(ar); 4845 4846 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 4847 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; 4848 ret = ath10k_monitor_recalc(ar); 4849 if (ret) 4850 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 4851 } 4852 4853 mutex_unlock(&ar->conf_mutex); 4854 return ret; 4855 } 4856 4857 static u32 get_nss_from_chainmask(u16 chain_mask) 4858 { 4859 if ((chain_mask & 0xf) == 0xf) 4860 return 4; 4861 else if ((chain_mask & 0x7) == 0x7) 4862 return 3; 4863 else if ((chain_mask & 0x3) == 0x3) 4864 return 2; 4865 return 1; 4866 } 4867 4868 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) 4869 { 4870 u32 value = 0; 4871 struct ath10k *ar = arvif->ar; 4872 int nsts; 4873 int sound_dim; 4874 4875 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) 4876 return 0; 4877 4878 nsts = ath10k_mac_get_vht_cap_bf_sts(ar); 4879 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4880 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) 4881 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); 4882 4883 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4884 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4885 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) 4886 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); 4887 4888 if (!value) 4889 return 0; 4890 4891 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 4892 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 4893 4894 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 4895 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER | 4896 WMI_VDEV_PARAM_TXBF_SU_TX_BFER); 4897 4898 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 4899 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 4900 4901 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 4902 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE | 4903 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE); 4904 4905 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 4906 ar->wmi.vdev_param->txbf, value); 4907 } 4908 4909 /* 4910 * TODO: 4911 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, 4912 * because we will send mgmt frames without CCK. This requirement 4913 * for P2P_FIND/GO_NEG should be handled by checking CCK flag 4914 * in the TX packet. 
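* One possible (untested) sketch: mac80211 tags such frames with
* IEEE80211_TX_CTL_NO_CCK_RATE, so the tx path could check
* (IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
* and pick an OFDM management rate for those frames.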
4915 */ 4916 static int ath10k_add_interface(struct ieee80211_hw *hw, 4917 struct ieee80211_vif *vif) 4918 { 4919 struct ath10k *ar = hw->priv; 4920 struct ath10k_vif *arvif = (void *)vif->drv_priv; 4921 struct ath10k_peer *peer; 4922 enum wmi_sta_powersave_param param; 4923 int ret = 0; 4924 u32 value; 4925 int bit; 4926 int i; 4927 u32 vdev_param; 4928 4929 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 4930 4931 mutex_lock(&ar->conf_mutex); 4932 4933 memset(arvif, 0, sizeof(*arvif)); 4934 ath10k_mac_txq_init(vif->txq); 4935 4936 arvif->ar = ar; 4937 arvif->vif = vif; 4938 4939 INIT_LIST_HEAD(&arvif->list); 4940 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work); 4941 INIT_DELAYED_WORK(&arvif->connection_loss_work, 4942 ath10k_mac_vif_sta_connection_loss_work); 4943 4944 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 4945 arvif->bitrate_mask.control[i].legacy = 0xffffffff; 4946 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 4947 sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 4948 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 4949 sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 4950 } 4951 4952 if (ar->num_peers >= ar->max_num_peers) { 4953 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n"); 4954 ret = -ENOBUFS; 4955 goto err; 4956 } 4957 4958 if (ar->free_vdev_map == 0) { 4959 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); 4960 ret = -EBUSY; 4961 goto err; 4962 } 4963 bit = __ffs64(ar->free_vdev_map); 4964 4965 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", 4966 bit, ar->free_vdev_map); 4967 4968 arvif->vdev_id = bit; 4969 arvif->vdev_subtype = 4970 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); 4971 4972 switch (vif->type) { 4973 case NL80211_IFTYPE_P2P_DEVICE: 4974 arvif->vdev_type = WMI_VDEV_TYPE_STA; 4975 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4976 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); 4977 break; 4978 case NL80211_IFTYPE_UNSPECIFIED: 4979 case NL80211_IFTYPE_STATION: 4980 arvif->vdev_type = WMI_VDEV_TYPE_STA; 4981 if (vif->p2p) 4982 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4983 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); 4984 break; 4985 case NL80211_IFTYPE_ADHOC: 4986 arvif->vdev_type = WMI_VDEV_TYPE_IBSS; 4987 break; 4988 case NL80211_IFTYPE_MESH_POINT: 4989 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { 4990 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4991 (ar, WMI_VDEV_SUBTYPE_MESH_11S); 4992 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 4993 ret = -EINVAL; 4994 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); 4995 goto err; 4996 } 4997 arvif->vdev_type = WMI_VDEV_TYPE_AP; 4998 break; 4999 case NL80211_IFTYPE_AP: 5000 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5001 5002 if (vif->p2p) 5003 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5004 (ar, WMI_VDEV_SUBTYPE_P2P_GO); 5005 break; 5006 case NL80211_IFTYPE_MONITOR: 5007 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; 5008 break; 5009 default: 5010 WARN_ON(1); 5011 break; 5012 } 5013 5014 /* Using vdev_id as queue number will make it very easy to do per-vif 5015 * tx queue locking. This shouldn't wrap due to interface combinations 5016 * but do a modulo for correctness sake and prevent using offchannel tx 5017 * queues for regular vif tx. 
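* For example, with IEEE80211_MAX_QUEUES == 16 the modulo below maps
* vdev_id 0..14 straight onto hw queues 0..14 and never hands out
* queue 15, which this driver reserves for offchannel tx.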
5018 */ 5019 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 5020 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 5021 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 5022 5023 /* Some firmware revisions don't wait for beacon tx completion before 5024 * sending another SWBA event. This could lead to hardware using old 5025 * (freed) beacon data in some cases, e.g. tx credit starvation 5026 * combined with missed TBTT. This is very very rare. 5027 * 5028 * On non-IOMMU-enabled hosts this could be a possible security issue 5029 * because hw could beacon some random data on the air. On 5030 * IOMMU-enabled hosts DMAR faults would occur in most cases and target 5031 * device would crash. 5032 * 5033 * Since there are no beacon tx completions (implicit or explicit) 5034 * propagated to host the only workaround for this is to allocate a 5035 * DMA-coherent buffer for the lifetime of a vif and use it for all 5036 * beacon tx commands. Worst case for this approach is some beacons may 5037 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. 5038 */ 5039 if (vif->type == NL80211_IFTYPE_ADHOC || 5040 vif->type == NL80211_IFTYPE_MESH_POINT || 5041 vif->type == NL80211_IFTYPE_AP) { 5042 arvif->beacon_buf = dma_zalloc_coherent(ar->dev, 5043 IEEE80211_MAX_FRAME_LEN, 5044 &arvif->beacon_paddr, 5045 GFP_ATOMIC); 5046 if (!arvif->beacon_buf) { 5047 ret = -ENOMEM; 5048 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", 5049 ret); 5050 goto err; 5051 } 5052 } 5053 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)) 5054 arvif->nohwcrypt = true; 5055 5056 if (arvif->nohwcrypt && 5057 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { ret = -EINVAL; 5058 ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); 5059 goto err; 5060 } 5061 5062 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", 5063 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, 5064 arvif->beacon_buf ? "single-buf" : "per-skb"); 5065 5066 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 5067 arvif->vdev_subtype, vif->addr); 5068 if (ret) { 5069 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", 5070 arvif->vdev_id, ret); 5071 goto err; 5072 } 5073 5074 ar->free_vdev_map &= ~(1LL << arvif->vdev_id); 5075 spin_lock_bh(&ar->data_lock); 5076 list_add(&arvif->list, &ar->arvifs); 5077 spin_unlock_bh(&ar->data_lock); 5078 5079 /* It makes no sense to have firmware do keepalives. mac80211 already 5080 * takes care of this with idle connection polling. 5081 */ 5082 ret = ath10k_mac_vif_disable_keepalive(arvif); 5083 if (ret) { 5084 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", 5085 arvif->vdev_id, ret); 5086 goto err_vdev_delete; 5087 } 5088 5089 arvif->def_wep_key_idx = -1; 5090 5091 vdev_param = ar->wmi.vdev_param->tx_encap_type; 5092 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5093 ATH10K_HW_TXRX_NATIVE_WIFI); 5094 /* 10.X firmware does not support this VDEV parameter. Do not warn */ 5095 if (ret && ret != -EOPNOTSUPP) { 5096 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", 5097 arvif->vdev_id, ret); 5098 goto err_vdev_delete; 5099 } 5100 5101 /* Configuring the number of spatial streams for a monitor interface causes a 5102 * target assert in qca9888 and qca6174.
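* As a worked example of get_nss_from_chainmask() above: 0xf yields 4
* spatial streams, 0x3 yields 2, and a gapped mask such as 0x5 falls
* through to 1, which is one more reason gapped chainmasks are
* discouraged by ath10k_check_chain_mask().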
5103 */ 5104 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { 5105 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); 5106 5107 vdev_param = ar->wmi.vdev_param->nss; 5108 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5109 nss); 5110 if (ret) { 5111 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", 5112 arvif->vdev_id, ar->cfg_tx_chainmask, nss, 5113 ret); 5114 goto err_vdev_delete; 5115 } 5116 } 5117 5118 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5119 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5120 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id, 5121 vif->addr, WMI_PEER_TYPE_DEFAULT); 5122 if (ret) { 5123 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", 5124 arvif->vdev_id, ret); 5125 goto err_vdev_delete; 5126 } 5127 5128 spin_lock_bh(&ar->data_lock); 5129 5130 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr); 5131 if (!peer) { 5132 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 5133 vif->addr, arvif->vdev_id); 5134 spin_unlock_bh(&ar->data_lock); 5135 ret = -ENOENT; 5136 goto err_peer_delete; 5137 } 5138 5139 arvif->peer_id = find_first_bit(peer->peer_ids, 5140 ATH10K_MAX_NUM_PEER_IDS); 5141 5142 spin_unlock_bh(&ar->data_lock); 5143 } else { 5144 arvif->peer_id = HTT_INVALID_PEERID; 5145 } 5146 5147 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 5148 ret = ath10k_mac_set_kickout(arvif); 5149 if (ret) { 5150 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", 5151 arvif->vdev_id, ret); 5152 goto err_peer_delete; 5153 } 5154 } 5155 5156 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { 5157 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY; 5158 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 5159 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5160 param, value); 5161 if (ret) { 5162 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", 5163 arvif->vdev_id, ret); 5164 goto err_peer_delete; 5165 } 5166 5167 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 5168 if (ret) { 5169 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 5170 arvif->vdev_id, ret); 5171 goto err_peer_delete; 5172 } 5173 5174 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 5175 if (ret) { 5176 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 5177 arvif->vdev_id, ret); 5178 goto err_peer_delete; 5179 } 5180 } 5181 5182 ret = ath10k_mac_set_txbf_conf(arvif); 5183 if (ret) { 5184 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n", 5185 arvif->vdev_id, ret); 5186 goto err_peer_delete; 5187 } 5188 5189 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 5190 if (ret) { 5191 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 5192 arvif->vdev_id, ret); 5193 goto err_peer_delete; 5194 } 5195 5196 arvif->txpower = vif->bss_conf.txpower; 5197 ret = ath10k_mac_txpower_recalc(ar); 5198 if (ret) { 5199 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5200 goto err_peer_delete; 5201 } 5202 5203 if (vif->type == NL80211_IFTYPE_MONITOR) { 5204 ar->monitor_arvif = arvif; 5205 ret = ath10k_monitor_recalc(ar); 5206 if (ret) { 5207 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5208 goto err_peer_delete; 5209 } 5210 } 5211 5212 spin_lock_bh(&ar->htt.tx_lock); 5213 if (!ar->tx_paused) 5214 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 5215 spin_unlock_bh(&ar->htt.tx_lock); 5216 5217 mutex_unlock(&ar->conf_mutex); 5218 return 0; 5219 5220 err_peer_delete: 5221 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5222 arvif->vdev_type == 
WMI_VDEV_TYPE_IBSS) 5223 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); 5224 5225 err_vdev_delete: 5226 ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5227 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5228 spin_lock_bh(&ar->data_lock); 5229 list_del(&arvif->list); 5230 spin_unlock_bh(&ar->data_lock); 5231 5232 err: 5233 if (arvif->beacon_buf) { 5234 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 5235 arvif->beacon_buf, arvif->beacon_paddr); 5236 arvif->beacon_buf = NULL; 5237 } 5238 5239 mutex_unlock(&ar->conf_mutex); 5240 5241 return ret; 5242 } 5243 5244 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif) 5245 { 5246 int i; 5247 5248 for (i = 0; i < BITS_PER_LONG; i++) 5249 ath10k_mac_vif_tx_unlock(arvif, i); 5250 } 5251 5252 static void ath10k_remove_interface(struct ieee80211_hw *hw, 5253 struct ieee80211_vif *vif) 5254 { 5255 struct ath10k *ar = hw->priv; 5256 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5257 struct ath10k_peer *peer; 5258 int ret; 5259 int i; 5260 5261 cancel_work_sync(&arvif->ap_csa_work); 5262 cancel_delayed_work_sync(&arvif->connection_loss_work); 5263 5264 mutex_lock(&ar->conf_mutex); 5265 5266 spin_lock_bh(&ar->data_lock); 5267 ath10k_mac_vif_beacon_cleanup(arvif); 5268 spin_unlock_bh(&ar->data_lock); 5269 5270 ret = ath10k_spectral_vif_stop(arvif); 5271 if (ret) 5272 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", 5273 arvif->vdev_id, ret); 5274 5275 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5276 spin_lock_bh(&ar->data_lock); 5277 list_del(&arvif->list); 5278 spin_unlock_bh(&ar->data_lock); 5279 5280 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5281 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5282 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, 5283 vif->addr); 5284 if (ret) 5285 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n", 5286 arvif->vdev_id, ret); 5287 5288 kfree(arvif->u.ap.noa_data); 5289 } 5290 5291 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", 5292 arvif->vdev_id); 5293 5294 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5295 if (ret) 5296 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", 5297 arvif->vdev_id, ret); 5298 5299 /* Some firmware revisions don't notify host about self-peer removal 5300 * until after associated vdev is deleted. 
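* That ordering is why the code below waits for the self-peer via
* ath10k_wait_for_peer_deleted() only after the vdev delete has been
* issued above.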
5301 */ 5302 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5303 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5304 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, 5305 vif->addr); 5306 if (ret) 5307 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", 5308 arvif->vdev_id, ret); 5309 5310 spin_lock_bh(&ar->data_lock); 5311 ar->num_peers--; 5312 spin_unlock_bh(&ar->data_lock); 5313 } 5314 5315 spin_lock_bh(&ar->data_lock); 5316 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 5317 peer = ar->peer_map[i]; 5318 if (!peer) 5319 continue; 5320 5321 if (peer->vif == vif) { 5322 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n", 5323 vif->addr, arvif->vdev_id); 5324 peer->vif = NULL; 5325 } 5326 } 5327 spin_unlock_bh(&ar->data_lock); 5328 5329 ath10k_peer_cleanup(ar, arvif->vdev_id); 5330 ath10k_mac_txq_unref(ar, vif->txq); 5331 5332 if (vif->type == NL80211_IFTYPE_MONITOR) { 5333 ar->monitor_arvif = NULL; 5334 ret = ath10k_monitor_recalc(ar); 5335 if (ret) 5336 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5337 } 5338 5339 ret = ath10k_mac_txpower_recalc(ar); 5340 if (ret) 5341 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5342 5343 spin_lock_bh(&ar->htt.tx_lock); 5344 ath10k_mac_vif_tx_unlock_all(arvif); 5345 spin_unlock_bh(&ar->htt.tx_lock); 5346 5347 ath10k_mac_txq_unref(ar, vif->txq); 5348 5349 mutex_unlock(&ar->conf_mutex); 5350 } 5351 5352 /* 5353 * FIXME: Has to be verified. 5354 */ 5355 #define SUPPORTED_FILTERS \ 5356 (FIF_ALLMULTI | \ 5357 FIF_CONTROL | \ 5358 FIF_PSPOLL | \ 5359 FIF_OTHER_BSS | \ 5360 FIF_BCN_PRBRESP_PROMISC | \ 5361 FIF_PROBE_REQ | \ 5362 FIF_FCSFAIL) 5363 5364 static void ath10k_configure_filter(struct ieee80211_hw *hw, 5365 unsigned int changed_flags, 5366 unsigned int *total_flags, 5367 u64 multicast) 5368 { 5369 struct ath10k *ar = hw->priv; 5370 int ret; 5371 5372 mutex_lock(&ar->conf_mutex); 5373 5374 changed_flags &= SUPPORTED_FILTERS; 5375 *total_flags &= SUPPORTED_FILTERS; 5376 ar->filter_flags = *total_flags; 5377 5378 ret = ath10k_monitor_recalc(ar); 5379 if (ret) 5380 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5381 5382 mutex_unlock(&ar->conf_mutex); 5383 } 5384 5385 static void ath10k_bss_info_changed(struct ieee80211_hw *hw, 5386 struct ieee80211_vif *vif, 5387 struct ieee80211_bss_conf *info, 5388 u32 changed) 5389 { 5390 struct ath10k *ar = hw->priv; 5391 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5392 int ret = 0; 5393 u32 vdev_param, pdev_param, slottime, preamble; 5394 5395 mutex_lock(&ar->conf_mutex); 5396 5397 if (changed & BSS_CHANGED_IBSS) 5398 ath10k_control_ibss(arvif, info, vif->addr); 5399 5400 if (changed & BSS_CHANGED_BEACON_INT) { 5401 arvif->beacon_interval = info->beacon_int; 5402 vdev_param = ar->wmi.vdev_param->beacon_interval; 5403 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5404 arvif->beacon_interval); 5405 ath10k_dbg(ar, ATH10K_DBG_MAC, 5406 "mac vdev %d beacon_interval %d\n", 5407 arvif->vdev_id, arvif->beacon_interval); 5408 5409 if (ret) 5410 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", 5411 arvif->vdev_id, ret); 5412 } 5413 5414 if (changed & BSS_CHANGED_BEACON) { 5415 ath10k_dbg(ar, ATH10K_DBG_MAC, 5416 "vdev %d set beacon tx mode to staggered\n", 5417 arvif->vdev_id); 5418 5419 pdev_param = ar->wmi.pdev_param->beacon_tx_mode; 5420 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 5421 WMI_BEACON_STAGGERED_MODE); 5422 if (ret) 5423 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", 
5424 arvif->vdev_id, ret); 5425 5426 ret = ath10k_mac_setup_bcn_tmpl(arvif); 5427 if (ret) 5428 ath10k_warn(ar, "failed to update beacon template: %d\n", 5429 ret); 5430 5431 if (ieee80211_vif_is_mesh(vif)) { 5432 /* mesh doesn't use SSID but firmware needs it */ 5433 strncpy(arvif->u.ap.ssid, "mesh", 5434 sizeof(arvif->u.ap.ssid)); 5435 arvif->u.ap.ssid_len = 4; 5436 } 5437 } 5438 5439 if (changed & BSS_CHANGED_AP_PROBE_RESP) { 5440 ret = ath10k_mac_setup_prb_tmpl(arvif); 5441 if (ret) 5442 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n", 5443 arvif->vdev_id, ret); 5444 } 5445 5446 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { 5447 arvif->dtim_period = info->dtim_period; 5448 5449 ath10k_dbg(ar, ATH10K_DBG_MAC, 5450 "mac vdev %d dtim_period %d\n", 5451 arvif->vdev_id, arvif->dtim_period); 5452 5453 vdev_param = ar->wmi.vdev_param->dtim_period; 5454 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5455 arvif->dtim_period); 5456 if (ret) 5457 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n", 5458 arvif->vdev_id, ret); 5459 } 5460 5461 if (changed & BSS_CHANGED_SSID && 5462 vif->type == NL80211_IFTYPE_AP) { 5463 arvif->u.ap.ssid_len = info->ssid_len; 5464 if (info->ssid_len) 5465 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len); 5466 arvif->u.ap.hidden_ssid = info->hidden_ssid; 5467 } 5468 5469 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) 5470 ether_addr_copy(arvif->bssid, info->bssid); 5471 5472 if (changed & BSS_CHANGED_BEACON_ENABLED) 5473 ath10k_control_beaconing(arvif, info); 5474 5475 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 5476 arvif->use_cts_prot = info->use_cts_prot; 5477 5478 ret = ath10k_recalc_rtscts_prot(arvif); 5479 if (ret) 5480 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 5481 arvif->vdev_id, ret); 5482 5483 if (ath10k_mac_can_set_cts_prot(arvif)) { 5484 ret = ath10k_mac_set_cts_prot(arvif); 5485 if (ret) 5486 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 5487 arvif->vdev_id, ret); 5488 } 5489 } 5490 5491 if (changed & BSS_CHANGED_ERP_SLOT) { 5492 if (info->use_short_slot) 5493 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ 5494 5495 else 5496 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ 5497 5498 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", 5499 arvif->vdev_id, slottime); 5500 5501 vdev_param = ar->wmi.vdev_param->slot_time; 5502 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5503 slottime); 5504 if (ret) 5505 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n", 5506 arvif->vdev_id, ret); 5507 } 5508 5509 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 5510 if (info->use_short_preamble) 5511 preamble = WMI_VDEV_PREAMBLE_SHORT; 5512 else 5513 preamble = WMI_VDEV_PREAMBLE_LONG; 5514 5515 ath10k_dbg(ar, ATH10K_DBG_MAC, 5516 "mac vdev %d preamble %dn", 5517 arvif->vdev_id, preamble); 5518 5519 vdev_param = ar->wmi.vdev_param->preamble; 5520 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5521 preamble); 5522 if (ret) 5523 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n", 5524 arvif->vdev_id, ret); 5525 } 5526 5527 if (changed & BSS_CHANGED_ASSOC) { 5528 if (info->assoc) { 5529 /* Workaround: Make sure monitor vdev is not running 5530 * when associating to prevent some firmware revisions 5531 * (e.g. 10.1 and 10.2) from crashing. 
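* The monitor vdev is brought back, if still wanted, by the
* ath10k_monitor_recalc() call right after association below.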
5532 */ 5533 if (ar->monitor_started) 5534 ath10k_monitor_stop(ar); 5535 ath10k_bss_assoc(hw, vif, info); 5536 ath10k_monitor_recalc(ar); 5537 } else { 5538 ath10k_bss_disassoc(hw, vif); 5539 } 5540 } 5541 5542 if (changed & BSS_CHANGED_TXPOWER) { 5543 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", 5544 arvif->vdev_id, info->txpower); 5545 5546 arvif->txpower = info->txpower; 5547 ret = ath10k_mac_txpower_recalc(ar); 5548 if (ret) 5549 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5550 } 5551 5552 if (changed & BSS_CHANGED_PS) { 5553 arvif->ps = vif->bss_conf.ps; 5554 5555 ret = ath10k_config_ps(ar); 5556 if (ret) 5557 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", 5558 arvif->vdev_id, ret); 5559 } 5560 5561 mutex_unlock(&ar->conf_mutex); 5562 } 5563 5564 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value) 5565 { 5566 struct ath10k *ar = hw->priv; 5567 5568 /* This function should never be called if setting the coverage class 5569 * is not supported on this hardware. 5570 */ 5571 if (!ar->hw_params.hw_ops->set_coverage_class) { 5572 WARN_ON_ONCE(1); 5573 return; 5574 } 5575 ar->hw_params.hw_ops->set_coverage_class(ar, value); 5576 } 5577 5578 static int ath10k_hw_scan(struct ieee80211_hw *hw, 5579 struct ieee80211_vif *vif, 5580 struct ieee80211_scan_request *hw_req) 5581 { 5582 struct ath10k *ar = hw->priv; 5583 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5584 struct cfg80211_scan_request *req = &hw_req->req; 5585 struct wmi_start_scan_arg arg; 5586 int ret = 0; 5587 int i; 5588 5589 mutex_lock(&ar->conf_mutex); 5590 5591 spin_lock_bh(&ar->data_lock); 5592 switch (ar->scan.state) { 5593 case ATH10K_SCAN_IDLE: 5594 reinit_completion(&ar->scan.started); 5595 reinit_completion(&ar->scan.completed); 5596 ar->scan.state = ATH10K_SCAN_STARTING; 5597 ar->scan.is_roc = false; 5598 ar->scan.vdev_id = arvif->vdev_id; 5599 ret = 0; 5600 break; 5601 case ATH10K_SCAN_STARTING: 5602 case ATH10K_SCAN_RUNNING: 5603 case ATH10K_SCAN_ABORTING: 5604 ret = -EBUSY; 5605 break; 5606 } 5607 spin_unlock_bh(&ar->data_lock); 5608 5609 if (ret) 5610 goto exit; 5611 5612 memset(&arg, 0, sizeof(arg)); 5613 ath10k_wmi_start_scan_init(ar, &arg); 5614 arg.vdev_id = arvif->vdev_id; 5615 arg.scan_id = ATH10K_SCAN_ID; 5616 5617 if (req->ie_len) { 5618 arg.ie_len = req->ie_len; 5619 memcpy(arg.ie, req->ie, arg.ie_len); 5620 } 5621 5622 if (req->n_ssids) { 5623 arg.n_ssids = req->n_ssids; 5624 for (i = 0; i < arg.n_ssids; i++) { 5625 arg.ssids[i].len = req->ssids[i].ssid_len; 5626 arg.ssids[i].ssid = req->ssids[i].ssid; 5627 } 5628 } else { 5629 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 5630 } 5631 5632 if (req->n_channels) { 5633 arg.n_channels = req->n_channels; 5634 for (i = 0; i < arg.n_channels; i++) 5635 arg.channels[i] = req->channels[i]->center_freq; 5636 } 5637 5638 ret = ath10k_start_scan(ar, &arg); 5639 if (ret) { 5640 ath10k_warn(ar, "failed to start hw scan: %d\n", ret); 5641 spin_lock_bh(&ar->data_lock); 5642 ar->scan.state = ATH10K_SCAN_IDLE; 5643 spin_unlock_bh(&ar->data_lock); 5644 } 5645 5646 /* Add a 200ms margin to account for event/command processing */ 5647 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 5648 msecs_to_jiffies(arg.max_scan_time + 5649 200)); 5650 5651 exit: 5652 mutex_unlock(&ar->conf_mutex); 5653 return ret; 5654 } 5655 5656 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, 5657 struct ieee80211_vif *vif) 5658 { 5659 struct ath10k *ar = hw->priv; 5660 5661 mutex_lock(&ar->conf_mutex); 5662 
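/* The scan timeout worker takes conf_mutex itself (see
 * ath10k_scan_timeout_work()), so the cancel_delayed_work_sync() below
 * must only run after conf_mutex has been dropped to avoid a deadlock.
 */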
ath10k_scan_abort(ar); 5663 mutex_unlock(&ar->conf_mutex); 5664 5665 cancel_delayed_work_sync(&ar->scan.timeout); 5666 } 5667 5668 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, 5669 struct ath10k_vif *arvif, 5670 enum set_key_cmd cmd, 5671 struct ieee80211_key_conf *key) 5672 { 5673 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; 5674 int ret; 5675 5676 /* 10.1 firmware branch requires default key index to be set to group 5677 * key index after installing it. Otherwise FW/HW Txes corrupted 5678 * frames with multi-vif APs. This is not required for main firmware 5679 * branch (e.g. 636). 5680 * 5681 * This is also needed for 636 fw for IBSS-RSN to work more reliably. 5682 * 5683 * FIXME: It remains unknown if this is required for multi-vif STA 5684 * interfaces on 10.1. 5685 */ 5686 5687 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 5688 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 5689 return; 5690 5691 if (key->cipher == WLAN_CIPHER_SUITE_WEP40) 5692 return; 5693 5694 if (key->cipher == WLAN_CIPHER_SUITE_WEP104) 5695 return; 5696 5697 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 5698 return; 5699 5700 if (cmd != SET_KEY) 5701 return; 5702 5703 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5704 key->keyidx); 5705 if (ret) 5706 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", 5707 arvif->vdev_id, ret); 5708 } 5709 5710 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 5711 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 5712 struct ieee80211_key_conf *key) 5713 { 5714 struct ath10k *ar = hw->priv; 5715 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5716 struct ath10k_peer *peer; 5717 const u8 *peer_addr; 5718 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || 5719 key->cipher == WLAN_CIPHER_SUITE_WEP104; 5720 int ret = 0; 5721 int ret2; 5722 u32 flags = 0; 5723 u32 flags2; 5724 5725 /* this one needs to be done in software */ 5726 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) 5727 return 1; 5728 5729 if (arvif->nohwcrypt) 5730 return 1; 5731 5732 if (key->keyidx > WMI_MAX_KEY_INDEX) 5733 return -ENOSPC; 5734 5735 mutex_lock(&ar->conf_mutex); 5736 5737 if (sta) 5738 peer_addr = sta->addr; 5739 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 5740 peer_addr = vif->bss_conf.bssid; 5741 else 5742 peer_addr = vif->addr; 5743 5744 key->hw_key_idx = key->keyidx; 5745 5746 if (is_wep) { 5747 if (cmd == SET_KEY) 5748 arvif->wep_keys[key->keyidx] = key; 5749 else 5750 arvif->wep_keys[key->keyidx] = NULL; 5751 } 5752 5753 /* the peer should not disappear in mid-way (unless FW goes awry) since 5754 * we already hold conf_mutex. we just make sure its there now. 5755 */ 5756 spin_lock_bh(&ar->data_lock); 5757 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 5758 spin_unlock_bh(&ar->data_lock); 5759 5760 if (!peer) { 5761 if (cmd == SET_KEY) { 5762 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", 5763 peer_addr); 5764 ret = -EOPNOTSUPP; 5765 goto exit; 5766 } else { 5767 /* if the peer doesn't exist there is no key to disable anymore */ 5768 goto exit; 5769 } 5770 } 5771 5772 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 5773 flags |= WMI_KEY_PAIRWISE; 5774 else 5775 flags |= WMI_KEY_GROUP; 5776 5777 if (is_wep) { 5778 if (cmd == DISABLE_KEY) 5779 ath10k_clear_vdev_key(arvif, key); 5780 5781 /* When WEP keys are uploaded it's possible that there are 5782 * stations associated already (e.g. when merging) without any 5783 * keys. Static WEP needs an explicit per-peer key upload. 
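* Hence the explicit ath10k_mac_vif_update_wep_key() call below for
* IBSS, which takes care of that per-peer upload for the peers already
* known on this vdev.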
5784 */ 5785 if (vif->type == NL80211_IFTYPE_ADHOC && 5786 cmd == SET_KEY) 5787 ath10k_mac_vif_update_wep_key(arvif, key); 5788 5789 /* 802.1x never sets the def_wep_key_idx so each set_key() 5790 * call changes default tx key. 5791 * 5792 * Static WEP sets def_wep_key_idx via .set_default_unicast_key 5793 * after first set_key(). 5794 */ 5795 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1) 5796 flags |= WMI_KEY_TX_USAGE; 5797 } 5798 5799 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags); 5800 if (ret) { 5801 WARN_ON(ret > 0); 5802 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", 5803 arvif->vdev_id, peer_addr, ret); 5804 goto exit; 5805 } 5806 5807 /* mac80211 sets static WEP keys as groupwise while firmware requires 5808 * them to be installed twice as both pairwise and groupwise. 5809 */ 5810 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) { 5811 flags2 = flags; 5812 flags2 &= ~WMI_KEY_GROUP; 5813 flags2 |= WMI_KEY_PAIRWISE; 5814 5815 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); 5816 if (ret) { 5817 WARN_ON(ret > 0); 5818 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", 5819 arvif->vdev_id, peer_addr, ret); 5820 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, 5821 peer_addr, flags); 5822 if (ret2) { 5823 WARN_ON(ret2 > 0); 5824 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", 5825 arvif->vdev_id, peer_addr, ret2); 5826 } 5827 goto exit; 5828 } 5829 } 5830 5831 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); 5832 5833 spin_lock_bh(&ar->data_lock); 5834 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 5835 if (peer && cmd == SET_KEY) 5836 peer->keys[key->keyidx] = key; 5837 else if (peer && cmd == DISABLE_KEY) 5838 peer->keys[key->keyidx] = NULL; 5839 else if (peer == NULL) 5840 /* impossible unless FW goes crazy */ 5841 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); 5842 spin_unlock_bh(&ar->data_lock); 5843 5844 exit: 5845 mutex_unlock(&ar->conf_mutex); 5846 return ret; 5847 } 5848 5849 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, 5850 struct ieee80211_vif *vif, 5851 int keyidx) 5852 { 5853 struct ath10k *ar = hw->priv; 5854 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5855 int ret; 5856 5857 mutex_lock(&arvif->ar->conf_mutex); 5858 5859 if (arvif->ar->state != ATH10K_STATE_ON) 5860 goto unlock; 5861 5862 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", 5863 arvif->vdev_id, keyidx); 5864 5865 ret = ath10k_wmi_vdev_set_param(arvif->ar, 5866 arvif->vdev_id, 5867 arvif->ar->wmi.vdev_param->def_keyid, 5868 keyidx); 5869 5870 if (ret) { 5871 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", 5872 arvif->vdev_id, 5873 ret); 5874 goto unlock; 5875 } 5876 5877 arvif->def_wep_key_idx = keyidx; 5878 5879 unlock: 5880 mutex_unlock(&arvif->ar->conf_mutex); 5881 } 5882 5883 static void ath10k_sta_rc_update_wk(struct work_struct *wk) 5884 { 5885 struct ath10k *ar; 5886 struct ath10k_vif *arvif; 5887 struct ath10k_sta *arsta; 5888 struct ieee80211_sta *sta; 5889 struct cfg80211_chan_def def; 5890 enum nl80211_band band; 5891 const u8 *ht_mcs_mask; 5892 const u16 *vht_mcs_mask; 5893 u32 changed, bw, nss, smps; 5894 int err; 5895 5896 arsta = container_of(wk, struct ath10k_sta, update_wk); 5897 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); 5898 arvif = arsta->arvif; 5899 ar = arvif->ar; 5900 5901 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 5902 return; 5903 5904 band = def.chan->band; 
5905 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 5906 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 5907 5908 spin_lock_bh(&ar->data_lock); 5909 5910 changed = arsta->changed; 5911 arsta->changed = 0; 5912 5913 bw = arsta->bw; 5914 nss = arsta->nss; 5915 smps = arsta->smps; 5916 5917 spin_unlock_bh(&ar->data_lock); 5918 5919 mutex_lock(&ar->conf_mutex); 5920 5921 nss = max_t(u32, 1, nss); 5922 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask), 5923 ath10k_mac_max_vht_nss(vht_mcs_mask))); 5924 5925 if (changed & IEEE80211_RC_BW_CHANGED) { 5926 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", 5927 sta->addr, bw); 5928 5929 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 5930 WMI_PEER_CHAN_WIDTH, bw); 5931 if (err) 5932 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", 5933 sta->addr, bw, err); 5934 } 5935 5936 if (changed & IEEE80211_RC_NSS_CHANGED) { 5937 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", 5938 sta->addr, nss); 5939 5940 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 5941 WMI_PEER_NSS, nss); 5942 if (err) 5943 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", 5944 sta->addr, nss, err); 5945 } 5946 5947 if (changed & IEEE80211_RC_SMPS_CHANGED) { 5948 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", 5949 sta->addr, smps); 5950 5951 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 5952 WMI_PEER_SMPS_STATE, smps); 5953 if (err) 5954 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", 5955 sta->addr, smps, err); 5956 } 5957 5958 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED || 5959 changed & IEEE80211_RC_NSS_CHANGED) { 5960 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n", 5961 sta->addr); 5962 5963 err = ath10k_station_assoc(ar, arvif->vif, sta, true); 5964 if (err) 5965 ath10k_warn(ar, "failed to reassociate station: %pM\n", 5966 sta->addr); 5967 } 5968 5969 mutex_unlock(&ar->conf_mutex); 5970 } 5971 5972 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif, 5973 struct ieee80211_sta *sta) 5974 { 5975 struct ath10k *ar = arvif->ar; 5976 5977 lockdep_assert_held(&ar->conf_mutex); 5978 5979 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 5980 return 0; 5981 5982 if (ar->num_stations >= ar->max_num_stations) 5983 return -ENOBUFS; 5984 5985 ar->num_stations++; 5986 5987 return 0; 5988 } 5989 5990 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif, 5991 struct ieee80211_sta *sta) 5992 { 5993 struct ath10k *ar = arvif->ar; 5994 5995 lockdep_assert_held(&ar->conf_mutex); 5996 5997 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 5998 return; 5999 6000 ar->num_stations--; 6001 } 6002 6003 struct ath10k_mac_tdls_iter_data { 6004 u32 num_tdls_stations; 6005 struct ieee80211_vif *curr_vif; 6006 }; 6007 6008 static void ath10k_mac_tdls_vif_stations_count_iter(void *data, 6009 struct ieee80211_sta *sta) 6010 { 6011 struct ath10k_mac_tdls_iter_data *iter_data = data; 6012 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6013 struct ieee80211_vif *sta_vif = arsta->arvif->vif; 6014 6015 if (sta->tdls && sta_vif == iter_data->curr_vif) 6016 iter_data->num_tdls_stations++; 6017 } 6018 6019 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, 6020 struct ieee80211_vif *vif) 6021 { 6022 struct ath10k_mac_tdls_iter_data data = {}; 6023 6024 data.curr_vif = vif; 6025 6026 ieee80211_iterate_stations_atomic(hw, 6027 ath10k_mac_tdls_vif_stations_count_iter, 6028 &data); 
6029 return data.num_tdls_stations; 6030 } 6031 6032 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac, 6033 struct ieee80211_vif *vif) 6034 { 6035 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6036 int *num_tdls_vifs = data; 6037 6038 if (vif->type != NL80211_IFTYPE_STATION) 6039 return; 6040 6041 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0) 6042 (*num_tdls_vifs)++; 6043 } 6044 6045 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw) 6046 { 6047 int num_tdls_vifs = 0; 6048 6049 ieee80211_iterate_active_interfaces_atomic(hw, 6050 IEEE80211_IFACE_ITER_NORMAL, 6051 ath10k_mac_tdls_vifs_count_iter, 6052 &num_tdls_vifs); 6053 return num_tdls_vifs; 6054 } 6055 6056 static int ath10k_sta_state(struct ieee80211_hw *hw, 6057 struct ieee80211_vif *vif, 6058 struct ieee80211_sta *sta, 6059 enum ieee80211_sta_state old_state, 6060 enum ieee80211_sta_state new_state) 6061 { 6062 struct ath10k *ar = hw->priv; 6063 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6064 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6065 struct ath10k_peer *peer; 6066 int ret = 0; 6067 int i; 6068 6069 if (old_state == IEEE80211_STA_NOTEXIST && 6070 new_state == IEEE80211_STA_NONE) { 6071 memset(arsta, 0, sizeof(*arsta)); 6072 arsta->arvif = arvif; 6073 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); 6074 6075 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6076 ath10k_mac_txq_init(sta->txq[i]); 6077 } 6078 6079 /* cancel must be done outside the mutex to avoid deadlock */ 6080 if ((old_state == IEEE80211_STA_NONE && 6081 new_state == IEEE80211_STA_NOTEXIST)) 6082 cancel_work_sync(&arsta->update_wk); 6083 6084 mutex_lock(&ar->conf_mutex); 6085 6086 if (old_state == IEEE80211_STA_NOTEXIST && 6087 new_state == IEEE80211_STA_NONE) { 6088 /* 6089 * New station addition. 
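		 *
		 * The sequence below enforces the TDLS vdev limit where
		 * applicable, bumps the station counter, creates the firmware
		 * peer, caches the peer_id used by the tx/rx fast paths and,
		 * for TDLS peers, enables TDLS on the vdev. Every failure path
		 * unwinds whatever steps had already been taken.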
6090 */ 6091 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT; 6092 u32 num_tdls_stations; 6093 u32 num_tdls_vifs; 6094 6095 ath10k_dbg(ar, ATH10K_DBG_MAC, 6096 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", 6097 arvif->vdev_id, sta->addr, 6098 ar->num_stations + 1, ar->max_num_stations, 6099 ar->num_peers + 1, ar->max_num_peers); 6100 6101 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif); 6102 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw); 6103 6104 if (sta->tdls) { 6105 if (num_tdls_stations >= ar->max_num_tdls_vdevs) { 6106 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n", 6107 arvif->vdev_id, 6108 ar->max_num_tdls_vdevs); 6109 ret = -ELNRNG; 6110 goto exit; 6111 } 6112 peer_type = WMI_PEER_TYPE_TDLS; 6113 } 6114 6115 ret = ath10k_mac_inc_num_stations(arvif, sta); 6116 if (ret) { 6117 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", 6118 ar->max_num_stations); 6119 goto exit; 6120 } 6121 6122 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id, 6123 sta->addr, peer_type); 6124 if (ret) { 6125 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", 6126 sta->addr, arvif->vdev_id, ret); 6127 ath10k_mac_dec_num_stations(arvif, sta); 6128 goto exit; 6129 } 6130 6131 spin_lock_bh(&ar->data_lock); 6132 6133 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 6134 if (!peer) { 6135 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 6136 vif->addr, arvif->vdev_id); 6137 spin_unlock_bh(&ar->data_lock); 6138 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6139 ath10k_mac_dec_num_stations(arvif, sta); 6140 ret = -ENOENT; 6141 goto exit; 6142 } 6143 6144 arsta->peer_id = find_first_bit(peer->peer_ids, 6145 ATH10K_MAX_NUM_PEER_IDS); 6146 6147 spin_unlock_bh(&ar->data_lock); 6148 6149 if (!sta->tdls) 6150 goto exit; 6151 6152 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6153 WMI_TDLS_ENABLE_ACTIVE); 6154 if (ret) { 6155 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6156 arvif->vdev_id, ret); 6157 ath10k_peer_delete(ar, arvif->vdev_id, 6158 sta->addr); 6159 ath10k_mac_dec_num_stations(arvif, sta); 6160 goto exit; 6161 } 6162 6163 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6164 WMI_TDLS_PEER_STATE_PEERING); 6165 if (ret) { 6166 ath10k_warn(ar, 6167 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n", 6168 sta->addr, arvif->vdev_id, ret); 6169 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6170 ath10k_mac_dec_num_stations(arvif, sta); 6171 6172 if (num_tdls_stations != 0) 6173 goto exit; 6174 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6175 WMI_TDLS_DISABLE); 6176 } 6177 } else if ((old_state == IEEE80211_STA_NONE && 6178 new_state == IEEE80211_STA_NOTEXIST)) { 6179 /* 6180 * Existing station deletion. 
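		 *
		 * Mirror image of the addition path: delete the firmware peer,
		 * drop the station counter, sweep ar->peer_map for stale
		 * entries still pointing at this sta, release its tx queues
		 * and, if this was the vif's last TDLS peer, disable firmware
		 * TDLS for the vdev.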
6181 */ 6182 ath10k_dbg(ar, ATH10K_DBG_MAC, 6183 "mac vdev %d peer delete %pM sta %pK (sta gone)\n", 6184 arvif->vdev_id, sta->addr, sta); 6185 6186 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6187 if (ret) 6188 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", 6189 sta->addr, arvif->vdev_id, ret); 6190 6191 ath10k_mac_dec_num_stations(arvif, sta); 6192 6193 spin_lock_bh(&ar->data_lock); 6194 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 6195 peer = ar->peer_map[i]; 6196 if (!peer) 6197 continue; 6198 6199 if (peer->sta == sta) { 6200 ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n", 6201 sta->addr, peer, i, arvif->vdev_id); 6202 peer->sta = NULL; 6203 6204 /* Clean up the peer object as well since we 6205 * must have failed to do this above. 6206 */ 6207 list_del(&peer->list); 6208 ar->peer_map[i] = NULL; 6209 kfree(peer); 6210 ar->num_peers--; 6211 } 6212 } 6213 spin_unlock_bh(&ar->data_lock); 6214 6215 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6216 ath10k_mac_txq_unref(ar, sta->txq[i]); 6217 6218 if (!sta->tdls) 6219 goto exit; 6220 6221 if (ath10k_mac_tdls_vif_stations_count(hw, vif)) 6222 goto exit; 6223 6224 /* This was the last tdls peer in current vif */ 6225 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6226 WMI_TDLS_DISABLE); 6227 if (ret) { 6228 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6229 arvif->vdev_id, ret); 6230 } 6231 } else if (old_state == IEEE80211_STA_AUTH && 6232 new_state == IEEE80211_STA_ASSOC && 6233 (vif->type == NL80211_IFTYPE_AP || 6234 vif->type == NL80211_IFTYPE_MESH_POINT || 6235 vif->type == NL80211_IFTYPE_ADHOC)) { 6236 /* 6237 * New association. 6238 */ 6239 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", 6240 sta->addr); 6241 6242 ret = ath10k_station_assoc(ar, vif, sta, false); 6243 if (ret) 6244 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", 6245 sta->addr, arvif->vdev_id, ret); 6246 } else if (old_state == IEEE80211_STA_ASSOC && 6247 new_state == IEEE80211_STA_AUTHORIZED && 6248 sta->tdls) { 6249 /* 6250 * Tdls station authorized. 6251 */ 6252 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n", 6253 sta->addr); 6254 6255 ret = ath10k_station_assoc(ar, vif, sta, false); 6256 if (ret) { 6257 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n", 6258 sta->addr, arvif->vdev_id, ret); 6259 goto exit; 6260 } 6261 6262 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6263 WMI_TDLS_PEER_STATE_CONNECTED); 6264 if (ret) 6265 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n", 6266 sta->addr, arvif->vdev_id, ret); 6267 } else if (old_state == IEEE80211_STA_ASSOC && 6268 new_state == IEEE80211_STA_AUTH && 6269 (vif->type == NL80211_IFTYPE_AP || 6270 vif->type == NL80211_IFTYPE_MESH_POINT || 6271 vif->type == NL80211_IFTYPE_ADHOC)) { 6272 /* 6273 * Disassociation. 
6274 */ 6275 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", 6276 sta->addr); 6277 6278 ret = ath10k_station_disassoc(ar, vif, sta); 6279 if (ret) 6280 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", 6281 sta->addr, arvif->vdev_id, ret); 6282 } 6283 exit: 6284 mutex_unlock(&ar->conf_mutex); 6285 return ret; 6286 } 6287 6288 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, 6289 u16 ac, bool enable) 6290 { 6291 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6292 struct wmi_sta_uapsd_auto_trig_arg arg = {}; 6293 u32 prio = 0, acc = 0; 6294 u32 value = 0; 6295 int ret = 0; 6296 6297 lockdep_assert_held(&ar->conf_mutex); 6298 6299 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 6300 return 0; 6301 6302 switch (ac) { 6303 case IEEE80211_AC_VO: 6304 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 6305 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 6306 prio = 7; 6307 acc = 3; 6308 break; 6309 case IEEE80211_AC_VI: 6310 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 6311 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 6312 prio = 5; 6313 acc = 2; 6314 break; 6315 case IEEE80211_AC_BE: 6316 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 6317 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 6318 prio = 2; 6319 acc = 1; 6320 break; 6321 case IEEE80211_AC_BK: 6322 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 6323 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 6324 prio = 0; 6325 acc = 0; 6326 break; 6327 } 6328 6329 if (enable) 6330 arvif->u.sta.uapsd |= value; 6331 else 6332 arvif->u.sta.uapsd &= ~value; 6333 6334 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6335 WMI_STA_PS_PARAM_UAPSD, 6336 arvif->u.sta.uapsd); 6337 if (ret) { 6338 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); 6339 goto exit; 6340 } 6341 6342 if (arvif->u.sta.uapsd) 6343 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; 6344 else 6345 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 6346 6347 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6348 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 6349 value); 6350 if (ret) 6351 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); 6352 6353 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 6354 if (ret) { 6355 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 6356 arvif->vdev_id, ret); 6357 return ret; 6358 } 6359 6360 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 6361 if (ret) { 6362 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 6363 arvif->vdev_id, ret); 6364 return ret; 6365 } 6366 6367 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || 6368 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { 6369 /* Only userspace can make an educated decision when to send 6370 * trigger frame. The following effectively disables u-UAPSD 6371 * autotrigger in firmware (which is enabled by default 6372 * provided the autotrigger service is available). 
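		 *
		 * Concretely: service_interval is set to 0 and both
		 * suspend_interval and delay_interval to
		 * WMI_STA_UAPSD_MAX_INTERVAL_MSEC below, which effectively
		 * keeps the firmware from generating trigger frames on its
		 * own.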
6373 */ 6374 6375 arg.wmm_ac = acc; 6376 arg.user_priority = prio; 6377 arg.service_interval = 0; 6378 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6379 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6380 6381 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, 6382 arvif->bssid, &arg, 1); 6383 if (ret) { 6384 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", 6385 ret); 6386 return ret; 6387 } 6388 } 6389 6390 exit: 6391 return ret; 6392 } 6393 6394 static int ath10k_conf_tx(struct ieee80211_hw *hw, 6395 struct ieee80211_vif *vif, u16 ac, 6396 const struct ieee80211_tx_queue_params *params) 6397 { 6398 struct ath10k *ar = hw->priv; 6399 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6400 struct wmi_wmm_params_arg *p = NULL; 6401 int ret; 6402 6403 mutex_lock(&ar->conf_mutex); 6404 6405 switch (ac) { 6406 case IEEE80211_AC_VO: 6407 p = &arvif->wmm_params.ac_vo; 6408 break; 6409 case IEEE80211_AC_VI: 6410 p = &arvif->wmm_params.ac_vi; 6411 break; 6412 case IEEE80211_AC_BE: 6413 p = &arvif->wmm_params.ac_be; 6414 break; 6415 case IEEE80211_AC_BK: 6416 p = &arvif->wmm_params.ac_bk; 6417 break; 6418 } 6419 6420 if (WARN_ON(!p)) { 6421 ret = -EINVAL; 6422 goto exit; 6423 } 6424 6425 p->cwmin = params->cw_min; 6426 p->cwmax = params->cw_max; 6427 p->aifs = params->aifs; 6428 6429 /* 6430 * The channel time duration programmed in the HW is in absolute 6431 * microseconds, while mac80211 gives the txop in units of 6432 * 32 microseconds. 6433 */ 6434 p->txop = params->txop * 32; 6435 6436 if (ar->wmi.ops->gen_vdev_wmm_conf) { 6437 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, 6438 &arvif->wmm_params); 6439 if (ret) { 6440 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", 6441 arvif->vdev_id, ret); 6442 goto exit; 6443 } 6444 } else { 6445 /* This won't work well with multi-interface cases but it's 6446 * better than nothing. 
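		 * (The pdev-wide fallback applies the most recently configured
		 * parameters to the whole device, so with several vifs the
		 * last conf_tx() call effectively wins.)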
6447 */ 6448 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); 6449 if (ret) { 6450 ath10k_warn(ar, "failed to set wmm params: %d\n", ret); 6451 goto exit; 6452 } 6453 } 6454 6455 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 6456 if (ret) 6457 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); 6458 6459 exit: 6460 mutex_unlock(&ar->conf_mutex); 6461 return ret; 6462 } 6463 6464 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ) 6465 6466 static int ath10k_remain_on_channel(struct ieee80211_hw *hw, 6467 struct ieee80211_vif *vif, 6468 struct ieee80211_channel *chan, 6469 int duration, 6470 enum ieee80211_roc_type type) 6471 { 6472 struct ath10k *ar = hw->priv; 6473 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6474 struct wmi_start_scan_arg arg; 6475 int ret = 0; 6476 u32 scan_time_msec; 6477 6478 mutex_lock(&ar->conf_mutex); 6479 6480 spin_lock_bh(&ar->data_lock); 6481 switch (ar->scan.state) { 6482 case ATH10K_SCAN_IDLE: 6483 reinit_completion(&ar->scan.started); 6484 reinit_completion(&ar->scan.completed); 6485 reinit_completion(&ar->scan.on_channel); 6486 ar->scan.state = ATH10K_SCAN_STARTING; 6487 ar->scan.is_roc = true; 6488 ar->scan.vdev_id = arvif->vdev_id; 6489 ar->scan.roc_freq = chan->center_freq; 6490 ar->scan.roc_notify = true; 6491 ret = 0; 6492 break; 6493 case ATH10K_SCAN_STARTING: 6494 case ATH10K_SCAN_RUNNING: 6495 case ATH10K_SCAN_ABORTING: 6496 ret = -EBUSY; 6497 break; 6498 } 6499 spin_unlock_bh(&ar->data_lock); 6500 6501 if (ret) 6502 goto exit; 6503 6504 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; 6505 6506 memset(&arg, 0, sizeof(arg)); 6507 ath10k_wmi_start_scan_init(ar, &arg); 6508 arg.vdev_id = arvif->vdev_id; 6509 arg.scan_id = ATH10K_SCAN_ID; 6510 arg.n_channels = 1; 6511 arg.channels[0] = chan->center_freq; 6512 arg.dwell_time_active = scan_time_msec; 6513 arg.dwell_time_passive = scan_time_msec; 6514 arg.max_scan_time = scan_time_msec; 6515 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 6516 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; 6517 arg.burst_duration_ms = duration; 6518 6519 ret = ath10k_start_scan(ar, &arg); 6520 if (ret) { 6521 ath10k_warn(ar, "failed to start roc scan: %d\n", ret); 6522 spin_lock_bh(&ar->data_lock); 6523 ar->scan.state = ATH10K_SCAN_IDLE; 6524 spin_unlock_bh(&ar->data_lock); 6525 goto exit; 6526 } 6527 6528 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); 6529 if (ret == 0) { 6530 ath10k_warn(ar, "failed to switch to channel for roc scan\n"); 6531 6532 ret = ath10k_scan_stop(ar); 6533 if (ret) 6534 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 6535 6536 ret = -ETIMEDOUT; 6537 goto exit; 6538 } 6539 6540 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 6541 msecs_to_jiffies(duration)); 6542 6543 ret = 0; 6544 exit: 6545 mutex_unlock(&ar->conf_mutex); 6546 return ret; 6547 } 6548 6549 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) 6550 { 6551 struct ath10k *ar = hw->priv; 6552 6553 mutex_lock(&ar->conf_mutex); 6554 6555 spin_lock_bh(&ar->data_lock); 6556 ar->scan.roc_notify = false; 6557 spin_unlock_bh(&ar->data_lock); 6558 6559 ath10k_scan_abort(ar); 6560 6561 mutex_unlock(&ar->conf_mutex); 6562 6563 cancel_delayed_work_sync(&ar->scan.timeout); 6564 6565 return 0; 6566 } 6567 6568 /* 6569 * Both RTS and Fragmentation threshold are interface-specific 6570 * in ath10k, but device-specific in mac80211. 
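 * ath10k_set_rts_threshold() below therefore fans the single value received
 * from mac80211 out to every vdev through a per-vdev WMI parameter.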
 */

static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif;
	int ret = 0;

	mutex_lock(&ar->conf_mutex);
	list_for_each_entry(arvif, &ar->arvifs, list) {
		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
			   arvif->vdev_id, value);

		ret = ath10k_mac_set_rts(arvif, value);
		if (ret) {
			ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
				    arvif->vdev_id, ret);
			break;
		}
	}
	mutex_unlock(&ar->conf_mutex);

	return ret;
}

static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
	/* Even though there's a WMI enum for fragmentation threshold no known
	 * firmware actually implements it. Moreover it is not possible to
	 * leave frame fragmentation to mac80211 because firmware clears the
	 * "more fragments" bit in frame control making it impossible for
	 * remote devices to reassemble frames.
	 *
	 * Hence implement a dummy callback just to say fragmentation isn't
	 * supported. This effectively prevents mac80211 from doing frame
	 * fragmentation in software.
	 */
	return -EOPNOTSUPP;
}

static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 u32 queues, bool drop)
{
	struct ath10k *ar = hw->priv;
	bool skip;
	long time_left;

	/* mac80211 doesn't care whether we really transmit queued frames or
	 * not; we'll collect those frames either way if we stop/delete vdevs.
	 */
	if (drop)
		return;

	mutex_lock(&ar->conf_mutex);

	if (ar->state == ATH10K_STATE_WEDGED)
		goto skip;

	time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
			bool empty;

			spin_lock_bh(&ar->htt.tx_lock);
			empty = (ar->htt.num_pending_tx == 0);
			spin_unlock_bh(&ar->htt.tx_lock);

			skip = (ar->state == ATH10K_STATE_WEDGED) ||
			       test_bit(ATH10K_FLAG_CRASH_FLUSH,
					&ar->dev_flags);

			(empty || skip);
		}), ATH10K_FLUSH_TIMEOUT_HZ);

	if (time_left == 0 || skip)
		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
			    skip, ar->state, time_left);

skip:
	mutex_unlock(&ar->conf_mutex);
}

/* TODO: Implement this function properly.
 * For now it is needed to reply to Probe Requests in IBSS mode.
 * Probably we need this information from FW.
 */
static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
{
	return 1;
}

static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
				     enum ieee80211_reconfig_type reconfig_type)
{
	struct ath10k *ar = hw->priv;

	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
		return;

	mutex_lock(&ar->conf_mutex);

	/* If device failed to restart it will be in a different state, e.g.
6671 * ATH10K_STATE_WEDGED 6672 */ 6673 if (ar->state == ATH10K_STATE_RESTARTED) { 6674 ath10k_info(ar, "device successfully recovered\n"); 6675 ar->state = ATH10K_STATE_ON; 6676 ieee80211_wake_queues(ar->hw); 6677 } 6678 6679 mutex_unlock(&ar->conf_mutex); 6680 } 6681 6682 static void 6683 ath10k_mac_update_bss_chan_survey(struct ath10k *ar, 6684 struct ieee80211_channel *channel) 6685 { 6686 int ret; 6687 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; 6688 6689 lockdep_assert_held(&ar->conf_mutex); 6690 6691 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) || 6692 (ar->rx_channel != channel)) 6693 return; 6694 6695 if (ar->scan.state != ATH10K_SCAN_IDLE) { 6696 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n"); 6697 return; 6698 } 6699 6700 reinit_completion(&ar->bss_survey_done); 6701 6702 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type); 6703 if (ret) { 6704 ath10k_warn(ar, "failed to send pdev bss chan info request\n"); 6705 return; 6706 } 6707 6708 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); 6709 if (!ret) { 6710 ath10k_warn(ar, "bss channel survey timed out\n"); 6711 return; 6712 } 6713 } 6714 6715 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, 6716 struct survey_info *survey) 6717 { 6718 struct ath10k *ar = hw->priv; 6719 struct ieee80211_supported_band *sband; 6720 struct survey_info *ar_survey = &ar->survey[idx]; 6721 int ret = 0; 6722 6723 mutex_lock(&ar->conf_mutex); 6724 6725 sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; 6726 if (sband && idx >= sband->n_channels) { 6727 idx -= sband->n_channels; 6728 sband = NULL; 6729 } 6730 6731 if (!sband) 6732 sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; 6733 6734 if (!sband || idx >= sband->n_channels) { 6735 ret = -ENOENT; 6736 goto exit; 6737 } 6738 6739 ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); 6740 6741 spin_lock_bh(&ar->data_lock); 6742 memcpy(survey, ar_survey, sizeof(*survey)); 6743 spin_unlock_bh(&ar->data_lock); 6744 6745 survey->channel = &sband->channels[idx]; 6746 6747 if (ar->rx_channel == survey->channel) 6748 survey->filled |= SURVEY_INFO_IN_USE; 6749 6750 exit: 6751 mutex_unlock(&ar->conf_mutex); 6752 return ret; 6753 } 6754 6755 static bool 6756 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, 6757 enum nl80211_band band, 6758 const struct cfg80211_bitrate_mask *mask) 6759 { 6760 int num_rates = 0; 6761 int i; 6762 6763 num_rates += hweight32(mask->control[band].legacy); 6764 6765 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) 6766 num_rates += hweight8(mask->control[band].ht_mcs[i]); 6767 6768 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) 6769 num_rates += hweight16(mask->control[band].vht_mcs[i]); 6770 6771 return num_rates == 1; 6772 } 6773 6774 static bool 6775 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, 6776 enum nl80211_band band, 6777 const struct cfg80211_bitrate_mask *mask, 6778 int *nss) 6779 { 6780 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6781 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); 6782 u8 ht_nss_mask = 0; 6783 u8 vht_nss_mask = 0; 6784 int i; 6785 6786 if (mask->control[band].legacy) 6787 return false; 6788 6789 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6790 if (mask->control[band].ht_mcs[i] == 0) 6791 continue; 6792 else if (mask->control[band].ht_mcs[i] == 6793 sband->ht_cap.mcs.rx_mask[i]) 6794 ht_nss_mask |= BIT(i); 6795 else 6796 return false; 6797 
} 6798 6799 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6800 if (mask->control[band].vht_mcs[i] == 0) 6801 continue; 6802 else if (mask->control[band].vht_mcs[i] == 6803 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) 6804 vht_nss_mask |= BIT(i); 6805 else 6806 return false; 6807 } 6808 6809 if (ht_nss_mask != vht_nss_mask) 6810 return false; 6811 6812 if (ht_nss_mask == 0) 6813 return false; 6814 6815 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) 6816 return false; 6817 6818 *nss = fls(ht_nss_mask); 6819 6820 return true; 6821 } 6822 6823 static int 6824 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, 6825 enum nl80211_band band, 6826 const struct cfg80211_bitrate_mask *mask, 6827 u8 *rate, u8 *nss) 6828 { 6829 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6830 int rate_idx; 6831 int i; 6832 u16 bitrate; 6833 u8 preamble; 6834 u8 hw_rate; 6835 6836 if (hweight32(mask->control[band].legacy) == 1) { 6837 rate_idx = ffs(mask->control[band].legacy) - 1; 6838 6839 hw_rate = sband->bitrates[rate_idx].hw_value; 6840 bitrate = sband->bitrates[rate_idx].bitrate; 6841 6842 if (ath10k_mac_bitrate_is_cck(bitrate)) 6843 preamble = WMI_RATE_PREAMBLE_CCK; 6844 else 6845 preamble = WMI_RATE_PREAMBLE_OFDM; 6846 6847 *nss = 1; 6848 *rate = preamble << 6 | 6849 (*nss - 1) << 4 | 6850 hw_rate << 0; 6851 6852 return 0; 6853 } 6854 6855 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6856 if (hweight8(mask->control[band].ht_mcs[i]) == 1) { 6857 *nss = i + 1; 6858 *rate = WMI_RATE_PREAMBLE_HT << 6 | 6859 (*nss - 1) << 4 | 6860 (ffs(mask->control[band].ht_mcs[i]) - 1); 6861 6862 return 0; 6863 } 6864 } 6865 6866 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6867 if (hweight16(mask->control[band].vht_mcs[i]) == 1) { 6868 *nss = i + 1; 6869 *rate = WMI_RATE_PREAMBLE_VHT << 6 | 6870 (*nss - 1) << 4 | 6871 (ffs(mask->control[band].vht_mcs[i]) - 1); 6872 6873 return 0; 6874 } 6875 } 6876 6877 return -EINVAL; 6878 } 6879 6880 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, 6881 u8 rate, u8 nss, u8 sgi, u8 ldpc) 6882 { 6883 struct ath10k *ar = arvif->ar; 6884 u32 vdev_param; 6885 int ret; 6886 6887 lockdep_assert_held(&ar->conf_mutex); 6888 6889 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n", 6890 arvif->vdev_id, rate, nss, sgi); 6891 6892 vdev_param = ar->wmi.vdev_param->fixed_rate; 6893 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate); 6894 if (ret) { 6895 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", 6896 rate, ret); 6897 return ret; 6898 } 6899 6900 vdev_param = ar->wmi.vdev_param->nss; 6901 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss); 6902 if (ret) { 6903 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret); 6904 return ret; 6905 } 6906 6907 vdev_param = ar->wmi.vdev_param->sgi; 6908 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi); 6909 if (ret) { 6910 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret); 6911 return ret; 6912 } 6913 6914 vdev_param = ar->wmi.vdev_param->ldpc; 6915 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc); 6916 if (ret) { 6917 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret); 6918 return ret; 6919 } 6920 6921 return 0; 6922 } 6923 6924 static bool 6925 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, 6926 enum nl80211_band band, 6927 const struct cfg80211_bitrate_mask *mask) 6928 { 6929 int i; 
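	/* A minimal sketch, for illustration only and never compiled in, of
	 * how the 8-bit fixed rate code produced by
	 * ath10k_mac_bitrate_mask_get_single_rate() above can be unpacked,
	 * assuming the usual WMI layout of [7:6] preamble (OFDM 0, CCK 1,
	 * HT 2, VHT 3), [5:4] nss - 1 and [3:0] rate/MCS index. For example
	 * HT, 2 spatial streams, MCS 7 packs to (2 << 6) | (1 << 4) | 7 ==
	 * 0x97. The helper name below is made up for the example.
	 */
#if 0
	static void ath10k_mac_example_unpack_rate_code(u8 rate_code)
	{
		u8 preamble = (rate_code >> 6) & 0x3;	/* OFDM/CCK/HT/VHT */
		u8 nss = ((rate_code >> 4) & 0x3) + 1;	/* spatial streams */
		u8 mcs = rate_code & 0xf;		/* rate or MCS index */

		pr_info("preamble %u nss %u mcs %u\n", preamble, nss, mcs);
	}
#endif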
6930 u16 vht_mcs; 6931 6932 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible 6933 * to express all VHT MCS rate masks. Effectively only the following 6934 * ranges can be used: none, 0-7, 0-8 and 0-9. 6935 */ 6936 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { 6937 vht_mcs = mask->control[band].vht_mcs[i]; 6938 6939 switch (vht_mcs) { 6940 case 0: 6941 case BIT(8) - 1: 6942 case BIT(9) - 1: 6943 case BIT(10) - 1: 6944 break; 6945 default: 6946 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n"); 6947 return false; 6948 } 6949 } 6950 6951 return true; 6952 } 6953 6954 static void ath10k_mac_set_bitrate_mask_iter(void *data, 6955 struct ieee80211_sta *sta) 6956 { 6957 struct ath10k_vif *arvif = data; 6958 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6959 struct ath10k *ar = arvif->ar; 6960 6961 if (arsta->arvif != arvif) 6962 return; 6963 6964 spin_lock_bh(&ar->data_lock); 6965 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 6966 spin_unlock_bh(&ar->data_lock); 6967 6968 ieee80211_queue_work(ar->hw, &arsta->update_wk); 6969 } 6970 6971 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, 6972 struct ieee80211_vif *vif, 6973 const struct cfg80211_bitrate_mask *mask) 6974 { 6975 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6976 struct cfg80211_chan_def def; 6977 struct ath10k *ar = arvif->ar; 6978 enum nl80211_band band; 6979 const u8 *ht_mcs_mask; 6980 const u16 *vht_mcs_mask; 6981 u8 rate; 6982 u8 nss; 6983 u8 sgi; 6984 u8 ldpc; 6985 int single_nss; 6986 int ret; 6987 6988 if (ath10k_mac_vif_chan(vif, &def)) 6989 return -EPERM; 6990 6991 band = def.chan->band; 6992 ht_mcs_mask = mask->control[band].ht_mcs; 6993 vht_mcs_mask = mask->control[band].vht_mcs; 6994 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); 6995 6996 sgi = mask->control[band].gi; 6997 if (sgi == NL80211_TXRATE_FORCE_LGI) 6998 return -EINVAL; 6999 7000 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) { 7001 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, 7002 &rate, &nss); 7003 if (ret) { 7004 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n", 7005 arvif->vdev_id, ret); 7006 return ret; 7007 } 7008 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask, 7009 &single_nss)) { 7010 rate = WMI_FIXED_RATE_NONE; 7011 nss = single_nss; 7012 } else { 7013 rate = WMI_FIXED_RATE_NONE; 7014 nss = min(ar->num_rf_chains, 7015 max(ath10k_mac_max_ht_nss(ht_mcs_mask), 7016 ath10k_mac_max_vht_nss(vht_mcs_mask))); 7017 7018 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask)) 7019 return -EINVAL; 7020 7021 mutex_lock(&ar->conf_mutex); 7022 7023 arvif->bitrate_mask = *mask; 7024 ieee80211_iterate_stations_atomic(ar->hw, 7025 ath10k_mac_set_bitrate_mask_iter, 7026 arvif); 7027 7028 mutex_unlock(&ar->conf_mutex); 7029 } 7030 7031 mutex_lock(&ar->conf_mutex); 7032 7033 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); 7034 if (ret) { 7035 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n", 7036 arvif->vdev_id, ret); 7037 goto exit; 7038 } 7039 7040 exit: 7041 mutex_unlock(&ar->conf_mutex); 7042 7043 return ret; 7044 } 7045 7046 static void ath10k_sta_rc_update(struct ieee80211_hw *hw, 7047 struct ieee80211_vif *vif, 7048 struct ieee80211_sta *sta, 7049 u32 changed) 7050 { 7051 struct ath10k *ar = hw->priv; 7052 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7053 u32 bw, smps; 7054 7055 spin_lock_bh(&ar->data_lock); 7056 7057 ath10k_dbg(ar, ATH10K_DBG_MAC, 7058 "mac sta rc 
update for %pM changed %08x bw %d nss %d smps %d\n", 7059 sta->addr, changed, sta->bandwidth, sta->rx_nss, 7060 sta->smps_mode); 7061 7062 if (changed & IEEE80211_RC_BW_CHANGED) { 7063 bw = WMI_PEER_CHWIDTH_20MHZ; 7064 7065 switch (sta->bandwidth) { 7066 case IEEE80211_STA_RX_BW_20: 7067 bw = WMI_PEER_CHWIDTH_20MHZ; 7068 break; 7069 case IEEE80211_STA_RX_BW_40: 7070 bw = WMI_PEER_CHWIDTH_40MHZ; 7071 break; 7072 case IEEE80211_STA_RX_BW_80: 7073 bw = WMI_PEER_CHWIDTH_80MHZ; 7074 break; 7075 case IEEE80211_STA_RX_BW_160: 7076 bw = WMI_PEER_CHWIDTH_160MHZ; 7077 break; 7078 default: 7079 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", 7080 sta->bandwidth, sta->addr); 7081 bw = WMI_PEER_CHWIDTH_20MHZ; 7082 break; 7083 } 7084 7085 arsta->bw = bw; 7086 } 7087 7088 if (changed & IEEE80211_RC_NSS_CHANGED) 7089 arsta->nss = sta->rx_nss; 7090 7091 if (changed & IEEE80211_RC_SMPS_CHANGED) { 7092 smps = WMI_PEER_SMPS_PS_NONE; 7093 7094 switch (sta->smps_mode) { 7095 case IEEE80211_SMPS_AUTOMATIC: 7096 case IEEE80211_SMPS_OFF: 7097 smps = WMI_PEER_SMPS_PS_NONE; 7098 break; 7099 case IEEE80211_SMPS_STATIC: 7100 smps = WMI_PEER_SMPS_STATIC; 7101 break; 7102 case IEEE80211_SMPS_DYNAMIC: 7103 smps = WMI_PEER_SMPS_DYNAMIC; 7104 break; 7105 case IEEE80211_SMPS_NUM_MODES: 7106 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", 7107 sta->smps_mode, sta->addr); 7108 smps = WMI_PEER_SMPS_PS_NONE; 7109 break; 7110 } 7111 7112 arsta->smps = smps; 7113 } 7114 7115 arsta->changed |= changed; 7116 7117 spin_unlock_bh(&ar->data_lock); 7118 7119 ieee80211_queue_work(hw, &arsta->update_wk); 7120 } 7121 7122 static void ath10k_offset_tsf(struct ieee80211_hw *hw, 7123 struct ieee80211_vif *vif, s64 tsf_offset) 7124 { 7125 struct ath10k *ar = hw->priv; 7126 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7127 u32 offset, vdev_param; 7128 int ret; 7129 7130 if (tsf_offset < 0) { 7131 vdev_param = ar->wmi.vdev_param->dec_tsf; 7132 offset = -tsf_offset; 7133 } else { 7134 vdev_param = ar->wmi.vdev_param->inc_tsf; 7135 offset = tsf_offset; 7136 } 7137 7138 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 7139 vdev_param, offset); 7140 7141 if (ret && ret != -EOPNOTSUPP) 7142 ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n", 7143 offset, vdev_param, ret); 7144 } 7145 7146 static int ath10k_ampdu_action(struct ieee80211_hw *hw, 7147 struct ieee80211_vif *vif, 7148 struct ieee80211_ampdu_params *params) 7149 { 7150 struct ath10k *ar = hw->priv; 7151 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7152 struct ieee80211_sta *sta = params->sta; 7153 enum ieee80211_ampdu_mlme_action action = params->action; 7154 u16 tid = params->tid; 7155 7156 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", 7157 arvif->vdev_id, sta->addr, tid, action); 7158 7159 switch (action) { 7160 case IEEE80211_AMPDU_RX_START: 7161 case IEEE80211_AMPDU_RX_STOP: 7162 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session 7163 * creation/removal. Do we need to verify this? 7164 */ 7165 return 0; 7166 case IEEE80211_AMPDU_TX_START: 7167 case IEEE80211_AMPDU_TX_STOP_CONT: 7168 case IEEE80211_AMPDU_TX_STOP_FLUSH: 7169 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 7170 case IEEE80211_AMPDU_TX_OPERATIONAL: 7171 /* Firmware offloads Tx aggregation entirely so deny mac80211 7172 * Tx aggregation requests. 
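		 * Returning -EOPNOTSUPP for IEEE80211_AMPDU_TX_START keeps
		 * mac80211 from ever negotiating A-MPDU sessions itself; the
		 * firmware sets up and tears down aggregation on its own.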
 */
		return -EOPNOTSUPP;
	}

	return -EINVAL;
}

static void
ath10k_mac_update_rx_channel(struct ath10k *ar,
			     struct ieee80211_chanctx_conf *ctx,
			     struct ieee80211_vif_chanctx_switch *vifs,
			     int n_vifs)
{
	struct cfg80211_chan_def *def = NULL;

	/* Both locks are required because ar->rx_channel is modified. This
	 * allows readers to hold either lock.
	 */
	lockdep_assert_held(&ar->conf_mutex);
	lockdep_assert_held(&ar->data_lock);

	WARN_ON(ctx && vifs);
	WARN_ON(vifs && !n_vifs);

	/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
	 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
	 * ppdu on Rx may reduce performance on low-end systems. It should be
	 * possible to build tables/hashmaps to speed up the lookup (but be
	 * wary of CPU data cache line sizes); to keep the initial
	 * implementation simple and less intrusive, fall back to the slow
	 * lookup only for multi-channel cases. Single-channel cases keep
	 * using the old channel derivation and thus performance should not be
	 * affected much.
	 */
	rcu_read_lock();
	if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
		ieee80211_iter_chan_contexts_atomic(ar->hw,
						    ath10k_mac_get_any_chandef_iter,
						    &def);

		if (vifs)
			def = &vifs[0].new_ctx->def;

		ar->rx_channel = def->chan;
	} else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
		   (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
		/* During driver restart due to firmware assert, since mac80211
		 * already has a valid channel context for the given radio, the
		 * channel context iteration returns num_chanctx > 0. So fix up
		 * rx_channel when a restart is in progress.
		 */
		ar->rx_channel = ctx->def.chan;
	} else {
		ar->rx_channel = NULL;
	}
	rcu_read_unlock();
}

static void
ath10k_mac_update_vif_chan(struct ath10k *ar,
			   struct ieee80211_vif_chanctx_switch *vifs,
			   int n_vifs)
{
	struct ath10k_vif *arvif;
	int ret;
	int i;

	lockdep_assert_held(&ar->conf_mutex);

	/* First stop the monitor interface. Some FW versions crash if there's
	 * a lone monitor interface.
	 */
	if (ar->monitor_started)
		ath10k_monitor_stop(ar);

	for (i = 0; i < n_vifs; i++) {
		arvif = (void *)vifs[i].vif->drv_priv;

		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
			   arvif->vdev_id,
			   vifs[i].old_ctx->def.chan->center_freq,
			   vifs[i].new_ctx->def.chan->center_freq,
			   vifs[i].old_ctx->def.width,
			   vifs[i].new_ctx->def.width);

		if (WARN_ON(!arvif->is_started))
			continue;

		if (WARN_ON(!arvif->is_up))
			continue;

		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
		if (ret) {
			ath10k_warn(ar, "failed to down vdev %d: %d\n",
				    arvif->vdev_id, ret);
			continue;
		}
	}

	/* All relevant vdevs are downed and associated channel resources
	 * should be available for the channel switch now.
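	 * What follows: refresh ar->rx_channel for the new channel
	 * definition, re-install beacon and probe response templates, restart
	 * each vdev on its new channel and bring it back up, then re-evaluate
	 * the monitor vdev.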
7275 */ 7276 7277 spin_lock_bh(&ar->data_lock); 7278 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs); 7279 spin_unlock_bh(&ar->data_lock); 7280 7281 for (i = 0; i < n_vifs; i++) { 7282 arvif = (void *)vifs[i].vif->drv_priv; 7283 7284 if (WARN_ON(!arvif->is_started)) 7285 continue; 7286 7287 if (WARN_ON(!arvif->is_up)) 7288 continue; 7289 7290 ret = ath10k_mac_setup_bcn_tmpl(arvif); 7291 if (ret) 7292 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 7293 ret); 7294 7295 ret = ath10k_mac_setup_prb_tmpl(arvif); 7296 if (ret) 7297 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 7298 ret); 7299 7300 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); 7301 if (ret) { 7302 ath10k_warn(ar, "failed to restart vdev %d: %d\n", 7303 arvif->vdev_id, ret); 7304 continue; 7305 } 7306 7307 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 7308 arvif->bssid); 7309 if (ret) { 7310 ath10k_warn(ar, "failed to bring vdev up %d: %d\n", 7311 arvif->vdev_id, ret); 7312 continue; 7313 } 7314 } 7315 7316 ath10k_monitor_recalc(ar); 7317 } 7318 7319 static int 7320 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, 7321 struct ieee80211_chanctx_conf *ctx) 7322 { 7323 struct ath10k *ar = hw->priv; 7324 7325 ath10k_dbg(ar, ATH10K_DBG_MAC, 7326 "mac chanctx add freq %hu width %d ptr %pK\n", 7327 ctx->def.chan->center_freq, ctx->def.width, ctx); 7328 7329 mutex_lock(&ar->conf_mutex); 7330 7331 spin_lock_bh(&ar->data_lock); 7332 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0); 7333 spin_unlock_bh(&ar->data_lock); 7334 7335 ath10k_recalc_radar_detection(ar); 7336 ath10k_monitor_recalc(ar); 7337 7338 mutex_unlock(&ar->conf_mutex); 7339 7340 return 0; 7341 } 7342 7343 static void 7344 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 7345 struct ieee80211_chanctx_conf *ctx) 7346 { 7347 struct ath10k *ar = hw->priv; 7348 7349 ath10k_dbg(ar, ATH10K_DBG_MAC, 7350 "mac chanctx remove freq %hu width %d ptr %pK\n", 7351 ctx->def.chan->center_freq, ctx->def.width, ctx); 7352 7353 mutex_lock(&ar->conf_mutex); 7354 7355 spin_lock_bh(&ar->data_lock); 7356 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0); 7357 spin_unlock_bh(&ar->data_lock); 7358 7359 ath10k_recalc_radar_detection(ar); 7360 ath10k_monitor_recalc(ar); 7361 7362 mutex_unlock(&ar->conf_mutex); 7363 } 7364 7365 struct ath10k_mac_change_chanctx_arg { 7366 struct ieee80211_chanctx_conf *ctx; 7367 struct ieee80211_vif_chanctx_switch *vifs; 7368 int n_vifs; 7369 int next_vif; 7370 }; 7371 7372 static void 7373 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, 7374 struct ieee80211_vif *vif) 7375 { 7376 struct ath10k_mac_change_chanctx_arg *arg = data; 7377 7378 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx) 7379 return; 7380 7381 arg->n_vifs++; 7382 } 7383 7384 static void 7385 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac, 7386 struct ieee80211_vif *vif) 7387 { 7388 struct ath10k_mac_change_chanctx_arg *arg = data; 7389 struct ieee80211_chanctx_conf *ctx; 7390 7391 ctx = rcu_access_pointer(vif->chanctx_conf); 7392 if (ctx != arg->ctx) 7393 return; 7394 7395 if (WARN_ON(arg->next_vif == arg->n_vifs)) 7396 return; 7397 7398 arg->vifs[arg->next_vif].vif = vif; 7399 arg->vifs[arg->next_vif].old_ctx = ctx; 7400 arg->vifs[arg->next_vif].new_ctx = ctx; 7401 arg->next_vif++; 7402 } 7403 7404 static void 7405 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, 7406 struct ieee80211_chanctx_conf *ctx, 7407 u32 changed) 7408 { 7409 struct ath10k *ar = hw->priv; 7410 struct ath10k_mac_change_chanctx_arg arg = 
{ .ctx = ctx }; 7411 7412 mutex_lock(&ar->conf_mutex); 7413 7414 ath10k_dbg(ar, ATH10K_DBG_MAC, 7415 "mac chanctx change freq %hu width %d ptr %pK changed %x\n", 7416 ctx->def.chan->center_freq, ctx->def.width, ctx, changed); 7417 7418 /* This shouldn't really happen because channel switching should use 7419 * switch_vif_chanctx(). 7420 */ 7421 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) 7422 goto unlock; 7423 7424 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) { 7425 ieee80211_iterate_active_interfaces_atomic( 7426 hw, 7427 IEEE80211_IFACE_ITER_NORMAL, 7428 ath10k_mac_change_chanctx_cnt_iter, 7429 &arg); 7430 if (arg.n_vifs == 0) 7431 goto radar; 7432 7433 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), 7434 GFP_KERNEL); 7435 if (!arg.vifs) 7436 goto radar; 7437 7438 ieee80211_iterate_active_interfaces_atomic( 7439 hw, 7440 IEEE80211_IFACE_ITER_NORMAL, 7441 ath10k_mac_change_chanctx_fill_iter, 7442 &arg); 7443 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); 7444 kfree(arg.vifs); 7445 } 7446 7447 radar: 7448 ath10k_recalc_radar_detection(ar); 7449 7450 /* FIXME: How to configure Rx chains properly? */ 7451 7452 /* No other actions are actually necessary. Firmware maintains channel 7453 * definitions per vdev internally and there's no host-side channel 7454 * context abstraction to configure, e.g. channel width. 7455 */ 7456 7457 unlock: 7458 mutex_unlock(&ar->conf_mutex); 7459 } 7460 7461 static int 7462 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 7463 struct ieee80211_vif *vif, 7464 struct ieee80211_chanctx_conf *ctx) 7465 { 7466 struct ath10k *ar = hw->priv; 7467 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7468 int ret; 7469 7470 mutex_lock(&ar->conf_mutex); 7471 7472 ath10k_dbg(ar, ATH10K_DBG_MAC, 7473 "mac chanctx assign ptr %pK vdev_id %i\n", 7474 ctx, arvif->vdev_id); 7475 7476 if (WARN_ON(arvif->is_started)) { 7477 mutex_unlock(&ar->conf_mutex); 7478 return -EBUSY; 7479 } 7480 7481 ret = ath10k_vdev_start(arvif, &ctx->def); 7482 if (ret) { 7483 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n", 7484 arvif->vdev_id, vif->addr, 7485 ctx->def.chan->center_freq, ret); 7486 goto err; 7487 } 7488 7489 arvif->is_started = true; 7490 7491 ret = ath10k_mac_vif_setup_ps(arvif); 7492 if (ret) { 7493 ath10k_warn(ar, "failed to update vdev %i ps: %d\n", 7494 arvif->vdev_id, ret); 7495 goto err_stop; 7496 } 7497 7498 if (vif->type == NL80211_IFTYPE_MONITOR) { 7499 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); 7500 if (ret) { 7501 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n", 7502 arvif->vdev_id, ret); 7503 goto err_stop; 7504 } 7505 7506 arvif->is_up = true; 7507 } 7508 7509 if (ath10k_mac_can_set_cts_prot(arvif)) { 7510 ret = ath10k_mac_set_cts_prot(arvif); 7511 if (ret) 7512 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 7513 arvif->vdev_id, ret); 7514 } 7515 7516 mutex_unlock(&ar->conf_mutex); 7517 return 0; 7518 7519 err_stop: 7520 ath10k_vdev_stop(arvif); 7521 arvif->is_started = false; 7522 ath10k_mac_vif_setup_ps(arvif); 7523 7524 err: 7525 mutex_unlock(&ar->conf_mutex); 7526 return ret; 7527 } 7528 7529 static void 7530 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, 7531 struct ieee80211_vif *vif, 7532 struct ieee80211_chanctx_conf *ctx) 7533 { 7534 struct ath10k *ar = hw->priv; 7535 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7536 int ret; 7537 7538 mutex_lock(&ar->conf_mutex); 7539 7540 ath10k_dbg(ar, ATH10K_DBG_MAC, 7541 "mac chanctx unassign ptr %pK vdev_id %i\n", 
7542 ctx, arvif->vdev_id); 7543 7544 WARN_ON(!arvif->is_started); 7545 7546 if (vif->type == NL80211_IFTYPE_MONITOR) { 7547 WARN_ON(!arvif->is_up); 7548 7549 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7550 if (ret) 7551 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n", 7552 arvif->vdev_id, ret); 7553 7554 arvif->is_up = false; 7555 } 7556 7557 ret = ath10k_vdev_stop(arvif); 7558 if (ret) 7559 ath10k_warn(ar, "failed to stop vdev %i: %d\n", 7560 arvif->vdev_id, ret); 7561 7562 arvif->is_started = false; 7563 7564 mutex_unlock(&ar->conf_mutex); 7565 } 7566 7567 static int 7568 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, 7569 struct ieee80211_vif_chanctx_switch *vifs, 7570 int n_vifs, 7571 enum ieee80211_chanctx_switch_mode mode) 7572 { 7573 struct ath10k *ar = hw->priv; 7574 7575 mutex_lock(&ar->conf_mutex); 7576 7577 ath10k_dbg(ar, ATH10K_DBG_MAC, 7578 "mac chanctx switch n_vifs %d mode %d\n", 7579 n_vifs, mode); 7580 ath10k_mac_update_vif_chan(ar, vifs, n_vifs); 7581 7582 mutex_unlock(&ar->conf_mutex); 7583 return 0; 7584 } 7585 7586 static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, 7587 struct ieee80211_vif *vif, 7588 struct ieee80211_sta *sta) 7589 { 7590 struct ath10k *ar; 7591 struct ath10k_peer *peer; 7592 7593 ar = hw->priv; 7594 7595 list_for_each_entry(peer, &ar->peers, list) 7596 if (peer->sta == sta) 7597 peer->removed = true; 7598 } 7599 7600 static const struct ieee80211_ops ath10k_ops = { 7601 .tx = ath10k_mac_op_tx, 7602 .wake_tx_queue = ath10k_mac_op_wake_tx_queue, 7603 .start = ath10k_start, 7604 .stop = ath10k_stop, 7605 .config = ath10k_config, 7606 .add_interface = ath10k_add_interface, 7607 .remove_interface = ath10k_remove_interface, 7608 .configure_filter = ath10k_configure_filter, 7609 .bss_info_changed = ath10k_bss_info_changed, 7610 .set_coverage_class = ath10k_mac_op_set_coverage_class, 7611 .hw_scan = ath10k_hw_scan, 7612 .cancel_hw_scan = ath10k_cancel_hw_scan, 7613 .set_key = ath10k_set_key, 7614 .set_default_unicast_key = ath10k_set_default_unicast_key, 7615 .sta_state = ath10k_sta_state, 7616 .conf_tx = ath10k_conf_tx, 7617 .remain_on_channel = ath10k_remain_on_channel, 7618 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, 7619 .set_rts_threshold = ath10k_set_rts_threshold, 7620 .set_frag_threshold = ath10k_mac_op_set_frag_threshold, 7621 .flush = ath10k_flush, 7622 .tx_last_beacon = ath10k_tx_last_beacon, 7623 .set_antenna = ath10k_set_antenna, 7624 .get_antenna = ath10k_get_antenna, 7625 .reconfig_complete = ath10k_reconfig_complete, 7626 .get_survey = ath10k_get_survey, 7627 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, 7628 .sta_rc_update = ath10k_sta_rc_update, 7629 .offset_tsf = ath10k_offset_tsf, 7630 .ampdu_action = ath10k_ampdu_action, 7631 .get_et_sset_count = ath10k_debug_get_et_sset_count, 7632 .get_et_stats = ath10k_debug_get_et_stats, 7633 .get_et_strings = ath10k_debug_get_et_strings, 7634 .add_chanctx = ath10k_mac_op_add_chanctx, 7635 .remove_chanctx = ath10k_mac_op_remove_chanctx, 7636 .change_chanctx = ath10k_mac_op_change_chanctx, 7637 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx, 7638 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, 7639 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, 7640 .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove, 7641 7642 CFG80211_TESTMODE_CMD(ath10k_tm_cmd) 7643 7644 #ifdef CONFIG_PM 7645 .suspend = ath10k_wow_op_suspend, 7646 .resume = ath10k_wow_op_resume, 7647 #endif 7648 #ifdef CONFIG_MAC80211_DEBUGFS 7649 
.sta_add_debugfs = ath10k_sta_add_debugfs, 7650 .sta_statistics = ath10k_sta_statistics, 7651 #endif 7652 }; 7653 7654 #define CHAN2G(_channel, _freq, _flags) { \ 7655 .band = NL80211_BAND_2GHZ, \ 7656 .hw_value = (_channel), \ 7657 .center_freq = (_freq), \ 7658 .flags = (_flags), \ 7659 .max_antenna_gain = 0, \ 7660 .max_power = 30, \ 7661 } 7662 7663 #define CHAN5G(_channel, _freq, _flags) { \ 7664 .band = NL80211_BAND_5GHZ, \ 7665 .hw_value = (_channel), \ 7666 .center_freq = (_freq), \ 7667 .flags = (_flags), \ 7668 .max_antenna_gain = 0, \ 7669 .max_power = 30, \ 7670 } 7671 7672 static const struct ieee80211_channel ath10k_2ghz_channels[] = { 7673 CHAN2G(1, 2412, 0), 7674 CHAN2G(2, 2417, 0), 7675 CHAN2G(3, 2422, 0), 7676 CHAN2G(4, 2427, 0), 7677 CHAN2G(5, 2432, 0), 7678 CHAN2G(6, 2437, 0), 7679 CHAN2G(7, 2442, 0), 7680 CHAN2G(8, 2447, 0), 7681 CHAN2G(9, 2452, 0), 7682 CHAN2G(10, 2457, 0), 7683 CHAN2G(11, 2462, 0), 7684 CHAN2G(12, 2467, 0), 7685 CHAN2G(13, 2472, 0), 7686 CHAN2G(14, 2484, 0), 7687 }; 7688 7689 static const struct ieee80211_channel ath10k_5ghz_channels[] = { 7690 CHAN5G(36, 5180, 0), 7691 CHAN5G(40, 5200, 0), 7692 CHAN5G(44, 5220, 0), 7693 CHAN5G(48, 5240, 0), 7694 CHAN5G(52, 5260, 0), 7695 CHAN5G(56, 5280, 0), 7696 CHAN5G(60, 5300, 0), 7697 CHAN5G(64, 5320, 0), 7698 CHAN5G(100, 5500, 0), 7699 CHAN5G(104, 5520, 0), 7700 CHAN5G(108, 5540, 0), 7701 CHAN5G(112, 5560, 0), 7702 CHAN5G(116, 5580, 0), 7703 CHAN5G(120, 5600, 0), 7704 CHAN5G(124, 5620, 0), 7705 CHAN5G(128, 5640, 0), 7706 CHAN5G(132, 5660, 0), 7707 CHAN5G(136, 5680, 0), 7708 CHAN5G(140, 5700, 0), 7709 CHAN5G(144, 5720, 0), 7710 CHAN5G(149, 5745, 0), 7711 CHAN5G(153, 5765, 0), 7712 CHAN5G(157, 5785, 0), 7713 CHAN5G(161, 5805, 0), 7714 CHAN5G(165, 5825, 0), 7715 CHAN5G(169, 5845, 0), 7716 }; 7717 7718 struct ath10k *ath10k_mac_create(size_t priv_size) 7719 { 7720 struct ieee80211_hw *hw; 7721 struct ieee80211_ops *ops; 7722 struct ath10k *ar; 7723 7724 ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL); 7725 if (!ops) 7726 return NULL; 7727 7728 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops); 7729 if (!hw) { 7730 kfree(ops); 7731 return NULL; 7732 } 7733 7734 ar = hw->priv; 7735 ar->hw = hw; 7736 ar->ops = ops; 7737 7738 return ar; 7739 } 7740 7741 void ath10k_mac_destroy(struct ath10k *ar) 7742 { 7743 struct ieee80211_ops *ops = ar->ops; 7744 7745 ieee80211_free_hw(ar->hw); 7746 kfree(ops); 7747 } 7748 7749 static const struct ieee80211_iface_limit ath10k_if_limits[] = { 7750 { 7751 .max = 8, 7752 .types = BIT(NL80211_IFTYPE_STATION) 7753 | BIT(NL80211_IFTYPE_P2P_CLIENT) 7754 }, 7755 { 7756 .max = 3, 7757 .types = BIT(NL80211_IFTYPE_P2P_GO) 7758 }, 7759 { 7760 .max = 1, 7761 .types = BIT(NL80211_IFTYPE_P2P_DEVICE) 7762 }, 7763 { 7764 .max = 7, 7765 .types = BIT(NL80211_IFTYPE_AP) 7766 #ifdef CONFIG_MAC80211_MESH 7767 | BIT(NL80211_IFTYPE_MESH_POINT) 7768 #endif 7769 }, 7770 }; 7771 7772 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = { 7773 { 7774 .max = 8, 7775 .types = BIT(NL80211_IFTYPE_AP) 7776 #ifdef CONFIG_MAC80211_MESH 7777 | BIT(NL80211_IFTYPE_MESH_POINT) 7778 #endif 7779 }, 7780 { 7781 .max = 1, 7782 .types = BIT(NL80211_IFTYPE_STATION) 7783 }, 7784 }; 7785 7786 static const struct ieee80211_iface_combination ath10k_if_comb[] = { 7787 { 7788 .limits = ath10k_if_limits, 7789 .n_limits = ARRAY_SIZE(ath10k_if_limits), 7790 .max_interfaces = 8, 7791 .num_different_channels = 1, 7792 .beacon_int_infra_match = true, 7793 }, 7794 }; 7795 7796 static const struct 
ieee80211_iface_combination ath10k_10x_if_comb[] = {
	{
		.limits = ath10k_10x_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
		.max_interfaces = 8,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
					BIT(NL80211_CHAN_WIDTH_20) |
					BIT(NL80211_CHAN_WIDTH_40) |
					BIT(NL80211_CHAN_WIDTH_80),
#endif
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
			 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
			 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_ADHOC),
	},
};

/* FIXME: This is not thoroughly tested. These combinations may over- or
 * underestimate hw/fw capabilities.
7868 */ 7869 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { 7870 { 7871 .limits = ath10k_tlv_if_limit, 7872 .num_different_channels = 1, 7873 .max_interfaces = 4, 7874 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 7875 }, 7876 { 7877 .limits = ath10k_tlv_if_limit_ibss, 7878 .num_different_channels = 1, 7879 .max_interfaces = 2, 7880 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 7881 }, 7882 }; 7883 7884 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { 7885 { 7886 .limits = ath10k_tlv_if_limit, 7887 .num_different_channels = 1, 7888 .max_interfaces = 4, 7889 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 7890 }, 7891 { 7892 .limits = ath10k_tlv_qcs_if_limit, 7893 .num_different_channels = 2, 7894 .max_interfaces = 4, 7895 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit), 7896 }, 7897 { 7898 .limits = ath10k_tlv_if_limit_ibss, 7899 .num_different_channels = 1, 7900 .max_interfaces = 2, 7901 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 7902 }, 7903 }; 7904 7905 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = { 7906 { 7907 .max = 1, 7908 .types = BIT(NL80211_IFTYPE_STATION), 7909 }, 7910 { 7911 .max = 16, 7912 .types = BIT(NL80211_IFTYPE_AP) 7913 #ifdef CONFIG_MAC80211_MESH 7914 | BIT(NL80211_IFTYPE_MESH_POINT) 7915 #endif 7916 }, 7917 }; 7918 7919 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { 7920 { 7921 .limits = ath10k_10_4_if_limits, 7922 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), 7923 .max_interfaces = 16, 7924 .num_different_channels = 1, 7925 .beacon_int_infra_match = true, 7926 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 7927 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 7928 BIT(NL80211_CHAN_WIDTH_20) | 7929 BIT(NL80211_CHAN_WIDTH_40) | 7930 BIT(NL80211_CHAN_WIDTH_80), 7931 #endif 7932 }, 7933 }; 7934 7935 static void ath10k_get_arvif_iter(void *data, u8 *mac, 7936 struct ieee80211_vif *vif) 7937 { 7938 struct ath10k_vif_iter *arvif_iter = data; 7939 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7940 7941 if (arvif->vdev_id == arvif_iter->vdev_id) 7942 arvif_iter->arvif = arvif; 7943 } 7944 7945 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) 7946 { 7947 struct ath10k_vif_iter arvif_iter; 7948 u32 flags; 7949 7950 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter)); 7951 arvif_iter.vdev_id = vdev_id; 7952 7953 flags = IEEE80211_IFACE_ITER_RESUME_ALL; 7954 ieee80211_iterate_active_interfaces_atomic(ar->hw, 7955 flags, 7956 ath10k_get_arvif_iter, 7957 &arvif_iter); 7958 if (!arvif_iter.arvif) { 7959 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id); 7960 return NULL; 7961 } 7962 7963 return arvif_iter.arvif; 7964 } 7965 7966 #define WRD_METHOD "WRDD" 7967 #define WRDD_WIFI (0x07) 7968 7969 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd) 7970 { 7971 union acpi_object *mcc_pkg; 7972 union acpi_object *domain_type; 7973 union acpi_object *mcc_value; 7974 u32 i; 7975 7976 if (wrdd->type != ACPI_TYPE_PACKAGE || 7977 wrdd->package.count < 2 || 7978 wrdd->package.elements[0].type != ACPI_TYPE_INTEGER || 7979 wrdd->package.elements[0].integer.value != 0) { 7980 ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n"); 7981 return 0; 7982 } 7983 7984 for (i = 1; i < wrdd->package.count; ++i) { 7985 mcc_pkg = &wrdd->package.elements[i]; 7986 7987 if (mcc_pkg->type != ACPI_TYPE_PACKAGE) 7988 continue; 7989 if (mcc_pkg->package.count < 2) 7990 continue; 7991 if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || 7992 
mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) 7993 continue; 7994 7995 domain_type = &mcc_pkg->package.elements[0]; 7996 if (domain_type->integer.value != WRDD_WIFI) 7997 continue; 7998 7999 mcc_value = &mcc_pkg->package.elements[1]; 8000 return mcc_value->integer.value; 8001 } 8002 return 0; 8003 } 8004 8005 static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd) 8006 { 8007 struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev); 8008 acpi_handle root_handle; 8009 acpi_handle handle; 8010 struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL}; 8011 acpi_status status; 8012 u32 alpha2_code; 8013 char alpha2[3]; 8014 8015 root_handle = ACPI_HANDLE(&pdev->dev); 8016 if (!root_handle) 8017 return -EOPNOTSUPP; 8018 8019 status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle); 8020 if (ACPI_FAILURE(status)) { 8021 ath10k_dbg(ar, ATH10K_DBG_BOOT, 8022 "failed to get wrd method %d\n", status); 8023 return -EIO; 8024 } 8025 8026 status = acpi_evaluate_object(handle, NULL, NULL, &wrdd); 8027 if (ACPI_FAILURE(status)) { 8028 ath10k_dbg(ar, ATH10K_DBG_BOOT, 8029 "failed to call wrdc %d\n", status); 8030 return -EIO; 8031 } 8032 8033 alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer); 8034 kfree(wrdd.pointer); 8035 if (!alpha2_code) 8036 return -EIO; 8037 8038 alpha2[0] = (alpha2_code >> 8) & 0xff; 8039 alpha2[1] = (alpha2_code >> 0) & 0xff; 8040 alpha2[2] = '\0'; 8041 8042 ath10k_dbg(ar, ATH10K_DBG_BOOT, 8043 "regulatory hint from WRDD (alpha2-code): %s\n", alpha2); 8044 8045 *rd = ath_regd_find_country_by_name(alpha2); 8046 if (*rd == 0xffff) 8047 return -EIO; 8048 8049 *rd |= COUNTRY_ERD_FLAG; 8050 return 0; 8051 } 8052 8053 static int ath10k_mac_init_rd(struct ath10k *ar) 8054 { 8055 int ret; 8056 u16 rd; 8057 8058 ret = ath10k_mac_get_wrdd_regulatory(ar, &rd); 8059 if (ret) { 8060 ath10k_dbg(ar, ATH10K_DBG_BOOT, 8061 "fallback to eeprom programmed regulatory settings\n"); 8062 rd = ar->hw_eeprom_rd; 8063 } 8064 8065 ar->ath_common.regulatory.current_rd = rd; 8066 return 0; 8067 } 8068 8069 int ath10k_mac_register(struct ath10k *ar) 8070 { 8071 static const u32 cipher_suites[] = { 8072 WLAN_CIPHER_SUITE_WEP40, 8073 WLAN_CIPHER_SUITE_WEP104, 8074 WLAN_CIPHER_SUITE_TKIP, 8075 WLAN_CIPHER_SUITE_CCMP, 8076 WLAN_CIPHER_SUITE_AES_CMAC, 8077 }; 8078 struct ieee80211_supported_band *band; 8079 void *channels; 8080 int ret; 8081 8082 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); 8083 8084 SET_IEEE80211_DEV(ar->hw, ar->dev); 8085 8086 BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) + 8087 ARRAY_SIZE(ath10k_5ghz_channels)) != 8088 ATH10K_NUM_CHANS); 8089 8090 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 8091 channels = kmemdup(ath10k_2ghz_channels, 8092 sizeof(ath10k_2ghz_channels), 8093 GFP_KERNEL); 8094 if (!channels) { 8095 ret = -ENOMEM; 8096 goto err_free; 8097 } 8098 8099 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 8100 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); 8101 band->channels = channels; 8102 8103 if (ar->hw_params.cck_rate_map_rev2) { 8104 band->n_bitrates = ath10k_g_rates_rev2_size; 8105 band->bitrates = ath10k_g_rates_rev2; 8106 } else { 8107 band->n_bitrates = ath10k_g_rates_size; 8108 band->bitrates = ath10k_g_rates; 8109 } 8110 8111 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; 8112 } 8113 8114 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 8115 channels = kmemdup(ath10k_5ghz_channels, 8116 sizeof(ath10k_5ghz_channels), 8117 GFP_KERNEL); 8118 if (!channels) { 8119 ret = -ENOMEM; 8120 goto err_free; 8121 } 8122 8123 
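		/* As with the 2 GHz band above, the channel list is duplicated
		 * per device so it can be modified (e.g. regulatory flags)
		 * without touching the shared const template; the earlier
		 * BUILD_BUG_ON ensures both templates together add up to
		 * ATH10K_NUM_CHANS.
		 */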

int ath10k_mac_register(struct ath10k *ar)
{
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WLAN_CIPHER_SUITE_AES_CMAC,
	};
	struct ieee80211_supported_band *band;
	void *channels;
	int ret;

	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);

	SET_IEEE80211_DEV(ar->hw, ar->dev);

	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
		     ATH10K_NUM_CHANS);

	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
		channels = kmemdup(ath10k_2ghz_channels,
				   sizeof(ath10k_2ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
		band->channels = channels;

		if (ar->hw_params.cck_rate_map_rev2) {
			band->n_bitrates = ath10k_g_rates_rev2_size;
			band->bitrates = ath10k_g_rates_rev2;
		} else {
			band->n_bitrates = ath10k_g_rates_size;
			band->bitrates = ath10k_g_rates;
		}

		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
	}

	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
		channels = kmemdup(ath10k_5ghz_channels,
				   sizeof(ath10k_5ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
		band->channels = channels;
		band->n_bitrates = ath10k_a_rates_size;
		band->bitrates = ath10k_a_rates;
		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
	}

	ath10k_mac_setup_ht_vht_cap(ar);

	ar->hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;

	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
		ar->hw->wiphy->interface_modes |=
			BIT(NL80211_IFTYPE_P2P_DEVICE) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO);

	ieee80211_hw_set(ar->hw, SIGNAL_DBM);
	ieee80211_hw_set(ar->hw, SUPPORTS_PS);
	ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(ar->hw, AP_LINK_PS);
	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);

	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;

	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
	}

	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;

	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
	ar->hw->txq_data_size = sizeof(struct ath10k_txq);

	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;

	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;

		/* Firmware delivers WPS/P2P Probe Request frames to the
		 * driver so that userspace (e.g. wpa_supplicant/hostapd)
		 * can generate correct Probe Responses. Advertising probe
		 * response offload here is more of a workaround than a
		 * true offload.
		 */
		ar->hw->wiphy->probe_resp_offload |=
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
	}

	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;

	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
	ar->hw->wiphy->max_remain_on_channel_duration = 5000;

	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
				   NL80211_FEATURE_AP_SCAN;

	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;

	ret = ath10k_wow_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to init wow: %d\n", ret);
		goto err_free;
	}

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);

	/*
	 * On low-latency (LL) hardware the queues are managed entirely by
	 * the firmware, so only advertise to mac80211 that we can handle
	 * the maximum number of queues.
	 */
	ar->hw->queues = IEEE80211_MAX_QUEUES;

	/* vdev_ids are used as hw queue numbers. Make sure the offchannel
	 * tx queue is something that vdev_ids can't reach so that we don't
	 * stop that queue accidentally.
	 */
	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
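
	/*
	 * Illustrative note, not from the original source: mac80211
	 * currently defines IEEE80211_MAX_QUEUES as 16, so under that
	 * assumption vdev ids map onto hw queues 0..14 while queue 15 is
	 * reserved for off-channel tx by the assignment above.
	 */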

	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_if_comb);
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_tlv_qcs_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
		} else {
			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_if_comb);
		}
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
	case ATH10K_FW_WMI_OP_VERSION_10_2:
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10x_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10_4_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		WARN_ON(1);
		ret = -EINVAL;
		goto err_free;
	}
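
	/*
	 * Illustrative note, not from the original source: as an example of
	 * the tables selected above, ath10k_10_4_if_comb advertises a single
	 * combination of up to 16 interfaces on one channel (at most one
	 * station plus up to 16 AP/mesh interfaces), while the TLV tables
	 * additionally allow a two-interface IBSS combination and, when
	 * WMI_SERVICE_ADAPTIVE_OCS is available, a combination spanning two
	 * different channels (ath10k_tlv_qcs_if_comb).
	 */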

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ar->hw->netdev_features = NETIF_F_HW_CSUM;

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
		/* Init ath dfs pattern detector */
		ar->ath_common.debug_mask = ATH_DBG_DFS;
		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
							     NL80211_DFS_UNSET);

		if (!ar->dfs_detector)
			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
	}

	/* Current wake_tx_queue implementation imposes a significant
	 * performance penalty in some setups. The tx scheduling code needs
	 * more work anyway so disable the wake_tx_queue unless firmware
	 * supports the pull-push mechanism.
	 */
	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		ar->ops->wake_tx_queue = NULL;

	ret = ath10k_mac_init_rd(ar);
	if (ret) {
		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	/* Disable set_coverage_class for chipsets that do not support it. */
	if (!ar->hw_params.hw_ops->set_coverage_class)
		ar->ops->set_coverage_class = NULL;

	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
			    ath10k_reg_notifier);
	if (ret) {
		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
		goto err_dfs_detector_exit;
	}

	ar->hw->wiphy->cipher_suites = cipher_suites;
	ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ret = ieee80211_register_hw(ar->hw);
	if (ret) {
		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
		ret = regulatory_hint(ar->hw->wiphy,
				      ar->ath_common.regulatory.alpha2);
		if (ret)
			goto err_unregister;
	}

	return 0;

err_unregister:
	ieee80211_unregister_hw(ar->hw);

err_dfs_detector_exit:
	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

err_free:
	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
	return ret;
}

void ath10k_mac_unregister(struct ath10k *ar)
{
	ieee80211_unregister_hw(ar->hw);

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
}
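
/*
 * Illustrative sketch, not part of the original driver: the expected
 * pairing of the two entry points above. In the real driver the core
 * probe/teardown paths make these calls; the helper name here is
 * hypothetical and only documents the intended usage and error handling.
 */
static int __maybe_unused ath10k_mac_example_bringup(struct ath10k *ar)
{
	int ret;

	ret = ath10k_mac_register(ar);
	if (ret) {
		ath10k_warn(ar, "failed to register mac: %d\n", ret);
		return ret;
	}

	/* ... device operates; on teardown undo the registration ... */
	ath10k_mac_unregister(ar);

	return 0;
}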