/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mac.h"

#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/acpi.h>

#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "htt.h"
#include "txrx.h"
#include "testmode.h"
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"

/*********/
/* Rates */
/*********/

static struct ieee80211_rate ath10k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
	{ .bitrate = 20,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },

	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};

static struct ieee80211_rate ath10k_rates_rev2[] = {
	{ .bitrate = 10,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
	{ .bitrate = 20,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },

	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};

#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4

#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
			     ATH10K_MAC_FIRST_OFDM_RATE_IDX)
#define ath10k_g_rates (ath10k_rates + 0)
#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))

#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
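/* For reference: mac80211 expresses .bitrate in 100 kbps units, so e.g.
 * 55 above means 5.5 Mbps. The first four entries of each table are the
 * CCK rates and the remaining eight are OFDM, which is exactly what
 * ATH10K_MAC_FIRST_OFDM_RATE_IDX encodes: ath10k_a_rates/_size simply
 * skip the CCK entries, since 5 GHz has no CCK support.
 */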
static bool ath10k_mac_bitrate_is_cck(int bitrate)
{
	switch (bitrate) {
	case 10:
	case 20:
	case 55:
	case 110:
		return true;
	}

	return false;
}

static u8 ath10k_mac_bitrate_to_rate(int bitrate)
{
	return DIV_ROUND_UP(bitrate, 5) |
	       (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
}

u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
			     u8 hw_rate, bool cck)
{
	const struct ieee80211_rate *rate;
	int i;

	for (i = 0; i < sband->n_bitrates; i++) {
		rate = &sband->bitrates[i];

		if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
			continue;

		if (rate->hw_value == hw_rate)
			return i;
		else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
			 rate->hw_value_short == hw_rate)
			return i;
	}

	return 0;
}

u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
			     u32 bitrate)
{
	int i;

	for (i = 0; i < sband->n_bitrates; i++)
		if (sband->bitrates[i].bitrate == bitrate)
			return i;

	return 0;
}

static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
{
	switch ((mcs_map >> (2 * nss)) & 0x3) {
	case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
	case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
	case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
	}
	return 0;
}

static u32
ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
{
	int nss;

	for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
		if (ht_mcs_mask[nss])
			return nss + 1;

	return 1;
}

static u32
ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
{
	int nss;

	for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
		if (vht_mcs_mask[nss])
			return nss + 1;

	return 1;
}

int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
{
	enum wmi_host_platform_type platform_type;
	int ret;

	if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
		platform_type = WMI_HOST_PLATFORM_LOW_PERF;
	else
		platform_type = WMI_HOST_PLATFORM_HIGH_PERF;

	ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);

	if (ret && ret != -EOPNOTSUPP) {
		ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
		return ret;
	}

	return 0;
}
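/* A few worked examples for the rate helpers above (illustration only):
 *
 *   ath10k_mac_bitrate_to_rate(55)  = DIV_ROUND_UP(55, 5) | BIT(7) = 0x8b
 *   ath10k_mac_bitrate_to_rate(540) = DIV_ROUND_UP(540, 5)         = 0x6c
 *
 * i.e. the rate code handed to firmware is the bitrate in 500 kbps units
 * with bit 7 marking CCK. Similarly, ath10k_mac_get_max_vht_mcs_map()
 * extracts the 2-bit MCS support value for a given spatial stream from the
 * VHT MCS map, e.g. IEEE80211_VHT_MCS_SUPPORT_0_9 yields BIT(10) - 1, a
 * bitmask covering MCS 0-9.
 */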
/**********/
/* Crypto */
/**********/

static int ath10k_send_key(struct ath10k_vif *arvif,
			   struct ieee80211_key_conf *key,
			   enum set_key_cmd cmd,
			   const u8 *macaddr, u32 flags)
{
	struct ath10k *ar = arvif->ar;
	struct wmi_vdev_install_key_arg arg = {
		.vdev_id = arvif->vdev_id,
		.key_idx = key->keyidx,
		.key_len = key->keylen,
		.key_data = key->key,
		.key_flags = flags,
		.macaddr = macaddr,
	};

	lockdep_assert_held(&arvif->ar->conf_mutex);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		arg.key_cipher = WMI_CIPHER_AES_CCM;
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		arg.key_cipher = WMI_CIPHER_TKIP;
		arg.key_txmic_len = 8;
		arg.key_rxmic_len = 8;
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		arg.key_cipher = WMI_CIPHER_WEP;
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		WARN_ON(1);
		return -EINVAL;
	default:
		ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
		return -EOPNOTSUPP;
	}

	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	if (cmd == DISABLE_KEY) {
		arg.key_cipher = WMI_CIPHER_NONE;
		arg.key_data = NULL;
	}

	return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
}

static int ath10k_install_key(struct ath10k_vif *arvif,
			      struct ieee80211_key_conf *key,
			      enum set_key_cmd cmd,
			      const u8 *macaddr, u32 flags)
{
	struct ath10k *ar = arvif->ar;
	int ret;
	unsigned long time_left;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->install_key_done);

	if (arvif->nohwcrypt)
		return 1;

	ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
	if (ret)
		return ret;

	time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}
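/* Note on the flow above: ath10k_send_key() translates a mac80211 key into
 * a WMI vdev install key command, and ath10k_install_key() then blocks (up
 * to 3 s) on install_key_done, which is presumably completed from the WMI
 * install-key event path. A DISABLE_KEY request is sent as WMI_CIPHER_NONE
 * with no key material, which is how the firmware is told to drop the key.
 */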
static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
					const u8 *addr)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int ret;
	int i;
	u32 flags;

	lockdep_assert_held(&ar->conf_mutex);

	if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
		    arvif->vif->type != NL80211_IFTYPE_ADHOC &&
		    arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
		return -EINVAL;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
	spin_unlock_bh(&ar->data_lock);

	if (!peer)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
		if (arvif->wep_keys[i] == NULL)
			continue;

		switch (arvif->vif->type) {
		case NL80211_IFTYPE_AP:
			flags = WMI_KEY_PAIRWISE;

			if (arvif->def_wep_key_idx == i)
				flags |= WMI_KEY_TX_USAGE;

			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
						 SET_KEY, addr, flags);
			if (ret < 0)
				return ret;
			break;
		case NL80211_IFTYPE_ADHOC:
			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
						 SET_KEY, addr,
						 WMI_KEY_PAIRWISE);
			if (ret < 0)
				return ret;

			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
						 SET_KEY, addr, WMI_KEY_GROUP);
			if (ret < 0)
				return ret;
			break;
		default:
			WARN_ON(1);
			return -EINVAL;
		}

		spin_lock_bh(&ar->data_lock);
		peer->keys[i] = arvif->wep_keys[i];
		spin_unlock_bh(&ar->data_lock);
	}

	/* In some cases (notably with static WEP IBSS with multiple keys)
	 * multicast Tx becomes broken. Both pairwise and groupwise keys are
	 * installed already. Using WMI_KEY_TX_USAGE in different combinations
	 * didn't seem to help. Using the def_keyid vdev parameter seems to be
	 * effective so use that.
	 *
	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
	 */
	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
		return 0;

	if (arvif->def_wep_key_idx == -1)
		return 0;

	ret = ath10k_wmi_vdev_set_param(arvif->ar,
					arvif->vdev_id,
					arvif->ar->wmi.vdev_param->def_keyid,
					arvif->def_wep_key_idx);
	if (ret) {
		ath10k_warn(ar, "failed to re-set def wep key idx on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
				  const u8 *addr)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int first_errno = 0;
	int ret;
	int i;
	u32 flags = 0;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
	spin_unlock_bh(&ar->data_lock);

	if (!peer)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
		if (peer->keys[i] == NULL)
			continue;

		/* key flags are not required to delete the key */
		ret = ath10k_install_key(arvif, peer->keys[i],
					 DISABLE_KEY, addr, flags);
		if (ret < 0 && first_errno == 0)
			first_errno = ret;

		if (ret < 0)
			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
				    i, ret);

		spin_lock_bh(&ar->data_lock);
		peer->keys[i] = NULL;
		spin_unlock_bh(&ar->data_lock);
	}

	return first_errno;
}

bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
				    u8 keyidx)
{
	struct ath10k_peer *peer;
	int i;

	lockdep_assert_held(&ar->data_lock);

	/* We don't know which vdev this peer belongs to,
	 * since WMI doesn't give us that information.
	 *
	 * FIXME: multi-bss needs to be handled.
	 */
	peer = ath10k_peer_find(ar, 0, addr);
	if (!peer)
		return false;

	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
			return true;
	}

	return false;
}

static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
				 struct ieee80211_key_conf *key)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	u8 addr[ETH_ALEN];
	int first_errno = 0;
	int ret;
	int i;
	u32 flags = 0;

	lockdep_assert_held(&ar->conf_mutex);

	for (;;) {
		/* since ath10k_install_key() can sleep we can't hold data_lock
		 * all the time, so we try to remove the keys incrementally
		 */
		spin_lock_bh(&ar->data_lock);
		i = 0;
		list_for_each_entry(peer, &ar->peers, list) {
			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
				if (peer->keys[i] == key) {
					ether_addr_copy(addr, peer->addr);
					peer->keys[i] = NULL;
					break;
				}
			}

			if (i < ARRAY_SIZE(peer->keys))
				break;
		}
		spin_unlock_bh(&ar->data_lock);

		if (i == ARRAY_SIZE(peer->keys))
			break;
		/* key flags are not required to delete the key */
		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
		if (ret < 0 && first_errno == 0)
			first_errno = ret;

		if (ret)
			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
				    addr, ret);
	}

	return first_errno;
}

static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
					 struct ieee80211_key_conf *key)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(peer, &ar->peers, list) {
		if (ether_addr_equal(peer->addr, arvif->vif->addr))
			continue;

		if (ether_addr_equal(peer->addr, arvif->bssid))
			continue;

		if (peer->keys[key->keyidx] == key)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i key %i needs update\n",
			   arvif->vdev_id, key->keyidx);

		ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
		if (ret) {
			ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
				    arvif->vdev_id, peer->addr, ret);
			return ret;
		}
	}

	return 0;
}
/*********************/
/* General utilities */
/*********************/

static inline enum wmi_phy_mode
chan_to_phymode(const struct cfg80211_chan_def *chandef)
{
	enum wmi_phy_mode phymode = MODE_UNKNOWN;

	switch (chandef->chan->band) {
	case NL80211_BAND_2GHZ:
		switch (chandef->width) {
		case NL80211_CHAN_WIDTH_20_NOHT:
			if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
				phymode = MODE_11B;
			else
				phymode = MODE_11G;
			break;
		case NL80211_CHAN_WIDTH_20:
			phymode = MODE_11NG_HT20;
			break;
		case NL80211_CHAN_WIDTH_40:
			phymode = MODE_11NG_HT40;
			break;
		case NL80211_CHAN_WIDTH_5:
		case NL80211_CHAN_WIDTH_10:
		case NL80211_CHAN_WIDTH_80:
		case NL80211_CHAN_WIDTH_80P80:
		case NL80211_CHAN_WIDTH_160:
			phymode = MODE_UNKNOWN;
			break;
		}
		break;
	case NL80211_BAND_5GHZ:
		switch (chandef->width) {
		case NL80211_CHAN_WIDTH_20_NOHT:
			phymode = MODE_11A;
			break;
		case NL80211_CHAN_WIDTH_20:
			phymode = MODE_11NA_HT20;
			break;
		case NL80211_CHAN_WIDTH_40:
			phymode = MODE_11NA_HT40;
			break;
		case NL80211_CHAN_WIDTH_80:
			phymode = MODE_11AC_VHT80;
			break;
		case NL80211_CHAN_WIDTH_160:
			phymode = MODE_11AC_VHT160;
			break;
		case NL80211_CHAN_WIDTH_80P80:
			phymode = MODE_11AC_VHT80_80;
			break;
		case NL80211_CHAN_WIDTH_5:
		case NL80211_CHAN_WIDTH_10:
			phymode = MODE_UNKNOWN;
			break;
		}
		break;
	default:
		break;
	}

	WARN_ON(phymode == MODE_UNKNOWN);
	return phymode;
}

static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
{
	/*
	 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
	 *   0 for no restriction
	 *   1 for 1/4 us
	 *   2 for 1/2 us
	 *   3 for 1 us
	 *   4 for 2 us
	 *   5 for 4 us
	 *   6 for 8 us
	 *   7 for 16 us
	 */
	switch (mpdudensity) {
	case 0:
		return 0;
	case 1:
	case 2:
	case 3:
		/* Our lower layer calculations limit our precision to
		 * 1 microsecond
		 */
		return 1;
	case 4:
		return 2;
	case 5:
		return 4;
	case 6:
		return 8;
	case 7:
		return 16;
	default:
		return 0;
	}
}

int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
			struct cfg80211_chan_def *def)
{
	struct ieee80211_chanctx_conf *conf;

	rcu_read_lock();
	conf = rcu_dereference(vif->chanctx_conf);
	if (!conf) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*def = conf->def;
	rcu_read_unlock();

	return 0;
}

static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
					 struct ieee80211_chanctx_conf *conf,
					 void *data)
{
	int *num = data;

	(*num)++;
}

static int ath10k_mac_num_chanctxs(struct ath10k *ar)
{
	int num = 0;

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_mac_num_chanctxs_iter,
					    &num);

	return num;
}

static void
ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
				struct ieee80211_chanctx_conf *conf,
				void *data)
{
	struct cfg80211_chan_def **def = data;

	*def = &conf->def;
}
static int ath10k_peer_create(struct ath10k *ar,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta,
			      u32 vdev_id,
			      const u8 *addr,
			      enum wmi_peer_type peer_type)
{
	struct ath10k_vif *arvif;
	struct ath10k_peer *peer;
	int num_peers = 0;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	num_peers = ar->num_peers;

	/* Each vdev consumes a peer entry as well */
	list_for_each_entry(arvif, &ar->arvifs, list)
		num_peers++;

	if (num_peers >= ar->max_num_peers)
		return -ENOBUFS;

	ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
	if (ret) {
		ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
			    addr, vdev_id, ret);
		return ret;
	}

	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
	if (ret) {
		ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
			    addr, vdev_id, ret);
		return ret;
	}

	spin_lock_bh(&ar->data_lock);

	peer = ath10k_peer_find(ar, vdev_id, addr);
	if (!peer) {
		spin_unlock_bh(&ar->data_lock);
		ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
			    addr, vdev_id);
		ath10k_wmi_peer_delete(ar, vdev_id, addr);
		return -ENOENT;
	}

	peer->vif = vif;
	peer->sta = sta;

	spin_unlock_bh(&ar->data_lock);

	ar->num_peers++;

	return 0;
}

static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 param;
	int ret;

	param = ar->wmi.pdev_param->sta_kickout_th;
	ret = ath10k_wmi_pdev_set_param(ar, param,
					ATH10K_KICKOUT_THRESHOLD);
	if (ret) {
		ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
					ATH10K_KEEPALIVE_MIN_IDLE);
	if (ret) {
		ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
					ATH10K_KEEPALIVE_MAX_IDLE);
	if (ret) {
		ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
					ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
	if (ret) {
		ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param;

	vdev_param = ar->wmi.vdev_param->rts_threshold;
	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}

static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
	if (ret)
		return ret;

	ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
	if (ret)
		return ret;

	ar->num_peers--;

	return 0;
}
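/* Peer accounting note: ath10k_peer_create() counts every vif as consuming
 * one firmware peer entry in addition to ar->num_peers and refuses with
 * -ENOBUFS once ar->max_num_peers would be exceeded. The counter is only
 * incremented after the firmware has confirmed creation, and is decremented
 * symmetrically in ath10k_peer_delete() once the peer is really gone.
 */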
static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_peer *peer, *tmp;
	int peer_id;
	int i;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;

		ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
			    peer->addr, vdev_id);

		for_each_set_bit(peer_id, peer->peer_ids,
				 ATH10K_MAX_NUM_PEER_IDS) {
			ar->peer_map[peer_id] = NULL;
		}

		/* Double check that peer is properly un-referenced from
		 * the peer_map
		 */
		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
			if (ar->peer_map[i] == peer) {
				ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
					    peer->addr, peer, i);
				ar->peer_map[i] = NULL;
			}
		}

		list_del(&peer->list);
		kfree(peer);
		ar->num_peers--;
	}
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_peer_cleanup_all(struct ath10k *ar)
{
	struct ath10k_peer *peer, *tmp;
	int i;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
		list_del(&peer->list);
		kfree(peer);
	}

	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
		ar->peer_map[i] = NULL;

	spin_unlock_bh(&ar->data_lock);

	ar->num_peers = 0;
	ar->num_stations = 0;
}

static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
				       struct ieee80211_sta *sta,
				       enum wmi_tdls_peer_state state)
{
	int ret;
	struct wmi_tdls_peer_update_cmd_arg arg = {};
	struct wmi_tdls_peer_capab_arg cap = {};
	struct wmi_channel_arg chan_arg = {};

	lockdep_assert_held(&ar->conf_mutex);

	arg.vdev_id = vdev_id;
	arg.peer_state = state;
	ether_addr_copy(arg.addr, sta->addr);

	cap.peer_max_sp = sta->max_sp;
	cap.peer_uapsd_queues = sta->uapsd_queues;

	if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
	    !sta->tdls_initiator)
		cap.is_peer_responder = 1;

	ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
	if (ret) {
		ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
			    arg.addr, vdev_id, ret);
		return ret;
	}

	return 0;
}

/************************/
/* Interface management */
/************************/

void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;

	lockdep_assert_held(&ar->data_lock);

	if (!arvif->beacon)
		return;

	if (!arvif->beacon_buf)
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
				 arvif->beacon->len, DMA_TO_DEVICE);

	if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
		    arvif->beacon_state != ATH10K_BEACON_SENT))
		return;

	dev_kfree_skb_any(arvif->beacon);

	arvif->beacon = NULL;
	arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
}

static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;

	lockdep_assert_held(&ar->data_lock);

	ath10k_mac_vif_beacon_free(arvif);

	if (arvif->beacon_buf) {
		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
				  arvif->beacon_buf, arvif->beacon_paddr);
		arvif->beacon_buf = NULL;
	}
}

static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
	unsigned long time_left;

	lockdep_assert_held(&ar->conf_mutex);

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		return -ESHUTDOWN;

	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}
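/* Synchronisation note: vdev start/restart/stop are asynchronous WMI
 * commands. Callers reinit ar->vdev_setup_done before issuing the command
 * and then call ath10k_vdev_setup_sync(), which waits for the matching WMI
 * response event (presumably the vdev start response / stopped event) to
 * complete it, bailing out early with -ESHUTDOWN if the firmware has
 * already crashed.
 */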
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
	struct cfg80211_chan_def *chandef = NULL;
	struct ieee80211_channel *channel = NULL;
	struct wmi_vdev_start_request_arg arg = {};
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_mac_get_any_chandef_iter,
					    &chandef);
	if (WARN_ON_ONCE(!chandef))
		return -ENOENT;

	channel = chandef->chan;

	arg.vdev_id = vdev_id;
	arg.channel.freq = channel->center_freq;
	arg.channel.band_center_freq1 = chandef->center_freq1;
	arg.channel.band_center_freq2 = chandef->center_freq2;

	/* TODO: set this up dynamically; what if we don't have any vifs? */
	arg.channel.mode = chan_to_phymode(chandef);
	arg.channel.chan_radar =
			!!(channel->flags & IEEE80211_CHAN_RADAR);

	arg.channel.min_power = 0;
	arg.channel.max_power = channel->max_power * 2;
	arg.channel.max_reg_power = channel->max_reg_power * 2;
	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_start(ar, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
	if (ret) {
		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
			    vdev_id, ret);
		goto vdev_stop;
	}

	ar->monitor_vdev_id = vdev_id;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
		   ar->monitor_vdev_id);
	return 0;

vdev_stop:
	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
			    ar->monitor_vdev_id, ret);

	return ret;
}

static int ath10k_monitor_vdev_stop(struct ath10k *ar)
{
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
			    ar->monitor_vdev_id, ret);

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ret = ath10k_vdev_setup_sync(ar);
	if (ret)
		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
		   ar->monitor_vdev_id);
	return ret;
}

static int ath10k_monitor_vdev_create(struct ath10k *ar)
{
	int bit, ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->free_vdev_map == 0) {
		ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
		return -ENOMEM;
	}

	bit = __ffs64(ar->free_vdev_map);

	ar->monitor_vdev_id = bit;

	ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
				     WMI_VDEV_TYPE_MONITOR,
				     0, ar->mac_addr);
	if (ret) {
		ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
			    ar->monitor_vdev_id, ret);
		return ret;
	}

	ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
		   ar->monitor_vdev_id);

	return 0;
}
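/* Vdev id allocation note: ar->free_vdev_map is a 64-bit bitmask of unused
 * vdev ids. __ffs64() above picks the lowest set bit as the monitor vdev id,
 * the bit is cleared once the firmware accepts the create request, and it is
 * set again in ath10k_monitor_vdev_delete(). E.g. with a map of 0xf8 the
 * monitor vdev would get id 3.
 */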
static int ath10k_monitor_vdev_delete(struct ath10k *ar)
{
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
			    ar->monitor_vdev_id, ret);
		return ret;
	}

	ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
		   ar->monitor_vdev_id);
	return ret;
}

static int ath10k_monitor_start(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_monitor_vdev_create(ar);
	if (ret) {
		ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
		return ret;
	}

	ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
		ath10k_monitor_vdev_delete(ar);
		return ret;
	}

	ar->monitor_started = true;
	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");

	return 0;
}

static int ath10k_monitor_stop(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_monitor_vdev_stop(ar);
	if (ret) {
		ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
		return ret;
	}

	ret = ath10k_monitor_vdev_delete(ar);
	if (ret) {
		ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
		return ret;
	}

	ar->monitor_started = false;
	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");

	return 0;
}

static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
{
	int num_ctx;

	/* At least one chanctx is required to derive a channel to start
	 * monitor vdev on.
	 */
	num_ctx = ath10k_mac_num_chanctxs(ar);
	if (num_ctx == 0)
		return false;

	/* If there's already an existing special monitor interface then don't
	 * bother creating another monitor vdev.
	 */
	if (ar->monitor_arvif)
		return false;

	return ar->monitor ||
	       (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
			  ar->running_fw->fw_file.fw_features) &&
		(ar->filter_flags & FIF_OTHER_BSS)) ||
	       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
}

static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
{
	int num_ctx;

	num_ctx = ath10k_mac_num_chanctxs(ar);

	/* FIXME: Current interface combinations and cfg80211/mac80211 code
	 * shouldn't allow this but make sure to prevent handling the following
	 * case anyway since multi-channel DFS hasn't been tested at all.
	 */
	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
		return false;

	return true;
}
static int ath10k_monitor_recalc(struct ath10k *ar)
{
	bool needed;
	bool allowed;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	needed = ath10k_mac_monitor_vdev_is_needed(ar);
	allowed = ath10k_mac_monitor_vdev_is_allowed(ar);

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac monitor recalc started? %d needed? %d allowed? %d\n",
		   ar->monitor_started, needed, allowed);

	if (WARN_ON(needed && !allowed)) {
		if (ar->monitor_started) {
			ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");

			ret = ath10k_monitor_stop(ar);
			if (ret)
				ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
					    ret);
				/* not serious */
		}

		return -EPERM;
	}

	if (needed == ar->monitor_started)
		return 0;

	if (needed)
		return ath10k_monitor_start(ar);
	else
		return ath10k_monitor_stop(ar);
}
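/* In short, ath10k_monitor_recalc() reconciles firmware state with what the
 * stack currently wants: a monitor vdev is "needed" for explicit monitor
 * mode, for FIF_OTHER_BSS on firmware lacking ALLOWS_MESH_BCAST, or while
 * CAC is running, and it is "allowed" unless CAC runs with more than one
 * channel context. When needed but disallowed, any running monitor is
 * stopped best-effort and -EPERM is returned.
 */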
static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;

	lockdep_assert_held(&ar->conf_mutex);

	if (!arvif->is_started) {
		ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
		return false;
	}

	return true;
}

static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param;

	lockdep_assert_held(&ar->conf_mutex);

	vdev_param = ar->wmi.vdev_param->protection_mode;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
		   arvif->vdev_id, arvif->use_cts_prot);

	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
					 arvif->use_cts_prot ? 1 : 0);
}

static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param, rts_cts = 0;

	lockdep_assert_held(&ar->conf_mutex);

	vdev_param = ar->wmi.vdev_param->enable_rtscts;

	rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);

	if (arvif->num_legacy_stations > 0)
		rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
			      WMI_RTSCTS_PROFILE);
	else
		rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
			      WMI_RTSCTS_PROFILE);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
		   arvif->vdev_id, rts_cts);

	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
					 rts_cts);
}

static int ath10k_start_cac(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);

	ret = ath10k_monitor_recalc(ar);
	if (ret) {
		ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
		   ar->monitor_vdev_id);

	return 0;
}

static int ath10k_stop_cac(struct ath10k *ar)
{
	lockdep_assert_held(&ar->conf_mutex);

	/* CAC is not running - do nothing */
	if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
		return 0;

	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
	ath10k_monitor_stop(ar);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");

	return 0;
}

static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
				      struct ieee80211_chanctx_conf *conf,
				      void *data)
{
	bool *ret = data;

	if (!*ret && conf->radar_enabled)
		*ret = true;
}

static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
{
	bool has_radar = false;

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_mac_has_radar_iter,
					    &has_radar);

	return has_radar;
}

static void ath10k_recalc_radar_detection(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ath10k_stop_cac(ar);

	if (!ath10k_mac_has_radar_enabled(ar))
		return;

	if (ar->num_started_vdevs > 0)
		return;

	ret = ath10k_start_cac(ar);
	if (ret) {
		/*
		 * Not possible to start CAC on current channel so starting
		 * radiation is not allowed, make this channel DFS_UNAVAILABLE
		 * by indicating that radar was detected.
		 */
		ath10k_warn(ar, "failed to start CAC: %d\n", ret);
		ieee80211_radar_detected(ar->hw);
	}
}
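/* DFS note: radar detection is recalculated whenever vdevs start or stop.
 * CAC, which ath10k implements by running the monitor vdev, is only started
 * when some channel context has radar_enabled and no other vdev has been
 * started yet. Reporting a (fake) radar event when CAC cannot be started
 * keeps mac80211 from transmitting on a channel the hardware cannot guard,
 * as the comment above explains.
 */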
static int ath10k_vdev_stop(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar, "failed to synchronise setup for vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	WARN_ON(ar->num_started_vdevs == 0);

	if (ar->num_started_vdevs != 0) {
		ar->num_started_vdevs--;
		ath10k_recalc_radar_detection(ar);
	}

	return ret;
}

static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
				     const struct cfg80211_chan_def *chandef,
				     bool restart)
{
	struct ath10k *ar = arvif->ar;
	struct wmi_vdev_start_request_arg arg = {};
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->vdev_setup_done);

	arg.vdev_id = arvif->vdev_id;
	arg.dtim_period = arvif->dtim_period;
	arg.bcn_intval = arvif->beacon_interval;

	arg.channel.freq = chandef->chan->center_freq;
	arg.channel.band_center_freq1 = chandef->center_freq1;
	arg.channel.band_center_freq2 = chandef->center_freq2;
	arg.channel.mode = chan_to_phymode(chandef);

	arg.channel.min_power = 0;
	arg.channel.max_power = chandef->chan->max_power * 2;
	arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;

	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
		arg.ssid = arvif->u.ap.ssid;
		arg.ssid_len = arvif->u.ap.ssid_len;
		arg.hidden_ssid = arvif->u.ap.hidden_ssid;

		/* For now allow DFS for AP mode */
		arg.channel.chan_radar =
			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
		arg.ssid = arvif->vif->bss_conf.ssid;
		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac vdev %d start center_freq %d phymode %s\n",
		   arg.vdev_id, arg.channel.freq,
		   ath10k_wmi_phymode_str(arg.channel.mode));

	if (restart)
		ret = ath10k_wmi_vdev_restart(ar, &arg);
	else
		ret = ath10k_wmi_vdev_start(ar, &arg);

	if (ret) {
		ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
			    arg.vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar,
			    "failed to synchronize setup for vdev %i restart %d: %d\n",
			    arg.vdev_id, restart, ret);
		return ret;
	}

	ar->num_started_vdevs++;
	ath10k_recalc_radar_detection(ar);

	return ret;
}

static int ath10k_vdev_start(struct ath10k_vif *arvif,
			     const struct cfg80211_chan_def *def)
{
	return ath10k_vdev_start_restart(arvif, def, false);
}

static int ath10k_vdev_restart(struct ath10k_vif *arvif,
			       const struct cfg80211_chan_def *def)
{
	return ath10k_vdev_start_restart(arvif, def, true);
}
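/* Units note (illustration, not derived from this file alone): the channel
 * power fields in the WMI start request above appear to be encoded in
 * 0.5 dBm steps, which is why max_power, max_reg_power and max_antenna_gain
 * from mac80211 (whole dBm / dBi) are multiplied by two before being handed
 * to the firmware.
 */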
static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
				       struct sk_buff *bcn)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_mgmt *mgmt;
	const u8 *p2p_ie;
	int ret;

	if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
		return 0;

	mgmt = (void *)bcn->data;
	p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
					 mgmt->u.beacon.variable,
					 bcn->len - (mgmt->u.beacon.variable -
						     bcn->data));
	if (!p2p_ie)
		return -ENOENT;

	ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
	if (ret) {
		ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
					u8 oui_type, size_t ie_offset)
{
	size_t len;
	const u8 *next;
	const u8 *end;
	u8 *ie;

	if (WARN_ON(skb->len < ie_offset))
		return -EINVAL;

	ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
					   skb->data + ie_offset,
					   skb->len - ie_offset);
	if (!ie)
		return -ENOENT;

	len = ie[1] + 2;
	end = skb->data + skb->len;
	next = ie + len;

	if (WARN_ON(next > end))
		return -EINVAL;

	memmove(ie, next, end - next);
	skb_trim(skb, skb->len - len);

	return 0;
}
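/* IE removal arithmetic, for illustration: an information element is TLV
 * encoded, so its total size is ie[1] (payload length) plus 2 bytes of
 * header. ath10k_mac_remove_vendor_ie() therefore shifts everything after
 * the IE down by that many bytes with memmove() and trims the skb
 * accordingly, e.g. a P2P IE with ie[1] == 24 removes 26 bytes from the
 * beacon.
 */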
static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_vif *vif = arvif->vif;
	struct ieee80211_mutable_offsets offs = {};
	struct sk_buff *bcn;
	int ret;

	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
		return 0;

	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
		return 0;

	bcn = ieee80211_beacon_get_template(hw, vif, &offs);
	if (!bcn) {
		ath10k_warn(ar, "failed to get beacon template from mac80211\n");
		return -EPERM;
	}

	ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
	if (ret) {
		ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
		kfree_skb(bcn);
		return ret;
	}

	/* P2P IE is inserted by firmware automatically (as configured above)
	 * so remove it from the base beacon template to avoid duplicate P2P
	 * IEs in beacon frames.
	 */
	ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
				    offsetof(struct ieee80211_mgmt,
					     u.beacon.variable));

	ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
				  0, NULL, 0);
	kfree_skb(bcn);

	if (ret) {
		ath10k_warn(ar, "failed to submit beacon template command: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_vif *vif = arvif->vif;
	struct sk_buff *prb;
	int ret;

	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
		return 0;

	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
		return 0;

	prb = ieee80211_proberesp_get(hw, vif);
	if (!prb) {
		ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
		return -EPERM;
	}

	ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
	kfree_skb(prb);

	if (ret) {
		ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct cfg80211_chan_def def;
	int ret;

	/* When the vdev is originally started during assign_vif_chanctx()
	 * some information is missing, notably the SSID. Firmware revisions
	 * with beacon offloading require the SSID to be provided during vdev
	 * (re)start to handle hidden SSID properly.
	 *
	 * Vdev restart must be done after the vdev has been both started and
	 * upped. Otherwise some firmware revisions (at least 10.2) fail to
	 * deliver the vdev restart response event, causing timeouts during
	 * vdev syncing in ath10k.
	 *
	 * Note: The vdev down/up and template reinstallation could be skipped
	 * since only wmi-tlv firmware is known to have beacon offload and
	 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
	 * response delivery. It's probably more robust to keep it as is.
	 */
	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
		return 0;

	if (WARN_ON(!arvif->is_started))
		return -EINVAL;

	if (WARN_ON(!arvif->is_up))
		return -EINVAL;

	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
		return -EINVAL;

	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	/* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
	 * firmware will crash upon vdev up.
	 */
	ret = ath10k_mac_setup_bcn_tmpl(arvif);
	if (ret) {
		ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
		return ret;
	}

	ret = ath10k_mac_setup_prb_tmpl(arvif);
	if (ret) {
		ath10k_warn(ar, "failed to update presp template: %d\n", ret);
		return ret;
	}

	ret = ath10k_vdev_restart(arvif, &def);
	if (ret) {
		ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
				 arvif->bssid);
	if (ret) {
		ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
				     struct ieee80211_bss_conf *info)
{
	struct ath10k *ar = arvif->ar;
	int ret = 0;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (!info->enable_beacon) {
		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
		if (ret)
			ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
				    arvif->vdev_id, ret);

		arvif->is_up = false;

		spin_lock_bh(&arvif->ar->data_lock);
		ath10k_mac_vif_beacon_free(arvif);
		spin_unlock_bh(&arvif->ar->data_lock);

		return;
	}

	arvif->tx_seq_no = 0x1000;

	arvif->aid = 0;
	ether_addr_copy(arvif->bssid, info->bssid);

	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
				 arvif->bssid);
	if (ret) {
		ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
			    arvif->vdev_id, ret);
		return;
	}

	arvif->is_up = true;

	ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
	if (ret) {
		ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
			    arvif->vdev_id, ret);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}

static void ath10k_control_ibss(struct ath10k_vif *arvif,
				struct ieee80211_bss_conf *info,
				const u8 self_peer[ETH_ALEN])
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param;
	int ret = 0;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (!info->ibss_joined) {
		if (is_zero_ether_addr(arvif->bssid))
			return;

		eth_zero_addr(arvif->bssid);

		return;
	}

	vdev_param = arvif->ar->wmi.vdev_param->atim_window;
	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
					ATH10K_DEFAULT_ATIM);
	if (ret)
		ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
			    arvif->vdev_id, ret);
}

static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 param;
	u32 value;
	int ret;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (arvif->u.sta.uapsd)
		value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
	else
		value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;

	param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
	if (ret) {
		ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
			    value, arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 param;
	u32 value;
	int ret;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (arvif->u.sta.uapsd)
		value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
	else
		value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;

	param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
					  param, value);
	if (ret) {
		ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
			    value, arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}
static int ath10k_mac_num_vifs_started(struct ath10k *ar)
{
	struct ath10k_vif *arvif;
	int num = 0;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list)
		if (arvif->is_started)
			num++;

	return num;
}

static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_vif *vif = arvif->vif;
	struct ieee80211_conf *conf = &ar->hw->conf;
	enum wmi_sta_powersave_param param;
	enum wmi_sta_ps_mode psmode;
	int ret;
	int ps_timeout;
	bool enable_ps;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (arvif->vif->type != NL80211_IFTYPE_STATION)
		return 0;

	enable_ps = arvif->ps;

	if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
		      ar->running_fw->fw_file.fw_features)) {
		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
			    arvif->vdev_id);
		enable_ps = false;
	}

	if (!arvif->is_started) {
		/* mac80211 can update vif powersave state while disconnected.
		 * Firmware doesn't behave nicely and consumes more power than
		 * necessary if PS is disabled on a non-started vdev. Hence
		 * force-enable PS for non-running vdevs.
		 */
		psmode = WMI_STA_PS_MODE_ENABLED;
	} else if (enable_ps) {
		psmode = WMI_STA_PS_MODE_ENABLED;
		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;

		ps_timeout = conf->dynamic_ps_timeout;
		if (ps_timeout == 0) {
			/* Firmware doesn't like 0 */
			ps_timeout = ieee80211_tu_to_usec(
				vif->bss_conf.beacon_int) / 1000;
		}

		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
						  ps_timeout);
		if (ret) {
			ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	} else {
		psmode = WMI_STA_PS_MODE_DISABLED;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
		   arvif->vdev_id, psmode ? "enable" : "disable");

	ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
	if (ret) {
		ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
			    psmode, arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}
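/* Units note for the inactivity fallback above: the beacon interval is in
 * TUs, ieee80211_tu_to_usec() converts it to microseconds and the / 1000
 * yields milliseconds, so when dynamic_ps_timeout is 0 the firmware's
 * inactivity time (presumably expressed in ms) defaults to one beacon
 * interval.
 */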
static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct wmi_sta_keepalive_arg arg = {};
	int ret;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
		return 0;

	if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
		return 0;

	/* Some firmware revisions have a bug and ignore the `enabled` field.
	 * Instead use the interval to disable the keepalive.
	 */
	arg.vdev_id = arvif->vdev_id;
	arg.enabled = 1;
	arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
	arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;

	ret = ath10k_wmi_sta_keepalive(ar, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_vif *vif = arvif->vif;
	int ret;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
		return;

	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
		return;

	if (!vif->csa_active)
		return;

	if (!arvif->is_up)
		return;

	if (!ieee80211_csa_is_complete(vif)) {
		ieee80211_csa_update_counter(vif);

		ret = ath10k_mac_setup_bcn_tmpl(arvif);
		if (ret)
			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
				    ret);

		ret = ath10k_mac_setup_prb_tmpl(arvif);
		if (ret)
			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
				    ret);
	} else {
		ieee80211_csa_finish(vif);
	}
}

static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
{
	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
						ap_csa_work);
	struct ath10k *ar = arvif->ar;

	mutex_lock(&ar->conf_mutex);
	ath10k_mac_vif_ap_csa_count_down(arvif);
	mutex_unlock(&ar->conf_mutex);
}

static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct sk_buff *skb = data;
	struct ieee80211_mgmt *mgmt = (void *)skb->data;
	struct ath10k_vif *arvif = (void *)vif->drv_priv;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
		return;

	cancel_delayed_work(&arvif->connection_loss_work);
}

void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
{
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   ath10k_mac_handle_beacon_iter,
						   skb);
}

static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
					       struct ieee80211_vif *vif)
{
	u32 *vdev_id = data;
	struct ath10k_vif *arvif = (void *)vif->drv_priv;
	struct ath10k *ar = arvif->ar;
	struct ieee80211_hw *hw = ar->hw;

	if (arvif->vdev_id != *vdev_id)
		return;

	if (!arvif->is_up)
		return;

	ieee80211_beacon_loss(vif);

	/* Firmware doesn't report beacon loss events repeatedly. If AP probe
	 * (done by mac80211) succeeds but beacons do not resume then it
	 * doesn't make sense to continue operation. Queue connection loss work
	 * which can be cancelled when beacon is received.
	 */
	ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
				     ATH10K_CONNECTION_LOSS_HZ);
}

void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
{
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   ath10k_mac_handle_beacon_miss_iter,
						   &vdev_id);
}
static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
{
	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
						connection_loss_work.work);
	struct ieee80211_vif *vif = arvif->vif;

	if (!arvif->is_up)
		return;

	ieee80211_connection_loss(vif);
}

/**********************/
/* Station management */
/**********************/

static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
					     struct ieee80211_vif *vif)
{
	/* Some firmware revisions have unstable STA powersave when listen
	 * interval is set too high (e.g. 5). The symptoms are firmware doesn't
	 * generate NullFunc frames properly even if buffered frames have been
	 * indicated in Beacon TIM. Firmware would seldom wake up to pull
	 * buffered frames. Often pinging the device from AP would simply fail.
	 *
	 * As a workaround set it to 1.
	 */
	if (vif->type == NL80211_IFTYPE_STATION)
		return 1;

	return ar->hw->conf.listen_interval;
}

static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
				      struct ieee80211_vif *vif,
				      struct ieee80211_sta *sta,
				      struct wmi_peer_assoc_complete_arg *arg)
{
	struct ath10k_vif *arvif = (void *)vif->drv_priv;
	u32 aid;

	lockdep_assert_held(&ar->conf_mutex);

	if (vif->type == NL80211_IFTYPE_STATION)
		aid = vif->bss_conf.aid;
	else
		aid = sta->aid;

	ether_addr_copy(arg->addr, sta->addr);
	arg->vdev_id = arvif->vdev_id;
	arg->peer_aid = aid;
	arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
	arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
	arg->peer_num_spatial_streams = 1;
	arg->peer_caps = vif->bss_conf.assoc_capability;
}

static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta,
				       struct wmi_peer_assoc_complete_arg *arg)
{
	struct ieee80211_bss_conf *info = &vif->bss_conf;
	struct cfg80211_chan_def def;
	struct cfg80211_bss *bss;
	const u8 *rsnie = NULL;
	const u8 *wpaie = NULL;

	lockdep_assert_held(&ar->conf_mutex);

	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
		return;

	bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
	if (bss) {
		const struct cfg80211_bss_ies *ies;

		rcu_read_lock();
		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);

		ies = rcu_dereference(bss->ies);

		wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
						WLAN_OUI_TYPE_MICROSOFT_WPA,
						ies->data,
						ies->len);
		rcu_read_unlock();
		cfg80211_put_bss(ar->hw->wiphy, bss);
	}

	/* FIXME: is basing this on the RSN/WPA IE the correct approach? */
	if (rsnie || wpaie) {
		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
		arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
	}

	if (wpaie) {
		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
		arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
	}

	if (sta->mfp &&
	    test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
		     ar->running_fw->fw_file.fw_features)) {
		arg->peer_flags |= ar->wmi.peer_flags->pmf;
	}
}
*/ 2143 if (rsnie || wpaie) { 2144 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); 2145 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way; 2146 } 2147 2148 if (wpaie) { 2149 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); 2150 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way; 2151 } 2152 2153 if (sta->mfp && 2154 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, 2155 ar->running_fw->fw_file.fw_features)) { 2156 arg->peer_flags |= ar->wmi.peer_flags->pmf; 2157 } 2158 } 2159 2160 static void ath10k_peer_assoc_h_rates(struct ath10k *ar, 2161 struct ieee80211_vif *vif, 2162 struct ieee80211_sta *sta, 2163 struct wmi_peer_assoc_complete_arg *arg) 2164 { 2165 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2166 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; 2167 struct cfg80211_chan_def def; 2168 const struct ieee80211_supported_band *sband; 2169 const struct ieee80211_rate *rates; 2170 enum nl80211_band band; 2171 u32 ratemask; 2172 u8 rate; 2173 int i; 2174 2175 lockdep_assert_held(&ar->conf_mutex); 2176 2177 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2178 return; 2179 2180 band = def.chan->band; 2181 sband = ar->hw->wiphy->bands[band]; 2182 ratemask = sta->supp_rates[band]; 2183 ratemask &= arvif->bitrate_mask.control[band].legacy; 2184 rates = sband->bitrates; 2185 2186 rateset->num_rates = 0; 2187 2188 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { 2189 if (!(ratemask & 1)) 2190 continue; 2191 2192 rate = ath10k_mac_bitrate_to_rate(rates->bitrate); 2193 rateset->rates[rateset->num_rates] = rate; 2194 rateset->num_rates++; 2195 } 2196 } 2197 2198 static bool 2199 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 2200 { 2201 int nss; 2202 2203 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) 2204 if (ht_mcs_mask[nss]) 2205 return false; 2206 2207 return true; 2208 } 2209 2210 static bool 2211 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 2212 { 2213 int nss; 2214 2215 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) 2216 if (vht_mcs_mask[nss]) 2217 return false; 2218 2219 return true; 2220 } 2221 2222 static void ath10k_peer_assoc_h_ht(struct ath10k *ar, 2223 struct ieee80211_vif *vif, 2224 struct ieee80211_sta *sta, 2225 struct wmi_peer_assoc_complete_arg *arg) 2226 { 2227 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2228 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2229 struct cfg80211_chan_def def; 2230 enum nl80211_band band; 2231 const u8 *ht_mcs_mask; 2232 const u16 *vht_mcs_mask; 2233 int i, n; 2234 u8 max_nss; 2235 u32 stbc; 2236 2237 lockdep_assert_held(&ar->conf_mutex); 2238 2239 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2240 return; 2241 2242 if (!ht_cap->ht_supported) 2243 return; 2244 2245 band = def.chan->band; 2246 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2247 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2248 2249 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) && 2250 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2251 return; 2252 2253 arg->peer_flags |= ar->wmi.peer_flags->ht; 2254 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2255 ht_cap->ampdu_factor)) - 1; 2256 2257 arg->peer_mpdu_density = 2258 ath10k_parse_mpdudensity(ht_cap->ampdu_density); 2259 2260 arg->peer_ht_caps = ht_cap->cap; 2261 arg->peer_rate_caps |= WMI_RC_HT_FLAG; 2262 2263 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) 2264 arg->peer_flags |= ar->wmi.peer_flags->ldbc; 2265 2266 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { 2267 
arg->peer_flags |= ar->wmi.peer_flags->bw40; 2268 arg->peer_rate_caps |= WMI_RC_CW40_FLAG; 2269 } 2270 2271 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { 2272 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) 2273 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2274 2275 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) 2276 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2277 } 2278 2279 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { 2280 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; 2281 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2282 } 2283 2284 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { 2285 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; 2286 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; 2287 stbc = stbc << WMI_RC_RX_STBC_FLAG_S; 2288 arg->peer_rate_caps |= stbc; 2289 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2290 } 2291 2292 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) 2293 arg->peer_rate_caps |= WMI_RC_TS_FLAG; 2294 else if (ht_cap->mcs.rx_mask[1]) 2295 arg->peer_rate_caps |= WMI_RC_DS_FLAG; 2296 2297 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) 2298 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && 2299 (ht_mcs_mask[i / 8] & BIT(i % 8))) { 2300 max_nss = (i / 8) + 1; 2301 arg->peer_ht_rates.rates[n++] = i; 2302 } 2303 2304 /* 2305 * This is a workaround for HT-enabled STAs which break the spec 2306 * and have no HT capabilities RX mask (no HT RX MCS map). 2307 * 2308 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), 2309 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. 2310 * 2311 * Firmware asserts if such situation occurs. 2312 */ 2313 if (n == 0) { 2314 arg->peer_ht_rates.num_rates = 8; 2315 for (i = 0; i < arg->peer_ht_rates.num_rates; i++) 2316 arg->peer_ht_rates.rates[i] = i; 2317 } else { 2318 arg->peer_ht_rates.num_rates = n; 2319 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2320 } 2321 2322 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", 2323 arg->addr, 2324 arg->peer_ht_rates.num_rates, 2325 arg->peer_num_spatial_streams); 2326 } 2327 2328 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, 2329 struct ath10k_vif *arvif, 2330 struct ieee80211_sta *sta) 2331 { 2332 u32 uapsd = 0; 2333 u32 max_sp = 0; 2334 int ret = 0; 2335 2336 lockdep_assert_held(&ar->conf_mutex); 2337 2338 if (sta->wme && sta->uapsd_queues) { 2339 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", 2340 sta->uapsd_queues, sta->max_sp); 2341 2342 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 2343 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 2344 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; 2345 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 2346 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | 2347 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; 2348 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 2349 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | 2350 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; 2351 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 2352 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | 2353 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; 2354 2355 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) 2356 max_sp = sta->max_sp; 2357 2358 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2359 sta->addr, 2360 WMI_AP_PS_PEER_PARAM_UAPSD, 2361 uapsd); 2362 if (ret) { 2363 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", 2364 arvif->vdev_id, ret); 2365 return ret; 2366 } 2367 2368 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2369 sta->addr, 2370 
WMI_AP_PS_PEER_PARAM_MAX_SP, 2371 max_sp); 2372 if (ret) { 2373 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", 2374 arvif->vdev_id, ret); 2375 return ret; 2376 } 2377 2378 /* TODO setup this based on STA listen interval and 2379 * beacon interval. Currently we don't know 2380 * sta->listen_interval - mac80211 patch required. 2381 * Currently use 10 seconds 2382 */ 2383 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, 2384 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 2385 10); 2386 if (ret) { 2387 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", 2388 arvif->vdev_id, ret); 2389 return ret; 2390 } 2391 } 2392 2393 return 0; 2394 } 2395 2396 static u16 2397 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set, 2398 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) 2399 { 2400 int idx_limit; 2401 int nss; 2402 u16 mcs_map; 2403 u16 mcs; 2404 2405 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { 2406 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & 2407 vht_mcs_limit[nss]; 2408 2409 if (mcs_map) 2410 idx_limit = fls(mcs_map) - 1; 2411 else 2412 idx_limit = -1; 2413 2414 switch (idx_limit) { 2415 case 0: /* fall through */ 2416 case 1: /* fall through */ 2417 case 2: /* fall through */ 2418 case 3: /* fall through */ 2419 case 4: /* fall through */ 2420 case 5: /* fall through */ 2421 case 6: /* fall through */ 2422 default: 2423 /* see ath10k_mac_can_set_bitrate_mask() */ 2424 WARN_ON(1); 2425 /* fall through */ 2426 case -1: 2427 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; 2428 break; 2429 case 7: 2430 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; 2431 break; 2432 case 8: 2433 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; 2434 break; 2435 case 9: 2436 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; 2437 break; 2438 } 2439 2440 tx_mcs_set &= ~(0x3 << (nss * 2)); 2441 tx_mcs_set |= mcs << (nss * 2); 2442 } 2443 2444 return tx_mcs_set; 2445 } 2446 2447 static void ath10k_peer_assoc_h_vht(struct ath10k *ar, 2448 struct ieee80211_vif *vif, 2449 struct ieee80211_sta *sta, 2450 struct wmi_peer_assoc_complete_arg *arg) 2451 { 2452 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 2453 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2454 struct cfg80211_chan_def def; 2455 enum nl80211_band band; 2456 const u16 *vht_mcs_mask; 2457 u8 ampdu_factor; 2458 u8 max_nss, vht_mcs; 2459 int i; 2460 2461 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2462 return; 2463 2464 if (!vht_cap->vht_supported) 2465 return; 2466 2467 band = def.chan->band; 2468 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2469 2470 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2471 return; 2472 2473 arg->peer_flags |= ar->wmi.peer_flags->vht; 2474 2475 if (def.chan->band == NL80211_BAND_2GHZ) 2476 arg->peer_flags |= ar->wmi.peer_flags->vht_2g; 2477 2478 arg->peer_vht_caps = vht_cap->cap; 2479 2480 ampdu_factor = (vht_cap->cap & 2481 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> 2482 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 2483 2484 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to 2485 * zero in VHT IE. Using it would result in degraded throughput. 2486 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep 2487 * it if VHT max_mpdu is smaller. 
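 * With IEEE80211_HT_MAX_AMPDU_FACTOR being 13, the computation below
 * yields 2^(13 + ampdu_factor) - 1 bytes, so a (bogus) exponent of 0
 * would cap A-MPDUs at 8191 bytes whereas the maximum exponent of 7
 * allows 2^20 - 1 bytes; max() keeps the HT-derived limit whenever the
 * VHT-derived one is smaller.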
2488 */ 2489 arg->peer_max_mpdu = max(arg->peer_max_mpdu, 2490 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2491 ampdu_factor)) - 1); 2492 2493 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2494 arg->peer_flags |= ar->wmi.peer_flags->bw80; 2495 2496 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) 2497 arg->peer_flags |= ar->wmi.peer_flags->bw160; 2498 2499 /* Calculate peer NSS capability from VHT capabilities if STA 2500 * supports VHT. 2501 */ 2502 for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) { 2503 vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> 2504 (2 * i) & 3; 2505 2506 if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) && 2507 vht_mcs_mask[i]) 2508 max_nss = i + 1; 2509 } 2510 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2511 arg->peer_vht_rates.rx_max_rate = 2512 __le16_to_cpu(vht_cap->vht_mcs.rx_highest); 2513 arg->peer_vht_rates.rx_mcs_set = 2514 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); 2515 arg->peer_vht_rates.tx_max_rate = 2516 __le16_to_cpu(vht_cap->vht_mcs.tx_highest); 2517 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit( 2518 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); 2519 2520 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", 2521 sta->addr, arg->peer_max_mpdu, arg->peer_flags); 2522 } 2523 2524 static void ath10k_peer_assoc_h_qos(struct ath10k *ar, 2525 struct ieee80211_vif *vif, 2526 struct ieee80211_sta *sta, 2527 struct wmi_peer_assoc_complete_arg *arg) 2528 { 2529 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2530 2531 switch (arvif->vdev_type) { 2532 case WMI_VDEV_TYPE_AP: 2533 if (sta->wme) 2534 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2535 2536 if (sta->wme && sta->uapsd_queues) { 2537 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd; 2538 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; 2539 } 2540 break; 2541 case WMI_VDEV_TYPE_STA: 2542 if (vif->bss_conf.qos) 2543 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2544 break; 2545 case WMI_VDEV_TYPE_IBSS: 2546 if (sta->wme) 2547 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2548 break; 2549 default: 2550 break; 2551 } 2552 2553 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", 2554 sta->addr, !!(arg->peer_flags & 2555 arvif->ar->wmi.peer_flags->qos)); 2556 } 2557 2558 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) 2559 { 2560 return sta->supp_rates[NL80211_BAND_2GHZ] >> 2561 ATH10K_MAC_FIRST_OFDM_RATE_IDX; 2562 } 2563 2564 static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, 2565 struct ieee80211_sta *sta) 2566 { 2567 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { 2568 switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 2569 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 2570 return MODE_11AC_VHT160; 2571 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: 2572 return MODE_11AC_VHT80_80; 2573 default: 2574 /* not sure if this is a valid case? 
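 * (A STA indicating 160 MHz operating bandwidth should also set one
 * of the SUPP_CHAN_WIDTH capability values handled above, so fall
 * back to plain VHT160 as a best-effort guess.)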
*/ 2575 return MODE_11AC_VHT160; 2576 } 2577 } 2578 2579 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2580 return MODE_11AC_VHT80; 2581 2582 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2583 return MODE_11AC_VHT40; 2584 2585 if (sta->bandwidth == IEEE80211_STA_RX_BW_20) 2586 return MODE_11AC_VHT20; 2587 2588 return MODE_UNKNOWN; 2589 } 2590 2591 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, 2592 struct ieee80211_vif *vif, 2593 struct ieee80211_sta *sta, 2594 struct wmi_peer_assoc_complete_arg *arg) 2595 { 2596 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2597 struct cfg80211_chan_def def; 2598 enum nl80211_band band; 2599 const u8 *ht_mcs_mask; 2600 const u16 *vht_mcs_mask; 2601 enum wmi_phy_mode phymode = MODE_UNKNOWN; 2602 2603 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2604 return; 2605 2606 band = def.chan->band; 2607 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2608 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2609 2610 switch (band) { 2611 case NL80211_BAND_2GHZ: 2612 if (sta->vht_cap.vht_supported && 2613 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2614 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2615 phymode = MODE_11AC_VHT40; 2616 else 2617 phymode = MODE_11AC_VHT20; 2618 } else if (sta->ht_cap.ht_supported && 2619 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2620 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2621 phymode = MODE_11NG_HT40; 2622 else 2623 phymode = MODE_11NG_HT20; 2624 } else if (ath10k_mac_sta_has_ofdm_only(sta)) { 2625 phymode = MODE_11G; 2626 } else { 2627 phymode = MODE_11B; 2628 } 2629 2630 break; 2631 case NL80211_BAND_5GHZ: 2632 /* 2633 * Check VHT first. 2634 */ 2635 if (sta->vht_cap.vht_supported && 2636 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2637 phymode = ath10k_mac_get_phymode_vht(ar, sta); 2638 } else if (sta->ht_cap.ht_supported && 2639 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2640 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) 2641 phymode = MODE_11NA_HT40; 2642 else 2643 phymode = MODE_11NA_HT20; 2644 } else { 2645 phymode = MODE_11A; 2646 } 2647 2648 break; 2649 default: 2650 break; 2651 } 2652 2653 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", 2654 sta->addr, ath10k_wmi_phymode_str(phymode)); 2655 2656 arg->peer_phymode = phymode; 2657 WARN_ON(phymode == MODE_UNKNOWN); 2658 } 2659 2660 static int ath10k_peer_assoc_prepare(struct ath10k *ar, 2661 struct ieee80211_vif *vif, 2662 struct ieee80211_sta *sta, 2663 struct wmi_peer_assoc_complete_arg *arg) 2664 { 2665 lockdep_assert_held(&ar->conf_mutex); 2666 2667 memset(arg, 0, sizeof(*arg)); 2668 2669 ath10k_peer_assoc_h_basic(ar, vif, sta, arg); 2670 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg); 2671 ath10k_peer_assoc_h_rates(ar, vif, sta, arg); 2672 ath10k_peer_assoc_h_ht(ar, vif, sta, arg); 2673 ath10k_peer_assoc_h_vht(ar, vif, sta, arg); 2674 ath10k_peer_assoc_h_qos(ar, vif, sta, arg); 2675 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); 2676 2677 return 0; 2678 } 2679 2680 static const u32 ath10k_smps_map[] = { 2681 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, 2682 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, 2683 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 2684 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 2685 }; 2686 2687 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, 2688 const u8 *addr, 2689 const struct ieee80211_sta_ht_cap *ht_cap) 2690 { 2691 int smps; 2692 2693 if (!ht_cap->ht_supported) 2694 return 0; 2695 2696 smps = ht_cap->cap & 
IEEE80211_HT_CAP_SM_PS; 2697 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; 2698 2699 if (smps >= ARRAY_SIZE(ath10k_smps_map)) 2700 return -EINVAL; 2701 2702 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, 2703 WMI_PEER_SMPS_STATE, 2704 ath10k_smps_map[smps]); 2705 } 2706 2707 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, 2708 struct ieee80211_vif *vif, 2709 struct ieee80211_sta_vht_cap vht_cap) 2710 { 2711 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2712 int ret; 2713 u32 param; 2714 u32 value; 2715 2716 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC) 2717 return 0; 2718 2719 if (!(ar->vht_cap_info & 2720 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2721 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 2722 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2723 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) 2724 return 0; 2725 2726 param = ar->wmi.vdev_param->txbf; 2727 value = 0; 2728 2729 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED)) 2730 return 0; 2731 2732 /* The following logic is correct. If a remote STA advertises support 2733 * for being a beamformer then we should enable us being a beamformee. 2734 */ 2735 2736 if (ar->vht_cap_info & 2737 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2738 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 2739 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 2740 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2741 2742 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 2743 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; 2744 } 2745 2746 if (ar->vht_cap_info & 2747 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2748 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 2749 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 2750 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2751 2752 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 2753 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; 2754 } 2755 2756 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE) 2757 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2758 2759 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER) 2760 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2761 2762 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); 2763 if (ret) { 2764 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", 2765 value, ret); 2766 return ret; 2767 } 2768 2769 return 0; 2770 } 2771 2772 /* can be called only in mac80211 callbacks due to `key_count` usage */ 2773 static void ath10k_bss_assoc(struct ieee80211_hw *hw, 2774 struct ieee80211_vif *vif, 2775 struct ieee80211_bss_conf *bss_conf) 2776 { 2777 struct ath10k *ar = hw->priv; 2778 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2779 struct ieee80211_sta_ht_cap ht_cap; 2780 struct ieee80211_sta_vht_cap vht_cap; 2781 struct wmi_peer_assoc_complete_arg peer_arg; 2782 struct ieee80211_sta *ap_sta; 2783 int ret; 2784 2785 lockdep_assert_held(&ar->conf_mutex); 2786 2787 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 2788 arvif->vdev_id, arvif->bssid, arvif->aid); 2789 2790 rcu_read_lock(); 2791 2792 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2793 if (!ap_sta) { 2794 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", 2795 bss_conf->bssid, arvif->vdev_id); 2796 rcu_read_unlock(); 2797 return; 2798 } 2799 2800 /* ap_sta must be accessed only within rcu section which must be left 2801 * before calling ath10k_setup_peer_smps() which might sleep. 
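 * The HT/VHT caps are therefore copied by value here so the RCU read
 * lock can be dropped before the sleeping WMI calls below.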
2802 */ 2803 ht_cap = ap_sta->ht_cap; 2804 vht_cap = ap_sta->vht_cap; 2805 2806 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); 2807 if (ret) { 2808 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", 2809 bss_conf->bssid, arvif->vdev_id, ret); 2810 rcu_read_unlock(); 2811 return; 2812 } 2813 2814 rcu_read_unlock(); 2815 2816 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2817 if (ret) { 2818 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", 2819 bss_conf->bssid, arvif->vdev_id, ret); 2820 return; 2821 } 2822 2823 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); 2824 if (ret) { 2825 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", 2826 arvif->vdev_id, ret); 2827 return; 2828 } 2829 2830 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2831 if (ret) { 2832 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", 2833 arvif->vdev_id, bss_conf->bssid, ret); 2834 return; 2835 } 2836 2837 ath10k_dbg(ar, ATH10K_DBG_MAC, 2838 "mac vdev %d up (associated) bssid %pM aid %d\n", 2839 arvif->vdev_id, bss_conf->bssid, bss_conf->aid); 2840 2841 WARN_ON(arvif->is_up); 2842 2843 arvif->aid = bss_conf->aid; 2844 ether_addr_copy(arvif->bssid, bss_conf->bssid); 2845 2846 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 2847 if (ret) { 2848 ath10k_warn(ar, "failed to set vdev %d up: %d\n", 2849 arvif->vdev_id, ret); 2850 return; 2851 } 2852 2853 arvif->is_up = true; 2854 2855 /* Workaround: Some firmware revisions (tested with qca6174 2856 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be 2857 * poked with peer param command. 2858 */ 2859 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, 2860 WMI_PEER_DUMMY_VAR, 1); 2861 if (ret) { 2862 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", 2863 arvif->bssid, arvif->vdev_id, ret); 2864 return; 2865 } 2866 } 2867 2868 static void ath10k_bss_disassoc(struct ieee80211_hw *hw, 2869 struct ieee80211_vif *vif) 2870 { 2871 struct ath10k *ar = hw->priv; 2872 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2873 struct ieee80211_sta_vht_cap vht_cap = {}; 2874 int ret; 2875 2876 lockdep_assert_held(&ar->conf_mutex); 2877 2878 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", 2879 arvif->vdev_id, arvif->bssid); 2880 2881 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 2882 if (ret) 2883 ath10k_warn(ar, "failed to down vdev %i: %d\n", 2884 arvif->vdev_id, ret); 2885 2886 arvif->def_wep_key_idx = -1; 2887 2888 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2889 if (ret) { 2890 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", 2891 arvif->vdev_id, ret); 2892 return; 2893 } 2894 2895 arvif->is_up = false; 2896 2897 cancel_delayed_work_sync(&arvif->connection_loss_work); 2898 } 2899 2900 static int ath10k_station_assoc(struct ath10k *ar, 2901 struct ieee80211_vif *vif, 2902 struct ieee80211_sta *sta, 2903 bool reassoc) 2904 { 2905 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2906 struct wmi_peer_assoc_complete_arg peer_arg; 2907 int ret = 0; 2908 2909 lockdep_assert_held(&ar->conf_mutex); 2910 2911 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); 2912 if (ret) { 2913 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", 2914 sta->addr, arvif->vdev_id, ret); 2915 return ret; 2916 } 2917 2918 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2919 if (ret) { 2920 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", 2921 
sta->addr, arvif->vdev_id, ret); 2922 return ret; 2923 } 2924 2925 /* Re-assoc is run only to update supported rates for given station. It 2926 * doesn't make much sense to reconfigure the peer completely. 2927 */ 2928 if (!reassoc) { 2929 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, 2930 &sta->ht_cap); 2931 if (ret) { 2932 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", 2933 arvif->vdev_id, ret); 2934 return ret; 2935 } 2936 2937 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); 2938 if (ret) { 2939 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", 2940 sta->addr, arvif->vdev_id, ret); 2941 return ret; 2942 } 2943 2944 if (!sta->wme) { 2945 arvif->num_legacy_stations++; 2946 ret = ath10k_recalc_rtscts_prot(arvif); 2947 if (ret) { 2948 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2949 arvif->vdev_id, ret); 2950 return ret; 2951 } 2952 } 2953 2954 /* Plumb cached keys only for static WEP */ 2955 if (arvif->def_wep_key_idx != -1) { 2956 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 2957 if (ret) { 2958 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", 2959 arvif->vdev_id, ret); 2960 return ret; 2961 } 2962 } 2963 } 2964 2965 return ret; 2966 } 2967 2968 static int ath10k_station_disassoc(struct ath10k *ar, 2969 struct ieee80211_vif *vif, 2970 struct ieee80211_sta *sta) 2971 { 2972 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2973 int ret = 0; 2974 2975 lockdep_assert_held(&ar->conf_mutex); 2976 2977 if (!sta->wme) { 2978 arvif->num_legacy_stations--; 2979 ret = ath10k_recalc_rtscts_prot(arvif); 2980 if (ret) { 2981 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2982 arvif->vdev_id, ret); 2983 return ret; 2984 } 2985 } 2986 2987 ret = ath10k_clear_peer_keys(arvif, sta->addr); 2988 if (ret) { 2989 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", 2990 arvif->vdev_id, ret); 2991 return ret; 2992 } 2993 2994 return ret; 2995 } 2996 2997 /**************/ 2998 /* Regulatory */ 2999 /**************/ 3000 3001 static int ath10k_update_channel_list(struct ath10k *ar) 3002 { 3003 struct ieee80211_hw *hw = ar->hw; 3004 struct ieee80211_supported_band **bands; 3005 enum nl80211_band band; 3006 struct ieee80211_channel *channel; 3007 struct wmi_scan_chan_list_arg arg = {0}; 3008 struct wmi_channel_arg *ch; 3009 bool passive; 3010 int len; 3011 int ret; 3012 int i; 3013 3014 lockdep_assert_held(&ar->conf_mutex); 3015 3016 bands = hw->wiphy->bands; 3017 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3018 if (!bands[band]) 3019 continue; 3020 3021 for (i = 0; i < bands[band]->n_channels; i++) { 3022 if (bands[band]->channels[i].flags & 3023 IEEE80211_CHAN_DISABLED) 3024 continue; 3025 3026 arg.n_channels++; 3027 } 3028 } 3029 3030 len = sizeof(struct wmi_channel_arg) * arg.n_channels; 3031 arg.channels = kzalloc(len, GFP_KERNEL); 3032 if (!arg.channels) 3033 return -ENOMEM; 3034 3035 ch = arg.channels; 3036 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3037 if (!bands[band]) 3038 continue; 3039 3040 for (i = 0; i < bands[band]->n_channels; i++) { 3041 channel = &bands[band]->channels[i]; 3042 3043 if (channel->flags & IEEE80211_CHAN_DISABLED) 3044 continue; 3045 3046 ch->allow_ht = true; 3047 3048 /* FIXME: when should we really allow VHT? 
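 * (Presumably this should depend on the band and the regulatory rules
 * for the channel, but for now VHT is allowed unconditionally,
 * mirroring the unconditional allow_ht above.)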
*/ 3049 ch->allow_vht = true; 3050 3051 ch->allow_ibss = 3052 !(channel->flags & IEEE80211_CHAN_NO_IR); 3053 3054 ch->ht40plus = 3055 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); 3056 3057 ch->chan_radar = 3058 !!(channel->flags & IEEE80211_CHAN_RADAR); 3059 3060 passive = channel->flags & IEEE80211_CHAN_NO_IR; 3061 ch->passive = passive; 3062 3063 ch->freq = channel->center_freq; 3064 ch->band_center_freq1 = channel->center_freq; 3065 ch->min_power = 0; 3066 ch->max_power = channel->max_power * 2; 3067 ch->max_reg_power = channel->max_reg_power * 2; 3068 ch->max_antenna_gain = channel->max_antenna_gain * 2; 3069 ch->reg_class_id = 0; /* FIXME */ 3070 3071 /* FIXME: why use only legacy modes, why not any 3072 * HT/VHT modes? Would that even make any 3073 * difference? 3074 */ 3075 if (channel->band == NL80211_BAND_2GHZ) 3076 ch->mode = MODE_11G; 3077 else 3078 ch->mode = MODE_11A; 3079 3080 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) 3081 continue; 3082 3083 ath10k_dbg(ar, ATH10K_DBG_WMI, 3084 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", 3085 ch - arg.channels, arg.n_channels, 3086 ch->freq, ch->max_power, ch->max_reg_power, 3087 ch->max_antenna_gain, ch->mode); 3088 3089 ch++; 3090 } 3091 } 3092 3093 ret = ath10k_wmi_scan_chan_list(ar, &arg); 3094 kfree(arg.channels); 3095 3096 return ret; 3097 } 3098 3099 static enum wmi_dfs_region 3100 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region) 3101 { 3102 switch (dfs_region) { 3103 case NL80211_DFS_UNSET: 3104 return WMI_UNINIT_DFS_DOMAIN; 3105 case NL80211_DFS_FCC: 3106 return WMI_FCC_DFS_DOMAIN; 3107 case NL80211_DFS_ETSI: 3108 return WMI_ETSI_DFS_DOMAIN; 3109 case NL80211_DFS_JP: 3110 return WMI_MKK4_DFS_DOMAIN; 3111 } 3112 return WMI_UNINIT_DFS_DOMAIN; 3113 } 3114 3115 static void ath10k_regd_update(struct ath10k *ar) 3116 { 3117 struct reg_dmn_pair_mapping *regpair; 3118 int ret; 3119 enum wmi_dfs_region wmi_dfs_reg; 3120 enum nl80211_dfs_regions nl_dfs_reg; 3121 3122 lockdep_assert_held(&ar->conf_mutex); 3123 3124 ret = ath10k_update_channel_list(ar); 3125 if (ret) 3126 ath10k_warn(ar, "failed to update channel list: %d\n", ret); 3127 3128 regpair = ar->ath_common.regulatory.regpair; 3129 3130 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3131 nl_dfs_reg = ar->dfs_detector->region; 3132 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); 3133 } else { 3134 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN; 3135 } 3136 3137 /* Target allows setting up per-band regdomain but ath_common provides 3138 * a combined one only 3139 */ 3140 ret = ath10k_wmi_pdev_set_regdomain(ar, 3141 regpair->reg_domain, 3142 regpair->reg_domain, /* 2ghz */ 3143 regpair->reg_domain, /* 5ghz */ 3144 regpair->reg_2ghz_ctl, 3145 regpair->reg_5ghz_ctl, 3146 wmi_dfs_reg); 3147 if (ret) 3148 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); 3149 } 3150 3151 static void ath10k_mac_update_channel_list(struct ath10k *ar, 3152 struct ieee80211_supported_band *band) 3153 { 3154 int i; 3155 3156 if (ar->low_5ghz_chan && ar->high_5ghz_chan) { 3157 for (i = 0; i < band->n_channels; i++) { 3158 if (band->channels[i].center_freq < ar->low_5ghz_chan || 3159 band->channels[i].center_freq > ar->high_5ghz_chan) 3160 band->channels[i].flags |= 3161 IEEE80211_CHAN_DISABLED; 3162 } 3163 } 3164 } 3165 3166 static void ath10k_reg_notifier(struct wiphy *wiphy, 3167 struct regulatory_request *request) 3168 { 3169 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 3170 struct ath10k *ar = hw->priv; 3171 bool result; 3172 
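	/* Hand the hint to ath_common first; the firmware regdomain and
	 * channel list are reprogrammed further below, under conf_mutex and
	 * only while the device is running.
	 */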
3173 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); 3174 3175 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3176 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", 3177 request->dfs_region); 3178 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, 3179 request->dfs_region); 3180 if (!result) 3181 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", 3182 request->dfs_region); 3183 } 3184 3185 mutex_lock(&ar->conf_mutex); 3186 if (ar->state == ATH10K_STATE_ON) 3187 ath10k_regd_update(ar); 3188 mutex_unlock(&ar->conf_mutex); 3189 3190 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 3191 ath10k_mac_update_channel_list(ar, 3192 ar->hw->wiphy->bands[NL80211_BAND_5GHZ]); 3193 } 3194 3195 /***************/ 3196 /* TX handlers */ 3197 /***************/ 3198 3199 enum ath10k_mac_tx_path { 3200 ATH10K_MAC_TX_HTT, 3201 ATH10K_MAC_TX_HTT_MGMT, 3202 ATH10K_MAC_TX_WMI_MGMT, 3203 ATH10K_MAC_TX_UNKNOWN, 3204 }; 3205 3206 void ath10k_mac_tx_lock(struct ath10k *ar, int reason) 3207 { 3208 lockdep_assert_held(&ar->htt.tx_lock); 3209 3210 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3211 ar->tx_paused |= BIT(reason); 3212 ieee80211_stop_queues(ar->hw); 3213 } 3214 3215 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, 3216 struct ieee80211_vif *vif) 3217 { 3218 struct ath10k *ar = data; 3219 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3220 3221 if (arvif->tx_paused) 3222 return; 3223 3224 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3225 } 3226 3227 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason) 3228 { 3229 lockdep_assert_held(&ar->htt.tx_lock); 3230 3231 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3232 ar->tx_paused &= ~BIT(reason); 3233 3234 if (ar->tx_paused) 3235 return; 3236 3237 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3238 IEEE80211_IFACE_ITER_RESUME_ALL, 3239 ath10k_mac_tx_unlock_iter, 3240 ar); 3241 3242 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue); 3243 } 3244 3245 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason) 3246 { 3247 struct ath10k *ar = arvif->ar; 3248 3249 lockdep_assert_held(&ar->htt.tx_lock); 3250 3251 WARN_ON(reason >= BITS_PER_LONG); 3252 arvif->tx_paused |= BIT(reason); 3253 ieee80211_stop_queue(ar->hw, arvif->vdev_id); 3254 } 3255 3256 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason) 3257 { 3258 struct ath10k *ar = arvif->ar; 3259 3260 lockdep_assert_held(&ar->htt.tx_lock); 3261 3262 WARN_ON(reason >= BITS_PER_LONG); 3263 arvif->tx_paused &= ~BIT(reason); 3264 3265 if (ar->tx_paused) 3266 return; 3267 3268 if (arvif->tx_paused) 3269 return; 3270 3271 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3272 } 3273 3274 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif, 3275 enum wmi_tlv_tx_pause_id pause_id, 3276 enum wmi_tlv_tx_pause_action action) 3277 { 3278 struct ath10k *ar = arvif->ar; 3279 3280 lockdep_assert_held(&ar->htt.tx_lock); 3281 3282 switch (action) { 3283 case WMI_TLV_TX_PAUSE_ACTION_STOP: 3284 ath10k_mac_vif_tx_lock(arvif, pause_id); 3285 break; 3286 case WMI_TLV_TX_PAUSE_ACTION_WAKE: 3287 ath10k_mac_vif_tx_unlock(arvif, pause_id); 3288 break; 3289 default: 3290 ath10k_dbg(ar, ATH10K_DBG_BOOT, 3291 "received unknown tx pause action %d on vdev %i, ignoring\n", 3292 action, arvif->vdev_id); 3293 break; 3294 } 3295 } 3296 3297 struct ath10k_mac_tx_pause { 3298 u32 vdev_id; 3299 enum wmi_tlv_tx_pause_id pause_id; 3300 enum wmi_tlv_tx_pause_action action; 3301 }; 3302 3303 static void 
ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac, 3304 struct ieee80211_vif *vif) 3305 { 3306 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3307 struct ath10k_mac_tx_pause *arg = data; 3308 3309 if (arvif->vdev_id != arg->vdev_id) 3310 return; 3311 3312 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action); 3313 } 3314 3315 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, 3316 enum wmi_tlv_tx_pause_id pause_id, 3317 enum wmi_tlv_tx_pause_action action) 3318 { 3319 struct ath10k_mac_tx_pause arg = { 3320 .vdev_id = vdev_id, 3321 .pause_id = pause_id, 3322 .action = action, 3323 }; 3324 3325 spin_lock_bh(&ar->htt.tx_lock); 3326 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3327 IEEE80211_IFACE_ITER_RESUME_ALL, 3328 ath10k_mac_handle_tx_pause_iter, 3329 &arg); 3330 spin_unlock_bh(&ar->htt.tx_lock); 3331 } 3332 3333 static enum ath10k_hw_txrx_mode 3334 ath10k_mac_tx_h_get_txmode(struct ath10k *ar, 3335 struct ieee80211_vif *vif, 3336 struct ieee80211_sta *sta, 3337 struct sk_buff *skb) 3338 { 3339 const struct ieee80211_hdr *hdr = (void *)skb->data; 3340 __le16 fc = hdr->frame_control; 3341 3342 if (!vif || vif->type == NL80211_IFTYPE_MONITOR) 3343 return ATH10K_HW_TXRX_RAW; 3344 3345 if (ieee80211_is_mgmt(fc)) 3346 return ATH10K_HW_TXRX_MGMT; 3347 3348 /* Workaround: 3349 * 3350 * NullFunc frames are mostly used to check whether a client or AP is 3351 * still reachable and responsive. This implies tx status reports must 3352 * be accurate - otherwise either mac80211 or userspace (e.g. hostapd) 3353 * can conclude that the other end has disappeared and tear down the 3354 * BSS connection, or it can fail to ever disconnect from the BSS/client 3355 * (the latter is what happens in practice). 3356 * 3357 * Firmware with HTT older than 3.0 delivers incorrect tx status for 3358 * NullFunc frames to the driver. However, there's an HTT Mgmt Tx command 3359 * which seems to deliver correct tx reports for NullFunc frames. The 3360 * downside of using it is that it ignores client powersave state, so it 3361 * can end up disconnecting sleeping clients in AP mode. It should fix 3362 * STA mode though, because APs don't sleep. 3363 */ 3364 if (ar->htt.target_version_major < 3 && 3365 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && 3366 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3367 ar->running_fw->fw_file.fw_features)) 3368 return ATH10K_HW_TXRX_MGMT; 3369 3370 /* Workaround: 3371 * 3372 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for 3373 * the NativeWifi txmode - they select the AP key instead of the peer key. 3374 * Key selection seems to work with the Ethernet txmode, so use that. 3375 * 3376 * FIXME: Check if raw mode works with TDLS.
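 * (The check below therefore switches only data frames destined to a
 * TDLS peer (sta->tdls) over to Ethernet txmode; all other frames keep
 * the regular txmode selection.)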
3377 */ 3378 if (ieee80211_is_data_present(fc) && sta && sta->tdls) 3379 return ATH10K_HW_TXRX_ETHERNET; 3380 3381 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 3382 return ATH10K_HW_TXRX_RAW; 3383 3384 return ATH10K_HW_TXRX_NATIVE_WIFI; 3385 } 3386 3387 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, 3388 struct sk_buff *skb) 3389 { 3390 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3391 const struct ieee80211_hdr *hdr = (void *)skb->data; 3392 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | 3393 IEEE80211_TX_CTL_INJECTED; 3394 3395 if (!ieee80211_has_protected(hdr->frame_control)) 3396 return false; 3397 3398 if ((info->flags & mask) == mask) 3399 return false; 3400 3401 if (vif) 3402 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt; 3403 3404 return true; 3405 } 3406 3407 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS 3408 * Control in the header. 3409 */ 3410 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) 3411 { 3412 struct ieee80211_hdr *hdr = (void *)skb->data; 3413 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3414 u8 *qos_ctl; 3415 3416 if (!ieee80211_is_data_qos(hdr->frame_control)) 3417 return; 3418 3419 qos_ctl = ieee80211_get_qos_ctl(hdr); 3420 memmove(skb->data + IEEE80211_QOS_CTL_LEN, 3421 skb->data, (void *)qos_ctl - (void *)skb->data); 3422 skb_pull(skb, IEEE80211_QOS_CTL_LEN); 3423 3424 /* Some firmware revisions don't handle sending QoS NullFunc well. 3425 * These frames are mainly used for CQM purposes, so it doesn't really 3426 * matter whether a QoS NullFunc or a plain NullFunc is sent. 3427 */ 3428 hdr = (void *)skb->data; 3429 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) 3430 cb->flags &= ~ATH10K_SKB_F_QOS; 3431 3432 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 3433 } 3434 3435 static void ath10k_tx_h_8023(struct sk_buff *skb) 3436 { 3437 struct ieee80211_hdr *hdr; 3438 struct rfc1042_hdr *rfc1042; 3439 struct ethhdr *eth; 3440 size_t hdrlen; 3441 u8 da[ETH_ALEN]; 3442 u8 sa[ETH_ALEN]; 3443 __be16 type; 3444 3445 hdr = (void *)skb->data; 3446 hdrlen = ieee80211_hdrlen(hdr->frame_control); 3447 rfc1042 = (void *)skb->data + hdrlen; 3448 3449 ether_addr_copy(da, ieee80211_get_DA(hdr)); 3450 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 3451 type = rfc1042->snap_type; 3452 3453 skb_pull(skb, hdrlen + sizeof(*rfc1042)); 3454 skb_push(skb, sizeof(*eth)); 3455 3456 eth = (void *)skb->data; 3457 ether_addr_copy(eth->h_dest, da); 3458 ether_addr_copy(eth->h_source, sa); 3459 eth->h_proto = type; 3460 } 3461 3462 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, 3463 struct ieee80211_vif *vif, 3464 struct sk_buff *skb) 3465 { 3466 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3467 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3468 3469 /* This only applies to P2P GO interfaces */ 3470 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 3471 return; 3472 3473 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { 3474 spin_lock_bh(&ar->data_lock); 3475 if (arvif->u.ap.noa_data) 3476 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, 3477 GFP_ATOMIC)) 3478 memcpy(skb_put(skb, arvif->u.ap.noa_len), 3479 arvif->u.ap.noa_data, 3480 arvif->u.ap.noa_len); 3481 spin_unlock_bh(&ar->data_lock); 3482 } 3483 } 3484 3485 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, 3486 struct ieee80211_vif *vif, 3487 struct ieee80211_txq *txq, 3488 struct sk_buff *skb) 3489 { 3490 struct ieee80211_hdr *hdr = (void *)skb->data; 3491 struct ath10k_skb_cb *cb
= ATH10K_SKB_CB(skb); 3492 3493 cb->flags = 0; 3494 if (!ath10k_tx_h_use_hwcrypto(vif, skb)) 3495 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3496 3497 if (ieee80211_is_mgmt(hdr->frame_control)) 3498 cb->flags |= ATH10K_SKB_F_MGMT; 3499 3500 if (ieee80211_is_data_qos(hdr->frame_control)) 3501 cb->flags |= ATH10K_SKB_F_QOS; 3502 3503 cb->vif = vif; 3504 cb->txq = txq; 3505 } 3506 3507 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) 3508 { 3509 /* FIXME: Not really sure since when the behaviour changed. At some 3510 * point new firmware stopped requiring creation of peer entries for 3511 * offchannel tx (and actually creating them causes issues with wmi-htc 3512 * tx credit replenishment and reliability). Assuming it's at least 3.4 3513 * because that's when the `freq` was introduced to TX_FRM HTT command. 3514 */ 3515 return (ar->htt.target_version_major >= 3 && 3516 ar->htt.target_version_minor >= 4 && 3517 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV); 3518 } 3519 3520 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) 3521 { 3522 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; 3523 int ret = 0; 3524 3525 spin_lock_bh(&ar->data_lock); 3526 3527 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { 3528 ath10k_warn(ar, "wmi mgmt tx queue is full\n"); 3529 ret = -ENOSPC; 3530 goto unlock; 3531 } 3532 3533 __skb_queue_tail(q, skb); 3534 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); 3535 3536 unlock: 3537 spin_unlock_bh(&ar->data_lock); 3538 3539 return ret; 3540 } 3541 3542 static enum ath10k_mac_tx_path 3543 ath10k_mac_tx_h_get_txpath(struct ath10k *ar, 3544 struct sk_buff *skb, 3545 enum ath10k_hw_txrx_mode txmode) 3546 { 3547 switch (txmode) { 3548 case ATH10K_HW_TXRX_RAW: 3549 case ATH10K_HW_TXRX_NATIVE_WIFI: 3550 case ATH10K_HW_TXRX_ETHERNET: 3551 return ATH10K_MAC_TX_HTT; 3552 case ATH10K_HW_TXRX_MGMT: 3553 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3554 ar->running_fw->fw_file.fw_features)) 3555 return ATH10K_MAC_TX_WMI_MGMT; 3556 else if (ar->htt.target_version_major >= 3) 3557 return ATH10K_MAC_TX_HTT; 3558 else 3559 return ATH10K_MAC_TX_HTT_MGMT; 3560 } 3561 3562 return ATH10K_MAC_TX_UNKNOWN; 3563 } 3564 3565 static int ath10k_mac_tx_submit(struct ath10k *ar, 3566 enum ath10k_hw_txrx_mode txmode, 3567 enum ath10k_mac_tx_path txpath, 3568 struct sk_buff *skb) 3569 { 3570 struct ath10k_htt *htt = &ar->htt; 3571 int ret = -EINVAL; 3572 3573 switch (txpath) { 3574 case ATH10K_MAC_TX_HTT: 3575 ret = ath10k_htt_tx(htt, txmode, skb); 3576 break; 3577 case ATH10K_MAC_TX_HTT_MGMT: 3578 ret = ath10k_htt_mgmt_tx(htt, skb); 3579 break; 3580 case ATH10K_MAC_TX_WMI_MGMT: 3581 ret = ath10k_mac_tx_wmi_mgmt(ar, skb); 3582 break; 3583 case ATH10K_MAC_TX_UNKNOWN: 3584 WARN_ON_ONCE(1); 3585 ret = -EINVAL; 3586 break; 3587 } 3588 3589 if (ret) { 3590 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", 3591 ret); 3592 ieee80211_free_txskb(ar->hw, skb); 3593 } 3594 3595 return ret; 3596 } 3597 3598 /* This function consumes the sk_buff regardless of return value as far as 3599 * caller is concerned so no freeing is necessary afterwards. 
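 * On failure the skb is freed with ieee80211_free_txskb(), either
 * directly below (unsupported raw mode) or inside
 * ath10k_mac_tx_submit() when the actual submission fails.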
3600 */ 3601 static int ath10k_mac_tx(struct ath10k *ar, 3602 struct ieee80211_vif *vif, 3603 enum ath10k_hw_txrx_mode txmode, 3604 enum ath10k_mac_tx_path txpath, 3605 struct sk_buff *skb) 3606 { 3607 struct ieee80211_hw *hw = ar->hw; 3608 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3609 int ret; 3610 3611 /* We should disable CCK RATE due to P2P */ 3612 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 3613 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); 3614 3615 switch (txmode) { 3616 case ATH10K_HW_TXRX_MGMT: 3617 case ATH10K_HW_TXRX_NATIVE_WIFI: 3618 ath10k_tx_h_nwifi(hw, skb); 3619 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); 3620 ath10k_tx_h_seq_no(vif, skb); 3621 break; 3622 case ATH10K_HW_TXRX_ETHERNET: 3623 ath10k_tx_h_8023(skb); 3624 break; 3625 case ATH10K_HW_TXRX_RAW: 3626 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 3627 WARN_ON_ONCE(1); 3628 ieee80211_free_txskb(hw, skb); 3629 return -ENOTSUPP; 3630 } 3631 } 3632 3633 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { 3634 if (!ath10k_mac_tx_frm_has_freq(ar)) { 3635 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n", 3636 skb); 3637 3638 skb_queue_tail(&ar->offchan_tx_queue, skb); 3639 ieee80211_queue_work(hw, &ar->offchan_tx_work); 3640 return 0; 3641 } 3642 } 3643 3644 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb); 3645 if (ret) { 3646 ath10k_warn(ar, "failed to submit frame: %d\n", ret); 3647 return ret; 3648 } 3649 3650 return 0; 3651 } 3652 3653 void ath10k_offchan_tx_purge(struct ath10k *ar) 3654 { 3655 struct sk_buff *skb; 3656 3657 for (;;) { 3658 skb = skb_dequeue(&ar->offchan_tx_queue); 3659 if (!skb) 3660 break; 3661 3662 ieee80211_free_txskb(ar->hw, skb); 3663 } 3664 } 3665 3666 void ath10k_offchan_tx_work(struct work_struct *work) 3667 { 3668 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); 3669 struct ath10k_peer *peer; 3670 struct ath10k_vif *arvif; 3671 enum ath10k_hw_txrx_mode txmode; 3672 enum ath10k_mac_tx_path txpath; 3673 struct ieee80211_hdr *hdr; 3674 struct ieee80211_vif *vif; 3675 struct ieee80211_sta *sta; 3676 struct sk_buff *skb; 3677 const u8 *peer_addr; 3678 int vdev_id; 3679 int ret; 3680 unsigned long time_left; 3681 bool tmp_peer_created = false; 3682 3683 /* FW requirement: We must create a peer before FW will send out 3684 * an offchannel frame. Otherwise the frame will be stuck and 3685 * never transmitted. We delete the peer upon tx completion. 3686 * It is unlikely that a peer for offchannel tx will already be 3687 * present. However it may be in some rare cases so account for that. 3688 * Otherwise we might remove a legitimate peer and break stuff. 3689 */ 3690 3691 for (;;) { 3692 skb = skb_dequeue(&ar->offchan_tx_queue); 3693 if (!skb) 3694 break; 3695 3696 mutex_lock(&ar->conf_mutex); 3697 3698 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n", 3699 skb); 3700 3701 hdr = (struct ieee80211_hdr *)skb->data; 3702 peer_addr = ieee80211_get_DA(hdr); 3703 3704 spin_lock_bh(&ar->data_lock); 3705 vdev_id = ar->scan.vdev_id; 3706 peer = ath10k_peer_find(ar, vdev_id, peer_addr); 3707 spin_unlock_bh(&ar->data_lock); 3708 3709 if (peer) 3710 /* FIXME: should this use ath10k_warn()? 
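 * (A pre-existing peer is the rare but legitimate case described at
 * the top of this function; it is left untouched and only a debug
 * message is printed.)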
*/ 3711 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", 3712 peer_addr, vdev_id); 3713 3714 if (!peer) { 3715 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id, 3716 peer_addr, 3717 WMI_PEER_TYPE_DEFAULT); 3718 if (ret) 3719 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", 3720 peer_addr, vdev_id, ret); 3721 tmp_peer_created = (ret == 0); 3722 } 3723 3724 spin_lock_bh(&ar->data_lock); 3725 reinit_completion(&ar->offchan_tx_completed); 3726 ar->offchan_tx_skb = skb; 3727 spin_unlock_bh(&ar->data_lock); 3728 3729 /* It's safe to access vif and sta - conf_mutex guarantees that 3730 * sta_state() and remove_interface() are locked exclusively 3731 * out wrt to this offchannel worker. 3732 */ 3733 arvif = ath10k_get_arvif(ar, vdev_id); 3734 if (arvif) { 3735 vif = arvif->vif; 3736 sta = ieee80211_find_sta(vif, peer_addr); 3737 } else { 3738 vif = NULL; 3739 sta = NULL; 3740 } 3741 3742 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3743 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3744 3745 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3746 if (ret) { 3747 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", 3748 ret); 3749 /* not serious */ 3750 } 3751 3752 time_left = 3753 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); 3754 if (time_left == 0) 3755 ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n", 3756 skb); 3757 3758 if (!peer && tmp_peer_created) { 3759 ret = ath10k_peer_delete(ar, vdev_id, peer_addr); 3760 if (ret) 3761 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", 3762 peer_addr, vdev_id, ret); 3763 } 3764 3765 mutex_unlock(&ar->conf_mutex); 3766 } 3767 } 3768 3769 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) 3770 { 3771 struct sk_buff *skb; 3772 3773 for (;;) { 3774 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3775 if (!skb) 3776 break; 3777 3778 ieee80211_free_txskb(ar->hw, skb); 3779 } 3780 } 3781 3782 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) 3783 { 3784 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); 3785 struct sk_buff *skb; 3786 int ret; 3787 3788 for (;;) { 3789 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3790 if (!skb) 3791 break; 3792 3793 ret = ath10k_wmi_mgmt_tx(ar, skb); 3794 if (ret) { 3795 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", 3796 ret); 3797 ieee80211_free_txskb(ar->hw, skb); 3798 } 3799 } 3800 } 3801 3802 static void ath10k_mac_txq_init(struct ieee80211_txq *txq) 3803 { 3804 struct ath10k_txq *artxq; 3805 3806 if (!txq) 3807 return; 3808 3809 artxq = (void *)txq->drv_priv; 3810 INIT_LIST_HEAD(&artxq->list); 3811 } 3812 3813 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) 3814 { 3815 struct ath10k_txq *artxq; 3816 struct ath10k_skb_cb *cb; 3817 struct sk_buff *msdu; 3818 int msdu_id; 3819 3820 if (!txq) 3821 return; 3822 3823 artxq = (void *)txq->drv_priv; 3824 spin_lock_bh(&ar->txqs_lock); 3825 if (!list_empty(&artxq->list)) 3826 list_del_init(&artxq->list); 3827 spin_unlock_bh(&ar->txqs_lock); 3828 3829 spin_lock_bh(&ar->htt.tx_lock); 3830 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) { 3831 cb = ATH10K_SKB_CB(msdu); 3832 if (cb->txq == txq) 3833 cb->txq = NULL; 3834 } 3835 spin_unlock_bh(&ar->htt.tx_lock); 3836 } 3837 3838 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, 3839 u16 peer_id, 3840 u8 tid) 3841 { 3842 struct ath10k_peer *peer; 3843 3844 lockdep_assert_held(&ar->data_lock); 3845 3846 peer = ar->peer_map[peer_id]; 3847 
if (!peer) 3848 return NULL; 3849 3850 if (peer->removed) 3851 return NULL; 3852 3853 if (peer->sta) 3854 return peer->sta->txq[tid]; 3855 else if (peer->vif) 3856 return peer->vif->txq; 3857 else 3858 return NULL; 3859 } 3860 3861 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, 3862 struct ieee80211_txq *txq) 3863 { 3864 struct ath10k *ar = hw->priv; 3865 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3866 3867 /* No need to get locks */ 3868 3869 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) 3870 return true; 3871 3872 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed) 3873 return true; 3874 3875 if (artxq->num_fw_queued < artxq->num_push_allowed) 3876 return true; 3877 3878 return false; 3879 } 3880 3881 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, 3882 struct ieee80211_txq *txq) 3883 { 3884 struct ath10k *ar = hw->priv; 3885 struct ath10k_htt *htt = &ar->htt; 3886 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3887 struct ieee80211_vif *vif = txq->vif; 3888 struct ieee80211_sta *sta = txq->sta; 3889 enum ath10k_hw_txrx_mode txmode; 3890 enum ath10k_mac_tx_path txpath; 3891 struct sk_buff *skb; 3892 struct ieee80211_hdr *hdr; 3893 size_t skb_len; 3894 bool is_mgmt, is_presp; 3895 int ret; 3896 3897 spin_lock_bh(&ar->htt.tx_lock); 3898 ret = ath10k_htt_tx_inc_pending(htt); 3899 spin_unlock_bh(&ar->htt.tx_lock); 3900 3901 if (ret) 3902 return ret; 3903 3904 skb = ieee80211_tx_dequeue(hw, txq); 3905 if (!skb) { 3906 spin_lock_bh(&ar->htt.tx_lock); 3907 ath10k_htt_tx_dec_pending(htt); 3908 spin_unlock_bh(&ar->htt.tx_lock); 3909 3910 return -ENOENT; 3911 } 3912 3913 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); 3914 3915 skb_len = skb->len; 3916 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3917 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3918 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 3919 3920 if (is_mgmt) { 3921 hdr = (struct ieee80211_hdr *)skb->data; 3922 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 3923 3924 spin_lock_bh(&ar->htt.tx_lock); 3925 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 3926 3927 if (ret) { 3928 ath10k_htt_tx_dec_pending(htt); 3929 spin_unlock_bh(&ar->htt.tx_lock); 3930 return ret; 3931 } 3932 spin_unlock_bh(&ar->htt.tx_lock); 3933 } 3934 3935 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3936 if (unlikely(ret)) { 3937 ath10k_warn(ar, "failed to push frame: %d\n", ret); 3938 3939 spin_lock_bh(&ar->htt.tx_lock); 3940 ath10k_htt_tx_dec_pending(htt); 3941 if (is_mgmt) 3942 ath10k_htt_tx_mgmt_dec_pending(htt); 3943 spin_unlock_bh(&ar->htt.tx_lock); 3944 3945 return ret; 3946 } 3947 3948 spin_lock_bh(&ar->htt.tx_lock); 3949 artxq->num_fw_queued++; 3950 spin_unlock_bh(&ar->htt.tx_lock); 3951 3952 return skb_len; 3953 } 3954 3955 void ath10k_mac_tx_push_pending(struct ath10k *ar) 3956 { 3957 struct ieee80211_hw *hw = ar->hw; 3958 struct ieee80211_txq *txq; 3959 struct ath10k_txq *artxq; 3960 struct ath10k_txq *last; 3961 int ret; 3962 int max; 3963 3964 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) 3965 return; 3966 3967 spin_lock_bh(&ar->txqs_lock); 3968 rcu_read_lock(); 3969 3970 last = list_last_entry(&ar->txqs, struct ath10k_txq, list); 3971 while (!list_empty(&ar->txqs)) { 3972 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); 3973 txq = container_of((void *)artxq, struct ieee80211_txq, 3974 drv_priv); 3975 3976 /* Prevent aggressive sta/tid taking over tx queue */ 3977 max = 16; 3978 ret = 0; 3979 while (ath10k_mac_tx_can_push(hw, txq) 
&& max--) { 3980 ret = ath10k_mac_tx_push_txq(hw, txq); 3981 if (ret < 0) 3982 break; 3983 } 3984 3985 list_del_init(&artxq->list); 3986 if (ret != -ENOENT) 3987 list_add_tail(&artxq->list, &ar->txqs); 3988 3989 ath10k_htt_tx_txq_update(hw, txq); 3990 3991 if (artxq == last || (ret < 0 && ret != -ENOENT)) 3992 break; 3993 } 3994 3995 rcu_read_unlock(); 3996 spin_unlock_bh(&ar->txqs_lock); 3997 } 3998 3999 /************/ 4000 /* Scanning */ 4001 /************/ 4002 4003 void __ath10k_scan_finish(struct ath10k *ar) 4004 { 4005 lockdep_assert_held(&ar->data_lock); 4006 4007 switch (ar->scan.state) { 4008 case ATH10K_SCAN_IDLE: 4009 break; 4010 case ATH10K_SCAN_RUNNING: 4011 case ATH10K_SCAN_ABORTING: 4012 if (!ar->scan.is_roc) { 4013 struct cfg80211_scan_info info = { 4014 .aborted = (ar->scan.state == 4015 ATH10K_SCAN_ABORTING), 4016 }; 4017 4018 ieee80211_scan_completed(ar->hw, &info); 4019 } else if (ar->scan.roc_notify) { 4020 ieee80211_remain_on_channel_expired(ar->hw); 4021 } 4022 /* fall through */ 4023 case ATH10K_SCAN_STARTING: 4024 ar->scan.state = ATH10K_SCAN_IDLE; 4025 ar->scan_channel = NULL; 4026 ar->scan.roc_freq = 0; 4027 ath10k_offchan_tx_purge(ar); 4028 cancel_delayed_work(&ar->scan.timeout); 4029 complete(&ar->scan.completed); 4030 break; 4031 } 4032 } 4033 4034 void ath10k_scan_finish(struct ath10k *ar) 4035 { 4036 spin_lock_bh(&ar->data_lock); 4037 __ath10k_scan_finish(ar); 4038 spin_unlock_bh(&ar->data_lock); 4039 } 4040 4041 static int ath10k_scan_stop(struct ath10k *ar) 4042 { 4043 struct wmi_stop_scan_arg arg = { 4044 .req_id = 1, /* FIXME */ 4045 .req_type = WMI_SCAN_STOP_ONE, 4046 .u.scan_id = ATH10K_SCAN_ID, 4047 }; 4048 int ret; 4049 4050 lockdep_assert_held(&ar->conf_mutex); 4051 4052 ret = ath10k_wmi_stop_scan(ar, &arg); 4053 if (ret) { 4054 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); 4055 goto out; 4056 } 4057 4058 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); 4059 if (ret == 0) { 4060 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); 4061 ret = -ETIMEDOUT; 4062 } else if (ret > 0) { 4063 ret = 0; 4064 } 4065 4066 out: 4067 /* Scan state should be updated upon scan completion but in case 4068 * firmware fails to deliver the event (for whatever reason) it is 4069 * desired to clean up scan state anyway. Firmware may have just 4070 * dropped the scan completion event delivery due to transport pipe 4071 * being overflown with data and/or it can recover on its own before 4072 * next scan request is submitted. 4073 */ 4074 spin_lock_bh(&ar->data_lock); 4075 if (ar->scan.state != ATH10K_SCAN_IDLE) 4076 __ath10k_scan_finish(ar); 4077 spin_unlock_bh(&ar->data_lock); 4078 4079 return ret; 4080 } 4081 4082 static void ath10k_scan_abort(struct ath10k *ar) 4083 { 4084 int ret; 4085 4086 lockdep_assert_held(&ar->conf_mutex); 4087 4088 spin_lock_bh(&ar->data_lock); 4089 4090 switch (ar->scan.state) { 4091 case ATH10K_SCAN_IDLE: 4092 /* This can happen if timeout worker kicked in and called 4093 * abortion while scan completion was being processed. 
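 * Nothing to do then - the scan state is already idle.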
4094 */ 4095 break; 4096 case ATH10K_SCAN_STARTING: 4097 case ATH10K_SCAN_ABORTING: 4098 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", 4099 ath10k_scan_state_str(ar->scan.state), 4100 ar->scan.state); 4101 break; 4102 case ATH10K_SCAN_RUNNING: 4103 ar->scan.state = ATH10K_SCAN_ABORTING; 4104 spin_unlock_bh(&ar->data_lock); 4105 4106 ret = ath10k_scan_stop(ar); 4107 if (ret) 4108 ath10k_warn(ar, "failed to abort scan: %d\n", ret); 4109 4110 spin_lock_bh(&ar->data_lock); 4111 break; 4112 } 4113 4114 spin_unlock_bh(&ar->data_lock); 4115 } 4116 4117 void ath10k_scan_timeout_work(struct work_struct *work) 4118 { 4119 struct ath10k *ar = container_of(work, struct ath10k, 4120 scan.timeout.work); 4121 4122 mutex_lock(&ar->conf_mutex); 4123 ath10k_scan_abort(ar); 4124 mutex_unlock(&ar->conf_mutex); 4125 } 4126 4127 static int ath10k_start_scan(struct ath10k *ar, 4128 const struct wmi_start_scan_arg *arg) 4129 { 4130 int ret; 4131 4132 lockdep_assert_held(&ar->conf_mutex); 4133 4134 ret = ath10k_wmi_start_scan(ar, arg); 4135 if (ret) 4136 return ret; 4137 4138 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); 4139 if (ret == 0) { 4140 ret = ath10k_scan_stop(ar); 4141 if (ret) 4142 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 4143 4144 return -ETIMEDOUT; 4145 } 4146 4147 /* If we failed to start the scan, return error code at 4148 * this point. This is probably due to some issue in the 4149 * firmware, but no need to wedge the driver due to that... 4150 */ 4151 spin_lock_bh(&ar->data_lock); 4152 if (ar->scan.state == ATH10K_SCAN_IDLE) { 4153 spin_unlock_bh(&ar->data_lock); 4154 return -EINVAL; 4155 } 4156 spin_unlock_bh(&ar->data_lock); 4157 4158 return 0; 4159 } 4160 4161 /**********************/ 4162 /* mac80211 callbacks */ 4163 /**********************/ 4164 4165 static void ath10k_mac_op_tx(struct ieee80211_hw *hw, 4166 struct ieee80211_tx_control *control, 4167 struct sk_buff *skb) 4168 { 4169 struct ath10k *ar = hw->priv; 4170 struct ath10k_htt *htt = &ar->htt; 4171 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 4172 struct ieee80211_vif *vif = info->control.vif; 4173 struct ieee80211_sta *sta = control->sta; 4174 struct ieee80211_txq *txq = NULL; 4175 struct ieee80211_hdr *hdr = (void *)skb->data; 4176 enum ath10k_hw_txrx_mode txmode; 4177 enum ath10k_mac_tx_path txpath; 4178 bool is_htt; 4179 bool is_mgmt; 4180 bool is_presp; 4181 int ret; 4182 4183 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); 4184 4185 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 4186 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 4187 is_htt = (txpath == ATH10K_MAC_TX_HTT || 4188 txpath == ATH10K_MAC_TX_HTT_MGMT); 4189 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 4190 4191 if (is_htt) { 4192 spin_lock_bh(&ar->htt.tx_lock); 4193 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 4194 4195 ret = ath10k_htt_tx_inc_pending(htt); 4196 if (ret) { 4197 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", 4198 ret); 4199 spin_unlock_bh(&ar->htt.tx_lock); 4200 ieee80211_free_txskb(ar->hw, skb); 4201 return; 4202 } 4203 4204 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 4205 if (ret) { 4206 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n", 4207 ret); 4208 ath10k_htt_tx_dec_pending(htt); 4209 spin_unlock_bh(&ar->htt.tx_lock); 4210 ieee80211_free_txskb(ar->hw, skb); 4211 return; 4212 } 4213 spin_unlock_bh(&ar->htt.tx_lock); 4214 } 4215 4216 ret = ath10k_mac_tx(ar, vif, txmode, 
txpath, skb); 4217 if (ret) { 4218 ath10k_warn(ar, "failed to transmit frame: %d\n", ret); 4219 if (is_htt) { 4220 spin_lock_bh(&ar->htt.tx_lock); 4221 ath10k_htt_tx_dec_pending(htt); 4222 if (is_mgmt) 4223 ath10k_htt_tx_mgmt_dec_pending(htt); 4224 spin_unlock_bh(&ar->htt.tx_lock); 4225 } 4226 return; 4227 } 4228 } 4229 4230 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, 4231 struct ieee80211_txq *txq) 4232 { 4233 struct ath10k *ar = hw->priv; 4234 struct ath10k_txq *artxq = (void *)txq->drv_priv; 4235 struct ieee80211_txq *f_txq; 4236 struct ath10k_txq *f_artxq; 4237 int ret = 0; 4238 int max = 16; 4239 4240 spin_lock_bh(&ar->txqs_lock); 4241 if (list_empty(&artxq->list)) 4242 list_add_tail(&artxq->list, &ar->txqs); 4243 4244 f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); 4245 f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv); 4246 list_del_init(&f_artxq->list); 4247 4248 while (ath10k_mac_tx_can_push(hw, f_txq) && max--) { 4249 ret = ath10k_mac_tx_push_txq(hw, f_txq); 4250 if (ret) 4251 break; 4252 } 4253 if (ret != -ENOENT) 4254 list_add_tail(&f_artxq->list, &ar->txqs); 4255 spin_unlock_bh(&ar->txqs_lock); 4256 4257 ath10k_htt_tx_txq_update(hw, f_txq); 4258 ath10k_htt_tx_txq_update(hw, txq); 4259 } 4260 4261 /* Must not be called with conf_mutex held as workers can use that also. */ 4262 void ath10k_drain_tx(struct ath10k *ar) 4263 { 4264 /* make sure rcu-protected mac80211 tx path itself is drained */ 4265 synchronize_net(); 4266 4267 ath10k_offchan_tx_purge(ar); 4268 ath10k_mgmt_over_wmi_tx_purge(ar); 4269 4270 cancel_work_sync(&ar->offchan_tx_work); 4271 cancel_work_sync(&ar->wmi_mgmt_tx_work); 4272 } 4273 4274 void ath10k_halt(struct ath10k *ar) 4275 { 4276 struct ath10k_vif *arvif; 4277 4278 lockdep_assert_held(&ar->conf_mutex); 4279 4280 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 4281 ar->filter_flags = 0; 4282 ar->monitor = false; 4283 ar->monitor_arvif = NULL; 4284 4285 if (ar->monitor_started) 4286 ath10k_monitor_stop(ar); 4287 4288 ar->monitor_started = false; 4289 ar->tx_paused = 0; 4290 4291 ath10k_scan_finish(ar); 4292 ath10k_peer_cleanup_all(ar); 4293 ath10k_core_stop(ar); 4294 ath10k_hif_power_down(ar); 4295 4296 spin_lock_bh(&ar->data_lock); 4297 list_for_each_entry(arvif, &ar->arvifs, list) 4298 ath10k_mac_vif_beacon_cleanup(arvif); 4299 spin_unlock_bh(&ar->data_lock); 4300 } 4301 4302 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 4303 { 4304 struct ath10k *ar = hw->priv; 4305 4306 mutex_lock(&ar->conf_mutex); 4307 4308 *tx_ant = ar->cfg_tx_chainmask; 4309 *rx_ant = ar->cfg_rx_chainmask; 4310 4311 mutex_unlock(&ar->conf_mutex); 4312 4313 return 0; 4314 } 4315 4316 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) 4317 { 4318 /* It is not clear that allowing gaps in chainmask 4319 * is helpful. Probably it will not do what user 4320 * is hoping for, so warn in that case. 4321 */ 4322 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) 4323 return; 4324 4325 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. 
Suggested values: 15, 7, 3, 1 or 0.\n",
		    dbg, cm);
}

static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
{
	int nsts = ar->vht_cap_info;

	nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
	nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;

	/* If the firmware does not report the number of supported
	 * beamformee space-time streams, assume it supports up to 4 BF STS
	 * and return the corresponding VHT capability encoding (nsts - 1).
	 */
	if (nsts == 0)
		return 3;

	return nsts;
}

static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
{
	int sound_dim = ar->vht_cap_info;

	sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
	sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;

	/* If the sounding dimension is not advertised by the firmware,
	 * use a default value of 1.
	 */
	if (sound_dim == 0)
		return 1;

	return sound_dim;
}

static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
	struct ieee80211_sta_vht_cap vht_cap = {0};
	u16 mcs_map;
	u32 val;
	int i;

	vht_cap.vht_supported = 1;
	vht_cap.cap = ar->vht_cap_info;

	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
		val = ath10k_mac_get_vht_cap_bf_sts(ar);
		val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
		val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;

		vht_cap.cap |= val;
	}

	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
		val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
		val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
		val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;

		vht_cap.cap |= val;
	}

	/* Currently the firmware seems to be buggy, don't enable 80+80
	 * mode until that's resolved.
4392 */ 4393 if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) && 4394 !(ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)) 4395 vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; 4396 4397 mcs_map = 0; 4398 for (i = 0; i < 8; i++) { 4399 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i))) 4400 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4401 else 4402 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4403 } 4404 4405 if (ar->cfg_tx_chainmask <= 1) 4406 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; 4407 4408 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); 4409 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); 4410 4411 return vht_cap; 4412 } 4413 4414 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) 4415 { 4416 int i; 4417 struct ieee80211_sta_ht_cap ht_cap = {0}; 4418 4419 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) 4420 return ht_cap; 4421 4422 ht_cap.ht_supported = 1; 4423 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 4424 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 4425 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 4426 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; 4427 ht_cap.cap |= 4428 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; 4429 4430 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) 4431 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; 4432 4433 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) 4434 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 4435 4436 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { 4437 u32 smps; 4438 4439 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 4440 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 4441 4442 ht_cap.cap |= smps; 4443 } 4444 4445 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1)) 4446 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; 4447 4448 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { 4449 u32 stbc; 4450 4451 stbc = ar->ht_cap_info; 4452 stbc &= WMI_HT_CAP_RX_STBC; 4453 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; 4454 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; 4455 stbc &= IEEE80211_HT_CAP_RX_STBC; 4456 4457 ht_cap.cap |= stbc; 4458 } 4459 4460 if (ar->ht_cap_info & WMI_HT_CAP_LDPC) 4461 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 4462 4463 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) 4464 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; 4465 4466 /* max AMSDU is implicitly taken from vht_cap_info */ 4467 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 4468 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 4469 4470 for (i = 0; i < ar->num_rf_chains; i++) { 4471 if (ar->cfg_rx_chainmask & BIT(i)) 4472 ht_cap.mcs.rx_mask[i] = 0xFF; 4473 } 4474 4475 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 4476 4477 return ht_cap; 4478 } 4479 4480 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar) 4481 { 4482 struct ieee80211_supported_band *band; 4483 struct ieee80211_sta_vht_cap vht_cap; 4484 struct ieee80211_sta_ht_cap ht_cap; 4485 4486 ht_cap = ath10k_get_ht_cap(ar); 4487 vht_cap = ath10k_create_vht_cap(ar); 4488 4489 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 4490 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 4491 band->ht_cap = ht_cap; 4492 } 4493 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 4494 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 4495 band->ht_cap = ht_cap; 4496 band->vht_cap = vht_cap; 4497 } 4498 } 4499 4500 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) 4501 { 4502 int ret; 4503 4504 lockdep_assert_held(&ar->conf_mutex); 4505 4506 ath10k_check_chain_mask(ar, tx_ant, "tx"); 4507 ath10k_check_chain_mask(ar, rx_ant, 
"rx"); 4508 4509 ar->cfg_tx_chainmask = tx_ant; 4510 ar->cfg_rx_chainmask = rx_ant; 4511 4512 if ((ar->state != ATH10K_STATE_ON) && 4513 (ar->state != ATH10K_STATE_RESTARTED)) 4514 return 0; 4515 4516 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, 4517 tx_ant); 4518 if (ret) { 4519 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n", 4520 ret, tx_ant); 4521 return ret; 4522 } 4523 4524 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, 4525 rx_ant); 4526 if (ret) { 4527 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", 4528 ret, rx_ant); 4529 return ret; 4530 } 4531 4532 /* Reload HT/VHT capability */ 4533 ath10k_mac_setup_ht_vht_cap(ar); 4534 4535 return 0; 4536 } 4537 4538 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 4539 { 4540 struct ath10k *ar = hw->priv; 4541 int ret; 4542 4543 mutex_lock(&ar->conf_mutex); 4544 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant); 4545 mutex_unlock(&ar->conf_mutex); 4546 return ret; 4547 } 4548 4549 static int ath10k_start(struct ieee80211_hw *hw) 4550 { 4551 struct ath10k *ar = hw->priv; 4552 u32 param; 4553 int ret = 0; 4554 4555 /* 4556 * This makes sense only when restarting hw. It is harmless to call 4557 * unconditionally. This is necessary to make sure no HTT/WMI tx 4558 * commands will be submitted while restarting. 4559 */ 4560 ath10k_drain_tx(ar); 4561 4562 mutex_lock(&ar->conf_mutex); 4563 4564 switch (ar->state) { 4565 case ATH10K_STATE_OFF: 4566 ar->state = ATH10K_STATE_ON; 4567 break; 4568 case ATH10K_STATE_RESTARTING: 4569 ar->state = ATH10K_STATE_RESTARTED; 4570 break; 4571 case ATH10K_STATE_ON: 4572 case ATH10K_STATE_RESTARTED: 4573 case ATH10K_STATE_WEDGED: 4574 WARN_ON(1); 4575 ret = -EINVAL; 4576 goto err; 4577 case ATH10K_STATE_UTF: 4578 ret = -EBUSY; 4579 goto err; 4580 } 4581 4582 ret = ath10k_hif_power_up(ar); 4583 if (ret) { 4584 ath10k_err(ar, "Could not init hif: %d\n", ret); 4585 goto err_off; 4586 } 4587 4588 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, 4589 &ar->normal_mode_fw); 4590 if (ret) { 4591 ath10k_err(ar, "Could not init core: %d\n", ret); 4592 goto err_power_down; 4593 } 4594 4595 param = ar->wmi.pdev_param->pmf_qos; 4596 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4597 if (ret) { 4598 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); 4599 goto err_core_stop; 4600 } 4601 4602 param = ar->wmi.pdev_param->dynamic_bw; 4603 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4604 if (ret) { 4605 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); 4606 goto err_core_stop; 4607 } 4608 4609 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { 4610 ret = ath10k_wmi_adaptive_qcs(ar, true); 4611 if (ret) { 4612 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n", 4613 ret); 4614 goto err_core_stop; 4615 } 4616 } 4617 4618 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) { 4619 param = ar->wmi.pdev_param->burst_enable; 4620 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4621 if (ret) { 4622 ath10k_warn(ar, "failed to disable burst: %d\n", ret); 4623 goto err_core_stop; 4624 } 4625 } 4626 4627 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); 4628 4629 /* 4630 * By default FW set ARP frames ac to voice (6). In that case ARP 4631 * exchange is not working properly for UAPSD enabled AP. ARP requests 4632 * which arrives with access category 0 are processed by network stack 4633 * and send back with access category 0, but FW changes access category 4634 * to 6. 
Set ARP frames access category to best effort (0) solves 4635 * this problem. 4636 */ 4637 4638 param = ar->wmi.pdev_param->arp_ac_override; 4639 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4640 if (ret) { 4641 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", 4642 ret); 4643 goto err_core_stop; 4644 } 4645 4646 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA, 4647 ar->running_fw->fw_file.fw_features)) { 4648 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1, 4649 WMI_CCA_DETECT_LEVEL_AUTO, 4650 WMI_CCA_DETECT_MARGIN_AUTO); 4651 if (ret) { 4652 ath10k_warn(ar, "failed to enable adaptive cca: %d\n", 4653 ret); 4654 goto err_core_stop; 4655 } 4656 } 4657 4658 param = ar->wmi.pdev_param->ani_enable; 4659 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4660 if (ret) { 4661 ath10k_warn(ar, "failed to enable ani by default: %d\n", 4662 ret); 4663 goto err_core_stop; 4664 } 4665 4666 ar->ani_enabled = true; 4667 4668 if (ath10k_peer_stats_enabled(ar)) { 4669 param = ar->wmi.pdev_param->peer_stats_update_period; 4670 ret = ath10k_wmi_pdev_set_param(ar, param, 4671 PEER_DEFAULT_STATS_UPDATE_PERIOD); 4672 if (ret) { 4673 ath10k_warn(ar, 4674 "failed to set peer stats period : %d\n", 4675 ret); 4676 goto err_core_stop; 4677 } 4678 } 4679 4680 param = ar->wmi.pdev_param->enable_btcoex; 4681 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && 4682 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, 4683 ar->running_fw->fw_file.fw_features)) { 4684 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4685 if (ret) { 4686 ath10k_warn(ar, 4687 "failed to set btcoex param: %d\n", ret); 4688 goto err_core_stop; 4689 } 4690 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); 4691 } 4692 4693 ar->num_started_vdevs = 0; 4694 ath10k_regd_update(ar); 4695 4696 ath10k_spectral_start(ar); 4697 ath10k_thermal_set_throttling(ar); 4698 4699 mutex_unlock(&ar->conf_mutex); 4700 return 0; 4701 4702 err_core_stop: 4703 ath10k_core_stop(ar); 4704 4705 err_power_down: 4706 ath10k_hif_power_down(ar); 4707 4708 err_off: 4709 ar->state = ATH10K_STATE_OFF; 4710 4711 err: 4712 mutex_unlock(&ar->conf_mutex); 4713 return ret; 4714 } 4715 4716 static void ath10k_stop(struct ieee80211_hw *hw) 4717 { 4718 struct ath10k *ar = hw->priv; 4719 4720 ath10k_drain_tx(ar); 4721 4722 mutex_lock(&ar->conf_mutex); 4723 if (ar->state != ATH10K_STATE_OFF) { 4724 ath10k_halt(ar); 4725 ar->state = ATH10K_STATE_OFF; 4726 } 4727 mutex_unlock(&ar->conf_mutex); 4728 4729 cancel_work_sync(&ar->set_coverage_class_work); 4730 cancel_delayed_work_sync(&ar->scan.timeout); 4731 cancel_work_sync(&ar->restart_work); 4732 } 4733 4734 static int ath10k_config_ps(struct ath10k *ar) 4735 { 4736 struct ath10k_vif *arvif; 4737 int ret = 0; 4738 4739 lockdep_assert_held(&ar->conf_mutex); 4740 4741 list_for_each_entry(arvif, &ar->arvifs, list) { 4742 ret = ath10k_mac_vif_setup_ps(arvif); 4743 if (ret) { 4744 ath10k_warn(ar, "failed to setup powersave: %d\n", ret); 4745 break; 4746 } 4747 } 4748 4749 return ret; 4750 } 4751 4752 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) 4753 { 4754 int ret; 4755 u32 param; 4756 4757 lockdep_assert_held(&ar->conf_mutex); 4758 4759 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower); 4760 4761 param = ar->wmi.pdev_param->txpower_limit2g; 4762 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4763 if (ret) { 4764 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", 4765 txpower, ret); 4766 return ret; 4767 } 4768 4769 param = ar->wmi.pdev_param->txpower_limit5g; 4770 ret = 
ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4771 if (ret) { 4772 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", 4773 txpower, ret); 4774 return ret; 4775 } 4776 4777 return 0; 4778 } 4779 4780 static int ath10k_mac_txpower_recalc(struct ath10k *ar) 4781 { 4782 struct ath10k_vif *arvif; 4783 int ret, txpower = -1; 4784 4785 lockdep_assert_held(&ar->conf_mutex); 4786 4787 list_for_each_entry(arvif, &ar->arvifs, list) { 4788 if (arvif->txpower <= 0) 4789 continue; 4790 4791 if (txpower == -1) 4792 txpower = arvif->txpower; 4793 else 4794 txpower = min(txpower, arvif->txpower); 4795 } 4796 4797 if (txpower == -1) 4798 return 0; 4799 4800 ret = ath10k_mac_txpower_setup(ar, txpower); 4801 if (ret) { 4802 ath10k_warn(ar, "failed to setup tx power %d: %d\n", 4803 txpower, ret); 4804 return ret; 4805 } 4806 4807 return 0; 4808 } 4809 4810 static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 4811 { 4812 struct ath10k *ar = hw->priv; 4813 struct ieee80211_conf *conf = &hw->conf; 4814 int ret = 0; 4815 4816 mutex_lock(&ar->conf_mutex); 4817 4818 if (changed & IEEE80211_CONF_CHANGE_PS) 4819 ath10k_config_ps(ar); 4820 4821 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 4822 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; 4823 ret = ath10k_monitor_recalc(ar); 4824 if (ret) 4825 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 4826 } 4827 4828 mutex_unlock(&ar->conf_mutex); 4829 return ret; 4830 } 4831 4832 static u32 get_nss_from_chainmask(u16 chain_mask) 4833 { 4834 if ((chain_mask & 0xf) == 0xf) 4835 return 4; 4836 else if ((chain_mask & 0x7) == 0x7) 4837 return 3; 4838 else if ((chain_mask & 0x3) == 0x3) 4839 return 2; 4840 return 1; 4841 } 4842 4843 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) 4844 { 4845 u32 value = 0; 4846 struct ath10k *ar = arvif->ar; 4847 int nsts; 4848 int sound_dim; 4849 4850 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) 4851 return 0; 4852 4853 nsts = ath10k_mac_get_vht_cap_bf_sts(ar); 4854 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4855 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) 4856 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); 4857 4858 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4859 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4860 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) 4861 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); 4862 4863 if (!value) 4864 return 0; 4865 4866 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 4867 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 4868 4869 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 4870 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER | 4871 WMI_VDEV_PARAM_TXBF_SU_TX_BFER); 4872 4873 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 4874 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 4875 4876 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 4877 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE | 4878 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE); 4879 4880 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 4881 ar->wmi.vdev_param->txbf, value); 4882 } 4883 4884 /* 4885 * TODO: 4886 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, 4887 * because we will send mgmt frames without CCK. This requirement 4888 * for P2P_FIND/GO_NEG should be handled by checking CCK flag 4889 * in the TX packet. 
4890 */ 4891 static int ath10k_add_interface(struct ieee80211_hw *hw, 4892 struct ieee80211_vif *vif) 4893 { 4894 struct ath10k *ar = hw->priv; 4895 struct ath10k_vif *arvif = (void *)vif->drv_priv; 4896 struct ath10k_peer *peer; 4897 enum wmi_sta_powersave_param param; 4898 int ret = 0; 4899 u32 value; 4900 int bit; 4901 int i; 4902 u32 vdev_param; 4903 4904 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 4905 4906 mutex_lock(&ar->conf_mutex); 4907 4908 memset(arvif, 0, sizeof(*arvif)); 4909 ath10k_mac_txq_init(vif->txq); 4910 4911 arvif->ar = ar; 4912 arvif->vif = vif; 4913 4914 INIT_LIST_HEAD(&arvif->list); 4915 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work); 4916 INIT_DELAYED_WORK(&arvif->connection_loss_work, 4917 ath10k_mac_vif_sta_connection_loss_work); 4918 4919 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 4920 arvif->bitrate_mask.control[i].legacy = 0xffffffff; 4921 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 4922 sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 4923 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 4924 sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 4925 } 4926 4927 if (ar->num_peers >= ar->max_num_peers) { 4928 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n"); 4929 ret = -ENOBUFS; 4930 goto err; 4931 } 4932 4933 if (ar->free_vdev_map == 0) { 4934 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); 4935 ret = -EBUSY; 4936 goto err; 4937 } 4938 bit = __ffs64(ar->free_vdev_map); 4939 4940 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", 4941 bit, ar->free_vdev_map); 4942 4943 arvif->vdev_id = bit; 4944 arvif->vdev_subtype = 4945 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); 4946 4947 switch (vif->type) { 4948 case NL80211_IFTYPE_P2P_DEVICE: 4949 arvif->vdev_type = WMI_VDEV_TYPE_STA; 4950 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4951 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); 4952 break; 4953 case NL80211_IFTYPE_UNSPECIFIED: 4954 case NL80211_IFTYPE_STATION: 4955 arvif->vdev_type = WMI_VDEV_TYPE_STA; 4956 if (vif->p2p) 4957 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4958 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); 4959 break; 4960 case NL80211_IFTYPE_ADHOC: 4961 arvif->vdev_type = WMI_VDEV_TYPE_IBSS; 4962 break; 4963 case NL80211_IFTYPE_MESH_POINT: 4964 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { 4965 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4966 (ar, WMI_VDEV_SUBTYPE_MESH_11S); 4967 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 4968 ret = -EINVAL; 4969 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); 4970 goto err; 4971 } 4972 arvif->vdev_type = WMI_VDEV_TYPE_AP; 4973 break; 4974 case NL80211_IFTYPE_AP: 4975 arvif->vdev_type = WMI_VDEV_TYPE_AP; 4976 4977 if (vif->p2p) 4978 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4979 (ar, WMI_VDEV_SUBTYPE_P2P_GO); 4980 break; 4981 case NL80211_IFTYPE_MONITOR: 4982 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; 4983 break; 4984 default: 4985 WARN_ON(1); 4986 break; 4987 } 4988 4989 /* Using vdev_id as queue number will make it very easy to do per-vif 4990 * tx queue locking. This shouldn't wrap due to interface combinations 4991 * but do a modulo for correctness sake and prevent using offchannel tx 4992 * queues for regular vif tx. 
4993 */ 4994 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 4995 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 4996 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 4997 4998 /* Some firmware revisions don't wait for beacon tx completion before 4999 * sending another SWBA event. This could lead to hardware using old 5000 * (freed) beacon data in some cases, e.g. tx credit starvation 5001 * combined with missed TBTT. This is very very rare. 5002 * 5003 * On non-IOMMU-enabled hosts this could be a possible security issue 5004 * because hw could beacon some random data on the air. On 5005 * IOMMU-enabled hosts DMAR faults would occur in most cases and target 5006 * device would crash. 5007 * 5008 * Since there are no beacon tx completions (implicit nor explicit) 5009 * propagated to host the only workaround for this is to allocate a 5010 * DMA-coherent buffer for a lifetime of a vif and use it for all 5011 * beacon tx commands. Worst case for this approach is some beacons may 5012 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. 5013 */ 5014 if (vif->type == NL80211_IFTYPE_ADHOC || 5015 vif->type == NL80211_IFTYPE_MESH_POINT || 5016 vif->type == NL80211_IFTYPE_AP) { 5017 arvif->beacon_buf = dma_zalloc_coherent(ar->dev, 5018 IEEE80211_MAX_FRAME_LEN, 5019 &arvif->beacon_paddr, 5020 GFP_ATOMIC); 5021 if (!arvif->beacon_buf) { 5022 ret = -ENOMEM; 5023 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", 5024 ret); 5025 goto err; 5026 } 5027 } 5028 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)) 5029 arvif->nohwcrypt = true; 5030 5031 if (arvif->nohwcrypt && 5032 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 5033 ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); 5034 goto err; 5035 } 5036 5037 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", 5038 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, 5039 arvif->beacon_buf ? "single-buf" : "per-skb"); 5040 5041 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 5042 arvif->vdev_subtype, vif->addr); 5043 if (ret) { 5044 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", 5045 arvif->vdev_id, ret); 5046 goto err; 5047 } 5048 5049 ar->free_vdev_map &= ~(1LL << arvif->vdev_id); 5050 spin_lock_bh(&ar->data_lock); 5051 list_add(&arvif->list, &ar->arvifs); 5052 spin_unlock_bh(&ar->data_lock); 5053 5054 /* It makes no sense to have firmware do keepalives. mac80211 already 5055 * takes care of this with idle connection polling. 5056 */ 5057 ret = ath10k_mac_vif_disable_keepalive(arvif); 5058 if (ret) { 5059 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", 5060 arvif->vdev_id, ret); 5061 goto err_vdev_delete; 5062 } 5063 5064 arvif->def_wep_key_idx = -1; 5065 5066 vdev_param = ar->wmi.vdev_param->tx_encap_type; 5067 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5068 ATH10K_HW_TXRX_NATIVE_WIFI); 5069 /* 10.X firmware does not support this VDEV parameter. Do not warn */ 5070 if (ret && ret != -EOPNOTSUPP) { 5071 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", 5072 arvif->vdev_id, ret); 5073 goto err_vdev_delete; 5074 } 5075 5076 /* Configuring number of spatial stream for monitor interface is causing 5077 * target assert in qca9888 and qca6174. 
5078 */ 5079 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { 5080 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); 5081 5082 vdev_param = ar->wmi.vdev_param->nss; 5083 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5084 nss); 5085 if (ret) { 5086 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", 5087 arvif->vdev_id, ar->cfg_tx_chainmask, nss, 5088 ret); 5089 goto err_vdev_delete; 5090 } 5091 } 5092 5093 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5094 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5095 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id, 5096 vif->addr, WMI_PEER_TYPE_DEFAULT); 5097 if (ret) { 5098 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", 5099 arvif->vdev_id, ret); 5100 goto err_vdev_delete; 5101 } 5102 5103 spin_lock_bh(&ar->data_lock); 5104 5105 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr); 5106 if (!peer) { 5107 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 5108 vif->addr, arvif->vdev_id); 5109 spin_unlock_bh(&ar->data_lock); 5110 ret = -ENOENT; 5111 goto err_peer_delete; 5112 } 5113 5114 arvif->peer_id = find_first_bit(peer->peer_ids, 5115 ATH10K_MAX_NUM_PEER_IDS); 5116 5117 spin_unlock_bh(&ar->data_lock); 5118 } else { 5119 arvif->peer_id = HTT_INVALID_PEERID; 5120 } 5121 5122 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 5123 ret = ath10k_mac_set_kickout(arvif); 5124 if (ret) { 5125 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", 5126 arvif->vdev_id, ret); 5127 goto err_peer_delete; 5128 } 5129 } 5130 5131 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { 5132 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY; 5133 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 5134 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5135 param, value); 5136 if (ret) { 5137 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", 5138 arvif->vdev_id, ret); 5139 goto err_peer_delete; 5140 } 5141 5142 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 5143 if (ret) { 5144 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 5145 arvif->vdev_id, ret); 5146 goto err_peer_delete; 5147 } 5148 5149 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 5150 if (ret) { 5151 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 5152 arvif->vdev_id, ret); 5153 goto err_peer_delete; 5154 } 5155 } 5156 5157 ret = ath10k_mac_set_txbf_conf(arvif); 5158 if (ret) { 5159 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n", 5160 arvif->vdev_id, ret); 5161 goto err_peer_delete; 5162 } 5163 5164 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 5165 if (ret) { 5166 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 5167 arvif->vdev_id, ret); 5168 goto err_peer_delete; 5169 } 5170 5171 arvif->txpower = vif->bss_conf.txpower; 5172 ret = ath10k_mac_txpower_recalc(ar); 5173 if (ret) { 5174 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5175 goto err_peer_delete; 5176 } 5177 5178 if (vif->type == NL80211_IFTYPE_MONITOR) { 5179 ar->monitor_arvif = arvif; 5180 ret = ath10k_monitor_recalc(ar); 5181 if (ret) { 5182 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5183 goto err_peer_delete; 5184 } 5185 } 5186 5187 spin_lock_bh(&ar->htt.tx_lock); 5188 if (!ar->tx_paused) 5189 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 5190 spin_unlock_bh(&ar->htt.tx_lock); 5191 5192 mutex_unlock(&ar->conf_mutex); 5193 return 0; 5194 5195 err_peer_delete: 5196 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5197 arvif->vdev_type == 
WMI_VDEV_TYPE_IBSS) 5198 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); 5199 5200 err_vdev_delete: 5201 ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5202 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5203 spin_lock_bh(&ar->data_lock); 5204 list_del(&arvif->list); 5205 spin_unlock_bh(&ar->data_lock); 5206 5207 err: 5208 if (arvif->beacon_buf) { 5209 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 5210 arvif->beacon_buf, arvif->beacon_paddr); 5211 arvif->beacon_buf = NULL; 5212 } 5213 5214 mutex_unlock(&ar->conf_mutex); 5215 5216 return ret; 5217 } 5218 5219 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif) 5220 { 5221 int i; 5222 5223 for (i = 0; i < BITS_PER_LONG; i++) 5224 ath10k_mac_vif_tx_unlock(arvif, i); 5225 } 5226 5227 static void ath10k_remove_interface(struct ieee80211_hw *hw, 5228 struct ieee80211_vif *vif) 5229 { 5230 struct ath10k *ar = hw->priv; 5231 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5232 struct ath10k_peer *peer; 5233 int ret; 5234 int i; 5235 5236 cancel_work_sync(&arvif->ap_csa_work); 5237 cancel_delayed_work_sync(&arvif->connection_loss_work); 5238 5239 mutex_lock(&ar->conf_mutex); 5240 5241 spin_lock_bh(&ar->data_lock); 5242 ath10k_mac_vif_beacon_cleanup(arvif); 5243 spin_unlock_bh(&ar->data_lock); 5244 5245 ret = ath10k_spectral_vif_stop(arvif); 5246 if (ret) 5247 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", 5248 arvif->vdev_id, ret); 5249 5250 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5251 spin_lock_bh(&ar->data_lock); 5252 list_del(&arvif->list); 5253 spin_unlock_bh(&ar->data_lock); 5254 5255 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5256 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5257 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, 5258 vif->addr); 5259 if (ret) 5260 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n", 5261 arvif->vdev_id, ret); 5262 5263 kfree(arvif->u.ap.noa_data); 5264 } 5265 5266 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", 5267 arvif->vdev_id); 5268 5269 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5270 if (ret) 5271 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", 5272 arvif->vdev_id, ret); 5273 5274 /* Some firmware revisions don't notify host about self-peer removal 5275 * until after associated vdev is deleted. 
5276 */ 5277 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5278 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5279 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, 5280 vif->addr); 5281 if (ret) 5282 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", 5283 arvif->vdev_id, ret); 5284 5285 spin_lock_bh(&ar->data_lock); 5286 ar->num_peers--; 5287 spin_unlock_bh(&ar->data_lock); 5288 } 5289 5290 spin_lock_bh(&ar->data_lock); 5291 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 5292 peer = ar->peer_map[i]; 5293 if (!peer) 5294 continue; 5295 5296 if (peer->vif == vif) { 5297 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n", 5298 vif->addr, arvif->vdev_id); 5299 peer->vif = NULL; 5300 } 5301 } 5302 spin_unlock_bh(&ar->data_lock); 5303 5304 ath10k_peer_cleanup(ar, arvif->vdev_id); 5305 ath10k_mac_txq_unref(ar, vif->txq); 5306 5307 if (vif->type == NL80211_IFTYPE_MONITOR) { 5308 ar->monitor_arvif = NULL; 5309 ret = ath10k_monitor_recalc(ar); 5310 if (ret) 5311 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5312 } 5313 5314 ret = ath10k_mac_txpower_recalc(ar); 5315 if (ret) 5316 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5317 5318 spin_lock_bh(&ar->htt.tx_lock); 5319 ath10k_mac_vif_tx_unlock_all(arvif); 5320 spin_unlock_bh(&ar->htt.tx_lock); 5321 5322 ath10k_mac_txq_unref(ar, vif->txq); 5323 5324 mutex_unlock(&ar->conf_mutex); 5325 } 5326 5327 /* 5328 * FIXME: Has to be verified. 5329 */ 5330 #define SUPPORTED_FILTERS \ 5331 (FIF_ALLMULTI | \ 5332 FIF_CONTROL | \ 5333 FIF_PSPOLL | \ 5334 FIF_OTHER_BSS | \ 5335 FIF_BCN_PRBRESP_PROMISC | \ 5336 FIF_PROBE_REQ | \ 5337 FIF_FCSFAIL) 5338 5339 static void ath10k_configure_filter(struct ieee80211_hw *hw, 5340 unsigned int changed_flags, 5341 unsigned int *total_flags, 5342 u64 multicast) 5343 { 5344 struct ath10k *ar = hw->priv; 5345 int ret; 5346 5347 mutex_lock(&ar->conf_mutex); 5348 5349 changed_flags &= SUPPORTED_FILTERS; 5350 *total_flags &= SUPPORTED_FILTERS; 5351 ar->filter_flags = *total_flags; 5352 5353 ret = ath10k_monitor_recalc(ar); 5354 if (ret) 5355 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5356 5357 mutex_unlock(&ar->conf_mutex); 5358 } 5359 5360 static void ath10k_bss_info_changed(struct ieee80211_hw *hw, 5361 struct ieee80211_vif *vif, 5362 struct ieee80211_bss_conf *info, 5363 u32 changed) 5364 { 5365 struct ath10k *ar = hw->priv; 5366 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5367 int ret = 0; 5368 u32 vdev_param, pdev_param, slottime, preamble; 5369 5370 mutex_lock(&ar->conf_mutex); 5371 5372 if (changed & BSS_CHANGED_IBSS) 5373 ath10k_control_ibss(arvif, info, vif->addr); 5374 5375 if (changed & BSS_CHANGED_BEACON_INT) { 5376 arvif->beacon_interval = info->beacon_int; 5377 vdev_param = ar->wmi.vdev_param->beacon_interval; 5378 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5379 arvif->beacon_interval); 5380 ath10k_dbg(ar, ATH10K_DBG_MAC, 5381 "mac vdev %d beacon_interval %d\n", 5382 arvif->vdev_id, arvif->beacon_interval); 5383 5384 if (ret) 5385 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", 5386 arvif->vdev_id, ret); 5387 } 5388 5389 if (changed & BSS_CHANGED_BEACON) { 5390 ath10k_dbg(ar, ATH10K_DBG_MAC, 5391 "vdev %d set beacon tx mode to staggered\n", 5392 arvif->vdev_id); 5393 5394 pdev_param = ar->wmi.pdev_param->beacon_tx_mode; 5395 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 5396 WMI_BEACON_STAGGERED_MODE); 5397 if (ret) 5398 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", 
				    arvif->vdev_id, ret);

		ret = ath10k_mac_setup_bcn_tmpl(arvif);
		if (ret)
			ath10k_warn(ar, "failed to update beacon template: %d\n",
				    ret);

		if (ieee80211_vif_is_mesh(vif)) {
			/* mesh doesn't use SSID but firmware needs it */
			strncpy(arvif->u.ap.ssid, "mesh",
				sizeof(arvif->u.ap.ssid));
			arvif->u.ap.ssid_len = 4;
		}
	}

	if (changed & BSS_CHANGED_AP_PROBE_RESP) {
		ret = ath10k_mac_setup_prb_tmpl(arvif);
		if (ret)
			ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
				    arvif->vdev_id, ret);
	}

	if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
		arvif->dtim_period = info->dtim_period;

		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac vdev %d dtim_period %d\n",
			   arvif->vdev_id, arvif->dtim_period);

		vdev_param = ar->wmi.vdev_param->dtim_period;
		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
						arvif->dtim_period);
		if (ret)
			ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
				    arvif->vdev_id, ret);
	}

	if (changed & BSS_CHANGED_SSID &&
	    vif->type == NL80211_IFTYPE_AP) {
		arvif->u.ap.ssid_len = info->ssid_len;
		if (info->ssid_len)
			memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
		arvif->u.ap.hidden_ssid = info->hidden_ssid;
	}

	if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
		ether_addr_copy(arvif->bssid, info->bssid);

	if (changed & BSS_CHANGED_BEACON_ENABLED)
		ath10k_control_beaconing(arvif, info);

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		arvif->use_cts_prot = info->use_cts_prot;

		ret = ath10k_recalc_rtscts_prot(arvif);
		if (ret)
			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
				    arvif->vdev_id, ret);

		if (ath10k_mac_can_set_cts_prot(arvif)) {
			ret = ath10k_mac_set_cts_prot(arvif);
			if (ret)
				ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
					    arvif->vdev_id, ret);
		}
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (info->use_short_slot)
			slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
		else
			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */

		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
			   arvif->vdev_id, slottime);

		vdev_param = ar->wmi.vdev_param->slot_time;
		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
						slottime);
		if (ret)
			ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
				    arvif->vdev_id, ret);
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		if (info->use_short_preamble)
			preamble = WMI_VDEV_PREAMBLE_SHORT;
		else
			preamble = WMI_VDEV_PREAMBLE_LONG;

		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac vdev %d preamble %d\n",
			   arvif->vdev_id, preamble);

		vdev_param = ar->wmi.vdev_param->preamble;
		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
						preamble);
		if (ret)
			ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
				    arvif->vdev_id, ret);
	}

	if (changed & BSS_CHANGED_ASSOC) {
		if (info->assoc) {
			/* Workaround: Make sure monitor vdev is not running
			 * when associating to prevent some firmware revisions
			 * (e.g. 10.1 and 10.2) from crashing.
5507 */ 5508 if (ar->monitor_started) 5509 ath10k_monitor_stop(ar); 5510 ath10k_bss_assoc(hw, vif, info); 5511 ath10k_monitor_recalc(ar); 5512 } else { 5513 ath10k_bss_disassoc(hw, vif); 5514 } 5515 } 5516 5517 if (changed & BSS_CHANGED_TXPOWER) { 5518 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", 5519 arvif->vdev_id, info->txpower); 5520 5521 arvif->txpower = info->txpower; 5522 ret = ath10k_mac_txpower_recalc(ar); 5523 if (ret) 5524 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5525 } 5526 5527 if (changed & BSS_CHANGED_PS) { 5528 arvif->ps = vif->bss_conf.ps; 5529 5530 ret = ath10k_config_ps(ar); 5531 if (ret) 5532 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", 5533 arvif->vdev_id, ret); 5534 } 5535 5536 mutex_unlock(&ar->conf_mutex); 5537 } 5538 5539 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value) 5540 { 5541 struct ath10k *ar = hw->priv; 5542 5543 /* This function should never be called if setting the coverage class 5544 * is not supported on this hardware. 5545 */ 5546 if (!ar->hw_params.hw_ops->set_coverage_class) { 5547 WARN_ON_ONCE(1); 5548 return; 5549 } 5550 ar->hw_params.hw_ops->set_coverage_class(ar, value); 5551 } 5552 5553 static int ath10k_hw_scan(struct ieee80211_hw *hw, 5554 struct ieee80211_vif *vif, 5555 struct ieee80211_scan_request *hw_req) 5556 { 5557 struct ath10k *ar = hw->priv; 5558 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5559 struct cfg80211_scan_request *req = &hw_req->req; 5560 struct wmi_start_scan_arg arg; 5561 int ret = 0; 5562 int i; 5563 5564 mutex_lock(&ar->conf_mutex); 5565 5566 spin_lock_bh(&ar->data_lock); 5567 switch (ar->scan.state) { 5568 case ATH10K_SCAN_IDLE: 5569 reinit_completion(&ar->scan.started); 5570 reinit_completion(&ar->scan.completed); 5571 ar->scan.state = ATH10K_SCAN_STARTING; 5572 ar->scan.is_roc = false; 5573 ar->scan.vdev_id = arvif->vdev_id; 5574 ret = 0; 5575 break; 5576 case ATH10K_SCAN_STARTING: 5577 case ATH10K_SCAN_RUNNING: 5578 case ATH10K_SCAN_ABORTING: 5579 ret = -EBUSY; 5580 break; 5581 } 5582 spin_unlock_bh(&ar->data_lock); 5583 5584 if (ret) 5585 goto exit; 5586 5587 memset(&arg, 0, sizeof(arg)); 5588 ath10k_wmi_start_scan_init(ar, &arg); 5589 arg.vdev_id = arvif->vdev_id; 5590 arg.scan_id = ATH10K_SCAN_ID; 5591 5592 if (req->ie_len) { 5593 arg.ie_len = req->ie_len; 5594 memcpy(arg.ie, req->ie, arg.ie_len); 5595 } 5596 5597 if (req->n_ssids) { 5598 arg.n_ssids = req->n_ssids; 5599 for (i = 0; i < arg.n_ssids; i++) { 5600 arg.ssids[i].len = req->ssids[i].ssid_len; 5601 arg.ssids[i].ssid = req->ssids[i].ssid; 5602 } 5603 } else { 5604 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 5605 } 5606 5607 if (req->n_channels) { 5608 arg.n_channels = req->n_channels; 5609 for (i = 0; i < arg.n_channels; i++) 5610 arg.channels[i] = req->channels[i]->center_freq; 5611 } 5612 5613 ret = ath10k_start_scan(ar, &arg); 5614 if (ret) { 5615 ath10k_warn(ar, "failed to start hw scan: %d\n", ret); 5616 spin_lock_bh(&ar->data_lock); 5617 ar->scan.state = ATH10K_SCAN_IDLE; 5618 spin_unlock_bh(&ar->data_lock); 5619 } 5620 5621 /* Add a 200ms margin to account for event/command processing */ 5622 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 5623 msecs_to_jiffies(arg.max_scan_time + 5624 200)); 5625 5626 exit: 5627 mutex_unlock(&ar->conf_mutex); 5628 return ret; 5629 } 5630 5631 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, 5632 struct ieee80211_vif *vif) 5633 { 5634 struct ath10k *ar = hw->priv; 5635 5636 mutex_lock(&ar->conf_mutex); 5637 
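	/* Note: ath10k_scan_abort() must run under conf_mutex (it asserts
	 * this via lockdep), while the scan timeout work is cancelled only
	 * after the mutex is released below, because ath10k_scan_timeout_work()
	 * itself takes conf_mutex; cancelling it synchronously while still
	 * holding the lock could deadlock.
	 */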
ath10k_scan_abort(ar); 5638 mutex_unlock(&ar->conf_mutex); 5639 5640 cancel_delayed_work_sync(&ar->scan.timeout); 5641 } 5642 5643 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, 5644 struct ath10k_vif *arvif, 5645 enum set_key_cmd cmd, 5646 struct ieee80211_key_conf *key) 5647 { 5648 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; 5649 int ret; 5650 5651 /* 10.1 firmware branch requires default key index to be set to group 5652 * key index after installing it. Otherwise FW/HW Txes corrupted 5653 * frames with multi-vif APs. This is not required for main firmware 5654 * branch (e.g. 636). 5655 * 5656 * This is also needed for 636 fw for IBSS-RSN to work more reliably. 5657 * 5658 * FIXME: It remains unknown if this is required for multi-vif STA 5659 * interfaces on 10.1. 5660 */ 5661 5662 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 5663 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 5664 return; 5665 5666 if (key->cipher == WLAN_CIPHER_SUITE_WEP40) 5667 return; 5668 5669 if (key->cipher == WLAN_CIPHER_SUITE_WEP104) 5670 return; 5671 5672 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 5673 return; 5674 5675 if (cmd != SET_KEY) 5676 return; 5677 5678 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5679 key->keyidx); 5680 if (ret) 5681 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", 5682 arvif->vdev_id, ret); 5683 } 5684 5685 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 5686 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 5687 struct ieee80211_key_conf *key) 5688 { 5689 struct ath10k *ar = hw->priv; 5690 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5691 struct ath10k_peer *peer; 5692 const u8 *peer_addr; 5693 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || 5694 key->cipher == WLAN_CIPHER_SUITE_WEP104; 5695 int ret = 0; 5696 int ret2; 5697 u32 flags = 0; 5698 u32 flags2; 5699 5700 /* this one needs to be done in software */ 5701 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) 5702 return 1; 5703 5704 if (arvif->nohwcrypt) 5705 return 1; 5706 5707 if (key->keyidx > WMI_MAX_KEY_INDEX) 5708 return -ENOSPC; 5709 5710 mutex_lock(&ar->conf_mutex); 5711 5712 if (sta) 5713 peer_addr = sta->addr; 5714 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 5715 peer_addr = vif->bss_conf.bssid; 5716 else 5717 peer_addr = vif->addr; 5718 5719 key->hw_key_idx = key->keyidx; 5720 5721 if (is_wep) { 5722 if (cmd == SET_KEY) 5723 arvif->wep_keys[key->keyidx] = key; 5724 else 5725 arvif->wep_keys[key->keyidx] = NULL; 5726 } 5727 5728 /* the peer should not disappear in mid-way (unless FW goes awry) since 5729 * we already hold conf_mutex. we just make sure its there now. 5730 */ 5731 spin_lock_bh(&ar->data_lock); 5732 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 5733 spin_unlock_bh(&ar->data_lock); 5734 5735 if (!peer) { 5736 if (cmd == SET_KEY) { 5737 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", 5738 peer_addr); 5739 ret = -EOPNOTSUPP; 5740 goto exit; 5741 } else { 5742 /* if the peer doesn't exist there is no key to disable anymore */ 5743 goto exit; 5744 } 5745 } 5746 5747 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 5748 flags |= WMI_KEY_PAIRWISE; 5749 else 5750 flags |= WMI_KEY_GROUP; 5751 5752 if (is_wep) { 5753 if (cmd == DISABLE_KEY) 5754 ath10k_clear_vdev_key(arvif, key); 5755 5756 /* When WEP keys are uploaded it's possible that there are 5757 * stations associated already (e.g. when merging) without any 5758 * keys. Static WEP needs an explicit per-peer key upload. 
5759 */ 5760 if (vif->type == NL80211_IFTYPE_ADHOC && 5761 cmd == SET_KEY) 5762 ath10k_mac_vif_update_wep_key(arvif, key); 5763 5764 /* 802.1x never sets the def_wep_key_idx so each set_key() 5765 * call changes default tx key. 5766 * 5767 * Static WEP sets def_wep_key_idx via .set_default_unicast_key 5768 * after first set_key(). 5769 */ 5770 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1) 5771 flags |= WMI_KEY_TX_USAGE; 5772 } 5773 5774 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags); 5775 if (ret) { 5776 WARN_ON(ret > 0); 5777 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", 5778 arvif->vdev_id, peer_addr, ret); 5779 goto exit; 5780 } 5781 5782 /* mac80211 sets static WEP keys as groupwise while firmware requires 5783 * them to be installed twice as both pairwise and groupwise. 5784 */ 5785 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) { 5786 flags2 = flags; 5787 flags2 &= ~WMI_KEY_GROUP; 5788 flags2 |= WMI_KEY_PAIRWISE; 5789 5790 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); 5791 if (ret) { 5792 WARN_ON(ret > 0); 5793 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", 5794 arvif->vdev_id, peer_addr, ret); 5795 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, 5796 peer_addr, flags); 5797 if (ret2) { 5798 WARN_ON(ret2 > 0); 5799 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", 5800 arvif->vdev_id, peer_addr, ret2); 5801 } 5802 goto exit; 5803 } 5804 } 5805 5806 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); 5807 5808 spin_lock_bh(&ar->data_lock); 5809 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 5810 if (peer && cmd == SET_KEY) 5811 peer->keys[key->keyidx] = key; 5812 else if (peer && cmd == DISABLE_KEY) 5813 peer->keys[key->keyidx] = NULL; 5814 else if (peer == NULL) 5815 /* impossible unless FW goes crazy */ 5816 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); 5817 spin_unlock_bh(&ar->data_lock); 5818 5819 exit: 5820 mutex_unlock(&ar->conf_mutex); 5821 return ret; 5822 } 5823 5824 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, 5825 struct ieee80211_vif *vif, 5826 int keyidx) 5827 { 5828 struct ath10k *ar = hw->priv; 5829 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5830 int ret; 5831 5832 mutex_lock(&arvif->ar->conf_mutex); 5833 5834 if (arvif->ar->state != ATH10K_STATE_ON) 5835 goto unlock; 5836 5837 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", 5838 arvif->vdev_id, keyidx); 5839 5840 ret = ath10k_wmi_vdev_set_param(arvif->ar, 5841 arvif->vdev_id, 5842 arvif->ar->wmi.vdev_param->def_keyid, 5843 keyidx); 5844 5845 if (ret) { 5846 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", 5847 arvif->vdev_id, 5848 ret); 5849 goto unlock; 5850 } 5851 5852 arvif->def_wep_key_idx = keyidx; 5853 5854 unlock: 5855 mutex_unlock(&arvif->ar->conf_mutex); 5856 } 5857 5858 static void ath10k_sta_rc_update_wk(struct work_struct *wk) 5859 { 5860 struct ath10k *ar; 5861 struct ath10k_vif *arvif; 5862 struct ath10k_sta *arsta; 5863 struct ieee80211_sta *sta; 5864 struct cfg80211_chan_def def; 5865 enum nl80211_band band; 5866 const u8 *ht_mcs_mask; 5867 const u16 *vht_mcs_mask; 5868 u32 changed, bw, nss, smps; 5869 int err; 5870 5871 arsta = container_of(wk, struct ath10k_sta, update_wk); 5872 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); 5873 arvif = arsta->arvif; 5874 ar = arvif->ar; 5875 5876 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 5877 return; 5878 5879 band = def.chan->band; 
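	/* The per-band HT/VHT MCS masks configured for this vif are read
	 * here and used further below to clamp the recalculated NSS, so a
	 * peer bandwidth/NSS/SMPS update never programs more spatial
	 * streams than the vif's configured bitrate mask allows.
	 */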
5880 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 5881 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 5882 5883 spin_lock_bh(&ar->data_lock); 5884 5885 changed = arsta->changed; 5886 arsta->changed = 0; 5887 5888 bw = arsta->bw; 5889 nss = arsta->nss; 5890 smps = arsta->smps; 5891 5892 spin_unlock_bh(&ar->data_lock); 5893 5894 mutex_lock(&ar->conf_mutex); 5895 5896 nss = max_t(u32, 1, nss); 5897 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask), 5898 ath10k_mac_max_vht_nss(vht_mcs_mask))); 5899 5900 if (changed & IEEE80211_RC_BW_CHANGED) { 5901 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", 5902 sta->addr, bw); 5903 5904 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 5905 WMI_PEER_CHAN_WIDTH, bw); 5906 if (err) 5907 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", 5908 sta->addr, bw, err); 5909 } 5910 5911 if (changed & IEEE80211_RC_NSS_CHANGED) { 5912 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", 5913 sta->addr, nss); 5914 5915 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 5916 WMI_PEER_NSS, nss); 5917 if (err) 5918 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", 5919 sta->addr, nss, err); 5920 } 5921 5922 if (changed & IEEE80211_RC_SMPS_CHANGED) { 5923 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", 5924 sta->addr, smps); 5925 5926 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 5927 WMI_PEER_SMPS_STATE, smps); 5928 if (err) 5929 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", 5930 sta->addr, smps, err); 5931 } 5932 5933 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED || 5934 changed & IEEE80211_RC_NSS_CHANGED) { 5935 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n", 5936 sta->addr); 5937 5938 err = ath10k_station_assoc(ar, arvif->vif, sta, true); 5939 if (err) 5940 ath10k_warn(ar, "failed to reassociate station: %pM\n", 5941 sta->addr); 5942 } 5943 5944 mutex_unlock(&ar->conf_mutex); 5945 } 5946 5947 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif, 5948 struct ieee80211_sta *sta) 5949 { 5950 struct ath10k *ar = arvif->ar; 5951 5952 lockdep_assert_held(&ar->conf_mutex); 5953 5954 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 5955 return 0; 5956 5957 if (ar->num_stations >= ar->max_num_stations) 5958 return -ENOBUFS; 5959 5960 ar->num_stations++; 5961 5962 return 0; 5963 } 5964 5965 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif, 5966 struct ieee80211_sta *sta) 5967 { 5968 struct ath10k *ar = arvif->ar; 5969 5970 lockdep_assert_held(&ar->conf_mutex); 5971 5972 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 5973 return; 5974 5975 ar->num_stations--; 5976 } 5977 5978 struct ath10k_mac_tdls_iter_data { 5979 u32 num_tdls_stations; 5980 struct ieee80211_vif *curr_vif; 5981 }; 5982 5983 static void ath10k_mac_tdls_vif_stations_count_iter(void *data, 5984 struct ieee80211_sta *sta) 5985 { 5986 struct ath10k_mac_tdls_iter_data *iter_data = data; 5987 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5988 struct ieee80211_vif *sta_vif = arsta->arvif->vif; 5989 5990 if (sta->tdls && sta_vif == iter_data->curr_vif) 5991 iter_data->num_tdls_stations++; 5992 } 5993 5994 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, 5995 struct ieee80211_vif *vif) 5996 { 5997 struct ath10k_mac_tdls_iter_data data = {}; 5998 5999 data.curr_vif = vif; 6000 6001 ieee80211_iterate_stations_atomic(hw, 6002 ath10k_mac_tdls_vif_stations_count_iter, 6003 &data); 
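	/* The iterator above counts only TDLS stations belonging to the
	 * given vif; callers use this count to decide when firmware TDLS
	 * support should be enabled for the first peer and disabled again
	 * once the last peer is gone.
	 */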
6004 return data.num_tdls_stations; 6005 } 6006 6007 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac, 6008 struct ieee80211_vif *vif) 6009 { 6010 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6011 int *num_tdls_vifs = data; 6012 6013 if (vif->type != NL80211_IFTYPE_STATION) 6014 return; 6015 6016 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0) 6017 (*num_tdls_vifs)++; 6018 } 6019 6020 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw) 6021 { 6022 int num_tdls_vifs = 0; 6023 6024 ieee80211_iterate_active_interfaces_atomic(hw, 6025 IEEE80211_IFACE_ITER_NORMAL, 6026 ath10k_mac_tdls_vifs_count_iter, 6027 &num_tdls_vifs); 6028 return num_tdls_vifs; 6029 } 6030 6031 static int ath10k_sta_state(struct ieee80211_hw *hw, 6032 struct ieee80211_vif *vif, 6033 struct ieee80211_sta *sta, 6034 enum ieee80211_sta_state old_state, 6035 enum ieee80211_sta_state new_state) 6036 { 6037 struct ath10k *ar = hw->priv; 6038 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6039 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6040 struct ath10k_peer *peer; 6041 int ret = 0; 6042 int i; 6043 6044 if (old_state == IEEE80211_STA_NOTEXIST && 6045 new_state == IEEE80211_STA_NONE) { 6046 memset(arsta, 0, sizeof(*arsta)); 6047 arsta->arvif = arvif; 6048 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); 6049 6050 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6051 ath10k_mac_txq_init(sta->txq[i]); 6052 } 6053 6054 /* cancel must be done outside the mutex to avoid deadlock */ 6055 if ((old_state == IEEE80211_STA_NONE && 6056 new_state == IEEE80211_STA_NOTEXIST)) 6057 cancel_work_sync(&arsta->update_wk); 6058 6059 mutex_lock(&ar->conf_mutex); 6060 6061 if (old_state == IEEE80211_STA_NOTEXIST && 6062 new_state == IEEE80211_STA_NONE) { 6063 /* 6064 * New station addition. 
6065 */ 6066 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT; 6067 u32 num_tdls_stations; 6068 u32 num_tdls_vifs; 6069 6070 ath10k_dbg(ar, ATH10K_DBG_MAC, 6071 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", 6072 arvif->vdev_id, sta->addr, 6073 ar->num_stations + 1, ar->max_num_stations, 6074 ar->num_peers + 1, ar->max_num_peers); 6075 6076 ret = ath10k_mac_inc_num_stations(arvif, sta); 6077 if (ret) { 6078 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", 6079 ar->max_num_stations); 6080 goto exit; 6081 } 6082 6083 if (sta->tdls) 6084 peer_type = WMI_PEER_TYPE_TDLS; 6085 6086 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id, 6087 sta->addr, peer_type); 6088 if (ret) { 6089 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", 6090 sta->addr, arvif->vdev_id, ret); 6091 ath10k_mac_dec_num_stations(arvif, sta); 6092 goto exit; 6093 } 6094 6095 spin_lock_bh(&ar->data_lock); 6096 6097 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 6098 if (!peer) { 6099 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 6100 vif->addr, arvif->vdev_id); 6101 spin_unlock_bh(&ar->data_lock); 6102 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6103 ath10k_mac_dec_num_stations(arvif, sta); 6104 ret = -ENOENT; 6105 goto exit; 6106 } 6107 6108 arsta->peer_id = find_first_bit(peer->peer_ids, 6109 ATH10K_MAX_NUM_PEER_IDS); 6110 6111 spin_unlock_bh(&ar->data_lock); 6112 6113 if (!sta->tdls) 6114 goto exit; 6115 6116 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif); 6117 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw); 6118 6119 if (num_tdls_vifs >= ar->max_num_tdls_vdevs && 6120 num_tdls_stations == 0) { 6121 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n", 6122 arvif->vdev_id, ar->max_num_tdls_vdevs); 6123 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6124 ath10k_mac_dec_num_stations(arvif, sta); 6125 ret = -ENOBUFS; 6126 goto exit; 6127 } 6128 6129 if (num_tdls_stations == 0) { 6130 /* This is the first tdls peer in current vif */ 6131 enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE; 6132 6133 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6134 state); 6135 if (ret) { 6136 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6137 arvif->vdev_id, ret); 6138 ath10k_peer_delete(ar, arvif->vdev_id, 6139 sta->addr); 6140 ath10k_mac_dec_num_stations(arvif, sta); 6141 goto exit; 6142 } 6143 } 6144 6145 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6146 WMI_TDLS_PEER_STATE_PEERING); 6147 if (ret) { 6148 ath10k_warn(ar, 6149 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n", 6150 sta->addr, arvif->vdev_id, ret); 6151 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6152 ath10k_mac_dec_num_stations(arvif, sta); 6153 6154 if (num_tdls_stations != 0) 6155 goto exit; 6156 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6157 WMI_TDLS_DISABLE); 6158 } 6159 } else if ((old_state == IEEE80211_STA_NONE && 6160 new_state == IEEE80211_STA_NOTEXIST)) { 6161 /* 6162 * Existing station deletion. 
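		 * Delete the firmware peer, drop the station count and sweep
		 * ar->peer_map for stale entries that should already have
		 * been cleaned up. If this was the last TDLS peer on the vif,
		 * TDLS is disabled in firmware as well.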
6163 */ 6164 ath10k_dbg(ar, ATH10K_DBG_MAC, 6165 "mac vdev %d peer delete %pM sta %pK (sta gone)\n", 6166 arvif->vdev_id, sta->addr, sta); 6167 6168 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6169 if (ret) 6170 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", 6171 sta->addr, arvif->vdev_id, ret); 6172 6173 ath10k_mac_dec_num_stations(arvif, sta); 6174 6175 spin_lock_bh(&ar->data_lock); 6176 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 6177 peer = ar->peer_map[i]; 6178 if (!peer) 6179 continue; 6180 6181 if (peer->sta == sta) { 6182 ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n", 6183 sta->addr, peer, i, arvif->vdev_id); 6184 peer->sta = NULL; 6185 6186 /* Clean up the peer object as well since we 6187 * must have failed to do this above. 6188 */ 6189 list_del(&peer->list); 6190 ar->peer_map[i] = NULL; 6191 kfree(peer); 6192 ar->num_peers--; 6193 } 6194 } 6195 spin_unlock_bh(&ar->data_lock); 6196 6197 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6198 ath10k_mac_txq_unref(ar, sta->txq[i]); 6199 6200 if (!sta->tdls) 6201 goto exit; 6202 6203 if (ath10k_mac_tdls_vif_stations_count(hw, vif)) 6204 goto exit; 6205 6206 /* This was the last tdls peer in current vif */ 6207 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6208 WMI_TDLS_DISABLE); 6209 if (ret) { 6210 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6211 arvif->vdev_id, ret); 6212 } 6213 } else if (old_state == IEEE80211_STA_AUTH && 6214 new_state == IEEE80211_STA_ASSOC && 6215 (vif->type == NL80211_IFTYPE_AP || 6216 vif->type == NL80211_IFTYPE_MESH_POINT || 6217 vif->type == NL80211_IFTYPE_ADHOC)) { 6218 /* 6219 * New association. 6220 */ 6221 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", 6222 sta->addr); 6223 6224 ret = ath10k_station_assoc(ar, vif, sta, false); 6225 if (ret) 6226 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", 6227 sta->addr, arvif->vdev_id, ret); 6228 } else if (old_state == IEEE80211_STA_ASSOC && 6229 new_state == IEEE80211_STA_AUTHORIZED && 6230 sta->tdls) { 6231 /* 6232 * Tdls station authorized. 6233 */ 6234 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n", 6235 sta->addr); 6236 6237 ret = ath10k_station_assoc(ar, vif, sta, false); 6238 if (ret) { 6239 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n", 6240 sta->addr, arvif->vdev_id, ret); 6241 goto exit; 6242 } 6243 6244 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6245 WMI_TDLS_PEER_STATE_CONNECTED); 6246 if (ret) 6247 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n", 6248 sta->addr, arvif->vdev_id, ret); 6249 } else if (old_state == IEEE80211_STA_ASSOC && 6250 new_state == IEEE80211_STA_AUTH && 6251 (vif->type == NL80211_IFTYPE_AP || 6252 vif->type == NL80211_IFTYPE_MESH_POINT || 6253 vif->type == NL80211_IFTYPE_ADHOC)) { 6254 /* 6255 * Disassociation. 
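		 * Only AP, mesh and IBSS interfaces take this path. The
		 * firmware peer itself is kept until the station finally
		 * drops back to NOTEXIST.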
6256 */ 6257 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", 6258 sta->addr); 6259 6260 ret = ath10k_station_disassoc(ar, vif, sta); 6261 if (ret) 6262 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", 6263 sta->addr, arvif->vdev_id, ret); 6264 } 6265 exit: 6266 mutex_unlock(&ar->conf_mutex); 6267 return ret; 6268 } 6269 6270 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, 6271 u16 ac, bool enable) 6272 { 6273 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6274 struct wmi_sta_uapsd_auto_trig_arg arg = {}; 6275 u32 prio = 0, acc = 0; 6276 u32 value = 0; 6277 int ret = 0; 6278 6279 lockdep_assert_held(&ar->conf_mutex); 6280 6281 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 6282 return 0; 6283 6284 switch (ac) { 6285 case IEEE80211_AC_VO: 6286 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 6287 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 6288 prio = 7; 6289 acc = 3; 6290 break; 6291 case IEEE80211_AC_VI: 6292 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 6293 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 6294 prio = 5; 6295 acc = 2; 6296 break; 6297 case IEEE80211_AC_BE: 6298 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 6299 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 6300 prio = 2; 6301 acc = 1; 6302 break; 6303 case IEEE80211_AC_BK: 6304 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 6305 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 6306 prio = 0; 6307 acc = 0; 6308 break; 6309 } 6310 6311 if (enable) 6312 arvif->u.sta.uapsd |= value; 6313 else 6314 arvif->u.sta.uapsd &= ~value; 6315 6316 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6317 WMI_STA_PS_PARAM_UAPSD, 6318 arvif->u.sta.uapsd); 6319 if (ret) { 6320 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); 6321 goto exit; 6322 } 6323 6324 if (arvif->u.sta.uapsd) 6325 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; 6326 else 6327 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 6328 6329 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6330 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 6331 value); 6332 if (ret) 6333 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); 6334 6335 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 6336 if (ret) { 6337 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 6338 arvif->vdev_id, ret); 6339 return ret; 6340 } 6341 6342 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 6343 if (ret) { 6344 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 6345 arvif->vdev_id, ret); 6346 return ret; 6347 } 6348 6349 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || 6350 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { 6351 /* Only userspace can make an educated decision when to send 6352 * trigger frame. The following effectively disables u-UAPSD 6353 * autotrigger in firmware (which is enabled by default 6354 * provided the autotrigger service is available). 
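		 * A zero service interval combined with the maximum suspend
		 * and delay intervals programmed below keeps the firmware
		 * from ever generating trigger frames on its own.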
6355 */ 6356 6357 arg.wmm_ac = acc; 6358 arg.user_priority = prio; 6359 arg.service_interval = 0; 6360 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6361 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6362 6363 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, 6364 arvif->bssid, &arg, 1); 6365 if (ret) { 6366 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", 6367 ret); 6368 return ret; 6369 } 6370 } 6371 6372 exit: 6373 return ret; 6374 } 6375 6376 static int ath10k_conf_tx(struct ieee80211_hw *hw, 6377 struct ieee80211_vif *vif, u16 ac, 6378 const struct ieee80211_tx_queue_params *params) 6379 { 6380 struct ath10k *ar = hw->priv; 6381 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6382 struct wmi_wmm_params_arg *p = NULL; 6383 int ret; 6384 6385 mutex_lock(&ar->conf_mutex); 6386 6387 switch (ac) { 6388 case IEEE80211_AC_VO: 6389 p = &arvif->wmm_params.ac_vo; 6390 break; 6391 case IEEE80211_AC_VI: 6392 p = &arvif->wmm_params.ac_vi; 6393 break; 6394 case IEEE80211_AC_BE: 6395 p = &arvif->wmm_params.ac_be; 6396 break; 6397 case IEEE80211_AC_BK: 6398 p = &arvif->wmm_params.ac_bk; 6399 break; 6400 } 6401 6402 if (WARN_ON(!p)) { 6403 ret = -EINVAL; 6404 goto exit; 6405 } 6406 6407 p->cwmin = params->cw_min; 6408 p->cwmax = params->cw_max; 6409 p->aifs = params->aifs; 6410 6411 /* 6412 * The channel time duration programmed in the HW is in absolute 6413 * microseconds, while mac80211 gives the txop in units of 6414 * 32 microseconds. 6415 */ 6416 p->txop = params->txop * 32; 6417 6418 if (ar->wmi.ops->gen_vdev_wmm_conf) { 6419 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, 6420 &arvif->wmm_params); 6421 if (ret) { 6422 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", 6423 arvif->vdev_id, ret); 6424 goto exit; 6425 } 6426 } else { 6427 /* This won't work well with multi-interface cases but it's 6428 * better than nothing. 
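		 * The fallback below programs the WMM parameters pdev-wide,
		 * so the most recent conf_tx() call effectively wins for all
		 * vdevs.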
6429 */ 6430 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); 6431 if (ret) { 6432 ath10k_warn(ar, "failed to set wmm params: %d\n", ret); 6433 goto exit; 6434 } 6435 } 6436 6437 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 6438 if (ret) 6439 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); 6440 6441 exit: 6442 mutex_unlock(&ar->conf_mutex); 6443 return ret; 6444 } 6445 6446 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ) 6447 6448 static int ath10k_remain_on_channel(struct ieee80211_hw *hw, 6449 struct ieee80211_vif *vif, 6450 struct ieee80211_channel *chan, 6451 int duration, 6452 enum ieee80211_roc_type type) 6453 { 6454 struct ath10k *ar = hw->priv; 6455 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6456 struct wmi_start_scan_arg arg; 6457 int ret = 0; 6458 u32 scan_time_msec; 6459 6460 mutex_lock(&ar->conf_mutex); 6461 6462 spin_lock_bh(&ar->data_lock); 6463 switch (ar->scan.state) { 6464 case ATH10K_SCAN_IDLE: 6465 reinit_completion(&ar->scan.started); 6466 reinit_completion(&ar->scan.completed); 6467 reinit_completion(&ar->scan.on_channel); 6468 ar->scan.state = ATH10K_SCAN_STARTING; 6469 ar->scan.is_roc = true; 6470 ar->scan.vdev_id = arvif->vdev_id; 6471 ar->scan.roc_freq = chan->center_freq; 6472 ar->scan.roc_notify = true; 6473 ret = 0; 6474 break; 6475 case ATH10K_SCAN_STARTING: 6476 case ATH10K_SCAN_RUNNING: 6477 case ATH10K_SCAN_ABORTING: 6478 ret = -EBUSY; 6479 break; 6480 } 6481 spin_unlock_bh(&ar->data_lock); 6482 6483 if (ret) 6484 goto exit; 6485 6486 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; 6487 6488 memset(&arg, 0, sizeof(arg)); 6489 ath10k_wmi_start_scan_init(ar, &arg); 6490 arg.vdev_id = arvif->vdev_id; 6491 arg.scan_id = ATH10K_SCAN_ID; 6492 arg.n_channels = 1; 6493 arg.channels[0] = chan->center_freq; 6494 arg.dwell_time_active = scan_time_msec; 6495 arg.dwell_time_passive = scan_time_msec; 6496 arg.max_scan_time = scan_time_msec; 6497 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 6498 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; 6499 arg.burst_duration_ms = duration; 6500 6501 ret = ath10k_start_scan(ar, &arg); 6502 if (ret) { 6503 ath10k_warn(ar, "failed to start roc scan: %d\n", ret); 6504 spin_lock_bh(&ar->data_lock); 6505 ar->scan.state = ATH10K_SCAN_IDLE; 6506 spin_unlock_bh(&ar->data_lock); 6507 goto exit; 6508 } 6509 6510 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); 6511 if (ret == 0) { 6512 ath10k_warn(ar, "failed to switch to channel for roc scan\n"); 6513 6514 ret = ath10k_scan_stop(ar); 6515 if (ret) 6516 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 6517 6518 ret = -ETIMEDOUT; 6519 goto exit; 6520 } 6521 6522 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 6523 msecs_to_jiffies(duration)); 6524 6525 ret = 0; 6526 exit: 6527 mutex_unlock(&ar->conf_mutex); 6528 return ret; 6529 } 6530 6531 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) 6532 { 6533 struct ath10k *ar = hw->priv; 6534 6535 mutex_lock(&ar->conf_mutex); 6536 6537 spin_lock_bh(&ar->data_lock); 6538 ar->scan.roc_notify = false; 6539 spin_unlock_bh(&ar->data_lock); 6540 6541 ath10k_scan_abort(ar); 6542 6543 mutex_unlock(&ar->conf_mutex); 6544 6545 cancel_delayed_work_sync(&ar->scan.timeout); 6546 6547 return 0; 6548 } 6549 6550 /* 6551 * Both RTS and Fragmentation threshold are interface-specific 6552 * in ath10k, but device-specific in mac80211. 
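 * The RTS callback below therefore replays the single device-wide value
 * from mac80211 onto every vdev in ar->arvifs, while fragmentation is
 * rejected outright (see ath10k_mac_op_set_frag_threshold).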
6553  */
6554 
6555 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6556 {
6557 	struct ath10k *ar = hw->priv;
6558 	struct ath10k_vif *arvif;
6559 	int ret = 0;
6560 
6561 	mutex_lock(&ar->conf_mutex);
6562 	list_for_each_entry(arvif, &ar->arvifs, list) {
6563 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
6564 			   arvif->vdev_id, value);
6565 
6566 		ret = ath10k_mac_set_rts(arvif, value);
6567 		if (ret) {
6568 			ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
6569 				    arvif->vdev_id, ret);
6570 			break;
6571 		}
6572 	}
6573 	mutex_unlock(&ar->conf_mutex);
6574 
6575 	return ret;
6576 }
6577 
6578 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6579 {
6580 	/* Even though there's a WMI enum for fragmentation threshold no known
6581 	 * firmware actually implements it. Moreover it is not possible to leave
6582 	 * frame fragmentation to mac80211 because firmware clears the "more
6583 	 * fragments" bit in frame control making it impossible for remote
6584 	 * devices to reassemble frames.
6585 	 *
6586 	 * Hence implement a dummy callback just to say fragmentation isn't
6587 	 * supported. This effectively prevents mac80211 from doing frame
6588 	 * fragmentation in software.
6589 	 */
6590 	return -EOPNOTSUPP;
6591 }
6592 
6593 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6594 			 u32 queues, bool drop)
6595 {
6596 	struct ath10k *ar = hw->priv;
6597 	bool skip;
6598 	long time_left;
6599 
6600 	/* mac80211 doesn't care if we really xmit queued frames or not;
6601 	 * we'll collect those frames either way if we stop/delete vdevs
6602 	 */
6603 	if (drop)
6604 		return;
6605 
6606 	mutex_lock(&ar->conf_mutex);
6607 
6608 	if (ar->state == ATH10K_STATE_WEDGED)
6609 		goto skip;
6610 
6611 	time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
6612 			bool empty;
6613 
6614 			spin_lock_bh(&ar->htt.tx_lock);
6615 			empty = (ar->htt.num_pending_tx == 0);
6616 			spin_unlock_bh(&ar->htt.tx_lock);
6617 
6618 			skip = (ar->state == ATH10K_STATE_WEDGED) ||
6619 			       test_bit(ATH10K_FLAG_CRASH_FLUSH,
6620 					&ar->dev_flags);
6621 
6622 			(empty || skip);
6623 		}), ATH10K_FLUSH_TIMEOUT_HZ);
6624 
6625 	if (time_left == 0 || skip)
6626 		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6627 			    skip, ar->state, time_left);
6628 
6629 skip:
6630 	mutex_unlock(&ar->conf_mutex);
6631 }
6632 
6633 /* TODO: Implement this function properly.
6634  * For now it is needed to reply to Probe Requests in IBSS mode.
6635  * Probably we need this information from FW.
6636  */
6637 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
6638 {
6639 	return 1;
6640 }
6641 
6642 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
6643 				     enum ieee80211_reconfig_type reconfig_type)
6644 {
6645 	struct ath10k *ar = hw->priv;
6646 
6647 	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
6648 		return;
6649 
6650 	mutex_lock(&ar->conf_mutex);
6651 
6652 	/* If device failed to restart it will be in a different state, e.g.
6653 * ATH10K_STATE_WEDGED 6654 */ 6655 if (ar->state == ATH10K_STATE_RESTARTED) { 6656 ath10k_info(ar, "device successfully recovered\n"); 6657 ar->state = ATH10K_STATE_ON; 6658 ieee80211_wake_queues(ar->hw); 6659 } 6660 6661 mutex_unlock(&ar->conf_mutex); 6662 } 6663 6664 static void 6665 ath10k_mac_update_bss_chan_survey(struct ath10k *ar, 6666 struct ieee80211_channel *channel) 6667 { 6668 int ret; 6669 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; 6670 6671 lockdep_assert_held(&ar->conf_mutex); 6672 6673 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) || 6674 (ar->rx_channel != channel)) 6675 return; 6676 6677 if (ar->scan.state != ATH10K_SCAN_IDLE) { 6678 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n"); 6679 return; 6680 } 6681 6682 reinit_completion(&ar->bss_survey_done); 6683 6684 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type); 6685 if (ret) { 6686 ath10k_warn(ar, "failed to send pdev bss chan info request\n"); 6687 return; 6688 } 6689 6690 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); 6691 if (!ret) { 6692 ath10k_warn(ar, "bss channel survey timed out\n"); 6693 return; 6694 } 6695 } 6696 6697 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, 6698 struct survey_info *survey) 6699 { 6700 struct ath10k *ar = hw->priv; 6701 struct ieee80211_supported_band *sband; 6702 struct survey_info *ar_survey = &ar->survey[idx]; 6703 int ret = 0; 6704 6705 mutex_lock(&ar->conf_mutex); 6706 6707 sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; 6708 if (sband && idx >= sband->n_channels) { 6709 idx -= sband->n_channels; 6710 sband = NULL; 6711 } 6712 6713 if (!sband) 6714 sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; 6715 6716 if (!sband || idx >= sband->n_channels) { 6717 ret = -ENOENT; 6718 goto exit; 6719 } 6720 6721 ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); 6722 6723 spin_lock_bh(&ar->data_lock); 6724 memcpy(survey, ar_survey, sizeof(*survey)); 6725 spin_unlock_bh(&ar->data_lock); 6726 6727 survey->channel = &sband->channels[idx]; 6728 6729 if (ar->rx_channel == survey->channel) 6730 survey->filled |= SURVEY_INFO_IN_USE; 6731 6732 exit: 6733 mutex_unlock(&ar->conf_mutex); 6734 return ret; 6735 } 6736 6737 static bool 6738 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, 6739 enum nl80211_band band, 6740 const struct cfg80211_bitrate_mask *mask) 6741 { 6742 int num_rates = 0; 6743 int i; 6744 6745 num_rates += hweight32(mask->control[band].legacy); 6746 6747 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) 6748 num_rates += hweight8(mask->control[band].ht_mcs[i]); 6749 6750 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) 6751 num_rates += hweight16(mask->control[band].vht_mcs[i]); 6752 6753 return num_rates == 1; 6754 } 6755 6756 static bool 6757 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, 6758 enum nl80211_band band, 6759 const struct cfg80211_bitrate_mask *mask, 6760 int *nss) 6761 { 6762 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6763 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); 6764 u8 ht_nss_mask = 0; 6765 u8 vht_nss_mask = 0; 6766 int i; 6767 6768 if (mask->control[band].legacy) 6769 return false; 6770 6771 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6772 if (mask->control[band].ht_mcs[i] == 0) 6773 continue; 6774 else if (mask->control[band].ht_mcs[i] == 6775 sband->ht_cap.mcs.rx_mask[i]) 6776 ht_nss_mask |= BIT(i); 6777 else 6778 return false; 6779 
} 6780 6781 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6782 if (mask->control[band].vht_mcs[i] == 0) 6783 continue; 6784 else if (mask->control[band].vht_mcs[i] == 6785 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) 6786 vht_nss_mask |= BIT(i); 6787 else 6788 return false; 6789 } 6790 6791 if (ht_nss_mask != vht_nss_mask) 6792 return false; 6793 6794 if (ht_nss_mask == 0) 6795 return false; 6796 6797 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) 6798 return false; 6799 6800 *nss = fls(ht_nss_mask); 6801 6802 return true; 6803 } 6804 6805 static int 6806 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, 6807 enum nl80211_band band, 6808 const struct cfg80211_bitrate_mask *mask, 6809 u8 *rate, u8 *nss) 6810 { 6811 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6812 int rate_idx; 6813 int i; 6814 u16 bitrate; 6815 u8 preamble; 6816 u8 hw_rate; 6817 6818 if (hweight32(mask->control[band].legacy) == 1) { 6819 rate_idx = ffs(mask->control[band].legacy) - 1; 6820 6821 hw_rate = sband->bitrates[rate_idx].hw_value; 6822 bitrate = sband->bitrates[rate_idx].bitrate; 6823 6824 if (ath10k_mac_bitrate_is_cck(bitrate)) 6825 preamble = WMI_RATE_PREAMBLE_CCK; 6826 else 6827 preamble = WMI_RATE_PREAMBLE_OFDM; 6828 6829 *nss = 1; 6830 *rate = preamble << 6 | 6831 (*nss - 1) << 4 | 6832 hw_rate << 0; 6833 6834 return 0; 6835 } 6836 6837 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6838 if (hweight8(mask->control[band].ht_mcs[i]) == 1) { 6839 *nss = i + 1; 6840 *rate = WMI_RATE_PREAMBLE_HT << 6 | 6841 (*nss - 1) << 4 | 6842 (ffs(mask->control[band].ht_mcs[i]) - 1); 6843 6844 return 0; 6845 } 6846 } 6847 6848 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6849 if (hweight16(mask->control[band].vht_mcs[i]) == 1) { 6850 *nss = i + 1; 6851 *rate = WMI_RATE_PREAMBLE_VHT << 6 | 6852 (*nss - 1) << 4 | 6853 (ffs(mask->control[band].vht_mcs[i]) - 1); 6854 6855 return 0; 6856 } 6857 } 6858 6859 return -EINVAL; 6860 } 6861 6862 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, 6863 u8 rate, u8 nss, u8 sgi, u8 ldpc) 6864 { 6865 struct ath10k *ar = arvif->ar; 6866 u32 vdev_param; 6867 int ret; 6868 6869 lockdep_assert_held(&ar->conf_mutex); 6870 6871 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n", 6872 arvif->vdev_id, rate, nss, sgi); 6873 6874 vdev_param = ar->wmi.vdev_param->fixed_rate; 6875 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate); 6876 if (ret) { 6877 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", 6878 rate, ret); 6879 return ret; 6880 } 6881 6882 vdev_param = ar->wmi.vdev_param->nss; 6883 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss); 6884 if (ret) { 6885 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret); 6886 return ret; 6887 } 6888 6889 vdev_param = ar->wmi.vdev_param->sgi; 6890 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi); 6891 if (ret) { 6892 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret); 6893 return ret; 6894 } 6895 6896 vdev_param = ar->wmi.vdev_param->ldpc; 6897 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc); 6898 if (ret) { 6899 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret); 6900 return ret; 6901 } 6902 6903 return 0; 6904 } 6905 6906 static bool 6907 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, 6908 enum nl80211_band band, 6909 const struct cfg80211_bitrate_mask *mask) 6910 { 6911 int i; 
6912 u16 vht_mcs; 6913 6914 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible 6915 * to express all VHT MCS rate masks. Effectively only the following 6916 * ranges can be used: none, 0-7, 0-8 and 0-9. 6917 */ 6918 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { 6919 vht_mcs = mask->control[band].vht_mcs[i]; 6920 6921 switch (vht_mcs) { 6922 case 0: 6923 case BIT(8) - 1: 6924 case BIT(9) - 1: 6925 case BIT(10) - 1: 6926 break; 6927 default: 6928 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n"); 6929 return false; 6930 } 6931 } 6932 6933 return true; 6934 } 6935 6936 static void ath10k_mac_set_bitrate_mask_iter(void *data, 6937 struct ieee80211_sta *sta) 6938 { 6939 struct ath10k_vif *arvif = data; 6940 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6941 struct ath10k *ar = arvif->ar; 6942 6943 if (arsta->arvif != arvif) 6944 return; 6945 6946 spin_lock_bh(&ar->data_lock); 6947 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 6948 spin_unlock_bh(&ar->data_lock); 6949 6950 ieee80211_queue_work(ar->hw, &arsta->update_wk); 6951 } 6952 6953 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, 6954 struct ieee80211_vif *vif, 6955 const struct cfg80211_bitrate_mask *mask) 6956 { 6957 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6958 struct cfg80211_chan_def def; 6959 struct ath10k *ar = arvif->ar; 6960 enum nl80211_band band; 6961 const u8 *ht_mcs_mask; 6962 const u16 *vht_mcs_mask; 6963 u8 rate; 6964 u8 nss; 6965 u8 sgi; 6966 u8 ldpc; 6967 int single_nss; 6968 int ret; 6969 6970 if (ath10k_mac_vif_chan(vif, &def)) 6971 return -EPERM; 6972 6973 band = def.chan->band; 6974 ht_mcs_mask = mask->control[band].ht_mcs; 6975 vht_mcs_mask = mask->control[band].vht_mcs; 6976 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); 6977 6978 sgi = mask->control[band].gi; 6979 if (sgi == NL80211_TXRATE_FORCE_LGI) 6980 return -EINVAL; 6981 6982 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) { 6983 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, 6984 &rate, &nss); 6985 if (ret) { 6986 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n", 6987 arvif->vdev_id, ret); 6988 return ret; 6989 } 6990 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask, 6991 &single_nss)) { 6992 rate = WMI_FIXED_RATE_NONE; 6993 nss = single_nss; 6994 } else { 6995 rate = WMI_FIXED_RATE_NONE; 6996 nss = min(ar->num_rf_chains, 6997 max(ath10k_mac_max_ht_nss(ht_mcs_mask), 6998 ath10k_mac_max_vht_nss(vht_mcs_mask))); 6999 7000 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask)) 7001 return -EINVAL; 7002 7003 mutex_lock(&ar->conf_mutex); 7004 7005 arvif->bitrate_mask = *mask; 7006 ieee80211_iterate_stations_atomic(ar->hw, 7007 ath10k_mac_set_bitrate_mask_iter, 7008 arvif); 7009 7010 mutex_unlock(&ar->conf_mutex); 7011 } 7012 7013 mutex_lock(&ar->conf_mutex); 7014 7015 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); 7016 if (ret) { 7017 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n", 7018 arvif->vdev_id, ret); 7019 goto exit; 7020 } 7021 7022 exit: 7023 mutex_unlock(&ar->conf_mutex); 7024 7025 return ret; 7026 } 7027 7028 static void ath10k_sta_rc_update(struct ieee80211_hw *hw, 7029 struct ieee80211_vif *vif, 7030 struct ieee80211_sta *sta, 7031 u32 changed) 7032 { 7033 struct ath10k *ar = hw->priv; 7034 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7035 u32 bw, smps; 7036 7037 spin_lock_bh(&ar->data_lock); 7038 7039 ath10k_dbg(ar, ATH10K_DBG_MAC, 7040 "mac sta rc 
update for %pM changed %08x bw %d nss %d smps %d\n", 7041 sta->addr, changed, sta->bandwidth, sta->rx_nss, 7042 sta->smps_mode); 7043 7044 if (changed & IEEE80211_RC_BW_CHANGED) { 7045 bw = WMI_PEER_CHWIDTH_20MHZ; 7046 7047 switch (sta->bandwidth) { 7048 case IEEE80211_STA_RX_BW_20: 7049 bw = WMI_PEER_CHWIDTH_20MHZ; 7050 break; 7051 case IEEE80211_STA_RX_BW_40: 7052 bw = WMI_PEER_CHWIDTH_40MHZ; 7053 break; 7054 case IEEE80211_STA_RX_BW_80: 7055 bw = WMI_PEER_CHWIDTH_80MHZ; 7056 break; 7057 case IEEE80211_STA_RX_BW_160: 7058 bw = WMI_PEER_CHWIDTH_160MHZ; 7059 break; 7060 default: 7061 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", 7062 sta->bandwidth, sta->addr); 7063 bw = WMI_PEER_CHWIDTH_20MHZ; 7064 break; 7065 } 7066 7067 arsta->bw = bw; 7068 } 7069 7070 if (changed & IEEE80211_RC_NSS_CHANGED) 7071 arsta->nss = sta->rx_nss; 7072 7073 if (changed & IEEE80211_RC_SMPS_CHANGED) { 7074 smps = WMI_PEER_SMPS_PS_NONE; 7075 7076 switch (sta->smps_mode) { 7077 case IEEE80211_SMPS_AUTOMATIC: 7078 case IEEE80211_SMPS_OFF: 7079 smps = WMI_PEER_SMPS_PS_NONE; 7080 break; 7081 case IEEE80211_SMPS_STATIC: 7082 smps = WMI_PEER_SMPS_STATIC; 7083 break; 7084 case IEEE80211_SMPS_DYNAMIC: 7085 smps = WMI_PEER_SMPS_DYNAMIC; 7086 break; 7087 case IEEE80211_SMPS_NUM_MODES: 7088 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", 7089 sta->smps_mode, sta->addr); 7090 smps = WMI_PEER_SMPS_PS_NONE; 7091 break; 7092 } 7093 7094 arsta->smps = smps; 7095 } 7096 7097 arsta->changed |= changed; 7098 7099 spin_unlock_bh(&ar->data_lock); 7100 7101 ieee80211_queue_work(hw, &arsta->update_wk); 7102 } 7103 7104 static void ath10k_offset_tsf(struct ieee80211_hw *hw, 7105 struct ieee80211_vif *vif, s64 tsf_offset) 7106 { 7107 struct ath10k *ar = hw->priv; 7108 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7109 u32 offset, vdev_param; 7110 int ret; 7111 7112 if (tsf_offset < 0) { 7113 vdev_param = ar->wmi.vdev_param->dec_tsf; 7114 offset = -tsf_offset; 7115 } else { 7116 vdev_param = ar->wmi.vdev_param->inc_tsf; 7117 offset = tsf_offset; 7118 } 7119 7120 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 7121 vdev_param, offset); 7122 7123 if (ret && ret != -EOPNOTSUPP) 7124 ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n", 7125 offset, vdev_param, ret); 7126 } 7127 7128 static int ath10k_ampdu_action(struct ieee80211_hw *hw, 7129 struct ieee80211_vif *vif, 7130 struct ieee80211_ampdu_params *params) 7131 { 7132 struct ath10k *ar = hw->priv; 7133 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7134 struct ieee80211_sta *sta = params->sta; 7135 enum ieee80211_ampdu_mlme_action action = params->action; 7136 u16 tid = params->tid; 7137 7138 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", 7139 arvif->vdev_id, sta->addr, tid, action); 7140 7141 switch (action) { 7142 case IEEE80211_AMPDU_RX_START: 7143 case IEEE80211_AMPDU_RX_STOP: 7144 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session 7145 * creation/removal. Do we need to verify this? 7146 */ 7147 return 0; 7148 case IEEE80211_AMPDU_TX_START: 7149 case IEEE80211_AMPDU_TX_STOP_CONT: 7150 case IEEE80211_AMPDU_TX_STOP_FLUSH: 7151 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 7152 case IEEE80211_AMPDU_TX_OPERATIONAL: 7153 /* Firmware offloads Tx aggregation entirely so deny mac80211 7154 * Tx aggregation requests. 
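	 * Returning -EOPNOTSUPP here keeps mac80211 from setting up Tx BA
	 * sessions in software; aggregation still happens, just entirely
	 * inside the firmware.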
7155  */
7156 		return -EOPNOTSUPP;
7157 	}
7158 
7159 	return -EINVAL;
7160 }
7161 
7162 static void
7163 ath10k_mac_update_rx_channel(struct ath10k *ar,
7164 			     struct ieee80211_chanctx_conf *ctx,
7165 			     struct ieee80211_vif_chanctx_switch *vifs,
7166 			     int n_vifs)
7167 {
7168 	struct cfg80211_chan_def *def = NULL;
7169 
7170 	/* Both locks are required because ar->rx_channel is modified. This
7171 	 * allows readers to hold either lock.
7172 	 */
7173 	lockdep_assert_held(&ar->conf_mutex);
7174 	lockdep_assert_held(&ar->data_lock);
7175 
7176 	WARN_ON(ctx && vifs);
7177 	WARN_ON(vifs && !n_vifs);
7178 
7179 	/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
7180 	 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
7181 	 * ppdu on Rx may reduce performance on low-end systems. It should be
7182 	 * possible to make tables/hashmaps to speed up the lookup (be wary of
7183 	 * cpu data cache line sizes though), but to keep the initial
7184 	 * implementation simple and less intrusive, fall back to the slow
7185 	 * lookup only for multi-channel cases. Single-channel cases will
7186 	 * continue to use the old channel derivation and thus performance
7187 	 * should not be affected much.
7188 	 */
7189 	rcu_read_lock();
7190 	if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
7191 		ieee80211_iter_chan_contexts_atomic(ar->hw,
7192 					ath10k_mac_get_any_chandef_iter,
7193 					&def);
7194 
7195 		if (vifs)
7196 			def = &vifs[0].new_ctx->def;
7197 
7198 		ar->rx_channel = def->chan;
7199 	} else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
7200 		   (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
7201 		/* During driver restart due to a firmware assert, mac80211
7202 		 * already has a valid channel context for the given radio, so
7203 		 * channel context iteration returns num_chanctx > 0. Fix up
7204 		 * rx_channel while the restart is in progress.
7205 		 */
7206 		ar->rx_channel = ctx->def.chan;
7207 	} else {
7208 		ar->rx_channel = NULL;
7209 	}
7210 	rcu_read_unlock();
7211 }
7212 
7213 static void
7214 ath10k_mac_update_vif_chan(struct ath10k *ar,
7215 			   struct ieee80211_vif_chanctx_switch *vifs,
7216 			   int n_vifs)
7217 {
7218 	struct ath10k_vif *arvif;
7219 	int ret;
7220 	int i;
7221 
7222 	lockdep_assert_held(&ar->conf_mutex);
7223 
7224 	/* First stop the monitor interface. Some FW versions crash if there's
7225 	 * a lone monitor interface.
7226 	 */
7227 	if (ar->monitor_started)
7228 		ath10k_monitor_stop(ar);
7229 
7230 	for (i = 0; i < n_vifs; i++) {
7231 		arvif = (void *)vifs[i].vif->drv_priv;
7232 
7233 		ath10k_dbg(ar, ATH10K_DBG_MAC,
7234 			   "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
7235 			   arvif->vdev_id,
7236 			   vifs[i].old_ctx->def.chan->center_freq,
7237 			   vifs[i].new_ctx->def.chan->center_freq,
7238 			   vifs[i].old_ctx->def.width,
7239 			   vifs[i].new_ctx->def.width);
7240 
7241 		if (WARN_ON(!arvif->is_started))
7242 			continue;
7243 
7244 		if (WARN_ON(!arvif->is_up))
7245 			continue;
7246 
7247 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7248 		if (ret) {
7249 			ath10k_warn(ar, "failed to down vdev %d: %d\n",
7250 				    arvif->vdev_id, ret);
7251 			continue;
7252 		}
7253 	}
7254 
7255 	/* All relevant vdevs are downed and associated channel resources
7256 	 * should be available for the channel switch now.
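	 * What follows is: update ar->rx_channel under data_lock, refresh
	 * the beacon and probe response templates, restart every vdev on
	 * its new channel definition, bring it back up and finally
	 * re-evaluate the monitor vdev state.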
7257 */ 7258 7259 spin_lock_bh(&ar->data_lock); 7260 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs); 7261 spin_unlock_bh(&ar->data_lock); 7262 7263 for (i = 0; i < n_vifs; i++) { 7264 arvif = (void *)vifs[i].vif->drv_priv; 7265 7266 if (WARN_ON(!arvif->is_started)) 7267 continue; 7268 7269 if (WARN_ON(!arvif->is_up)) 7270 continue; 7271 7272 ret = ath10k_mac_setup_bcn_tmpl(arvif); 7273 if (ret) 7274 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 7275 ret); 7276 7277 ret = ath10k_mac_setup_prb_tmpl(arvif); 7278 if (ret) 7279 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 7280 ret); 7281 7282 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); 7283 if (ret) { 7284 ath10k_warn(ar, "failed to restart vdev %d: %d\n", 7285 arvif->vdev_id, ret); 7286 continue; 7287 } 7288 7289 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 7290 arvif->bssid); 7291 if (ret) { 7292 ath10k_warn(ar, "failed to bring vdev up %d: %d\n", 7293 arvif->vdev_id, ret); 7294 continue; 7295 } 7296 } 7297 7298 ath10k_monitor_recalc(ar); 7299 } 7300 7301 static int 7302 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, 7303 struct ieee80211_chanctx_conf *ctx) 7304 { 7305 struct ath10k *ar = hw->priv; 7306 7307 ath10k_dbg(ar, ATH10K_DBG_MAC, 7308 "mac chanctx add freq %hu width %d ptr %pK\n", 7309 ctx->def.chan->center_freq, ctx->def.width, ctx); 7310 7311 mutex_lock(&ar->conf_mutex); 7312 7313 spin_lock_bh(&ar->data_lock); 7314 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0); 7315 spin_unlock_bh(&ar->data_lock); 7316 7317 ath10k_recalc_radar_detection(ar); 7318 ath10k_monitor_recalc(ar); 7319 7320 mutex_unlock(&ar->conf_mutex); 7321 7322 return 0; 7323 } 7324 7325 static void 7326 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 7327 struct ieee80211_chanctx_conf *ctx) 7328 { 7329 struct ath10k *ar = hw->priv; 7330 7331 ath10k_dbg(ar, ATH10K_DBG_MAC, 7332 "mac chanctx remove freq %hu width %d ptr %pK\n", 7333 ctx->def.chan->center_freq, ctx->def.width, ctx); 7334 7335 mutex_lock(&ar->conf_mutex); 7336 7337 spin_lock_bh(&ar->data_lock); 7338 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0); 7339 spin_unlock_bh(&ar->data_lock); 7340 7341 ath10k_recalc_radar_detection(ar); 7342 ath10k_monitor_recalc(ar); 7343 7344 mutex_unlock(&ar->conf_mutex); 7345 } 7346 7347 struct ath10k_mac_change_chanctx_arg { 7348 struct ieee80211_chanctx_conf *ctx; 7349 struct ieee80211_vif_chanctx_switch *vifs; 7350 int n_vifs; 7351 int next_vif; 7352 }; 7353 7354 static void 7355 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, 7356 struct ieee80211_vif *vif) 7357 { 7358 struct ath10k_mac_change_chanctx_arg *arg = data; 7359 7360 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx) 7361 return; 7362 7363 arg->n_vifs++; 7364 } 7365 7366 static void 7367 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac, 7368 struct ieee80211_vif *vif) 7369 { 7370 struct ath10k_mac_change_chanctx_arg *arg = data; 7371 struct ieee80211_chanctx_conf *ctx; 7372 7373 ctx = rcu_access_pointer(vif->chanctx_conf); 7374 if (ctx != arg->ctx) 7375 return; 7376 7377 if (WARN_ON(arg->next_vif == arg->n_vifs)) 7378 return; 7379 7380 arg->vifs[arg->next_vif].vif = vif; 7381 arg->vifs[arg->next_vif].old_ctx = ctx; 7382 arg->vifs[arg->next_vif].new_ctx = ctx; 7383 arg->next_vif++; 7384 } 7385 7386 static void 7387 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, 7388 struct ieee80211_chanctx_conf *ctx, 7389 u32 changed) 7390 { 7391 struct ath10k *ar = hw->priv; 7392 struct ath10k_mac_change_chanctx_arg arg = 
{ .ctx = ctx }; 7393 7394 mutex_lock(&ar->conf_mutex); 7395 7396 ath10k_dbg(ar, ATH10K_DBG_MAC, 7397 "mac chanctx change freq %hu width %d ptr %pK changed %x\n", 7398 ctx->def.chan->center_freq, ctx->def.width, ctx, changed); 7399 7400 /* This shouldn't really happen because channel switching should use 7401 * switch_vif_chanctx(). 7402 */ 7403 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) 7404 goto unlock; 7405 7406 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) { 7407 ieee80211_iterate_active_interfaces_atomic( 7408 hw, 7409 IEEE80211_IFACE_ITER_NORMAL, 7410 ath10k_mac_change_chanctx_cnt_iter, 7411 &arg); 7412 if (arg.n_vifs == 0) 7413 goto radar; 7414 7415 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), 7416 GFP_KERNEL); 7417 if (!arg.vifs) 7418 goto radar; 7419 7420 ieee80211_iterate_active_interfaces_atomic( 7421 hw, 7422 IEEE80211_IFACE_ITER_NORMAL, 7423 ath10k_mac_change_chanctx_fill_iter, 7424 &arg); 7425 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); 7426 kfree(arg.vifs); 7427 } 7428 7429 radar: 7430 ath10k_recalc_radar_detection(ar); 7431 7432 /* FIXME: How to configure Rx chains properly? */ 7433 7434 /* No other actions are actually necessary. Firmware maintains channel 7435 * definitions per vdev internally and there's no host-side channel 7436 * context abstraction to configure, e.g. channel width. 7437 */ 7438 7439 unlock: 7440 mutex_unlock(&ar->conf_mutex); 7441 } 7442 7443 static int 7444 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 7445 struct ieee80211_vif *vif, 7446 struct ieee80211_chanctx_conf *ctx) 7447 { 7448 struct ath10k *ar = hw->priv; 7449 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7450 int ret; 7451 7452 mutex_lock(&ar->conf_mutex); 7453 7454 ath10k_dbg(ar, ATH10K_DBG_MAC, 7455 "mac chanctx assign ptr %pK vdev_id %i\n", 7456 ctx, arvif->vdev_id); 7457 7458 if (WARN_ON(arvif->is_started)) { 7459 mutex_unlock(&ar->conf_mutex); 7460 return -EBUSY; 7461 } 7462 7463 ret = ath10k_vdev_start(arvif, &ctx->def); 7464 if (ret) { 7465 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n", 7466 arvif->vdev_id, vif->addr, 7467 ctx->def.chan->center_freq, ret); 7468 goto err; 7469 } 7470 7471 arvif->is_started = true; 7472 7473 ret = ath10k_mac_vif_setup_ps(arvif); 7474 if (ret) { 7475 ath10k_warn(ar, "failed to update vdev %i ps: %d\n", 7476 arvif->vdev_id, ret); 7477 goto err_stop; 7478 } 7479 7480 if (vif->type == NL80211_IFTYPE_MONITOR) { 7481 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); 7482 if (ret) { 7483 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n", 7484 arvif->vdev_id, ret); 7485 goto err_stop; 7486 } 7487 7488 arvif->is_up = true; 7489 } 7490 7491 if (ath10k_mac_can_set_cts_prot(arvif)) { 7492 ret = ath10k_mac_set_cts_prot(arvif); 7493 if (ret) 7494 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 7495 arvif->vdev_id, ret); 7496 } 7497 7498 mutex_unlock(&ar->conf_mutex); 7499 return 0; 7500 7501 err_stop: 7502 ath10k_vdev_stop(arvif); 7503 arvif->is_started = false; 7504 ath10k_mac_vif_setup_ps(arvif); 7505 7506 err: 7507 mutex_unlock(&ar->conf_mutex); 7508 return ret; 7509 } 7510 7511 static void 7512 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, 7513 struct ieee80211_vif *vif, 7514 struct ieee80211_chanctx_conf *ctx) 7515 { 7516 struct ath10k *ar = hw->priv; 7517 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7518 int ret; 7519 7520 mutex_lock(&ar->conf_mutex); 7521 7522 ath10k_dbg(ar, ATH10K_DBG_MAC, 7523 "mac chanctx unassign ptr %pK vdev_id %i\n", 
7524 ctx, arvif->vdev_id); 7525 7526 WARN_ON(!arvif->is_started); 7527 7528 if (vif->type == NL80211_IFTYPE_MONITOR) { 7529 WARN_ON(!arvif->is_up); 7530 7531 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7532 if (ret) 7533 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n", 7534 arvif->vdev_id, ret); 7535 7536 arvif->is_up = false; 7537 } 7538 7539 ret = ath10k_vdev_stop(arvif); 7540 if (ret) 7541 ath10k_warn(ar, "failed to stop vdev %i: %d\n", 7542 arvif->vdev_id, ret); 7543 7544 arvif->is_started = false; 7545 7546 mutex_unlock(&ar->conf_mutex); 7547 } 7548 7549 static int 7550 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, 7551 struct ieee80211_vif_chanctx_switch *vifs, 7552 int n_vifs, 7553 enum ieee80211_chanctx_switch_mode mode) 7554 { 7555 struct ath10k *ar = hw->priv; 7556 7557 mutex_lock(&ar->conf_mutex); 7558 7559 ath10k_dbg(ar, ATH10K_DBG_MAC, 7560 "mac chanctx switch n_vifs %d mode %d\n", 7561 n_vifs, mode); 7562 ath10k_mac_update_vif_chan(ar, vifs, n_vifs); 7563 7564 mutex_unlock(&ar->conf_mutex); 7565 return 0; 7566 } 7567 7568 static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, 7569 struct ieee80211_vif *vif, 7570 struct ieee80211_sta *sta) 7571 { 7572 struct ath10k *ar; 7573 struct ath10k_peer *peer; 7574 7575 ar = hw->priv; 7576 7577 list_for_each_entry(peer, &ar->peers, list) 7578 if (peer->sta == sta) 7579 peer->removed = true; 7580 } 7581 7582 static const struct ieee80211_ops ath10k_ops = { 7583 .tx = ath10k_mac_op_tx, 7584 .wake_tx_queue = ath10k_mac_op_wake_tx_queue, 7585 .start = ath10k_start, 7586 .stop = ath10k_stop, 7587 .config = ath10k_config, 7588 .add_interface = ath10k_add_interface, 7589 .remove_interface = ath10k_remove_interface, 7590 .configure_filter = ath10k_configure_filter, 7591 .bss_info_changed = ath10k_bss_info_changed, 7592 .set_coverage_class = ath10k_mac_op_set_coverage_class, 7593 .hw_scan = ath10k_hw_scan, 7594 .cancel_hw_scan = ath10k_cancel_hw_scan, 7595 .set_key = ath10k_set_key, 7596 .set_default_unicast_key = ath10k_set_default_unicast_key, 7597 .sta_state = ath10k_sta_state, 7598 .conf_tx = ath10k_conf_tx, 7599 .remain_on_channel = ath10k_remain_on_channel, 7600 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, 7601 .set_rts_threshold = ath10k_set_rts_threshold, 7602 .set_frag_threshold = ath10k_mac_op_set_frag_threshold, 7603 .flush = ath10k_flush, 7604 .tx_last_beacon = ath10k_tx_last_beacon, 7605 .set_antenna = ath10k_set_antenna, 7606 .get_antenna = ath10k_get_antenna, 7607 .reconfig_complete = ath10k_reconfig_complete, 7608 .get_survey = ath10k_get_survey, 7609 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, 7610 .sta_rc_update = ath10k_sta_rc_update, 7611 .offset_tsf = ath10k_offset_tsf, 7612 .ampdu_action = ath10k_ampdu_action, 7613 .get_et_sset_count = ath10k_debug_get_et_sset_count, 7614 .get_et_stats = ath10k_debug_get_et_stats, 7615 .get_et_strings = ath10k_debug_get_et_strings, 7616 .add_chanctx = ath10k_mac_op_add_chanctx, 7617 .remove_chanctx = ath10k_mac_op_remove_chanctx, 7618 .change_chanctx = ath10k_mac_op_change_chanctx, 7619 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx, 7620 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, 7621 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, 7622 .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove, 7623 7624 CFG80211_TESTMODE_CMD(ath10k_tm_cmd) 7625 7626 #ifdef CONFIG_PM 7627 .suspend = ath10k_wow_op_suspend, 7628 .resume = ath10k_wow_op_resume, 7629 #endif 7630 #ifdef CONFIG_MAC80211_DEBUGFS 7631 
.sta_add_debugfs = ath10k_sta_add_debugfs, 7632 .sta_statistics = ath10k_sta_statistics, 7633 #endif 7634 }; 7635 7636 #define CHAN2G(_channel, _freq, _flags) { \ 7637 .band = NL80211_BAND_2GHZ, \ 7638 .hw_value = (_channel), \ 7639 .center_freq = (_freq), \ 7640 .flags = (_flags), \ 7641 .max_antenna_gain = 0, \ 7642 .max_power = 30, \ 7643 } 7644 7645 #define CHAN5G(_channel, _freq, _flags) { \ 7646 .band = NL80211_BAND_5GHZ, \ 7647 .hw_value = (_channel), \ 7648 .center_freq = (_freq), \ 7649 .flags = (_flags), \ 7650 .max_antenna_gain = 0, \ 7651 .max_power = 30, \ 7652 } 7653 7654 static const struct ieee80211_channel ath10k_2ghz_channels[] = { 7655 CHAN2G(1, 2412, 0), 7656 CHAN2G(2, 2417, 0), 7657 CHAN2G(3, 2422, 0), 7658 CHAN2G(4, 2427, 0), 7659 CHAN2G(5, 2432, 0), 7660 CHAN2G(6, 2437, 0), 7661 CHAN2G(7, 2442, 0), 7662 CHAN2G(8, 2447, 0), 7663 CHAN2G(9, 2452, 0), 7664 CHAN2G(10, 2457, 0), 7665 CHAN2G(11, 2462, 0), 7666 CHAN2G(12, 2467, 0), 7667 CHAN2G(13, 2472, 0), 7668 CHAN2G(14, 2484, 0), 7669 }; 7670 7671 static const struct ieee80211_channel ath10k_5ghz_channels[] = { 7672 CHAN5G(36, 5180, 0), 7673 CHAN5G(40, 5200, 0), 7674 CHAN5G(44, 5220, 0), 7675 CHAN5G(48, 5240, 0), 7676 CHAN5G(52, 5260, 0), 7677 CHAN5G(56, 5280, 0), 7678 CHAN5G(60, 5300, 0), 7679 CHAN5G(64, 5320, 0), 7680 CHAN5G(100, 5500, 0), 7681 CHAN5G(104, 5520, 0), 7682 CHAN5G(108, 5540, 0), 7683 CHAN5G(112, 5560, 0), 7684 CHAN5G(116, 5580, 0), 7685 CHAN5G(120, 5600, 0), 7686 CHAN5G(124, 5620, 0), 7687 CHAN5G(128, 5640, 0), 7688 CHAN5G(132, 5660, 0), 7689 CHAN5G(136, 5680, 0), 7690 CHAN5G(140, 5700, 0), 7691 CHAN5G(144, 5720, 0), 7692 CHAN5G(149, 5745, 0), 7693 CHAN5G(153, 5765, 0), 7694 CHAN5G(157, 5785, 0), 7695 CHAN5G(161, 5805, 0), 7696 CHAN5G(165, 5825, 0), 7697 CHAN5G(169, 5845, 0), 7698 }; 7699 7700 struct ath10k *ath10k_mac_create(size_t priv_size) 7701 { 7702 struct ieee80211_hw *hw; 7703 struct ieee80211_ops *ops; 7704 struct ath10k *ar; 7705 7706 ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL); 7707 if (!ops) 7708 return NULL; 7709 7710 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops); 7711 if (!hw) { 7712 kfree(ops); 7713 return NULL; 7714 } 7715 7716 ar = hw->priv; 7717 ar->hw = hw; 7718 ar->ops = ops; 7719 7720 return ar; 7721 } 7722 7723 void ath10k_mac_destroy(struct ath10k *ar) 7724 { 7725 struct ieee80211_ops *ops = ar->ops; 7726 7727 ieee80211_free_hw(ar->hw); 7728 kfree(ops); 7729 } 7730 7731 static const struct ieee80211_iface_limit ath10k_if_limits[] = { 7732 { 7733 .max = 8, 7734 .types = BIT(NL80211_IFTYPE_STATION) 7735 | BIT(NL80211_IFTYPE_P2P_CLIENT) 7736 }, 7737 { 7738 .max = 3, 7739 .types = BIT(NL80211_IFTYPE_P2P_GO) 7740 }, 7741 { 7742 .max = 1, 7743 .types = BIT(NL80211_IFTYPE_P2P_DEVICE) 7744 }, 7745 { 7746 .max = 7, 7747 .types = BIT(NL80211_IFTYPE_AP) 7748 #ifdef CONFIG_MAC80211_MESH 7749 | BIT(NL80211_IFTYPE_MESH_POINT) 7750 #endif 7751 }, 7752 }; 7753 7754 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = { 7755 { 7756 .max = 8, 7757 .types = BIT(NL80211_IFTYPE_AP) 7758 #ifdef CONFIG_MAC80211_MESH 7759 | BIT(NL80211_IFTYPE_MESH_POINT) 7760 #endif 7761 }, 7762 { 7763 .max = 1, 7764 .types = BIT(NL80211_IFTYPE_STATION) 7765 }, 7766 }; 7767 7768 static const struct ieee80211_iface_combination ath10k_if_comb[] = { 7769 { 7770 .limits = ath10k_if_limits, 7771 .n_limits = ARRAY_SIZE(ath10k_if_limits), 7772 .max_interfaces = 8, 7773 .num_different_channels = 1, 7774 .beacon_int_infra_match = true, 7775 }, 7776 }; 7777 7778 static const struct 
ieee80211_iface_combination ath10k_10x_if_comb[] = {
7779 	{
7780 		.limits = ath10k_10x_if_limits,
7781 		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
7782 		.max_interfaces = 8,
7783 		.num_different_channels = 1,
7784 		.beacon_int_infra_match = true,
7785 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7786 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7787 					BIT(NL80211_CHAN_WIDTH_20) |
7788 					BIT(NL80211_CHAN_WIDTH_40) |
7789 					BIT(NL80211_CHAN_WIDTH_80),
7790 #endif
7791 	},
7792 };
7793 
7794 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
7795 	{
7796 		.max = 2,
7797 		.types = BIT(NL80211_IFTYPE_STATION),
7798 	},
7799 	{
7800 		.max = 2,
7801 		.types = BIT(NL80211_IFTYPE_AP) |
7802 #ifdef CONFIG_MAC80211_MESH
7803 			 BIT(NL80211_IFTYPE_MESH_POINT) |
7804 #endif
7805 			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7806 			 BIT(NL80211_IFTYPE_P2P_GO),
7807 	},
7808 	{
7809 		.max = 1,
7810 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7811 	},
7812 };
7813 
7814 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
7815 	{
7816 		.max = 2,
7817 		.types = BIT(NL80211_IFTYPE_STATION),
7818 	},
7819 	{
7820 		.max = 2,
7821 		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
7822 	},
7823 	{
7824 		.max = 1,
7825 		.types = BIT(NL80211_IFTYPE_AP) |
7826 #ifdef CONFIG_MAC80211_MESH
7827 			 BIT(NL80211_IFTYPE_MESH_POINT) |
7828 #endif
7829 			 BIT(NL80211_IFTYPE_P2P_GO),
7830 	},
7831 	{
7832 		.max = 1,
7833 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7834 	},
7835 };
7836 
7837 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
7838 	{
7839 		.max = 1,
7840 		.types = BIT(NL80211_IFTYPE_STATION),
7841 	},
7842 	{
7843 		.max = 1,
7844 		.types = BIT(NL80211_IFTYPE_ADHOC),
7845 	},
7846 };
7847 
7848 /* FIXME: This is not thoroughly tested. These combinations may over- or
7849  * underestimate hw/fw capabilities.
7850 */ 7851 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { 7852 { 7853 .limits = ath10k_tlv_if_limit, 7854 .num_different_channels = 1, 7855 .max_interfaces = 4, 7856 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 7857 }, 7858 { 7859 .limits = ath10k_tlv_if_limit_ibss, 7860 .num_different_channels = 1, 7861 .max_interfaces = 2, 7862 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 7863 }, 7864 }; 7865 7866 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { 7867 { 7868 .limits = ath10k_tlv_if_limit, 7869 .num_different_channels = 1, 7870 .max_interfaces = 4, 7871 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 7872 }, 7873 { 7874 .limits = ath10k_tlv_qcs_if_limit, 7875 .num_different_channels = 2, 7876 .max_interfaces = 4, 7877 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit), 7878 }, 7879 { 7880 .limits = ath10k_tlv_if_limit_ibss, 7881 .num_different_channels = 1, 7882 .max_interfaces = 2, 7883 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 7884 }, 7885 }; 7886 7887 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = { 7888 { 7889 .max = 1, 7890 .types = BIT(NL80211_IFTYPE_STATION), 7891 }, 7892 { 7893 .max = 16, 7894 .types = BIT(NL80211_IFTYPE_AP) 7895 #ifdef CONFIG_MAC80211_MESH 7896 | BIT(NL80211_IFTYPE_MESH_POINT) 7897 #endif 7898 }, 7899 }; 7900 7901 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { 7902 { 7903 .limits = ath10k_10_4_if_limits, 7904 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), 7905 .max_interfaces = 16, 7906 .num_different_channels = 1, 7907 .beacon_int_infra_match = true, 7908 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 7909 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 7910 BIT(NL80211_CHAN_WIDTH_20) | 7911 BIT(NL80211_CHAN_WIDTH_40) | 7912 BIT(NL80211_CHAN_WIDTH_80), 7913 #endif 7914 }, 7915 }; 7916 7917 static void ath10k_get_arvif_iter(void *data, u8 *mac, 7918 struct ieee80211_vif *vif) 7919 { 7920 struct ath10k_vif_iter *arvif_iter = data; 7921 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7922 7923 if (arvif->vdev_id == arvif_iter->vdev_id) 7924 arvif_iter->arvif = arvif; 7925 } 7926 7927 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) 7928 { 7929 struct ath10k_vif_iter arvif_iter; 7930 u32 flags; 7931 7932 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter)); 7933 arvif_iter.vdev_id = vdev_id; 7934 7935 flags = IEEE80211_IFACE_ITER_RESUME_ALL; 7936 ieee80211_iterate_active_interfaces_atomic(ar->hw, 7937 flags, 7938 ath10k_get_arvif_iter, 7939 &arvif_iter); 7940 if (!arvif_iter.arvif) { 7941 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id); 7942 return NULL; 7943 } 7944 7945 return arvif_iter.arvif; 7946 } 7947 7948 #define WRD_METHOD "WRDD" 7949 #define WRDD_WIFI (0x07) 7950 7951 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd) 7952 { 7953 union acpi_object *mcc_pkg; 7954 union acpi_object *domain_type; 7955 union acpi_object *mcc_value; 7956 u32 i; 7957 7958 if (wrdd->type != ACPI_TYPE_PACKAGE || 7959 wrdd->package.count < 2 || 7960 wrdd->package.elements[0].type != ACPI_TYPE_INTEGER || 7961 wrdd->package.elements[0].integer.value != 0) { 7962 ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n"); 7963 return 0; 7964 } 7965 7966 for (i = 1; i < wrdd->package.count; ++i) { 7967 mcc_pkg = &wrdd->package.elements[i]; 7968 7969 if (mcc_pkg->type != ACPI_TYPE_PACKAGE) 7970 continue; 7971 if (mcc_pkg->package.count < 2) 7972 continue; 7973 if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || 7974 
mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) 7975 continue; 7976 7977 domain_type = &mcc_pkg->package.elements[0]; 7978 if (domain_type->integer.value != WRDD_WIFI) 7979 continue; 7980 7981 mcc_value = &mcc_pkg->package.elements[1]; 7982 return mcc_value->integer.value; 7983 } 7984 return 0; 7985 } 7986 7987 static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd) 7988 { 7989 struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev); 7990 acpi_handle root_handle; 7991 acpi_handle handle; 7992 struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL}; 7993 acpi_status status; 7994 u32 alpha2_code; 7995 char alpha2[3]; 7996 7997 root_handle = ACPI_HANDLE(&pdev->dev); 7998 if (!root_handle) 7999 return -EOPNOTSUPP; 8000 8001 status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle); 8002 if (ACPI_FAILURE(status)) { 8003 ath10k_dbg(ar, ATH10K_DBG_BOOT, 8004 "failed to get wrd method %d\n", status); 8005 return -EIO; 8006 } 8007 8008 status = acpi_evaluate_object(handle, NULL, NULL, &wrdd); 8009 if (ACPI_FAILURE(status)) { 8010 ath10k_dbg(ar, ATH10K_DBG_BOOT, 8011 "failed to call wrdc %d\n", status); 8012 return -EIO; 8013 } 8014 8015 alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer); 8016 kfree(wrdd.pointer); 8017 if (!alpha2_code) 8018 return -EIO; 8019 8020 alpha2[0] = (alpha2_code >> 8) & 0xff; 8021 alpha2[1] = (alpha2_code >> 0) & 0xff; 8022 alpha2[2] = '\0'; 8023 8024 ath10k_dbg(ar, ATH10K_DBG_BOOT, 8025 "regulatory hint from WRDD (alpha2-code): %s\n", alpha2); 8026 8027 *rd = ath_regd_find_country_by_name(alpha2); 8028 if (*rd == 0xffff) 8029 return -EIO; 8030 8031 *rd |= COUNTRY_ERD_FLAG; 8032 return 0; 8033 } 8034 8035 static int ath10k_mac_init_rd(struct ath10k *ar) 8036 { 8037 int ret; 8038 u16 rd; 8039 8040 ret = ath10k_mac_get_wrdd_regulatory(ar, &rd); 8041 if (ret) { 8042 ath10k_dbg(ar, ATH10K_DBG_BOOT, 8043 "fallback to eeprom programmed regulatory settings\n"); 8044 rd = ar->hw_eeprom_rd; 8045 } 8046 8047 ar->ath_common.regulatory.current_rd = rd; 8048 return 0; 8049 } 8050 8051 int ath10k_mac_register(struct ath10k *ar) 8052 { 8053 static const u32 cipher_suites[] = { 8054 WLAN_CIPHER_SUITE_WEP40, 8055 WLAN_CIPHER_SUITE_WEP104, 8056 WLAN_CIPHER_SUITE_TKIP, 8057 WLAN_CIPHER_SUITE_CCMP, 8058 WLAN_CIPHER_SUITE_AES_CMAC, 8059 }; 8060 struct ieee80211_supported_band *band; 8061 void *channels; 8062 int ret; 8063 8064 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); 8065 8066 SET_IEEE80211_DEV(ar->hw, ar->dev); 8067 8068 BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) + 8069 ARRAY_SIZE(ath10k_5ghz_channels)) != 8070 ATH10K_NUM_CHANS); 8071 8072 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 8073 channels = kmemdup(ath10k_2ghz_channels, 8074 sizeof(ath10k_2ghz_channels), 8075 GFP_KERNEL); 8076 if (!channels) { 8077 ret = -ENOMEM; 8078 goto err_free; 8079 } 8080 8081 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 8082 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); 8083 band->channels = channels; 8084 8085 if (ar->hw_params.cck_rate_map_rev2) { 8086 band->n_bitrates = ath10k_g_rates_rev2_size; 8087 band->bitrates = ath10k_g_rates_rev2; 8088 } else { 8089 band->n_bitrates = ath10k_g_rates_size; 8090 band->bitrates = ath10k_g_rates; 8091 } 8092 8093 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; 8094 } 8095 8096 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 8097 channels = kmemdup(ath10k_5ghz_channels, 8098 sizeof(ath10k_5ghz_channels), 8099 GFP_KERNEL); 8100 if (!channels) { 8101 ret = -ENOMEM; 8102 goto err_free; 8103 } 8104 8105 

static int ath10k_mac_init_rd(struct ath10k *ar)
{
	int ret;
	u16 rd;

	ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
	if (ret) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "fallback to eeprom programmed regulatory settings\n");
		rd = ar->hw_eeprom_rd;
	}

	ar->ath_common.regulatory.current_rd = rd;
	return 0;
}

int ath10k_mac_register(struct ath10k *ar)
{
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WLAN_CIPHER_SUITE_AES_CMAC,
	};
	struct ieee80211_supported_band *band;
	void *channels;
	int ret;

	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);

	SET_IEEE80211_DEV(ar->hw, ar->dev);

	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
		     ATH10K_NUM_CHANS);

	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
		channels = kmemdup(ath10k_2ghz_channels,
				   sizeof(ath10k_2ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
		band->channels = channels;

		if (ar->hw_params.cck_rate_map_rev2) {
			band->n_bitrates = ath10k_g_rates_rev2_size;
			band->bitrates = ath10k_g_rates_rev2;
		} else {
			band->n_bitrates = ath10k_g_rates_size;
			band->bitrates = ath10k_g_rates;
		}

		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
	}

	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
		channels = kmemdup(ath10k_5ghz_channels,
				   sizeof(ath10k_5ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
		band->channels = channels;
		band->n_bitrates = ath10k_a_rates_size;
		band->bitrates = ath10k_a_rates;
		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
	}

	ath10k_mac_setup_ht_vht_cap(ar);

	ar->hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;

	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
		ar->hw->wiphy->interface_modes |=
			BIT(NL80211_IFTYPE_P2P_DEVICE) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO);

	ieee80211_hw_set(ar->hw, SIGNAL_DBM);
	ieee80211_hw_set(ar->hw, SUPPORTS_PS);
	ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(ar->hw, AP_LINK_PS);
	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);

	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;

	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
	}

	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;

	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
	ar->hw->txq_data_size = sizeof(struct ath10k_txq);

	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;

	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;

		/* Firmware delivers WPS/P2P Probe Request frames to the
		 * driver so that userspace (e.g. wpa_supplicant/hostapd) can
		 * generate correct Probe Responses; advertising the offload
		 * here is therefore something of a hack.
		 */
		ar->hw->wiphy->probe_resp_offload |=
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
	}

	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;

	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
	ar->hw->wiphy->max_remain_on_channel_duration = 5000;

	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
				   NL80211_FEATURE_AP_SCAN;

	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;

	ret = ath10k_wow_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to init wow: %d\n", ret);
		goto err_free;
	}

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);

	/* On low-latency (LL) hardware the tx queues are managed entirely by
	 * the firmware, so only advertise to mac80211 that the full set of
	 * queues can be used.
	 */
	ar->hw->queues = IEEE80211_MAX_QUEUES;

	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
	 * something that vdev_ids can't reach so that we don't stop the queue
	 * accidentally.
	 */
	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
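
	/* Illustration (not driver code): with IEEE80211_MAX_QUEUES defined
	 * as 16 in current mac80211, a vif whose vdev_id is N transmits on
	 * hw queue N, while off-channel frames always use queue 15
	 * (IEEE80211_MAX_QUEUES - 1), a value vdev_ids are not expected to
	 * reach.
	 */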

	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_if_comb);
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_tlv_qcs_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
		} else {
			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_if_comb);
		}
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
	case ATH10K_FW_WMI_OP_VERSION_10_2:
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10x_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10_4_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		WARN_ON(1);
		ret = -EINVAL;
		goto err_free;
	}

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ar->hw->netdev_features = NETIF_F_HW_CSUM;

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
		/* Init ath dfs pattern detector */
		ar->ath_common.debug_mask = ATH_DBG_DFS;
		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
							     NL80211_DFS_UNSET);

		if (!ar->dfs_detector)
			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
	}

	/* Current wake_tx_queue implementation imposes a significant
	 * performance penalty in some setups. The tx scheduling code needs
	 * more work anyway so disable the wake_tx_queue unless firmware
	 * supports the pull-push mechanism.
	 */
	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		ar->ops->wake_tx_queue = NULL;

	ret = ath10k_mac_init_rd(ar);
	if (ret) {
		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	/* Disable set_coverage_class for chipsets that do not support it. */
	if (!ar->hw_params.hw_ops->set_coverage_class)
		ar->ops->set_coverage_class = NULL;

	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
			    ath10k_reg_notifier);
	if (ret) {
		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
		goto err_dfs_detector_exit;
	}

	ar->hw->wiphy->cipher_suites = cipher_suites;
	ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ret = ieee80211_register_hw(ar->hw);
	if (ret) {
		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
		ret = regulatory_hint(ar->hw->wiphy,
				      ar->ath_common.regulatory.alpha2);
		if (ret)
			goto err_unregister;
	}

	return 0;

err_unregister:
	ieee80211_unregister_hw(ar->hw);

err_dfs_detector_exit:
	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

err_free:
	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
	return ret;
}
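
/* Caller sketch (hypothetical, for illustration only): the core code is
 * expected to pair ath10k_mac_register() with ath10k_mac_unregister() and to
 * treat a registration failure as fatal for device bring-up. The label and
 * message below are illustrative, not the actual core.c code.
 *
 *	ret = ath10k_mac_register(ar);
 *	if (ret) {
 *		ath10k_err(ar, "could not register to mac80211: %d\n", ret);
 *		goto err_release_fw;
 *	}
 *	...
 *	ath10k_mac_unregister(ar);	// on teardown
 */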

void ath10k_mac_unregister(struct ath10k *ar)
{
	ieee80211_unregister_hw(ar->hw);

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
}